hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4547f32ba2dd53a8a0e71fc993cc07d7d1a58ed | 2,384 | py | Python | python/handwritten_baseline/pipeline/model/feature_extr/debug.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 10 | 2020-11-28T05:01:04.000Z | 2021-12-21T19:34:00.000Z | python/handwritten_baseline/pipeline/model/feature_extr/debug.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 1 | 2022-03-12T07:20:39.000Z | 2022-03-16T05:11:38.000Z | python/handwritten_baseline/pipeline/model/feature_extr/debug.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 1 | 2021-12-21T19:34:08.000Z | 2021-12-21T19:34:08.000Z | import pprint
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
from overrides import overrides
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import DEBUG_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
| 43.345455 | 127 | 0.684983 |
f454b6de1f5f5d7ea1e9cef6495a08d3a75a9606 | 1,253 | py | Python | kunquat/tracker/errorbase.py | cyberixae/kunquat | 06ae72b2c1519686cc510ce887d9d45a5c3fa3a3 | [
"CC0-1.0"
] | null | null | null | kunquat/tracker/errorbase.py | cyberixae/kunquat | 06ae72b2c1519686cc510ce887d9d45a5c3fa3a3 | [
"CC0-1.0"
] | null | null | null | kunquat/tracker/errorbase.py | cyberixae/kunquat | 06ae72b2c1519686cc510ce887d9d45a5c3fa3a3 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Author: Tomi Jylhä-Ollila, Finland 2014
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from __future__ import print_function
import sys
import traceback
import os
_ERROR_BRIEF = 'Kunquat Tracker encountered an error.'
_SUBMIT_INFO = \
'''Please submit an issue to Kunquat issue tracker at
https://github.com/kunquat/kunquat/issues with the following
information attached.'''
| 24.096154 | 71 | 0.73344 |
f456221256fc52688ca188318ed96a52141502e3 | 4,311 | py | Python | venv/lib/python3.5/site-packages/igraph/test/atlas.py | dtklinh/Protein-Rigid-Domains-Estimation | a27152ef5437eb87ee31c317091356c4787f82a4 | [
"MIT"
] | 2 | 2021-03-04T16:57:06.000Z | 2021-08-11T01:42:29.000Z | venv/lib/python3.5/site-packages/igraph/test/atlas.py | dtklinh/Protein-Rigid-Domains-Estimation | a27152ef5437eb87ee31c317091356c4787f82a4 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/igraph/test/atlas.py | dtklinh/Protein-Rigid-Domains-Estimation | a27152ef5437eb87ee31c317091356c4787f82a4 | [
"MIT"
] | null | null | null |
import warnings
import unittest
from igraph import *
def suite():
atlas_suite = unittest.makeSuite(GraphAtlasTests)
isoclass_suite = unittest.makeSuite(IsoclassTests)
return unittest.TestSuite([atlas_suite, isoclass_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
| 38.491071 | 118 | 0.52424 |
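Note on the igraph test row above: `unittest.makeSuite` has long been deprecated in favour of the loader API. A sketch of an equivalent `suite()`; `GraphAtlasTests` and `IsoclassTests` are defined in the full module but truncated out of this preview:

```python
def suite():
    loader = unittest.TestLoader()
    return unittest.TestSuite([
        loader.loadTestsFromTestCase(GraphAtlasTests),
        loader.loadTestsFromTestCase(IsoclassTests),
    ])
```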
f4564217958b77537d1072c7c3fc29f0c202d7e9 | 3,509 | py | Python | pycspr/types/cl.py | momipsl/pycspr | 82c1ca003525a3d205d2aa3b7da5d1ecd275e9b5 | [
"Apache-2.0"
] | 2 | 2021-04-14T13:49:20.000Z | 2021-07-06T22:07:02.000Z | pycspr/types/cl.py | momipsl/pycspr | 82c1ca003525a3d205d2aa3b7da5d1ecd275e9b5 | [
"Apache-2.0"
] | null | null | null | pycspr/types/cl.py | momipsl/pycspr | 82c1ca003525a3d205d2aa3b7da5d1ecd275e9b5 | [
"Apache-2.0"
] | 1 | 2021-04-15T12:52:42.000Z | 2021-04-15T12:52:42.000Z | import dataclasses
import enum
# Set of types considered to be simple.
CL_TYPES_SIMPLE = {
CLType.BOOL,
CLType.I32,
CLType.I64,
CLType.KEY,
CLType.PUBLIC_KEY,
CLType.STRING,
CLType.U8,
CLType.U32,
CLType.U64,
CLType.U128,
CLType.U256,
CLType.U512,
CLType.UNIT,
CLType.UREF,
}
| 21.396341 | 85 | 0.667427 |
f456c15808160c57f2b68cffa03b0cdb9fe05135 | 1,668 | py | Python | google/cloud/aiplatform_v1/types/env_var.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1/types/env_var.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | 1 | 2021-02-12T23:56:38.000Z | 2021-02-12T23:56:38.000Z | google/cloud/aiplatform_v1/types/env_var.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"EnvVar",},)
__all__ = tuple(sorted(__protobuf__.manifest))
| 34.040816 | 88 | 0.688249 |
f45776dc27791b0b8c76dabaff8a799c99fa956b | 3,119 | py | Python | tools/borplay/packlib.py | MrCoolSpan/openbor | 846cfeb924906849c8a11e76c442e47286b707ea | [
"BSD-3-Clause"
] | 25 | 2015-03-10T06:14:12.000Z | 2021-04-28T03:42:32.000Z | tools/borplay/packlib.py | MrCoolSpan/openbor | 846cfeb924906849c8a11e76c442e47286b707ea | [
"BSD-3-Clause"
] | 2 | 2019-09-29T11:35:30.000Z | 2021-02-08T11:10:32.000Z | tools/borplay/packlib.py | MrCoolSpan/openbor | 846cfeb924906849c8a11e76c442e47286b707ea | [
"BSD-3-Clause"
] | 18 | 2015-03-14T02:43:26.000Z | 2020-07-24T02:08:58.000Z | # Copyright (c) 2009 Bryan Cain ("Plombo")
# Class and functions to read .PAK files.
import struct
from cStringIO import StringIO
def get_file(pak, borfile):
    '''Avoids the need to use PackFileReader directly when you only want to get
    one file, as in borplay and bor2wav. Returns a file-like object.'''
rdr = PackFileReader(pak)
if ('/' not in borfile) and ('\\' not in borfile): # only the filename is given; search for the file
return rdr.find_file(borfile)
else: # full path given
return rdr.read_file(borfile)
# For testing
if __name__ == '__main__':
rdr = PackFileReader('K:/BOR/OpenBOR/Paks/BOR.PAK')
#keys = rdr.files.keys(); keys.sort()
#print '\n'.join(keys)
#print rdr.read_file('data/chars/yamazaki/yamazaki.txt').read()
#print rdr.find_file('yamazaki.txt').read()
rdr.list_music_files()
| 29.990385 | 102 | 0.655338 |
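The `get_file` helper in the packlib row above depends on a `PackFileReader` class whose body is truncated out of this preview. A hedged usage sketch, reusing the path and filename from the module's own `__main__` block:

```python
# get_file() returns a file-like object per its docstring; a bare filename
# triggers the find_file() search, a full path goes through read_file().
f = get_file('K:/BOR/OpenBOR/Paks/BOR.PAK', 'yamazaki.txt')
print(f.read())
```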
f4584cfc0d782e8ed0b2d30fb8fdd386a63762a3 | 1,017 | py | Python | artascope/src/web/app.py | magus0219/icloud-photo-downloader | 6334530d971cf61089d031de99a38f204c201837 | [
"MIT"
] | 3 | 2020-09-24T16:19:28.000Z | 2022-02-09T21:10:11.000Z | artascope/src/web/app.py | magus0219/icloud-photo-downloader | 6334530d971cf61089d031de99a38f204c201837 | [
"MIT"
] | null | null | null | artascope/src/web/app.py | magus0219/icloud-photo-downloader | 6334530d971cf61089d031de99a38f204c201837 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[magus0219@gmail.com] on 2020/3/23
from types import FunctionType
from flask import (
Flask,
redirect,
url_for,
)
import artascope.src.web.lib.filter as module_filter
from artascope.src.web.lib.content_processor import inject_version
| 23.113636 | 66 | 0.67355 |
f4584d9b2545719be7d26d0474bfda0fc16fc902 | 2,251 | py | Python | tests/common/test_op/scatter_nd.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | 1 | 2020-08-31T02:43:43.000Z | 2020-08-31T02:43:43.000Z | tests/common/test_op/scatter_nd.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_op/scatter_nd.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: scatter_nd"""
import akg.tvm
from akg.utils import validation_check as vc_util
def scatter_nd(indices, updates, shape):
"""
Scatters input tensor updates to a new tensor according to indices.
Args:
indices(akg.tvm.Tensor): Tensor of type int32.
updates(akg.tvm.Tensor): Tensor of type float16, float32, int32.
shape(list, tuple): Specifies the shape of output tensor.
Returns:
Scattered tensor with same type as input tensor updates and shape specified by parameter shape.
"""
# check shapes dtype
indices_shape = [x.value for x in indices.shape]
data_shape = [x.value for x in updates.shape]
vc_util.check_shape(indices_shape)
vc_util.check_shape(data_shape)
indices_dtype = indices.dtype
if not indices_dtype in "int32":
raise TypeError("indices_dtype only support int32 while dtype is %s" % indices_dtype)
dtype = updates.dtype
support_list = {"float16", "float32", "int32"}
if not (dtype in support_list):
raise TypeError("scatter_nd only support %s while dtype is %s" % (",".join(support_list), dtype))
n = indices.shape[0].value
reducible = akg.tvm.compute([n] + list(shape), lambda *i: pick(i[0], i[1], *i[2:]), name="reduc")
k = akg.tvm.reduce_axis((0, n))
res = akg.tvm.compute(shape, lambda *i: akg.tvm.sum(reducible[(k,) + i], axis=k))
return res
| 37.516667 | 105 | 0.676588 |
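The `scatter_nd` row above builds its output with an akg/TVM reduction over a `pick` helper that the preview truncates. A minimal NumPy sketch of the semantics the docstring describes (an assumption of TensorFlow-style scatter_nd behaviour, where duplicate indices accumulate):

```python
import numpy as np

def scatter_nd_ref(indices, updates, shape):
    """Reference semantics: sum `updates` into a zero tensor at `indices`."""
    out = np.zeros(shape, dtype=updates.dtype)
    for idx, upd in zip(indices, updates):
        out[tuple(idx)] += upd  # duplicates accumulate, matching the sum-reduce
    return out

# scatter_nd_ref(np.array([[0], [2], [0]]), np.array([1, 2, 3]), (4,))
# -> array([4, 0, 2, 0])
```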
f458c5e01d9e2170ec0f7c2f7180c5b33bb75bc9 | 16,446 | py | Python | spc/backend_utils.py | adamnew123456/spc | 8809d1817f66cf8266f145aa0c2474b32dc1087a | [
"MIT"
] | 1 | 2017-10-15T19:55:48.000Z | 2017-10-15T19:55:48.000Z | spc/backend_utils.py | adamnew123456/spc | 8809d1817f66cf8266f145aa0c2474b32dc1087a | [
"MIT"
] | null | null | null | spc/backend_utils.py | adamnew123456/spc | 8809d1817f66cf8266f145aa0c2474b32dc1087a | [
"MIT"
] | null | null | null | """
Utility functions and classes shared by multiple backends
"""
from collections import namedtuple
import logging
from . import symbols
from . import types
LOGGER = logging.getLogger('spc.backend_utils')
# NameContexts encapsulate both the function stack (which holds values) and
# the symbol table context (which binds them)
NameContext = namedtuple('NameContext', ['symbol_ctx', 'func_stack'])
# While loops are identified by two labels - the start label, for re-running
# the condition, and the end label, for exiting when the condition is false
WhileLabels = namedtuple('WhileLabels', ['cond', 'exit'])
# If conditions are identified by two labels - the else label, for when
# the condition is false (to skip the then block) and the end label, for
# when the condition is true (to skip the else block)
IfLabels = namedtuple('IfLabels', ['else_body', 'end'])
# Switch conditionals are handled sort of like if conditionals:
#
# (switch |
# (case T1 B1) | jump-if-not T1, l1prime; ...; jump l4; l1prime:
# (case T2 B2) | jump-if-not T2, l2prime; ...; jump l4; l2prime:
# (else B3)) | ...
# | l4:
def _type_is_defined(self, name):
"""
Returns True if the given type is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_types and
self.ctx_types.is_visible(name))
def _make_func_stack(self):
raise NotImplementedError
def _push_context(self):
"""
Pushes a new binding context.
"""
old_context = self.current_context
self.parent_contexts.append(old_context)
self.current_context = NameContext(
self.current_context.symbol_ctx.enter(),
self._make_func_stack())
def _pop_context(self):
"""
Loads the previous binding context.
"""
self.current_context = self.parent_contexts.pop()
def _resolve_if_type_name(self, name):
"""
Resolves a type name into a concrete type.
"""
try:
return types.resolve_name(name, self.ctx_types)
except PermissionError as exn:
self.error(self.line, self.col,
'Cannot resolve hidden type "{}"', str(exn))
except RecursionError:
self.error(self.line, self.col,
'Type aliases too deep, when resolving "{}"', name)
except KeyError as exn:
self.error(self.line, self.col,
'Invalid type "{}"', str(exn))
def _verify_types(self):
"""
Verifies all the types across all this current context's symbols.
"""
self.verify_context.verify(self)
self.verify_context = VerificationContext()
class ThirtyTwoMixin:
"""
Defines some information about type sizes and alignment which 32-bit
platforms have in common.
Depends upon the user of this mixin to inherit from ContextMixin.
"""
def _type_alignment(self, type_obj):
"""
Returns alignment of the given type (1 for byte, 4 for word, etc.)
"""
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
return self._type_alignment(type_obj.type)
elif isinstance(type_obj, types.Struct):
# The alignment only concerns the first element of the struct -
# the struct's internal alignment doesn't come into play
#
# Also, an OrderdDict's fields are not iterable, for whatever reason
struct_types = list(type_obj.fields.values())
return self._type_alignment(struct_types[0])
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
def _type_size(self, type_obj, depth=0):
"""
Returns the size of a type object in bytes.
"""
MAX_DEPTH = 100
if depth >= MAX_DEPTH:
self.error(self.line, self.col,
"Type nested too deeply - potential self-referential type")
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
# To avoid wasting space on the last element, this pads all the
# elements but the last
base_size = self._type_size(type_obj.type, depth + 1)
return self._array_offset(type_obj, type_obj.count - 1) + base_size
elif isinstance(type_obj, types.Struct):
last_field = list(type_obj.fields)[-1]
last_field_type = type_obj.fields[last_field]
last_field_offset = self._field_offset(type_obj, last_field)
return last_field_offset + self._type_size(last_field_type, depth + 1)
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
| 34.2625 | 93 | 0.623921 |
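`_type_size` in the backend_utils row above pads every array element except the last to the element alignment; the `_array_offset` and `_field_offset` helpers it calls are truncated out of the preview. A hedged sketch of the usual round-up math such helpers implement:

```python
def _align(offset, alignment):
    """Assumed helper: round `offset` up to the next multiple of `alignment`."""
    return (offset + alignment - 1) // alignment * alignment

# E.g. an array of 4 elements, each 5 bytes with 4-byte alignment: the first
# three are padded to _align(5, 4) == 8 bytes, the last is not, giving
# 8 * 3 + 5 == 29 bytes, the "pad all but the last element" rule above.
```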
f45a0afb4a750100d6616bb61de6015d31db9869 | 25 | py | Python | heareval/__init__.py | neuralaudio/hear-eval-kit | f92119592954544dfb417f8e9aea21eadb4a65d0 | [
"Apache-2.0"
] | 24 | 2021-07-26T21:21:46.000Z | 2022-03-30T08:10:13.000Z | heareval/__init__.py | neuralaudio/hear-eval-kit | f92119592954544dfb417f8e9aea21eadb4a65d0 | [
"Apache-2.0"
] | 196 | 2021-07-26T17:58:23.000Z | 2022-01-26T17:40:25.000Z | heareval/__init__.py | neuralaudio/hear-eval-kit | f92119592954544dfb417f8e9aea21eadb4a65d0 | [
"Apache-2.0"
] | 3 | 2021-08-10T13:12:53.000Z | 2022-03-19T05:00:50.000Z | __version__ = "2021.0.6"
| 12.5 | 24 | 0.68 |
f45c36a2a7c87d236af65ffb124e4f77205e7048 | 744 | py | Python | recommender_engine/similarity_measure/__init__.py | tranlyvu/recommender | 4985c355d54ee22ba48f4891077fd7e12bd21b47 | [
"Apache-2.0"
] | 8 | 2019-03-14T07:53:51.000Z | 2021-06-22T06:19:32.000Z | recommender_engine/similarity_measure/__init__.py | tranlyvu/recommender-engine | 4985c355d54ee22ba48f4891077fd7e12bd21b47 | [
"Apache-2.0"
] | 3 | 2018-01-16T06:48:55.000Z | 2020-05-04T01:43:14.000Z | recommender_engine/similarity_measure/__init__.py | tranlyvu/recommender-engine | 4985c355d54ee22ba48f4891077fd7e12bd21b47 | [
"Apache-2.0"
] | 1 | 2019-03-14T07:53:59.000Z | 2019-03-14T07:53:59.000Z | """
recommender_engine
-----
recommender_engine is a recommendation application using either item-based or user-based approaches
:copyright: (c) 2016 - 2019 by Tran Ly Vu. All Rights Reserved.
:license: Apache License 2.0
"""
from .cosine import cosine
from .euclidean_distance import euclidean_distance
from .pearson_correlation import pearson_correlation
name="similarity_measure"
__all__ = ["cosine", "euclidean_distance", "pearson_correlation"]
__author__ = "Tran Ly Vu (vutransingapore@gmail.com)"
__copyright__ = "Copyright (c) 2016 - 2019 Tran Ly Vu. All Rights Reserved."
__license__ = "Apache License 2.0"
__credits__ = ["Tran Ly Vu"]
__maintainer__ = "Tran Ly Vu"
__email__ = "vutransingapore@gmail.com"
__status__ = "Beta"
| 33.818182 | 100 | 0.766129 |
f45caefa61ce261896189f11de67dd4621b4cff1 | 44 | py | Python | code/abc057_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/abc057_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/abc057_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | a,b=map(int,input().split())
print((a+b)%24) | 22 | 28 | 0.613636 |
f45d781494a8e177d3301348e5cd3f98b7503c8a | 1,925 | py | Python | 8/8_9.py | kopsh/python_cookbook | 298c092cd20404a0755e2170776c44a04e8648ad | [
"CNRI-Python"
] | null | null | null | 8/8_9.py | kopsh/python_cookbook | 298c092cd20404a0755e2170776c44a04e8648ad | [
"CNRI-Python"
] | null | null | null | 8/8_9.py | kopsh/python_cookbook | 298c092cd20404a0755e2170776c44a04e8648ad | [
"CNRI-Python"
] | null | null | null |
if __name__ == '__main__':
import doctest
doctest.testmod() | 25.666667 | 106 | 0.535584 |
f45dfb481b367182927b34141a1df143252d871f | 7,306 | py | Python | test/examples/test_simple_gp_regression.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | test/examples/test_simple_gp_regression.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | test/examples/test_simple_gp_regression.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | import math
import torch
import unittest
import gpytorch
from torch import optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.random_variables import GaussianRandomVariable
# Simple training data: let's try to learn a sine function
train_x = Variable(torch.linspace(0, 1, 11))
train_y = Variable(torch.sin(train_x.data * (2 * math.pi)))
test_x = Variable(torch.linspace(0, 1, 51))
test_y = Variable(torch.sin(test_x.data * (2 * math.pi)))
if __name__ == '__main__':
unittest.main()
| 38.861702 | 88 | 0.634684 |
f45ec536c2f2748641c051d8785db2394218cb3f | 4,264 | py | Python | samples/RiskManagement/Verification/customer-match-denied-parties-list.py | snavinch/cybersource-rest-samples-python | adb7a6b4b55dff6ac833295192d6677b53003c16 | [
"MIT"
] | 21 | 2019-01-22T17:48:32.000Z | 2022-02-07T17:40:58.000Z | samples/RiskManagement/Verification/customer-match-denied-parties-list.py | broadpay/cybersource-rest-samples-python | f7af6f58c70ea3bf725d34929b40ee4b5fd4d77c | [
"MIT"
] | 10 | 2018-12-03T22:45:17.000Z | 2021-04-19T20:40:14.000Z | samples/RiskManagement/Verification/customer-match-denied-parties-list.py | broadpay/cybersource-rest-samples-python | f7af6f58c70ea3bf725d34929b40ee4b5fd4d77c | [
"MIT"
] | 29 | 2018-11-09T11:44:53.000Z | 2022-03-18T08:56:46.000Z | from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
if __name__ == "__main__":
customer_match_denied_parties_list()
| 38.414414 | 97 | 0.754221 |
f45ec6261b1911d698e1ee71b90cc7668913450f | 936 | py | Python | SimulatePi.py | Lucchese-Anthony/MonteCarloSimulation | 45a625b88dab6658b43b472d49d82aaeb1e847bd | [
"CC0-1.0"
] | null | null | null | SimulatePi.py | Lucchese-Anthony/MonteCarloSimulation | 45a625b88dab6658b43b472d49d82aaeb1e847bd | [
"CC0-1.0"
] | null | null | null | SimulatePi.py | Lucchese-Anthony/MonteCarloSimulation | 45a625b88dab6658b43b472d49d82aaeb1e847bd | [
"CC0-1.0"
] | null | null | null | import numpy as np
import random
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
angle = np.linspace( 0 , 2 * np.pi , 150)
radius = 1
x = radius * np.cos(angle)
y = radius * np.sin(angle)
#prints the circle
style.use('fivethirtyeight')
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
axes.plot( x, y, color="red")
inside = []
outside = []
ani = animation.FuncAnimation(fig, animate, interval=5)
plt.show()
| 21.272727 | 55 | 0.628205 |
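The `animate` callback wired into `FuncAnimation` in the SimulatePi row above is truncated out of the preview; the `inside`/`outside` lists suggest the standard Monte Carlo scheme of sampling uniform points in the enclosing square. A minimal sketch under that assumption, using the `random`, `math`, and `radius` names already in scope:

```python
def estimate_pi(samples=10_000):
    """Monte Carlo estimate: 4 * (fraction of points inside the unit circle)."""
    hits = 0
    for _ in range(samples):
        px, py = random.uniform(-1, 1), random.uniform(-1, 1)
        if math.hypot(px, py) <= radius:  # radius == 1 above
            hits += 1
    return 4 * hits / samples
```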
f45faefa310c1d7891d6abffc0a5f0a804569172 | 219 | py | Python | run.py | aarvanitii/adminWebsite | cf9a07c287571ebbc9954326806b578f6d19a11b | [
"MIT"
] | null | null | null | run.py | aarvanitii/adminWebsite | cf9a07c287571ebbc9954326806b578f6d19a11b | [
"MIT"
] | null | null | null | run.py | aarvanitii/adminWebsite | cf9a07c287571ebbc9954326806b578f6d19a11b | [
"MIT"
] | null | null | null | """
This is where the web application starts running
"""
from app.index import create_app
app = create_app()
if __name__ == "__main__":
app.secret_key = 'mysecret'
app.run(port=8080, host="0.0.0.0", debug=True) | 24.333333 | 50 | 0.694064 |
f460edaf40609072f5da235373227615b76ded70 | 804 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: sum(O(l * 2^l) for l in range(1, 11)) = O(20 * 2^10) = O(1)
# Space: O(1)
| 28.714286 | 69 | 0.452736 |
f4647df8f083e67396d2554f67110e5d8f963972 | 7,875 | py | Python | aldryn_people/tests/test_plugins.py | compoundpartners/js-people | a3744c3880f6626e677034a693f337c927baf886 | [
"BSD-3-Clause"
] | null | null | null | aldryn_people/tests/test_plugins.py | compoundpartners/js-people | a3744c3880f6626e677034a693f337c927baf886 | [
"BSD-3-Clause"
] | 1 | 2019-01-15T16:06:44.000Z | 2019-01-15T16:06:44.000Z | aldryn_people/tests/test_plugins.py | compoundpartners/js-people | a3744c3880f6626e677034a693f337c927baf886 | [
"BSD-3-Clause"
] | 1 | 2019-01-09T11:53:59.000Z | 2019-01-09T11:53:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.core.urlresolvers import reverse
except ImportError:
# Django 2.0
from django.urls import reverse
from django.utils.translation import force_text
from cms import api
from cms.utils.i18n import force_language
from aldryn_people import DEFAULT_APP_NAMESPACE
from ..models import Person, Group
from ..cms_plugins import PeoplePlugin
from . import DefaultApphookMixin, BasePeopleTest
| 38.985149 | 80 | 0.657143 |
f464a115da93371471e70429639150e5a6c40508 | 661 | py | Python | turbo_transformers/python/tests/__init__.py | xcnick/TurboTransformers | 48b6ba09af2219616c6b97cc5c09222408e080c2 | [
"BSD-3-Clause"
] | 1,147 | 2020-04-24T06:45:50.000Z | 2022-03-30T15:33:16.000Z | turbo_transformers/python/tests/__init__.py | xcnick/TurboTransformers | 48b6ba09af2219616c6b97cc5c09222408e080c2 | [
"BSD-3-Clause"
] | 140 | 2020-04-25T10:54:15.000Z | 2022-03-11T08:13:11.000Z | turbo_transformers/python/tests/__init__.py | xcnick/TurboTransformers | 48b6ba09af2219616c6b97cc5c09222408e080c2 | [
"BSD-3-Clause"
] | 151 | 2020-04-24T06:49:01.000Z | 2022-03-21T13:48:54.000Z | # Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
| 50.846154 | 69 | 0.774584 |
f465aa8f0880334955fcdd358466dab059344d4b | 355 | py | Python | generate_joke.py | audreymychan/djsmile | 8dc5d6337f1b32db8bf3dfbf13315ec25049ebb5 | [
"MIT"
] | 5 | 2019-05-30T20:15:34.000Z | 2020-04-16T08:21:16.000Z | generate_joke.py | audreymychan/djsmile | 8dc5d6337f1b32db8bf3dfbf13315ec25049ebb5 | [
"MIT"
] | 5 | 2021-08-25T14:43:34.000Z | 2022-02-10T00:14:09.000Z | generate_joke.py | audreymychan/djsmile | 8dc5d6337f1b32db8bf3dfbf13315ec25049ebb5 | [
"MIT"
] | null | null | null | # This script contains the get_joke() function to generate a new dad joke
import requests
def get_joke():
"""Return new joke string from icanhazdadjoke.com."""
url = "https://icanhazdadjoke.com/"
response = requests.get(url, headers={'Accept': 'application/json'})
raw_joke = response.json()
joke = raw_joke['joke']
return joke
| 27.307692 | 73 | 0.687324 |
f4677aa07e0ad3e8da44d44b35bfb9d27f0006a2 | 136 | py | Python | bot/tests/test_triggers/__init__.py | elihschiff/Rubber-Duck-Python | 24dea3b64a8a46368cd8dd995c800375f355b55e | [
"MIT"
] | 7 | 2020-07-07T20:58:14.000Z | 2021-12-23T02:51:20.000Z | bot/tests/test_triggers/__init__.py | elihschiff/Rubber-Duck-Python | 24dea3b64a8a46368cd8dd995c800375f355b55e | [
"MIT"
] | null | null | null | bot/tests/test_triggers/__init__.py | elihschiff/Rubber-Duck-Python | 24dea3b64a8a46368cd8dd995c800375f355b55e | [
"MIT"
] | 1 | 2020-03-29T13:36:43.000Z | 2020-03-29T13:36:43.000Z | from .test_commands import all_commands
all_triggers = all_commands
from .test_quack import TestQuack
all_triggers.append(TestQuack)
| 17 | 39 | 0.845588 |
f467e6d9c07196905aa29d2a65e967cb8686b8d6 | 445 | py | Python | src/main/scripts/crassus_deployer_lambda.py | Scout24/crassus | 8e3d5ff073181cabaf0e764c3d8be18fc7d27992 | [
"Apache-2.0"
] | null | null | null | src/main/scripts/crassus_deployer_lambda.py | Scout24/crassus | 8e3d5ff073181cabaf0e764c3d8be18fc7d27992 | [
"Apache-2.0"
] | null | null | null | src/main/scripts/crassus_deployer_lambda.py | Scout24/crassus | 8e3d5ff073181cabaf0e764c3d8be18fc7d27992 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from crassus import Crassus
from crassus.output_converter import OutputConverter
def cfn_output_converter(event, context):
"""
Convert an AWS CloudFormation output message to our defined
ResultMessage format.
"""
output_converter = OutputConverter(event, context)
output_converter.convert()
| 24.722222 | 63 | 0.759551 |
f4680fe37289f7c11ee4bd2ba12292268d591a53 | 1,960 | py | Python | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/clipboard/__init__.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/clipboard/__init__.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/clipboard/__init__.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | import sys
success = False
in_ironpython = "IronPython" in sys.version
if in_ironpython:
try:
from ironpython_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
pass
else:
try:
from win32_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
raise
def get_clipboard_text_and_convert(paste_list=False):
"""Get txt from clipboard. if paste_list==True the convert tab separated
data to list of lists. Enclose list of list in array() if all elements are
numeric"""
txt = GetClipboardText()
if txt:
if paste_list and "\t" in txt:
array, flag = make_list_of_list(txt)
if flag:
txt = repr(array)
else:
txt = "array(%s)" % repr(array)
txt = "".join([c for c in txt if c not in " \t\r\n"])
return txt
| 24.810127 | 79 | 0.558673 |
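`get_clipboard_text_and_convert` in the pyreadline row above calls a `make_list_of_list` helper whose body is truncated out of the preview. From the call site (it returns the converted value plus a flag that is true when the data is not all-numeric), an assumed sketch of its behaviour, not the original implementation:

```python
def make_list_of_list(txt):
    """Assumed behaviour: split clipboard text into tab-separated rows;
    flag is True when any cell fails to parse as a number."""
    rows = [line.split("\t") for line in txt.splitlines() if line]
    flag = False
    for row in rows:
        for c, cell in enumerate(row):
            try:
                row[c] = float(cell)
            except ValueError:
                flag = True
    return rows, flag
```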
f4689432e90e3326c569ffdf5beb1c42f606d0c9 | 17,634 | py | Python | mjrl/utils/train_agent.py | YujieLu10/tslam | 1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd | [
"Apache-2.0"
] | null | null | null | mjrl/utils/train_agent.py | YujieLu10/tslam | 1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd | [
"Apache-2.0"
] | null | null | null | mjrl/utils/train_agent.py | YujieLu10/tslam | 1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd | [
"Apache-2.0"
] | null | null | null | import logging
logging.disable(logging.CRITICAL)
import math
from tabulate import tabulate
from mjrl.utils.make_train_plots import make_train_plots
from mjrl.utils.gym_env import GymEnv
from mjrl.samplers.core import sample_paths
import numpy as np
import torch
import pickle
import imageio
import time as timer
import os
import copy
import matplotlib.pyplot as plt
try:
import exptools
from colorsys import hsv_to_rgb
import pyvista as pv
except ImportError:
exptools = None
def _load_latest_policy_and_logs(agent, *, policy_dir, logs_dir):
"""Loads the latest policy.
Returns the next step number to begin with.
"""
assert os.path.isdir(policy_dir), str(policy_dir)
assert os.path.isdir(logs_dir), str(logs_dir)
log_csv_path = os.path.join(logs_dir, 'log.csv')
if not os.path.exists(log_csv_path):
return 0 # fresh start
print("Reading: {}".format(log_csv_path))
agent.logger.read_log(log_csv_path)
last_step = agent.logger.max_len - 1
if last_step <= 0:
return 0 # fresh start
# find latest policy/baseline
i = last_step
while i >= 0:
policy_path = os.path.join(policy_dir, 'policy_{}.pickle'.format(i))
baseline_path = os.path.join(policy_dir, 'baseline_{}.pickle'.format(i))
if not os.path.isfile(policy_path):
i = i -1
continue
else:
print("Loaded last saved iteration: {}".format(i))
with open(policy_path, 'rb') as fp:
agent.policy = pickle.load(fp)
with open(baseline_path, 'rb') as fp:
agent.baseline = pickle.load(fp)
# additional
# global_status_path = os.path.join(policy_dir, 'global_status.pickle')
# with open(global_status_path, 'rb') as fp:
# agent.load_global_status( pickle.load(fp) )
agent.logger.shrink_to(i + 1)
assert agent.logger.max_len == i + 1
return agent.logger.max_len
# cannot find any saved policy
raise RuntimeError("Log file exists, but cannot find any saved policy.")
| 46.898936 | 260 | 0.60304 |
f4691a885e026834c8813dea028eee2eea8dcb79 | 4,499 | py | Python | src/tests/plugins/banktransfer/test_refund_export.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/plugins/banktransfer/test_refund_export.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/plugins/banktransfer/test_refund_export.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import json
from datetime import timedelta
from decimal import Decimal
import pytest
from django.utils.timezone import now
from pretix.base.models import Event, Order, OrderRefund, Organizer, Team, User
from pretix.plugins.banktransfer.models import RefundExport
from pretix.plugins.banktransfer.views import (
_row_key_func, _unite_transaction_rows,
)
url_prefixes = [
"/control/event/dummy/dummy/",
"/control/organizer/dummy/"
]
def test_unite_transaction_rows():
rows = sorted([
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("42.23"),
},
{
'payer': "First Last",
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': "PARTY-R-1",
'amount': Decimal("6.50"),
}
], key=_row_key_func)
assert _unite_transaction_rows(rows) == rows
rows = sorted(rows + [
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("7.77"),
},
{
'payer': "Another Last",
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': "PARTY-R-2",
'amount': Decimal("13.50"),
}
], key=_row_key_func)
assert _unite_transaction_rows(rows) == sorted([
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("50.00"),
},
{
'payer': 'Another Last, First Last',
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': 'PARTY-R-1, PARTY-R-2',
'amount': Decimal('20.00'),
}], key=_row_key_func)
| 33.080882 | 100 | 0.608357 |
f469f8b898acc53c702a295cba9f7c500ecfacd0 | 872 | py | Python | datawinners/alldata/urls.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/alldata/urls.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | datawinners/alldata/urls.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.conf.urls.defaults import patterns, url
from datawinners.alldata.views import get_entity_list_by_type
from datawinners.alldata.views import smart_phone_instruction
from datawinners.alldata.views import index, reports
from datawinners.alldata.views import failed_submissions
urlpatterns = patterns('',
url(r'^alldata/$', index, name="alldata_index"),
url(r'^project/$', index),
(r'^questionnaire/entities/(?P<entity_type>.+?)/$', get_entity_list_by_type),
(r'^questionnaire/reports/$', reports),
(r'^alldata/reports/$', reports),
(r'^allfailedsubmissions/$', failed_submissions),
url(r'^smartphoneinstruction$', smart_phone_instruction, name="smart_phone_instruction"),
url(r'^smartphoneinstruction/(?P<project_id>.+?)/$', smart_phone_instruction, name="smart_phone_instruction"),
)
| 48.444444 | 114 | 0.751147 |
f469fb9c0617beca4380191f4e87136c8e35c588 | 4,804 | py | Python | NewLifeUtils/LoggerModule.py | NewLife1324/NewLifeUtils-Dev | d955ad801da879d2888506853b0d0141c15dfafc | [
"MIT"
] | 2 | 2020-12-12T17:45:34.000Z | 2020-12-16T15:00:05.000Z | NewLifeUtils/LoggerModule.py | NewLife1324/NewLifeUtils | d955ad801da879d2888506853b0d0141c15dfafc | [
"MIT"
] | null | null | null | NewLifeUtils/LoggerModule.py | NewLife1324/NewLifeUtils | d955ad801da879d2888506853b0d0141c15dfafc | [
"MIT"
] | null | null | null | from NewLifeUtils.ColorModule import ACC, MCC
from NewLifeUtils.UtilsModule import hex_to_rgb
from NewLifeUtils.FileModule import DataStorage, LogFile
from NewLifeUtils.StringUtilModule import remove_csi
from datetime import datetime
import sys
log, wrn, err, tip, rea = init_from_cfg() | 39.056911 | 164 | 0.580766 |
f46ac6dc3031a12623e226f71b58aeded4ff617c | 440 | py | Python | config/api_urls.py | elcolie/battleship | 71b0a963c5b24ae243a193749813fec321d5f4d8 | [
"MIT"
] | null | null | null | config/api_urls.py | elcolie/battleship | 71b0a963c5b24ae243a193749813fec321d5f4d8 | [
"MIT"
] | 3 | 2018-04-22T04:40:25.000Z | 2020-06-05T19:10:08.000Z | config/api_urls.py | elcolie/battleship | 71b0a963c5b24ae243a193749813fec321d5f4d8 | [
"MIT"
] | null | null | null | from rest_framework import routers
from boards.api.viewsets import BoardViewSet
from fleets.api.viewsets import FleetViewSet
from missiles.api.viewsets import MissileViewSet
app_name = 'api'
router = routers.DefaultRouter()
router.register(r'boards', BoardViewSet, base_name='board')
router.register(r'fleets', FleetViewSet, base_name='fleet')
router.register(r'missiles', MissileViewSet, base_name='missile')
urlpatterns = router.urls
| 29.333333 | 65 | 0.811364 |
f46b0b539cef945ee6aa318ff4cb5a94326430db | 6,290 | py | Python | mealpy/evolutionary_based/MA.py | Alhassan20/mealpy | 7ed365c5c495ad1c1e066662c90159b3d5e9b8e3 | [
"MIT"
] | 1 | 2021-08-07T16:30:48.000Z | 2021-08-07T16:30:48.000Z | mealpy/evolutionary_based/MA.py | Alhassan20/mealpy | 7ed365c5c495ad1c1e066662c90159b3d5e9b8e3 | [
"MIT"
] | null | null | null | mealpy/evolutionary_based/MA.py | Alhassan20/mealpy | 7ed365c5c495ad1c1e066662c90159b3d5e9b8e3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 14:22, 11/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import time
import numpy as np
from mealpy.optimizer import Optimizer
| 42.214765 | 134 | 0.550397 |
f46c203558ba08eaf57d58a68abbbd1315976d22 | 16,106 | py | Python | src/estimagic/estimation/estimate_ml.py | OpenSourceEconomics/estimagic | 85163b4cdc601d60d654c6ca1f42b9db17a130a3 | [
"MIT"
] | 83 | 2019-09-26T04:44:03.000Z | 2022-03-17T20:24:02.000Z | src/estimagic/estimation/estimate_ml.py | OpenSourceEconomics/estimagic | 85163b4cdc601d60d654c6ca1f42b9db17a130a3 | [
"MIT"
] | 243 | 2019-06-25T18:15:53.000Z | 2022-03-26T09:17:44.000Z | src/estimagic/estimation/estimate_ml.py | OpenSourceEconomics/estimagic | 85163b4cdc601d60d654c6ca1f42b9db17a130a3 | [
"MIT"
] | 23 | 2019-07-03T11:16:55.000Z | 2022-03-07T00:57:38.000Z | from estimagic.inference.ml_covs import cov_cluster_robust
from estimagic.inference.ml_covs import cov_hessian
from estimagic.inference.ml_covs import cov_jacobian
from estimagic.inference.ml_covs import cov_robust
from estimagic.inference.ml_covs import cov_strata_robust
from estimagic.inference.shared import calculate_inference_quantities
from estimagic.inference.shared import check_is_optimized_and_derivative_case
from estimagic.inference.shared import get_derivative_case
from estimagic.inference.shared import get_internal_first_derivative
from estimagic.inference.shared import transform_covariance
from estimagic.optimization.optimize import maximize
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.process_constraints import process_constraints
from estimagic.shared.check_option_dicts import check_numdiff_options
from estimagic.shared.check_option_dicts import check_optimization_options
def estimate_ml(
loglike,
params,
optimize_options,
*,
constraints=None,
logging=False,
log_options=None,
loglike_kwargs=None,
derivative=None,
derivative_kwargs=None,
loglike_and_derivative=None,
loglike_and_derivative_kwargs=None,
numdiff_options=None,
jacobian=None,
jacobian_kwargs=None,
hessian=False,
hessian_kwargs=None,
ci_level=0.95,
n_samples=10_000,
bounds_handling="raise",
design_info=None,
):
"""Do a maximum likelihood (ml) estimation.
This is a high level interface of our lower level functions for maximization,
numerical differentiation and inference. It does the full workflow for maximum
likelihood estimation with just one function call.
While we have good defaults, you can still configure each aspect of each step
via the optional arguments of this function. If you find it easier to do the
"difficult" steps (mainly maximization and calculating numerical derivatives
of a potentially noisy function) separately, you can do so and just provide those
results as ``params``, ``jacobian`` and ``hessian``.
The docstring is aspirational and not all options are supported yet.
Args:
loglike (callable): Likelihood function that takes a params DataFrame (and
potentially other keyword arguments) and returns a dictionary that has at
least the entries "value" (a scalar float) and "contributions" (a 1d numpy
array or pandas Series) with the log likelihood contribution per individual.
params (pd.DataFrame): DataFrame where the "value" column contains the
estimated or start parameters of a likelihood model. See :ref:`params` for
details. If the supplied parameters are estimated parameters, set
optimize_options to False.
optimize_options (dict or False): Keyword arguments that govern the numerical
optimization. Valid entries are all arguments of
:func:`~estimagic.optimization.optimize.minimize` except for criterion,
derivative, criterion_and_derivative and params. If you pass False as
optimize_options you signal that ``params`` are already the optimal
parameters and no numerical optimization is needed.
constraints (list): List with constraint dictionaries.
See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb
logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has
the file extension ``.db``. If the file does not exist, it will be created.
The dashboard can only be used when logging is used.
log_options (dict): Additional keyword arguments to configure the logging.
- "fast_logging": A boolean that determines if "unsafe" settings are used
to speed up write processes to the database. This should only be used for
very short running criterion functions where the main purpose of the log
is a real-time dashboard and it would not be catastrophic to get a
corrupted database in case of a sudden system shutdown. If one evaluation
of the criterion function (and gradient if applicable) takes more than
100 ms, the logging overhead is negligible.
- "if_table_exists": (str) One of "extend", "replace", "raise". What to
do if the tables we want to write to already exist. Default "extend".
- "if_database_exists": (str): One of "extend", "replace", "raise". What to
do if the database we want to write to already exists. Default "extend".
loglike_kwargs (dict): Additional keyword arguments for loglike.
derivative (callable): Function takes params and potentially other keyword
arguments and calculates the first derivative of loglike. It can either
return a numpy array or pandas Series/DataFrame with the derivative or
a dictionary with derivatives of each output of loglike. If loglike
returns a dict but derivative does not, it is your responsibility to
make sure that the correct derivative for the numerical optimizers you are
using is returned.
derivative_kwargs (dict): Additional keyword arguments for loglike.
loglike_and_derivative (callable): Return a tuple consisting of the result
of loglike and the result of derivative. Only use this if you can exploit
synergies in the calculation of loglike and derivative.
loglike_and_derivative_kwargs (dict): Additional keyword arguments for
loglike_and_derivative.
numdiff_options (dict): Keyword arguments for the calculation of numerical
derivatives for the calculation of standard errors. See
:ref:`first_derivative` for details.
jacobian (callable or pandas.DataFrame or False): A function that takes
``params`` and potentially other keyword arguments and returns the jacobian
of loglike["contributions"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Jacobian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. Note that you
only need to pass a Jacobian function if you have a closed form Jacobian but
decided not to return it as part of ``derivative`` (e.g. because you use
a scalar optimizer and can calculate a gradient in a way that is faster
than calculating and summing the Jacobian). If you pass None, a numerical
Jacobian will be calculated. If you pass ``False``, you signal that no
Jacobian should be calculated. Thus, no result that requires the Jacobian
will be calculated.
jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.
hessian (callable or pd.DataFrame): A function that takes
``params`` and potentially other keyword arguments and returns the Hessian
of loglike["value"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Hessian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. If you pass
None, a numerical Hessian will be calculated. If you pass ``False``, you
signal that no Hessian should be calculated. Thus, no result that requires
the Hessian will be calculated.
hessian_kwargs (dict): Additional keyword arguments for the Hessian function.
ci_level (float): Confidence level for the calculation of confidence intervals.
The default is 0.95.
n_samples (int): Number of samples used to transform the covariance matrix of
the internal parameter vector into the covariance matrix of the external
parameters. For background information about internal and external params
see :ref:`implementation_of_constraints`. This is only used if you have
specified constraints.
bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds
are handled. If "clip", confidence intervals are clipped at the bounds.
Standard errors are only adjusted if a sampling step is necessary due to
additional constraints. If "raise" and any lower or upper bound is binding,
we raise an Error. If "ignore", boundary problems are simply ignored.
design_info (pandas.DataFrame): DataFrame with one row per observation that
contains some or all of the variables "psu" (primary sampling unit),
"stratum" and "fpc" (finite population corrector). See
:ref:`robust_likelihood_inference` for details.
Returns:
dict: The estimated parameters, standard errors and covariance matrix of the
parameters.
"""
# ==================================================================================
# Check and process inputs
# ==================================================================================
is_optimized = optimize_options is False
check_optimization_options(
optimize_options,
usage="estimate_ml",
algorithm_mandatory=True,
)
jac_case = get_derivative_case(jacobian)
hess_case = get_derivative_case(hessian)
check_is_optimized_and_derivative_case(is_optimized, jac_case)
check_is_optimized_and_derivative_case(is_optimized, hess_case)
cov_cases = _get_cov_cases(jac_case, hess_case, design_info)
check_numdiff_options(numdiff_options, "estimate_ml")
numdiff_options = {} if numdiff_options in (None, False) else numdiff_options
constraints = [] if constraints is None else constraints
processed_constraints, _ = process_constraints(constraints, params)
# ==================================================================================
# Calculate estimates via maximization (if necessary)
# ==================================================================================
if is_optimized:
estimates = params
else:
opt_res = maximize(
criterion=loglike,
criterion_kwargs=loglike_kwargs,
params=params,
constraints=constraints,
derivative=derivative,
derivative_kwargs=derivative_kwargs,
criterion_and_derivative=loglike_and_derivative,
criterion_and_derivative_kwargs=loglike_and_derivative_kwargs,
logging=logging,
log_options=log_options,
**optimize_options,
)
estimates = opt_res["solution_params"]
# ==================================================================================
# Calculate internal jacobian
# ==================================================================================
deriv_to_internal = get_derivative_conversion_function(
params=params, constraints=constraints
)
if jac_case == "pre-calculated":
int_jac = deriv_to_internal(jacobian)
elif jac_case == "closed-form":
jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs
_jac = jacobian(estimates, **jacobian_kwargs)
int_jac = deriv_to_internal(_jac)
# switch to "numerical" even if jac_case == "skip" because jac is required for ml.
elif jac_case == "numerical":
options = numdiff_options.copy()
options["key"] = "contributions"
deriv_res = get_internal_first_derivative(
func=loglike,
params=estimates,
constraints=constraints,
func_kwargs=loglike_kwargs,
numdiff_options=options,
)
int_jac = deriv_res["derivative"]
jac_numdiff_info = {k: v for k, v in deriv_res.items() if k != "derivative"}
else:
int_jac = None
# ==================================================================================
# Calculate internal Hessian (most of this is not yet implemented)
# ==================================================================================
if hess_case == "skip":
int_hess = None
elif hess_case == "numerical":
raise NotImplementedError("Numerical Hessian calculation is not yet supported.")
hess_numdiff_info = {}
elif hess_case in ("closed-form", "pre-calculated") and constraints:
raise NotImplementedError(
"Closed-form or pre-calculated Hessians are not yet compatible with "
"constraints."
)
else:
int_hess = hessian(estimates, **hessian_kwargs)
# ==================================================================================
# Calculate all available internal cov types
# ==================================================================================
int_covs = {}
if "jacobian" in cov_cases:
int_covs["cov_jacobian"] = cov_jacobian(int_jac)
if "hessian" in cov_cases:
int_covs["cov_hessian"] = cov_hessian(int_hess)
if "robust" in cov_cases:
int_covs["cov_robust"] = cov_robust(jac=int_jac, hess=int_hess)
if "cluster_robust" in cov_cases:
int_covs["cov_cluster_robust"] = cov_cluster_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
if "strata_robust" in cov_cases:
int_covs["cov_strata_robust"] = cov_strata_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
# ==================================================================================
# Calculate all available external covs and summaries
# ==================================================================================
covs = {}
summaries = {}
for case in cov_cases:
cov = transform_covariance(
params=estimates,
internal_cov=int_covs[f"cov_{case}"],
constraints=constraints,
n_samples=n_samples,
bounds_handling=bounds_handling,
)
summary = calculate_inference_quantities(
params=estimates,
free_cov=cov,
ci_level=ci_level,
)
covs[f"cov_{case}"] = cov
summaries[f"summary_{case}"] = summary
# ==================================================================================
# Calculate external jac and hess (if no transforming constraints)
# ==================================================================================
if not processed_constraints:
ext_jac = int_jac
ext_hess = int_hess
else:
ext_jac = "No external Jacobian defined due to constraints."
ext_hess = "No external Hessian defined due to constraints."
# ==================================================================================
# Construct output
# ==================================================================================
out = {
**summaries,
**covs,
"jacobian": ext_jac,
"hessian": ext_hess,
}
if not is_optimized:
out["optimize_res"] = opt_res
if jac_case == "numerical":
out["jacobian_numdiff_info"] = jac_numdiff_info
if hess_case == "numerical":
out["hessian_numdiff_info"] = hess_numdiff_info
return out
| 47.934524 | 88 | 0.626537 |
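The long docstring in the estimagic row above fixes the contract: `loglike` returns a dict with "value" and per-observation "contributions", and `params` is a DataFrame with a "value" column. A hedged usage sketch estimating the mean of normal data; "scipy_lbfgsb" is one of estimagic's documented algorithm names, the rest is illustrative:

```python
import numpy as np
import pandas as pd
from scipy import stats

data = np.random.normal(loc=1.0, size=100)

def normal_loglike(params):
    contribs = stats.norm.logpdf(data, loc=params.loc["mean", "value"], scale=1)
    return {"value": contribs.sum(), "contributions": contribs}

start = pd.DataFrame({"value": [0.0]}, index=["mean"])
res = estimate_ml(
    loglike=normal_loglike,
    params=start,
    optimize_options={"algorithm": "scipy_lbfgsb"},
)
# With the default hessian=False, the Jacobian-based results are presumably
# what is available, e.g. res["summary_jacobian"] with estimates and
# standard errors.
```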
f46ca3af523c02675160a6c57c283a2d49c86f50 | 6,503 | py | Python | neural_architecture_search_appendix_a.py | NunoEdgarGFlowHub/neural_architecture_search_with_reinforcement_learning_appendix_a | 67e4876d428e5155f5526ee02875b0a89a52305d | [
"MIT"
] | 68 | 2017-01-31T06:35:53.000Z | 2021-02-24T09:39:55.000Z | neural_architecture_search_appendix_a.py | NunoEdgarGFlowHub/neural_architecture_search_with_reinforcement_learning_appendix_a | 67e4876d428e5155f5526ee02875b0a89a52305d | [
"MIT"
] | 3 | 2017-05-14T13:41:39.000Z | 2020-04-21T04:23:50.000Z | neural_architecture_search_appendix_a.py | NunoEdgarGFlowHub/neural_architecture_search_with_reinforcement_learning_appendix_a | 67e4876d428e5155f5526ee02875b0a89a52305d | [
"MIT"
] | 15 | 2017-03-16T03:04:46.000Z | 2018-07-05T15:07:39.000Z | import six
import chainer
import numpy as np
import chainer.links as L
import chainer.functions as F
import nutszebra_chainer
import functools
from collections import defaultdict
def __call__(self, x, train=False):
x = [x]
outputs = []
for i in six.moves.range(len(self.out_channels)):
x = self['conv{}'.format(i)](self.concatenate(x), train=train)
outputs.append(x)
x = [outputs[ii] for ii, s in enumerate(self.skip_connections) if s[i] == 1] + [outputs[i]]
x = outputs[-1]
batch, channels, height, width = x.data.shape
x = F.reshape(F.average_pooling_2d(x, (height, width)), (batch, channels, 1, 1))
return F.reshape(self.linear(x, train), (batch, self.category_num))
def calc_loss(self, y, t):
loss = F.softmax_cross_entropy(y, t)
return loss
def accuracy(self, y, t, xp=np):
y.to_cpu()
t.to_cpu()
indices = np.where((t.data == np.argmax(y.data, axis=1)) == True)[0]
accuracy = defaultdict(int)
for i in indices:
accuracy[t.data[i]] += 1
indices = np.where((t.data == np.argmax(y.data, axis=1)) == False)[0]
false_accuracy = defaultdict(int)
false_y = np.argmax(y.data, axis=1)
for i in indices:
false_accuracy[(t.data[i], false_y[i])] += 1
return accuracy, false_accuracy
| 42.227273 | 138 | 0.531755 |
f46d4201935576f7c5b0f071b01e8b9a5b4caddc | 2,945 | py | Python | test/test_proportions_delta.py | quizlet/abracadabra | eda599bd02f14b96efdc521f53132d93c9100ede | [
"MIT"
] | 24 | 2020-06-12T16:12:32.000Z | 2021-09-01T12:25:38.000Z | test/test_proportions_delta.py | quizlet/abracadabra | eda599bd02f14b96efdc521f53132d93c9100ede | [
"MIT"
] | 20 | 2020-06-12T06:26:08.000Z | 2022-03-12T00:57:51.000Z | test/test_proportions_delta.py | quizlet/abracadabra | eda599bd02f14b96efdc521f53132d93c9100ede | [
"MIT"
] | 4 | 2020-06-14T12:14:11.000Z | 2021-05-28T15:36:44.000Z | import pytest
from abra import Experiment, HypothesisTest
def test_proportions_delta_experiment_t(proportions_data_small):
"""Small sample sizes defautl to t-tests"""
exp = Experiment(proportions_data_small.sample(29), name='proportions-test')
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='unequal',
inference_method='means_delta'
)
results_aa = exp.run_test(test_aa)
assert results_aa.test_statistic == 't' | 29.45 | 80 | 0.69236 |
f46d89c9f6b67abfd2563de23f0ea1549928a68e | 1,441 | py | Python | src/bootils/plugins/core/jsw.py | Build-The-Web/bootils | 8ee88f4d0583352f58fbb89c018e7caef8f07ce3 | [
"Apache-2.0"
] | 3 | 2015-03-25T23:00:58.000Z | 2018-01-03T15:50:41.000Z | src/bootils/plugins/core/jsw.py | Build-The-Web/bootils | 8ee88f4d0583352f58fbb89c018e7caef8f07ce3 | [
"Apache-2.0"
] | 8 | 2015-04-10T14:53:20.000Z | 2015-12-18T09:59:58.000Z | src/bootils/plugins/core/jsw.py | Build-The-Web/bootils | 8ee88f4d0583352f58fbb89c018e7caef8f07ce3 | [
"Apache-2.0"
] | 2 | 2015-09-10T13:01:09.000Z | 2018-03-04T20:46:09.000Z | # -*- coding: utf-8 -*-
# pylint: disable=
""" Tanuki Java Service Wrapper runtime environment.
Debian JSW paths (Wheezy 3.5.3; Jessie 3.5.22)::
/usr/sbin/wrapper ELF executable
/usr/share/wrapper/daemon.sh
/usr/share/wrapper/make-wrapper-init.sh
/usr/share/wrapper/wrapper.conf
"""
# Copyright 2015 1&1 Group <btw-users@googlegroups.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
from ..loader import PluginBase
| 35.146341 | 74 | 0.700208 |
f46df9cfbed7221c6dfc035138710969c22cfd18 | 1,992 | py | Python | MachineLearning/hw1/models/LinearRegression.py | ChoKyuWon/SchoolProjects | 71a5decefc85ae941ba2d537c4507ba8e615cc34 | [
"MIT"
] | null | null | null | MachineLearning/hw1/models/LinearRegression.py | ChoKyuWon/SchoolProjects | 71a5decefc85ae941ba2d537c4507ba8e615cc34 | [
"MIT"
] | null | null | null | MachineLearning/hw1/models/LinearRegression.py | ChoKyuWon/SchoolProjects | 71a5decefc85ae941ba2d537c4507ba8e615cc34 | [
"MIT"
] | null | null | null | import numpy as np
| 33.2 | 89 | 0.449799 |
f46e88c174121a507ecd5ff0eff0efa5c6c1e776 | 1,655 | py | Python | apps/bc_scraper/actions/schedule.py | aurmeneta/ramos-uc | 364ab3c5a55032ab7ffc08665a2da4c5ff04ae58 | [
"MIT"
] | 7 | 2021-07-14T18:13:35.000Z | 2021-11-21T20:10:54.000Z | apps/bc_scraper/actions/schedule.py | aurmeneta/ramos-uc | 364ab3c5a55032ab7ffc08665a2da4c5ff04ae58 | [
"MIT"
] | 57 | 2021-07-10T01:31:56.000Z | 2022-01-14T02:02:58.000Z | apps/bc_scraper/actions/schedule.py | aurmeneta/ramos-uc | 364ab3c5a55032ab7ffc08665a2da4c5ff04ae58 | [
"MIT"
] | 4 | 2021-07-23T16:51:55.000Z | 2021-08-31T02:41:41.000Z | from copy import copy
DEFAULT_SCHEDULE = {}
for day in "lmwjvs":
for mod in "12345678":
DEFAULT_SCHEDULE[day + mod] = "'FREE'"
def process_schedule(text_sc):
"""For a given schedule text in BC format, returns the SQL queries for inserting
    the full schedule and schedule info. Both queries keep a %s placeholder
    that must be formatted with the section ID.
"""
### Full Schedule
data = text_sc.split("\nROW: ")[1:]
# data rows -> day-day:module,module <> type <> room <><>
schedule = copy(DEFAULT_SCHEDULE)
for row in data:
row = row.split("<>")[:2]
horario = row[0].split(":")
days = horario[0].split("-")
modules = horario[1].split(",")
for day in days:
for mod in modules:
if len(day) and len(mod):
schedule[day.lower() + mod] = "'" + row[1] + "'"
cols = ",".join(schedule.keys())
values = ",".join(schedule.values())
full_sc_query = (
f"INSERT INTO courses_fullschedule (section_id, {cols}) VALUES (%s, {values});"
)
### Info Schedule
schedule_info = {"total": 0}
for type in ["AYU", "CLAS", "LAB", "PRA", "SUP", "TAL", "TER", "TES"]:
schedule_info[type] = list(schedule.values()).count("'" + type + "'")
schedule_info["total"] += schedule_info[type]
schedule_info[type] = str(schedule_info[type])
schedule_info["total"] = str(schedule_info["total"])
cols = ",".join(schedule_info.keys())
values = ",".join(schedule_info.values())
info_sc_query = (
f"INSERT INTO courses_scheduleinfo (section_id, {cols}) VALUES (%s, {values});"
)
return full_sc_query, info_sc_query
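

# Illustrative sketch (added, not part of the original module): the ROW text
# below is an assumption inferred from the parser above -- days joined by "-",
# modules by ",", then "<>"-separated type and room fields.
def _example_process_schedule():
    sample = "HEADER\nROW: L-M:1,2<>CLAS<>A1<><>"
    full_sc_query, info_sc_query = process_schedule(sample)
    # Both returned queries still contain a %s placeholder for the section id.
    return full_sc_query, info_sc_query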
| 33.77551 | 87 | 0.583686 |
f46f4f4b92656a15af396d51e27d17942b2af4aa | 9,739 | py | Python | openstack_dashboard/dashboards/admin/volumes/views.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | 1 | 2018-04-17T02:32:05.000Z | 2018-04-17T02:32:05.000Z | openstack_dashboard/dashboards/admin/volumes/views.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | 3 | 2021-01-21T14:27:55.000Z | 2021-06-10T23:08:49.000Z | openstack_dashboard/dashboards/admin/volumes/views.py | Surfndez/horizon | a56765b6b3dbc09fd467b83a57bea2433ae3909e | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes and snapshots.
"""
from collections import OrderedDict
from django.conf import settings
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.volumes \
import forms as volumes_forms
from openstack_dashboard.dashboards.admin.volumes \
import tables as volumes_tables
from openstack_dashboard.dashboards.admin.volumes \
import tabs as volumes_tabs
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
class UpdateStatusView(forms.ModalFormView):
form_class = volumes_forms.UpdateStatus
modal_id = "update_volume_status_modal"
template_name = 'admin/volumes/update_status.html'
submit_label = _("Update Status")
submit_url = "horizon:admin:volumes:update_status"
success_url = reverse_lazy('horizon:admin:volumes:index')
page_title = _("Update Volume Status")
| 38.34252 | 78 | 0.652736 |
f46f5e7244355f200cf15f2877d74e3a5f0c0027 | 26 | py | Python | tests/__init__.py | flowolf/yessssms | 438928967aca38d3d2bb07799d3723757e928553 | [
"MIT"
] | 6 | 2015-02-17T09:51:11.000Z | 2021-01-05T12:39:26.000Z | tests/__init__.py | flowolf/yessssms | 438928967aca38d3d2bb07799d3723757e928553 | [
"MIT"
] | 1 | 2019-09-19T20:07:22.000Z | 2019-09-24T09:24:04.000Z | tests/__init__.py | flowolf/yessssms | 438928967aca38d3d2bb07799d3723757e928553 | [
"MIT"
] | 2 | 2017-05-06T09:14:19.000Z | 2020-03-04T20:43:33.000Z | """Tests for YesssSMS."""
| 13 | 25 | 0.615385 |
f4710b19edd2c97dbafb4bd7d15c47788db38366 | 677 | py | Python | bldr/dep/env.py | bldr-cmd/bldr-cmd | 300750fbccc2987efd23f69b7b2d76d8563e2995 | [
"Apache-2.0"
] | null | null | null | bldr/dep/env.py | bldr-cmd/bldr-cmd | 300750fbccc2987efd23f69b7b2d76d8563e2995 | [
"Apache-2.0"
] | null | null | null | bldr/dep/env.py | bldr-cmd/bldr-cmd | 300750fbccc2987efd23f69b7b2d76d8563e2995 | [
"Apache-2.0"
] | null | null | null | # This is used by Environment to populate its env
# Due to circular dependencies it cannot reference other parts of bldr
import toml | 37.611111 | 72 | 0.703102 |
f471777a68cf3b70989f0f48f2b4ea4d759a30a8 | 5,382 | py | Python | rasa-sample/actions.py | ijufumi/demo-python | b48bdebde172ca581a48346a77b12c30ff202e73 | [
"MIT"
] | null | null | null | rasa-sample/actions.py | ijufumi/demo-python | b48bdebde172ca581a48346a77b12c30ff202e73 | [
"MIT"
] | null | null | null | rasa-sample/actions.py | ijufumi/demo-python | b48bdebde172ca581a48346a77b12c30ff202e73 | [
"MIT"
] | null | null | null | import re
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet
import lark_module
| 35.642384 | 120 | 0.556856 |
f471a2c4554505f4474a4ceb98a24f55991c2cdc | 1,557 | py | Python | parsers/politico.py | plympton/newsdiffs | 2a055850bda850b9b6c28c989512d4e4b3e9b64e | [
"MIT"
] | null | null | null | parsers/politico.py | plympton/newsdiffs | 2a055850bda850b9b6c28c989512d4e4b3e9b64e | [
"MIT"
] | null | null | null | parsers/politico.py | plympton/newsdiffs | 2a055850bda850b9b6c28c989512d4e4b3e9b64e | [
"MIT"
] | null | null | null | from baseparser import BaseParser, grab_url, logger
# Different versions of BeautifulSoup have different properties.
# Some work with one site, some with another.
# This is BeautifulSoup 3.2.
from BeautifulSoup import BeautifulSoup
# This is BeautifulSoup 4
import bs4
| 35.386364 | 80 | 0.620424 |
f472e924139d73818eedf6b97de856c2ca049e7a | 6,535 | py | Python | integration-tests/bats/server_multiclient_test.py | fairhopeweb/dolt | 276b85b7b1287f883640ef3fcacb0bdb112749b2 | [
"Apache-2.0"
] | 2 | 2021-03-09T07:32:40.000Z | 2021-06-11T21:41:30.000Z | integration-tests/bats/server_multiclient_test.py | albertusortiz/dolt | 38fc4fcb0357a56eb97abdb25296f45571a5418f | [
"Apache-2.0"
] | null | null | null | integration-tests/bats/server_multiclient_test.py | albertusortiz/dolt | 38fc4fcb0357a56eb97abdb25296f45571a5418f | [
"Apache-2.0"
] | 1 | 2021-08-06T13:05:57.000Z | 2021-08-06T13:05:57.000Z | import os
import sys
from queue import Queue
from threading import Thread
from helper.pytest import DoltConnection
# Utility functions
UPDATE_BRANCH_FAIL_MSG = "Failed to update branch"
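# Assumed helper (its definition is missing from this excerpt): the script
# below only needs it to report a failed work item to stderr.
def print_err(*args):
    print(*args, file=sys.stderr)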
# work functions
# test script
MAX_SIMULTANEOUS_CONNECTIONS = 2
PORT_STR = sys.argv[1]
CONNECTIONS = [None]*MAX_SIMULTANEOUS_CONNECTIONS
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
CONNECTIONS[i] = DoltConnection(port=int(PORT_STR), database="repo1", user='dolt', auto_commit=False)
WORK_QUEUE = Queue()
# work item run by workers
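# Minimal reconstruction (the original class body is missing from this
# excerpt), inferred from usage below: a WorkItem carries a connection plus
# the ordered work functions to run on it, and records any exception raised.
class WorkItem(object):
    def __init__(self, dc, *work_funcs):
        self.dc = dc
        self.work_funcs = work_funcs
        self.exception = None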
# worker thread function
def worker():
while True:
try:
item = WORK_QUEUE.get()
for work_func in item.work_funcs:
work_func(item.dc)
WORK_QUEUE.task_done()
        except Exception as e:
            # Record the failure on the item being processed; the previous
            # code wrote to the module-level `work_item`, which races with
            # the main thread's loop variable.
            item.exception = e
            WORK_QUEUE.task_done()
# start the worker threads
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
t = Thread(target=worker)
t.daemon = True
t.start()
# This defines the actual test script. Each stage in the script has a list of work items. Each work item
# in a stage should have a different connection associated with it. Each connections work is done in parallel
# each of the work functions for a connection is executed in order.
work_item_stages = [
[WorkItem(CONNECTIONS[0], connect, create_tables)],
[WorkItem(CONNECTIONS[0], seed_master), WorkItem(CONNECTIONS[1], connect, duplicate_table_create)],
[WorkItem(CONNECTIONS[0], modify_pk0_on_master_and_commit), WorkItem(CONNECTIONS[1], modify_pk0_on_master_no_commit)],
[WorkItem(CONNECTIONS[1], fail_to_commit, commit_to_feature, merge_resolve_commit)]
]
# Loop through the work item stages executing each stage by sending the work items for the stage to the worker threads
# and then waiting for all of them to finish before moving on to the next one. Checks for an error after every stage.
for stage, work_items in enumerate(work_item_stages):
    print("Running stage %d / %d" % (stage + 1, len(work_item_stages)))
for work_item in work_items:
WORK_QUEUE.put(work_item)
WORK_QUEUE.join()
for work_item in work_items:
if work_item.exception is not None:
print_err(work_item.exception)
sys.exit(1)
| 32.839196 | 140 | 0.680643 |
f47301fb50cbf2affb241d7c61d027660a0014ae | 24,598 | py | Python | messenger/client/messenger.py | marik348/python-messenger | 6c1916b0df439cd997cb6e9376221fe587c3f1c1 | [
"MIT"
] | 2 | 2021-05-24T08:44:51.000Z | 2022-03-17T10:41:48.000Z | messenger/client/messenger.py | marik348/python-messenger | 6c1916b0df439cd997cb6e9376221fe587c3f1c1 | [
"MIT"
] | 1 | 2020-11-28T12:08:25.000Z | 2020-11-28T12:08:25.000Z | messenger/client/messenger.py | marik348/python-messegner | 6c1916b0df439cd997cb6e9376221fe587c3f1c1 | [
"MIT"
] | 1 | 2021-05-24T08:50:42.000Z | 2021-05-24T08:50:42.000Z | from requests import get, post, exceptions
from datetime import datetime
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QFont
from qtwidgets import PasswordEdit
from client_commands import (help_client, online, status, myself, reg, role, ban, unban)
from client_content import (get_warning_messages, get_client_commands, get_message_box_text, get_message_style)
from click_label import clickable
from client_ui import Ui_Messenger
from preferences import Preferences
from style_sheet import load_stylesheet
app = QtWidgets.QApplication([])
window = Messenger()
app.setStyleSheet(load_stylesheet())
window.show()
app.exec_()
| 38.982567 | 120 | 0.622693 |
f475a7baedbb00d2706f41a680754762b1e5e2d7 | 6,599 | py | Python | oscar/lib/python2.7/site-packages/prompt_toolkit/utils.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/prompt_toolkit/utils.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/prompt_toolkit/utils.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import inspect
import os
import signal
import sys
import threading
import weakref
from wcwidth import wcwidth
from six.moves import range
__all__ = (
'Event',
'DummyContext',
'get_cwidth',
'suspend_to_background_supported',
'is_conemu_ansi',
'is_windows',
'in_main_thread',
'take_using_weights',
'test_callable_args',
)
# Cache of signatures. Improves the performance of `test_callable_args`.
_signatures_cache = weakref.WeakKeyDictionary()
def test_callable_args(func, args):
"""
Return True when this function can be called with the given arguments.
"""
assert isinstance(args, (list, tuple))
signature = getattr(inspect, 'signature', None)
if signature is not None:
# For Python 3, use inspect.signature.
try:
sig = _signatures_cache[func]
except KeyError:
sig = signature(func)
_signatures_cache[func] = sig
try:
sig.bind(*args)
except TypeError:
return False
else:
return True
else:
        # For older Python versions, fall back to using getargspec.
        spec = inspect.getargspec(func)

        # Drop the 'self' argument of bound methods. (Helper reconstructed
        # here; its original definition is missing from this excerpt.)
        def drop_self(spec):
            args, varargs, varkw, defaults = spec
            if args[:1] == ['self']:
                args = args[1:]
            return inspect.ArgSpec(args, varargs, varkw, defaults)

        spec = drop_self(spec)
# When taking *args, always return True.
if spec.varargs is not None:
return True
# Test whether the given amount of args is between the min and max
# accepted argument counts.
return len(spec.args) - len(spec.defaults or []) <= len(args) <= len(spec.args)
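# Small illustration (added): test_callable_args only inspects the call
# signature; it never invokes the function.
def _test_callable_args_demo():
    f = lambda a, b=1: None
    assert test_callable_args(f, [1])            # b has a default
    assert test_callable_args(f, [1, 2])
    assert not test_callable_args(f, [1, 2, 3])  # too many positional args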
class _CharSizesCache(dict):
    """
    Cache for character widths. (Minimal reconstruction: the original class
    definition is missing from this dump; this version memoizes wcwidth
    results per string.)
    """
    def __missing__(self, string):
        # Some control characters have a wcwidth of -1; clamp those to 0.
        if len(string) == 1:
            result = max(0, wcwidth(string))
        else:
            result = sum(max(0, wcwidth(c)) for c in string)
        # Only cache short strings to bound memory use.
        if len(string) < 256:
            self[string] = result
        return result


_CHAR_SIZES_CACHE = _CharSizesCache()
def get_cwidth(string):
"""
Return width of a string. Wrapper around ``wcwidth``.
"""
return _CHAR_SIZES_CACHE[string]
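# Illustrative check (added): widths are terminal columns, so double-width
# CJK characters count as 2.
def _get_cwidth_demo():
    assert get_cwidth('abc') == 3
    assert get_cwidth('\u3042') == 2  # HIRAGANA LETTER A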
def suspend_to_background_supported():
"""
Returns `True` when the Python implementation supports
suspend-to-background. This is typically `False' on Windows systems.
"""
return hasattr(signal, 'SIGTSTP')
def is_windows():
"""
True when we are using Windows.
"""
return sys.platform.startswith('win') # E.g. 'win32', not 'darwin' or 'linux2'
def is_conemu_ansi():
"""
True when the ConEmu Windows console is used.
"""
return is_windows() and os.environ.get('ConEmuANSI', 'OFF') == 'ON'
def in_main_thread():
"""
True when the current thread is the main thread.
"""
return threading.current_thread().__class__.__name__ == '_MainThread'
| 27.381743 | 88 | 0.575542 |
f476ce15c4cf3ddf393197690eec2e823de61189 | 92,209 | py | Python | lmdb/cffi.py | hirnimeshrampuresoftware/py-lmdb | 9aa7560f8e1a89b437fb3fed7ea36f5888b7a963 | [
"OLDAP-2.8"
] | 185 | 2019-06-18T15:58:49.000Z | 2022-03-09T09:42:57.000Z | lmdb/cffi.py | hirnimeshrampuresoftware/py-lmdb | 9aa7560f8e1a89b437fb3fed7ea36f5888b7a963 | [
"OLDAP-2.8"
] | 114 | 2019-06-15T04:19:04.000Z | 2022-03-30T06:34:44.000Z | lmdb/cffi.py | hirnimeshrampuresoftware/py-lmdb | 9aa7560f8e1a89b437fb3fed7ea36f5888b7a963 | [
"OLDAP-2.8"
] | 32 | 2019-07-03T23:56:58.000Z | 2022-02-12T04:46:16.000Z | #
# Copyright 2013 The py-lmdb authors, all rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted only as authorized by the OpenLDAP
# Public License.
#
# A copy of this license is available in the file LICENSE in the
# top-level directory of the distribution or, alternatively, at
# <http://www.OpenLDAP.org/license.html>.
#
# OpenLDAP is a registered trademark of the OpenLDAP Foundation.
#
# Individual files and/or contributed packages may be copyright by
# other parties and/or subject to additional restrictions.
#
# This work also contains materials derived from public sources.
#
# Additional information about OpenLDAP can be obtained at
# <http://www.openldap.org/>.
#
"""
CPython/CFFI wrapper for OpenLDAP's "Lightning" MDB database.
Please see https://lmdb.readthedocs.io/
"""
from __future__ import absolute_import
from __future__ import with_statement
import errno
import inspect
import os
import sys
import threading
is_win32 = sys.platform == 'win32'
if is_win32:
import msvcrt
try:
import __builtin__
except ImportError:
import builtins as __builtin__ # type: ignore
import lmdb
try:
from lmdb import _config
except ImportError:
_config = None # type: ignore
__all__ = [
'Cursor',
'Environment',
'Transaction',
'_Database',
'enable_drop_gil',
'version',
]
__all__ += [
'BadDbiError',
'BadRslotError',
'BadTxnError',
'BadValsizeError',
'CorruptedError',
'CursorFullError',
'DbsFullError',
'DiskError',
'Error',
'IncompatibleError',
'InvalidError',
'InvalidParameterError',
'KeyExistsError',
'LockError',
'MapFullError',
'MapResizedError',
'MemoryError',
'NotFoundError',
'PageFullError',
'PageNotFoundError',
'PanicError',
'ReadersFullError',
'ReadonlyError',
'TlsFullError',
'TxnFullError',
'VersionMismatchError',
]
# Handle moronic Python 3 mess.
UnicodeType = getattr(__builtin__, 'unicode', str)
BytesType = getattr(__builtin__, 'bytes', str)
O_0755 = int('0755', 8)
O_0111 = int('0111', 8)
EMPTY_BYTES = UnicodeType().encode()
# Used to track context across CFFI callbacks.
_callbacks = threading.local()
_CFFI_CDEF = '''
typedef int mode_t;
typedef ... MDB_env;
typedef struct MDB_txn MDB_txn;
typedef struct MDB_cursor MDB_cursor;
typedef unsigned int MDB_dbi;
enum MDB_cursor_op {
MDB_FIRST,
MDB_FIRST_DUP,
MDB_GET_BOTH,
MDB_GET_BOTH_RANGE,
MDB_GET_CURRENT,
MDB_GET_MULTIPLE,
MDB_LAST,
MDB_LAST_DUP,
MDB_NEXT,
MDB_NEXT_DUP,
MDB_NEXT_MULTIPLE,
MDB_NEXT_NODUP,
MDB_PREV,
MDB_PREV_DUP,
MDB_PREV_NODUP,
MDB_SET,
MDB_SET_KEY,
MDB_SET_RANGE,
...
};
typedef enum MDB_cursor_op MDB_cursor_op;
struct MDB_val {
size_t mv_size;
void *mv_data;
...;
};
typedef struct MDB_val MDB_val;
struct MDB_stat {
unsigned int ms_psize;
unsigned int ms_depth;
size_t ms_branch_pages;
size_t ms_leaf_pages;
size_t ms_overflow_pages;
size_t ms_entries;
...;
};
typedef struct MDB_stat MDB_stat;
struct MDB_envinfo {
void *me_mapaddr;
size_t me_mapsize;
size_t me_last_pgno;
size_t me_last_txnid;
unsigned int me_maxreaders;
unsigned int me_numreaders;
...;
};
typedef struct MDB_envinfo MDB_envinfo;
typedef int (*MDB_cmp_func)(const MDB_val *a, const MDB_val *b);
typedef void (*MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr,
void *relctx);
char *mdb_strerror(int err);
int mdb_env_create(MDB_env **env);
int mdb_env_open(MDB_env *env, const char *path, unsigned int flags,
mode_t mode);
int mdb_env_copy2(MDB_env *env, const char *path, int flags);
int mdb_env_copyfd2(MDB_env *env, int fd, int flags);
int mdb_env_stat(MDB_env *env, MDB_stat *stat);
int mdb_env_info(MDB_env *env, MDB_envinfo *stat);
int mdb_env_get_maxkeysize(MDB_env *env);
int mdb_env_sync(MDB_env *env, int force);
void mdb_env_close(MDB_env *env);
int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff);
int mdb_env_get_flags(MDB_env *env, unsigned int *flags);
int mdb_env_get_path(MDB_env *env, const char **path);
int mdb_env_set_mapsize(MDB_env *env, size_t size);
int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers);
int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers);
int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs);
int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags,
MDB_txn **txn);
int mdb_txn_commit(MDB_txn *txn);
void mdb_txn_reset(MDB_txn *txn);
int mdb_txn_renew(MDB_txn *txn);
void mdb_txn_abort(MDB_txn *txn);
size_t mdb_txn_id(MDB_txn *txn);
int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags,
MDB_dbi *dbi);
int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del_);
int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data);
int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor);
void mdb_cursor_close(MDB_cursor *cursor);
int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags);
int mdb_cursor_count(MDB_cursor *cursor, size_t *countp);
int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val*data, int op);
typedef int (MDB_msg_func)(const char *msg, void *ctx);
int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
int mdb_reader_check(MDB_env *env, int *dead);
int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags);
#define MDB_VERSION_MAJOR ...
#define MDB_VERSION_MINOR ...
#define MDB_VERSION_PATCH ...
#define EACCES ...
#define EAGAIN ...
#define EINVAL ...
#define ENOMEM ...
#define ENOSPC ...
#define MDB_BAD_RSLOT ...
#define MDB_BAD_DBI ...
#define MDB_BAD_TXN ...
#define MDB_BAD_VALSIZE ...
#define MDB_CORRUPTED ...
#define MDB_CURSOR_FULL ...
#define MDB_DBS_FULL ...
#define MDB_INCOMPATIBLE ...
#define MDB_INVALID ...
#define MDB_KEYEXIST ...
#define MDB_MAP_FULL ...
#define MDB_MAP_RESIZED ...
#define MDB_NOTFOUND ...
#define MDB_PAGE_FULL ...
#define MDB_PAGE_NOTFOUND ...
#define MDB_PANIC ...
#define MDB_READERS_FULL ...
#define MDB_TLS_FULL ...
#define MDB_TXN_FULL ...
#define MDB_VERSION_MISMATCH ...
#define MDB_APPEND ...
#define MDB_APPENDDUP ...
#define MDB_CP_COMPACT ...
#define MDB_CREATE ...
#define MDB_DUPFIXED ...
#define MDB_DUPSORT ...
#define MDB_INTEGERDUP ...
#define MDB_INTEGERKEY ...
#define MDB_MAPASYNC ...
#define MDB_NODUPDATA ...
#define MDB_NOLOCK ...
#define MDB_NOMEMINIT ...
#define MDB_NOMETASYNC ...
#define MDB_NOOVERWRITE ...
#define MDB_NORDAHEAD ...
#define MDB_NOSUBDIR ...
#define MDB_NOSYNC ...
#define MDB_NOTLS ...
#define MDB_RDONLY ...
#define MDB_REVERSEKEY ...
#define MDB_WRITEMAP ...
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen);
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen,
unsigned int flags);
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
MDB_val *val_out);
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op);
static int pymdb_cursor_put(MDB_cursor *cursor,
char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags);
// Prefaults a range
static void preload(int rc, void *x, size_t size);
'''
_CFFI_CDEF_PATCHED = '''
int mdb_env_copy3(MDB_env *env, const char *path, unsigned int flags, MDB_txn *txn);
int mdb_env_copyfd3(MDB_env *env, int fd, unsigned int flags, MDB_txn *txn);
'''
_CFFI_VERIFY = '''
#include <sys/stat.h>
#include "lmdb.h"
#include "preload.h"
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
MDB_val *val_out)
{
MDB_val key = {keylen, key_s};
int rc = mdb_get(txn, dbi, &key, val_out);
return rc;
}
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen, unsigned int flags)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
return mdb_put(txn, dbi, &key, &val, flags);
}
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
MDB_val *valptr;
if(vallen == 0) {
valptr = NULL;
} else {
valptr = &val;
}
return mdb_del(txn, dbi, &key, valptr);
}
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op)
{
MDB_val tmp_key = {key_len, key_s};
MDB_val tmp_data = {data_len, data_s};
int rc = mdb_cursor_get(cursor, &tmp_key, &tmp_data, op);
if(! rc) {
*key = tmp_key;
*data = tmp_data;
}
return rc;
}
static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags)
{
MDB_val tmpkey = {keylen, key_s};
MDB_val tmpval = {vallen, val_s};
return mdb_cursor_put(cursor, &tmpkey, &tmpval, flags);
}
'''
if not lmdb._reading_docs():
import cffi
# Try to use distutils-bundled CFFI configuration to avoid a recompile and
# potential compile errors during first module import.
_config_vars = _config.CONFIG if _config else {
'extra_compile_args': ['-w'],
'extra_sources': ['lib/mdb.c', 'lib/midl.c'],
'extra_include_dirs': ['lib'],
'extra_library_dirs': [],
'libraries': []
}
_have_patched_lmdb = '-DHAVE_PATCHED_LMDB=1' in _config.CONFIG['extra_compile_args'] # type: ignore
if _have_patched_lmdb:
_CFFI_CDEF += _CFFI_CDEF_PATCHED
_ffi = cffi.FFI()
_ffi.cdef(_CFFI_CDEF)
_lib = _ffi.verify(_CFFI_VERIFY,
modulename='lmdb_cffi',
ext_package='lmdb',
sources=_config_vars['extra_sources'],
extra_compile_args=_config_vars['extra_compile_args'],
include_dirs=_config_vars['extra_include_dirs'],
libraries=_config_vars['libraries'],
library_dirs=_config_vars['extra_library_dirs'])
# Prepare _error_map, a mapping of integer MDB_ERROR_CODE to exception class.
if not lmdb._reading_docs():
_error_map = {}
for obj in list(globals().values()):
if inspect.isclass(obj) and issubclass(obj, Error) and obj is not Error:
_error_map[getattr(_lib, obj.MDB_NAME)] = obj
del obj
def _error(what, rc):
"""Lookup and instantiate the correct exception class for the error code
`rc`, using :py:class:`Error` if no better class exists."""
return _error_map.get(rc, Error)(what, rc)
_invalid = Some_LMDB_Resource_That_Was_Deleted_Or_Closed()
def _mvbuf(mv):
"""Convert a MDB_val cdata to a CFFI buffer object."""
return _ffi.buffer(mv.mv_data, mv.mv_size)
def _mvstr(mv):
"""Convert a MDB_val cdata to Python bytes."""
return _ffi.buffer(mv.mv_data, mv.mv_size)[:]
def enable_drop_gil():
"""Deprecated."""
def version(subpatch=False):
"""
Return a tuple of integers `(major, minor, patch)` describing the LMDB
library version that the binding is linked against. The version of the
binding itself is available from ``lmdb.__version__``.
`subpatch`:
If true, returns a 4 integer tuple consisting of the same plus
an extra integer that represents any patches applied by py-lmdb
itself (0 representing no patches).
"""
if subpatch:
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH,
1 if _have_patched_lmdb else 0)
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH)
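# Illustrative usage (added): the tuple form makes lexicographic version
# checks straightforward.
def _version_demo():
    major, minor, patch = version()
    return (major, minor, patch) >= (0, 9, 0)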
open = Environment
| 37.728723 | 105 | 0.596298 |
f477633c1badf20c6b9aa7cdc1d086ce3dd6b193 | 6,425 | py | Python | .virtual_documents/00_core.ipynb.py | AtomScott/image_folder_datasets | 935580929abc9d8ec9eeaf944a0d3c670a09d04d | [
"Apache-2.0"
] | null | null | null | .virtual_documents/00_core.ipynb.py | AtomScott/image_folder_datasets | 935580929abc9d8ec9eeaf944a0d3c670a09d04d | [
"Apache-2.0"
] | null | null | null | .virtual_documents/00_core.ipynb.py | AtomScott/image_folder_datasets | 935580929abc9d8ec9eeaf944a0d3c670a09d04d | [
"Apache-2.0"
] | null | null | null | # default_exp core
#hide
from nbdev.showdoc import *
from fastcore.test import *
# export
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import warnings
import torchvision
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize
import pytorch_lightning as pl
# from pytorch_lightning.metrics.functional import classification, f1
from pytorch_lightning.loggers import TensorBoardLogger
import fastai.vision.augment
import fastai.vision.data
# from fastai.vision.data import ImageDataLoaders
# from fastai.vision.augment import Resize
#export
data_dir = 'Datasets/cifar10'
transform = Compose([
Resize(256, interpolation=2),
CenterCrop(224),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
dm = ImageFolderDataModule(data_dir, 128, transform)
dm.setup()
for x,y in dm.train_dataloader():
test_eq(type(x), torch.Tensor)
test_eq(type(y), torch.Tensor)
break
#export
modelname = 'resnet18'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes))
test_eq(trainer.fit(model, dm), 1)
weight_path = 'FractalDB-1000_resnet50_epoch90.pth'
modelname = 'resnet50'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes), weight_path=weight_path)
test_eq(trainer.fit(model, dm), 1)
| 32.286432 | 127 | 0.651518 |
f477a30d49ec339fb9956b3d20c8d92ea00908ad | 641 | py | Python | src/modules/iam/module.py | pgorecki/python-ddd | 0073ccce35c651be263f5d7d3d63f9a49bc0b78a | [
"MIT"
] | 10 | 2022-03-16T19:26:51.000Z | 2022-03-31T23:50:51.000Z | src/modules/iam/module.py | pgorecki/python-ddd | 0073ccce35c651be263f5d7d3d63f9a49bc0b78a | [
"MIT"
] | null | null | null | src/modules/iam/module.py | pgorecki/python-ddd | 0073ccce35c651be263f5d7d3d63f9a49bc0b78a | [
"MIT"
] | 2 | 2022-03-16T19:26:54.000Z | 2022-03-27T13:21:02.000Z | from seedwork.application.modules import BusinessModule
from modules.iam.application.services import AuthenticationService
| 35.611111 | 94 | 0.730109 |
f477fec40612fa1a5fd9ffbd050a890ebec79d19 | 2,030 | py | Python | test_scripts/pyfora2/containerTests.py | ufora/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 571 | 2015-11-05T20:07:07.000Z | 2022-01-24T22:31:09.000Z | test_scripts/pyfora2/containerTests.py | timgates42/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 218 | 2015-11-05T20:37:55.000Z | 2021-05-30T03:53:50.000Z | test_scripts/pyfora2/containerTests.py | timgates42/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 40 | 2015-11-07T21:42:19.000Z | 2021-05-23T03:48:19.000Z | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pyfora
import ufora.config.Setup as Setup
import ufora.FORA.python.PurePython.DictTestCases as DictTestCases
import ufora.FORA.python.PurePython.ListTestCases as ListTestCases
import ufora.FORA.python.PurePython.TupleTestCases as TupleTestCases
import ufora.FORA.python.PurePython.ExecutorTestCommon as ExecutorTestCommon
import ufora.test.ClusterSimulation as ClusterSimulation
if __name__ == '__main__':
import ufora.config.Mainline as Mainline
Mainline.UnitTestMainline()
| 33.833333 | 76 | 0.733005 |
f4782d553047c0d6c83eb8c3ac341a236af78e5e | 597 | py | Python | src/utils/torch_common.py | quochungto/SIIM-COVID19-Detection | 88bc10d7b01d277d223c4dddd4c223a782616611 | [
"MIT"
] | null | null | null | src/utils/torch_common.py | quochungto/SIIM-COVID19-Detection | 88bc10d7b01d277d223c4dddd4c223a782616611 | [
"MIT"
] | null | null | null | src/utils/torch_common.py | quochungto/SIIM-COVID19-Detection | 88bc10d7b01d277d223c4dddd4c223a782616611 | [
"MIT"
] | null | null | null | import os
import gc
import random
import numpy as np
import torch
def memory_cleanup():
"""
Cleans up GPU memory
https://github.com/huggingface/transformers/issues/1742
"""
    for obj in gc.get_objects():
        if torch.is_tensor(obj):
            # `del obj` only drops this local reference; the actual
            # reclamation happens in gc.collect() below.
            del obj
    gc.collect()
    torch.cuda.empty_cache()
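# Assumed companion helper (not in this excerpt): the otherwise-unused
# os/random/numpy imports above suggest a seeding utility along these lines.
def seed_everything(seed=42):
    """Fixes random seeds across libraries for reproducibility."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)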
| 21.321429 | 59 | 0.673367 |
f4793bd8d4530ee80fabe88563d6a3ddbecb48d2 | 6,713 | py | Python | recipes/freeimage/all/conanfile.py | marsven/conan-center-index | d8bb4ad617cee02d8664e8341fa32cdf702e4284 | [
"MIT"
] | null | null | null | recipes/freeimage/all/conanfile.py | marsven/conan-center-index | d8bb4ad617cee02d8664e8341fa32cdf702e4284 | [
"MIT"
] | null | null | null | recipes/freeimage/all/conanfile.py | marsven/conan-center-index | d8bb4ad617cee02d8664e8341fa32cdf702e4284 | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake, tools
import os
import shutil
required_conan_version = ">=1.43.0"
| 40.439759 | 120 | 0.619097 |
f47944bb4b7b60683bb6b4d4d72854dfc4c98c2a | 110,180 | py | Python | src/google/appengine/datastore/datastore_query.py | myelin/appengine-python-standard | 2a99acd114f7cdd66fbad9bfd185384eef847c84 | [
"Apache-2.0"
] | null | null | null | src/google/appengine/datastore/datastore_query.py | myelin/appengine-python-standard | 2a99acd114f7cdd66fbad9bfd185384eef847c84 | [
"Apache-2.0"
] | null | null | null | src/google/appengine/datastore/datastore_query.py | myelin/appengine-python-standard | 2a99acd114f7cdd66fbad9bfd185384eef847c84 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A thin wrapper around datastore query RPC calls.
This provides wrappers around the internal only datastore_pb library and is
designed to be the lowest-level API to be used by all Python datastore client
libraries for executing queries. It provides a layer of protection so the actual
RPC syntax can change without affecting client libraries.
Any class, function, field or argument starting with an '_' is for INTERNAL use
only and should not be used by developers!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import collections
import functools
import pickle
import six
from google.appengine.api import cmp_compat
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_rpc
from google.protobuf import message
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
__all__ = ['Batch',
'Batcher',
'CompositeFilter',
'CompositeOrder',
'CorrelationFilter',
'Cursor',
'FetchOptions',
'FilterPredicate',
'Order',
'PropertyFilter',
'PropertyOrder',
'Query',
'QueryOptions',
'ResultsIterator',
'make_filter',
'apply_query',
'inject_results']
if datastore_pbs._CLOUD_DATASTORE_ENABLED:
from google.appengine.datastore.datastore_pbs import googledatastore
def make_filter(name, op, values):
"""Constructs a FilterPredicate from the given name, op and values.
Args:
name: A non-empty string, the name of the property to filter.
op: One of PropertyFilter._OPERATORS.keys(), the operator to use.
values: A supported value, the value to compare against.
Returns:
if values is a list, a CompositeFilter that uses AND to combine all
values, otherwise a PropertyFilter for the single value.
Raises:
datastore_errors.BadPropertyError: if the property name is invalid.
datastore_errors.BadValueError: if the property did not validate correctly
or the value was an empty list.
Other exception types (like OverflowError): if the property value does not
meet type-specific criteria.
"""
datastore_types.ValidateProperty(name, values)
properties = datastore_types.ToPropertyPb(name, values)
if isinstance(properties, list):
filters = [PropertyFilter(op, prop) for prop in properties]
return CompositeFilter(CompositeFilter.AND, filters)
else:
return PropertyFilter(op, properties)
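def _make_filter_example():
  """Illustrative usage (added, not part of the original module).

  The operator strings ('>', '=') are assumed to be among the keys of
  PropertyFilter._OPERATORS; property names are arbitrary examples.
  """
  single = make_filter('age', '>', 18)          # -> PropertyFilter
  multi = make_filter('tag', '=', ['a', 'b'])   # -> CompositeFilter(AND, ...)
  return single, multi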
def _make_key_value_map(entity, property_names):
"""Extracts key values from the given entity.
Args:
entity: The entity_pb2.EntityProto to extract values from.
property_names: The names of the properties from which to extract values.
Returns:
A dict mapping property names to a lists of key values.
"""
value_map = dict((six.ensure_text(name), []) for name in property_names)
for prop in entity.property:
prop_name = six.ensure_text(prop.name)
if prop_name in value_map:
value_map[prop_name].append(
datastore_types.PropertyValueToKeyValue(prop.value))
key_prop = six.ensure_text(datastore_types.KEY_SPECIAL_PROPERTY)
if key_prop in value_map:
value_map[key_prop] = [datastore_types.ReferenceToKeyValue(entity.key)]
return value_map
# Reconstructed class header: the methods below belong to PropertyFilter,
# whose constructor and operator tables are missing from this dump.
class PropertyFilter(_SinglePropertyFilter):
  """An immutable filter predicate that constrains a single property."""

  def _get_prop_name(self):
    return self._filter.property[0].name

  def _apply_to_value(self, value):
    if not hasattr(self, '_cmp_value'):
      if self._filter.op == datastore_pb.Query.Filter.EXISTS:
        return True
      self._cmp_value = datastore_types.PropertyValueToKeyValue(
          self._filter.property[0].value)
      self._condition = ('value %s self._cmp_value' %
                         self._OPERATORS_TO_PYTHON_OPERATOR[self._filter.op])
    return eval(self._condition)

  def _has_inequality(self):
    """Returns True if the filter predicate contains inequality filters."""
    return self._filter.op in self._INEQUALITY_OPERATORS_ENUM

  def _to_pb(self):
    """Returns the internal only pb representation."""
    return self._filter

  def _to_pb_v1(self, adapter):
    """Returns a googledatastore.Filter representation of the filter.

    Args:
      adapter: A datastore_rpc.AbstractAdapter
    """
    filter_pb = googledatastore.Filter()
    prop_filter_pb = filter_pb.property_filter
    adapter.get_query_converter()._v3_filter_to_v1_property_filter(
        self._filter, prop_filter_pb)
    return filter_pb
class _PropertyRangeFilter(_SinglePropertyFilter):
"""A filter predicate that represents a range of values.
Since we allow multi-valued properties there is a large difference between
"x > 0 AND x < 1" and "0 < x < 1." An entity with x = [-1, 2] will match the
first but not the second.
Since the datastore only allows a single inequality filter, multiple
in-equality filters are merged into a single range filter in the
datastore (unlike equality filters). This class is used by
datastore_query.CompositeFilter to implement the same logic.
"""
_start_key_value = None
_end_key_value = None
def intersect(self, other):
"""Returns a filter representing the intersection of self and other."""
if isinstance(other, PropertyFilter):
other = self.from_property_filter(other)
elif not isinstance(other, _PropertyRangeFilter):
raise datastore_errors.BadArgumentError(
'other argument should be a _PropertyRangeFilter (%r)' % (other,))
if other._get_prop_name() != self._get_prop_name():
raise datastore_errors.BadArgumentError(
'other argument must be on the same property (%s != %s)' %
(other._get_prop_name(), self._get_prop_name()))
start_source = None
if other._start:
if self._start:
result = cmp_compat.cmp(
self._get_start_key_value(), other._get_start_key_value())
if result == 0:
result = cmp_compat.cmp(other._start_incl, self._start_incl)
if result > 0:
start_source = self
elif result < 0:
start_source = other
else:
start_source = other
elif self._start:
start_source = self
end_source = None
if other._end:
if self._end:
result = cmp_compat.cmp(
self._get_end_key_value(), other._get_end_key_value())
if result == 0:
result = cmp_compat.cmp(self._end_incl, other._end_incl)
if result < 0:
end_source = self
elif result > 0:
end_source = other
else:
end_source = other
elif self._end:
end_source = self
if start_source:
if end_source in (start_source, None):
return start_source
result = _PropertyRangeFilter(start=start_source._start,
start_incl=start_source._start_incl,
end=end_source._end,
end_incl=end_source._end_incl)
result._start_key_value = start_source._start_key_value
result._end_key_value = end_source._end_key_value
return result
else:
return end_source or self
def _get_start_key_value(self):
if self._start_key_value is None:
self._start_key_value = datastore_types.PropertyValueToKeyValue(
self._start.value)
return self._start_key_value
def _get_end_key_value(self):
if self._end_key_value is None:
self._end_key_value = datastore_types.PropertyValueToKeyValue(
self._end.value)
return self._end_key_value
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
if self._start:
result = cmp_compat.cmp(self._get_start_key_value(), value)
if result > 0 or (result == 0 and not self._start_incl):
return False
if self._end:
result = cmp_compat.cmp(self._get_end_key_value(), value)
if result < 0 or (result == 0 and not self._end_incl):
return False
return True
def _get_prop_name(self):
if self._start:
return self._start.name
if self._end:
return self._end.name
assert False
def _to_pbs(self):
pbs = []
if self._start:
if self._start_incl:
op = datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL
else:
op = datastore_pb.Query.Filter.GREATER_THAN
pb = datastore_pb.Query.Filter()
pb.op = op
pb.property.add().CopyFrom(self._start)
pbs.append(pb)
if self._end:
if self._end_incl:
op = datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL
else:
op = datastore_pb.Query.Filter.LESS_THAN
pb = datastore_pb.Query.Filter()
pb.op = op
pb.property.add().CopyFrom(self._end)
pbs.append(pb)
return pbs
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter representation of the filter.
Args:
adapter: A datastore_rpc.AbstractAdapter.
"""
filter_pb = googledatastore.Filter()
composite_filter = filter_pb.composite_filter
composite_filter.op = googledatastore.CompositeFilter.AND
if self._start:
if self._start_incl:
op = googledatastore.PropertyFilter.GREATER_THAN_OR_EQUAL
else:
op = googledatastore.PropertyFilter.GREATER_THAN
pb = composite_filter.filters.add().property_filter
pb.op = op
pb.property.name = self._start.name
adapter.get_entity_converter().v3_property_to_v1_value(
self._start, True, pb.value)
if self._end:
if self._end_incl:
op = googledatastore.PropertyFilter.LESS_THAN_OR_EQUAL
else:
op = googledatastore.PropertyFilter.LESS_THAN
pb = composite_filter.filters.add().property_filter
pb.op = op
pb.property.name = self._end.name
adapter.get_entity_converter().v3_property_to_v1_value(
self._end, True, pb.value)
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return (self._start == other._start and
self._end == other._end and
(self._start_incl == other._start_incl or self._start is None) and
(self._end_incl == other._end_incl or self._end is None))
class _PropertyExistsFilter(FilterPredicate):
"""A FilterPredicate that matches entities containing specific properties.
Only works as an in-memory filter. Used internally to filter out entities
that don't have all properties in a given Order.
"""
class CorrelationFilter(FilterPredicate):
"""A filter that isolates correlated values and applies a sub-filter on them.
This filter assumes that every property used by the sub-filter should be
grouped before being passed to the sub-filter. The default grouping puts
each value in its own group. Consider:
e = {a: [1, 2], b: [2, 1, 3], c: 4}
A correlation filter with a sub-filter that operates on (a, b) will be tested
against the following 3 sets of values:
{a: 1, b: 2}
{a: 2, b: 1}
{b: 3}
In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but
CorrelationFilter('a = 2 AND b = 1') will. To apply an uncorrelated filter on
c, the filter must be applied in parallel to the correlation filter. For
example:
CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3'])
If 'c = 3' was included in the correlation filter, c would be grouped as well.
This would result in the following values:
{a: 1, b: 2, c: 3}
{a: 2, b: 1}
{b: 3}
If any set of correlated values match the sub-filter then the entity matches
the correlation filter.
"""
def __init__(self, subfilter):
"""Constructor.
Args:
subfilter: A FilterPredicate to apply to the correlated values
"""
self._subfilter = subfilter
def _apply_correlated(self, value_maps):
"""Applies sub-filter to the correlated value maps.
The default implementation matches when any value_map in value_maps
matches the sub-filter.
Args:
value_maps: A list of correlated value_maps.
Returns:
      True if any of the correlated value maps matches the sub-filter.
"""
for map in value_maps:
if self._subfilter._apply(map):
return True
return False
def _group_values(self, prop, values):
"""A function that groups the given values.
Override this function to introduce custom grouping logic. The default
implementation assumes each value belongs in its own group.
Args:
      prop: The name of the property whose values are being grouped.
values: A list of opaque values.
Returns:
A list of lists of grouped values.
"""
return [[value] for value in values]
class CompositeFilter(FilterPredicate):
"""An immutable filter predicate that combines other predicates.
This class proactively merges sub-filters that are combined using the same
operator. For example:
CompositeFilter(AND, [f1, f2, CompositeFilter(AND, [f3, f4]), f5, f6])
is equivalent to:
CompositeFilter(AND, [f1, f2, f3, f4, f5, f6])
Currently filters can only be combined using an AND operator.
"""
AND = 'and'
_OPERATORS = frozenset([AND])
def __init__(self, op, filters):
"""Constructor.
Args:
op: The operator to use to combine the given filters
filters: A list of one or more filters to combine
Raises:
      datastore_errors.BadArgumentError if op is not in CompositeFilter._OPERATORS
or filters is not a non-empty list containing only FilterPredicates.
"""
if not op in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator (%s)' % (op,))
if not filters or not isinstance(filters, (list, tuple)):
raise datastore_errors.BadArgumentError(
'filters argument should be a non-empty list (%r)' % (filters,))
super(CompositeFilter, self).__init__()
self._op = op
flattened = []
for f in filters:
if isinstance(f, CompositeFilter) and f._op == self._op:
flattened.extend(f._filters)
elif isinstance(f, FilterPredicate):
flattened.append(f)
else:
raise datastore_errors.BadArgumentError(
'filters argument must be a list of FilterPredicates, found (%r)' %
(f,))
if op == self.AND:
filters = flattened
flattened = []
ineq_map = {}
for f in filters:
if (isinstance(f, _PropertyRangeFilter) or
(isinstance(f, PropertyFilter) and f._has_inequality())):
name = f._get_prop_name()
index = ineq_map.get(name)
if index is not None:
range_filter = flattened[index]
flattened[index] = range_filter.intersect(f)
else:
if isinstance(f, PropertyFilter):
range_filter = _PropertyRangeFilter.from_property_filter(f)
else:
range_filter = f
ineq_map[name] = len(flattened)
flattened.append(range_filter)
else:
flattened.append(f)
self._filters = tuple(flattened)
def __repr__(self):
op = self.op
if op == self.AND:
op = 'AND'
else:
op = str(op)
return '%s(%s, %r)' % (self.__class__.__name__, op, list(self.filters))
def _get_prop_names(self):
names = set()
for f in self._filters:
names |= f._get_prop_names()
return names
def _apply(self, value_map):
if self._op == self.AND:
for f in self._filters:
if not f._apply(value_map):
return False
return True
raise NotImplementedError
def _prune(self, value_map):
if self._op == self.AND:
matches = collections.defaultdict(set)
for f in self._filters:
props = f._get_prop_names()
local_value_map = dict((k, v) for k, v in value_map.items()
if k in props)
if not f._prune(local_value_map):
return False
for (prop, values) in local_value_map.items():
matches[prop].update(values)
for prop, value_set in matches.items():
value_map[prop] = sorted(value_set)
return True
raise NotImplementedError
def _to_pbs(self):
"""Returns the internal only pb representation."""
pbs = []
for f in self._filters:
pbs.extend(f._to_pbs())
return pbs
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
if not self._filters:
return None
if len(self._filters) == 1:
return self._filters[0]._to_pb_v1(adapter)
pb = googledatastore.Filter()
comp_pb = pb.composite_filter
if self.op == self.AND:
comp_pb.op = googledatastore.CompositeFilter.AND
else:
raise datastore_errors.BadArgumentError(
'Datastore V4 only supports CompositeFilter with AND operator.')
for f in self._filters:
comp_pb.filters.add().CopyFrom(f._to_pb_v1(adapter))
return pb
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeFilter, self).__eq__(other)
if len(self._filters) == 1:
result = self._filters[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._filters[0])
return result
return NotImplemented
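def _composite_filter_flattening_demo():
  """Illustrative sketch (added): nested AND filters flatten to one level,
  as described in the CompositeFilter docstring. Property names here are
  arbitrary examples.
  """
  f1 = make_filter('a', '=', 1)
  f2 = make_filter('b', '=', 2)
  f3 = make_filter('c', '=', 3)
  nested = CompositeFilter(CompositeFilter.AND,
                           [f1, CompositeFilter(CompositeFilter.AND, [f2, f3])])
  assert len(nested._filters) == 3
  return nested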
class _IgnoreFilter(_SinglePropertyFilter):
"""A filter that removes all entities with the given keys."""
class _DedupingFilter(_IgnoreFilter):
"""A filter that removes duplicate keys."""
class Order(_PropertyComponent):
"""A base class that represents a sort order on a query.
  All sub-classes must be immutable as these are often stored without making
  a defensive copy.
This class can be used as either the cmp or key arg in sorted() or
list.sort(). To provide a stable ordering a trailing key ascending order is
always used.
"""
def _key(self, lhs_value_map):
"""Creates a key for the given value map."""
raise NotImplementedError
def _cmp(self, lhs_value_map, rhs_value_map):
"""Compares the given value maps."""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a filter pb."""
raise NotImplementedError
def _to_pb_v1(self, adapter):
"""Internal only function to generate a v1 filter pb.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
raise NotImplementedError
def key(self, entity, filter_predicate=None):
"""Constructs a "key" value for the given entity based on the current order.
This function can be used as the key argument for list.sort() and sorted().
Args:
entity: The entity_pb2.EntityProto to convert
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
A key value that identifies the position of the entity when sorted by
the current order.
"""
names = self._get_prop_names()
names.add(datastore_types.KEY_SPECIAL_PROPERTY)
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
value_map = _make_key_value_map(entity, names)
if filter_predicate is not None:
filter_predicate._prune(value_map)
return (self._key(value_map),
value_map[datastore_types.KEY_SPECIAL_PROPERTY])
def cmp(self, lhs, rhs, filter_predicate=None):
"""Compares the given values taking into account any filters.
This function can be used as the cmp argument for list.sort() and sorted().
    This function is slightly more efficient than Order.key when comparing two
    entities; however, it is much less efficient when sorting a list of entities.
Args:
lhs: An entity_pb2.EntityProto
rhs: An entity_pb2.EntityProto
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
An integer <, = or > 0 representing the operator that goes in between lhs
      and rhs to create a true statement.
"""
names = self._get_prop_names()
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
lhs_value_map = _make_key_value_map(lhs, names)
rhs_value_map = _make_key_value_map(rhs, names)
if filter_predicate is not None:
filter_predicate._prune(lhs_value_map)
filter_predicate._prune(rhs_value_map)
result = self._cmp(lhs_value_map, rhs_value_map)
if result:
return result
if not lhs.HasField('key') and not rhs.HasField('key'):
return 0
lhs_key = (lhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(lhs.key))
rhs_key = (rhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(rhs.key))
return cmp_compat.cmp(lhs_key, rhs_key)
# Reconstructed header: the methods below belong to PropertyOrder, whose
# constructor, __repr__ and direction constants are missing from this dump.
# The constant definitions here are assumptions consistent with how
# self.ASCENDING is used below.
class PropertyOrder(Order):
  """An immutable class that represents a sort order for a single property."""

  ASCENDING = datastore_pb.Query.Order.ASCENDING
  DESCENDING = datastore_pb.Query.Order.DESCENDING

  def _get_prop_names(self):
    return set([self.__order.property])

  def _key(self, lhs_value_map):
    lhs_values = lhs_value_map[self.__order.property]
    if not lhs_values:
      raise datastore_errors.BadArgumentError(
          'Missing value for property (%s)' % self.__order.property)
    if self.__order.direction == self.ASCENDING:
      return min(lhs_values)
    else:
      return _ReverseOrder(max(lhs_values))

  def _cmp(self, lhs_value_map, rhs_value_map):
    lhs_values = lhs_value_map[self.__order.property]
    rhs_values = rhs_value_map[self.__order.property]
    if not lhs_values and not rhs_values:
      return 0
    if not lhs_values:
      raise datastore_errors.BadArgumentError(
          'LHS missing value for property (%s)' % self.__order.property)
    if not rhs_values:
      raise datastore_errors.BadArgumentError(
          'RHS missing value for property (%s)' % self.__order.property)
    if self.__order.direction == self.ASCENDING:
      return cmp_compat.cmp(min(lhs_values), min(rhs_values))
    else:
      return cmp_compat.cmp(max(rhs_values), max(lhs_values))

  def _to_pb(self):
    """Returns the internal only pb representation."""
    return self.__order

  def _to_pb_v1(self, adapter):
    """Returns a googledatastore.PropertyOrder representation of the order.

    Args:
      adapter: A datastore_rpc.AbstractAdapter.
    """
    v1_order = googledatastore.PropertyOrder()
    adapter.get_query_converter().v3_order_to_v1_order(self.__order, v1_order)
    return v1_order
class CompositeOrder(Order):
"""An immutable class that represents a sequence of Orders.
This class proactively flattens sub-orders that are of type CompositeOrder.
For example:
CompositeOrder([O1, CompositeOrder([02, 03]), O4])
is equivalent to:
CompositeOrder([O1, 02, 03, O4])
"""
def __init__(self, orders):
"""Constructor.
Args:
orders: A list of Orders which are applied in order.
"""
if not isinstance(orders, (list, tuple)):
raise datastore_errors.BadArgumentError(
'orders argument should be list or tuple (%r)' % (orders,))
super(CompositeOrder, self).__init__()
flattened = []
for order in orders:
if isinstance(order, CompositeOrder):
flattened.extend(order._orders)
elif isinstance(order, Order):
flattened.append(order)
else:
raise datastore_errors.BadArgumentError(
'orders argument should only contain Order (%r)' % (order,))
self._orders = tuple(flattened)
def size(self):
"""Returns the number of sub-orders the instance contains."""
return len(self._orders)
def _to_pbs(self):
"""Returns an ordered list of internal only pb representations."""
return [order._to_pb() for order in self._orders]
def _to_pb_v1(self, adapter):
"""Returns an ordered list of googledatastore.PropertyOrder.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
return [order._to_pb_v1(adapter) for order in self._orders]
class FetchOptions(datastore_rpc.Configuration):
"""An immutable class that contains all options for fetching results.
These options apply to any request that pulls results from a query.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
  functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see option documentation below
for details.
"""
# Reconstructed header: this method belongs to the Cursor class; the rest of
# its definition is missing from this dump.
class Cursor(_BaseComponent):
  """An immutable class that represents a relative position in a query."""

  def __setstate__(self, state):
    if '_Cursor__compiled_cursor' in state:
      self.__cursor_bytes = state['_Cursor__compiled_cursor'].SerializeToString()
    else:
      self.__dict__ = state
class _QueryKeyFilter(_BaseComponent):
"""A class that implements the key filters available on a Query."""
def __call__(self, entity_or_reference):
"""Apply the filter.
Accepts either an entity or a reference to avoid the need to extract keys
from entities when we have a list of entities (which is a common case).
Args:
entity_or_reference: Either an entity_pb2.EntityProto or
entity_pb2.Reference.
"""
if isinstance(entity_or_reference, entity_pb2.Reference):
key = entity_or_reference
elif isinstance(entity_or_reference, entity_pb2.EntityProto):
key = entity_or_reference.key
else:
raise datastore_errors.BadArgumentError(
'entity_or_reference argument must be an entity_pb2.EntityProto ' +
six.ensure_str('or entity_pb2.Reference (%r)' %
(entity_or_reference), 'utf-8'))
return (six.ensure_text(key.app, 'utf-8') == self.__app and
six.ensure_text(key.name_space, 'utf-8') == self.__namespace and
(not self.__kind or key.path.element[-1].type == self.__kind) and
(not self.__path or
key.path.element[0:len(self.__path)] == self.__path))
def _to_pb(self):
"""Returns an internal pb representation."""
pb = datastore_pb.Query()
pb.app = self.__app
datastore_types.SetNamespace(pb, self.__namespace)
if self.__kind is not None:
pb.kind = self.__kind
if self.__ancestor:
ancestor = pb.ancestor
ancestor.CopyFrom(self.__ancestor)
return pb
def _to_pb_v1(self, adapter):
"""Returns a v1 internal proto representation of the query key filter.
Args:
adapter: A datastore_rpc.AbstractAdapter.
Returns:
A tuple (googledatastore.RunQueryRequest, googledatastore.Filter).
The second tuple value is a Filter representing the ancestor portion of the
query. If there is no ancestor constraint, this value will be None.
"""
pb = googledatastore.RunQueryRequest()
partition_id = pb.partition_id
partition_id.project_id = (
adapter.get_entity_converter().app_to_project_id(self.__app))
if self.__namespace:
partition_id.namespace_id = self.__namespace
if self.__kind is not None:
pb.query.kind.add().name = self.__kind
ancestor_filter = None
if self.__ancestor:
ancestor_filter = googledatastore.Filter()
ancestor_prop_filter = ancestor_filter.property_filter
ancestor_prop_filter.op = (
googledatastore.PropertyFilter.HAS_ANCESTOR)
prop_pb = ancestor_prop_filter.property
prop_pb.name = datastore_types.KEY_SPECIAL_PROPERTY
adapter.get_entity_converter().v3_to_v1_key(
self.ancestor,
ancestor_prop_filter.value.key_value)
return pb, ancestor_filter
class _BaseQuery(_BaseComponent):
"""A base class for query implementations."""
def run(self, conn, query_options=None):
"""Runs the query using provided datastore_rpc.Connection.
Args:
conn: The datastore_rpc.Connection to use
query_options: Optional query options to use
Returns:
A Batcher that implicitly fetches query results asynchronously.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
return Batcher(query_options, self.run_async(conn, query_options))
def run_async(self, conn, query_options=None):
"""Runs the query using the provided datastore_rpc.Connection.
Args:
conn: the datastore_rpc.Connection on which to run the query.
query_options: Optional QueryOptions with which to run the query.
Returns:
An async object that can be used to grab the first Batch. Additional
batches can be retrieved by calling Batch.next_batch/next_batch_async.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
raise NotImplementedError
class Query(_BaseQuery):
"""An immutable class that represents a query signature.
A query signature consists of a source of entities (specified as app,
namespace and optionally kind and ancestor) as well as a FilterPredicate,
grouping and a desired ordering.
"""
def __repr__(self):
args = []
args.append('app=%r' % six.ensure_str(self.app))
ns = self.namespace
if ns:
args.append('namespace=%r' % six.ensure_str(ns))
kind = self.kind
if kind is not None:
args.append('kind=%r' % six.ensure_str(kind))
ancestor = self.ancestor
if ancestor is not None:
websafe = base64.urlsafe_b64encode(ancestor.SerializeToString())
args.append('ancestor=<%s>' % six.ensure_str(websafe))
filter_predicate = self.filter_predicate
if filter_predicate is not None:
args.append('filter_predicate=%r' % filter_predicate)
order = self.order
if order is not None:
args.append('order=%r' % order)
group_by = self.group_by
if group_by is not None:
args.append('group_by=%r' % (tuple(six.ensure_str(x) for x in group_by),))
read_time_us = self.read_time_us
if read_time_us is not None:
args.append('read_time_us=%r' % (read_time_us,))
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
req = self._to_pb_v1(conn, query_options)
else:
req = self._to_pb(conn, query_options)
return Batch.create_async(self, query_options, conn, req,
start_cursor=start_cursor)
def _to_pb_v1(self, conn, query_options):
"""Returns a googledatastore.RunQueryRequest."""
v1_req, v1_ancestor_filter = self._key_filter._to_pb_v1(conn.adapter)
v1_query = v1_req.query
if self.filter_predicate:
filter_predicate_pb = self._filter_predicate._to_pb_v1(conn.adapter)
if self.filter_predicate and v1_ancestor_filter:
comp_filter_pb = v1_query.filter.composite_filter
comp_filter_pb.op = googledatastore.CompositeFilter.AND
comp_filter_pb.filters.add().CopyFrom(filter_predicate_pb)
comp_filter_pb.filters.add().CopyFrom(v1_ancestor_filter)
elif self.filter_predicate:
v1_query.filter.CopyFrom(filter_predicate_pb)
elif v1_ancestor_filter:
v1_query.filter.CopyFrom(v1_ancestor_filter)
if self._order:
for order in self._order._to_pb_v1(conn.adapter):
v1_query.order.add().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
prop_ref_pb = v1_query.projection.add().property
prop_ref_pb.name = datastore_pbs.PROPERTY_NAME_KEY
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
for prop in projection:
prop_ref_pb = v1_query.projection.add().property
prop_ref_pb.name = prop
if self._group_by:
for group_by in self._group_by:
v1_query.distinct_on.add().name = group_by
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
v1_query.limit.value = limit
count = QueryOptions.batch_size(query_options, conn.config)
if count is None:
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is not None:
pass
if query_options.offset:
v1_query.offset = query_options.offset
if query_options.start_cursor is not None:
v1_query.start_cursor = query_options.start_cursor.to_bytes()
if query_options.end_cursor is not None:
v1_query.end_cursor = query_options.end_cursor.to_bytes()
conn._set_request_read_policy(v1_req, query_options)
conn._set_request_transaction(v1_req)
return v1_req
def _to_pb(self, conn, query_options):
"""Returns the internal only pb representation."""
pb = self._key_filter._to_pb()
if self._filter_predicate:
for f in self._filter_predicate._to_pbs():
pb.filter.add().CopyFrom(f)
if self._order:
for order in self._order._to_pbs():
pb.order.add().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
pb.keys_only = True
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
pb.property_name.extend(projection)
if self._group_by:
pb.group_by_property_name.extend(self._group_by)
if QueryOptions.produce_cursors(query_options, conn.config):
pb.compile = True
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
pb.limit = limit
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is None:
count = QueryOptions.batch_size(query_options, conn.config)
if count is not None:
pb.count = count
if query_options.offset:
pb.offset = query_options.offset
if query_options.start_cursor is not None:
try:
pb.compiled_cursor.ParseFromString(
query_options.start_cursor.to_bytes())
except message.DecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if query_options.end_cursor is not None:
try:
pb.end_compiled_cursor.ParseFromString(
query_options.end_cursor.to_bytes())
except message.DecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if ((query_options.hint == QueryOptions.ORDER_FIRST and len(pb.order)) or
(query_options.hint == QueryOptions.ANCESTOR_FIRST and
pb.HasField('ancestor')) or
(query_options.hint == QueryOptions.FILTER_FIRST and pb.filter)):
pb.hint = query_options.hint
if self.read_time_us is not None:
pb.read_time_us = self.read_time_us
conn._set_request_read_policy(pb, query_options)
conn._set_request_transaction(pb)
return pb
def _validate_projection_and_group_by(self, projection, group_by):
"""Validates that a query's projection and group by match.
Args:
projection: A set of string property names in the projection.
group_by: A set of string property names in the group by.
Raises:
datastore_errors.BadRequestError: if the projection and group
by sets are not equal.
"""
if projection:
if group_by:
extra = set(projection) - set(group_by)
if extra:
raise datastore_errors.BadRequestError(
'projections includes properties not in the group_by argument: %s'
% extra)
elif group_by:
raise datastore_errors.BadRequestError(
'cannot specify group_by without a projection')
def apply_query(query, entities, _key=None):
"""Performs the given query on a set of in-memory results.
This function can perform queries impossible in the datastore (e.g. a query
with multiple inequality filters on different properties) because all
operations are done in memory. For queries that can also be executed on
the datastore, the results produced by this function may not use the same
implicit ordering as the datastore. To ensure compatibility, explicit
ordering must be used (e.g. 'ORDER BY ineq_prop, ..., __key__').
Order by __key__ should always be used when a consistent result is desired
(unless there is a sort order on another globally unique property).
Args:
query: a datastore_query.Query to apply
entities: a list of results, of arbitrary type, on which to apply the query.
_key: a function that takes an element of the result array as an argument
and must return an entity_pb2.EntityProto. If not specified, the
identity function is used (and entities must be a list of
entity_pb2.EntityProto).
Returns:
A subset of entities, filtered and ordered according to the query.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument must be a datastore_query.Query (%r)' % (query,))
if not isinstance(entities, list):
raise datastore_errors.BadArgumentError(
'entities argument must be a list (%r)' % (entities,))
key = _key or (lambda x: x)
filtered_results = [r for r in entities if query._key_filter(key(r))]
if not query._order:
if query._filter_predicate:
return [r for r in filtered_results if query._filter_predicate(key(r))]
return filtered_results
names = query._order._get_prop_names()
if query._filter_predicate:
names |= query._filter_predicate._get_prop_names()
exists_filter = _PropertyExistsFilter(names)
value_maps = []
for result in filtered_results:
value_map = _make_key_value_map(key(result), names)
if exists_filter._apply(value_map) and (
not query._filter_predicate or
query._filter_predicate._prune(value_map)):
value_map['__result__'] = result
value_maps.append(value_map)
value_maps.sort(key=functools.cmp_to_key(query._order._cmp))
return [value_map['__result__'] for value_map in value_maps]
class _AugmentedQuery(_BaseQuery):
"""A query that combines a datastore query with in-memory filters/results."""
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
if self._query._order:
changes = {'keys_only': False}
else:
changes = {}
if self._in_memory_filter or self._in_memory_results:
in_memory_offset = query_options.offset
in_memory_limit = query_options.limit
if in_memory_limit is not None:
if self._in_memory_filter is None:
changes['limit'] = in_memory_limit
elif self._max_filtered_count is not None:
changes['limit'] = in_memory_limit + self._max_filtered_count
else:
changes['limit'] = None
if in_memory_offset:
changes['offset'] = None
if changes.get('limit', None) is not None:
changes['limit'] += in_memory_offset
else:
in_memory_offset = None
else:
in_memory_offset = None
in_memory_limit = None
modified_query_options = QueryOptions(config=query_options, **changes)
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
req = self._query._to_pb_v1(conn, modified_query_options)
else:
req = self._query._to_pb(conn, modified_query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
return _AugmentedBatch.create_async(self, modified_query_options, conn, req,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
class Batch(object):
"""A batch of results returned by a query.
This class contains a batch of results returned from the datastore and
relevant metadata. This metadata includes:
query: The query that produced this batch
query_options: The QueryOptions used to run the query. This does not
contain any options passed to the .next_batch() call that created the
current batch.
start_cursor, end_cursor: These are the cursors that can be used
with a query to re-fetch this batch. They can also be used to
find all entities before or after the given batch (by using start_cursor as
an end cursor or vice versa). start_cursor can also be advanced to
point to a position within the batch using Cursor.advance().
skipped_results: the number of results skipped because of the offset
given to the request that generated it. This can be set either on
the original Query.run() request or in subsequent .next_batch() calls.
more_results: If this is true there are more results that can be retrieved
either by .next_batch() or Batcher.next().
This class is also able to fetch the next batch of the query using
.next_batch(). As batches of results must be fetched serially, .next_batch()
can only be called once. Additional calls to .next_batch() will return None.
When there are no more batches .next_batch() will return None as well. Note
that batches returned by iterating over Batcher will always return None for
.next_batch() as the Batcher handles fetching the next batch automatically.
A Batch typically represents the result of a single RPC request. The datastore
operates on a "best effort" basis so the batch returned by .next_batch()
or Query.run_async().get_result() may not have satisfied the requested offset
or number of results (specified through FetchOptions.offset and
FetchOptions.batch_size respectively). To satisfy these restrictions
additional batches may be needed (with FetchOptions that specify the remaining
offset or results needed). The Batcher class hides these limitations.
"""
__skipped_cursor = None
__end_cursor = None
def next_batch(self, fetch_options=None):
"""Synchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
A new Batch of results or None if either the next batch has already been
fetched or there are no more results.
"""
async_ = self.next_batch_async(fetch_options)
if async_ is None:
return None
return async_.get_result()
def _compiled_query(self):
return self._batch_shared.compiled_query
def cursor(self, index):
"""Gets the cursor that points just after the result at index - 1.
The index is relative to the first result in .results. Since start_cursor
points to the position before the first skipped result, the range of
indexes this function supports is limited to
[-skipped_results, len(results)].
For example, using start_cursor=batch.cursor(i) and
end_cursor=batch.cursor(j) will return the results found in
batch.results[i:j]. Note that any result added in the range (i-1, j]
will appear in the new query's results.
Warning: Any index in the range (-skipped_results, 0) may cause
continuation to miss or duplicate results if outside a transaction.
Args:
index: An int, the index relative to the first result before which the
cursor should point.
Returns:
A Cursor that points to a position just after the result index - 1,
which if used as a start_cursor will cause the first result to be
batch.results[index].
"""
if not isinstance(index, six.integer_types):
raise datastore_errors.BadArgumentError(
'index argument should be an integer (%r)' % (index,))
if not -self._skipped_results <= index <= len(self.__results):
raise datastore_errors.BadArgumentError(
'index argument must be in the inclusive range [%d, %d]' %
(-self._skipped_results, len(self.__results)))
if index == -self._skipped_results:
return self.__start_cursor
elif (index == 0 and
self.__skipped_cursor):
return self.__skipped_cursor
elif index > 0 and self.__result_cursors:
return self.__result_cursors[index - 1]
elif index == len(self.__results):
return self.__end_cursor
else:
return self.__start_cursor.advance(index + self._skipped_results,
self._batch_shared.query,
self._batch_shared.conn)
def next_batch_async(self, fetch_options=None):
"""Asynchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
An async object that can be used to get the next Batch or None if either
the next batch has already been fetched or there are no more results.
"""
if not self.__datastore_cursor:
return None
fetch_options, next_batch = self._make_next_batch(fetch_options)
if (fetch_options is not None and
not FetchOptions.is_configuration(fetch_options)):
raise datastore_errors.BadArgumentError('Invalid fetch options.')
config = self._batch_shared.query_options.merge(fetch_options)
conn = next_batch._batch_shared.conn
requested_offset = 0
if fetch_options is not None and fetch_options.offset is not None:
requested_offset = fetch_options.offset
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
if self._batch_shared.expected_offset != requested_offset:
raise datastore_errors.BadArgumentError(
'Cannot request the next batch with a different offset than '
' expected. Expected: %s, Got: %s.'
% (self._batch_shared.expected_offset, requested_offset))
limit = self._batch_shared.remaining_limit
next_options = QueryOptions(offset=self._batch_shared.expected_offset,
limit=limit,
start_cursor=self.__datastore_cursor)
config = config.merge(next_options)
result = next_batch._make_query_rpc_call(
config,
self._batch_shared.query._to_pb_v1(conn, config))
else:
result = next_batch._make_next_rpc_call(config,
self._to_pb(fetch_options))
self.__datastore_cursor = None
return result
def _to_pb(self, fetch_options=None):
req = datastore_pb.NextRequest()
if FetchOptions.produce_cursors(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config):
req.compile = True
count = FetchOptions.batch_size(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config)
if count is not None:
req.count = count
if fetch_options is not None and fetch_options.offset:
req.offset = fetch_options.offset
req.cursor.CopyFrom(self.__datastore_cursor)
return req
def _extend(self, next_batch):
"""Combines the current batch with the next one. Called by batcher."""
self.__datastore_cursor = next_batch.__datastore_cursor
next_batch.__datastore_cursor = None
self.__more_results = next_batch.__more_results
if not self.__results:
self.__skipped_cursor = next_batch.__skipped_cursor
self.__results.extend(next_batch.__results)
self.__result_cursors.extend(next_batch.__result_cursors)
self.__end_cursor = next_batch.__end_cursor
self._skipped_results += next_batch._skipped_results
def _make_query_rpc_call(self, config, req):
"""Makes a RunQuery call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
_api_version = self._batch_shared.conn._api_version
if _api_version == datastore_rpc._CLOUD_DATASTORE_V1:
return self._batch_shared.conn._make_rpc_call(
config, 'RunQuery', req, googledatastore.RunQueryResponse(),
self.__v1_run_query_response_hook)
return self._batch_shared.conn._make_rpc_call(config, 'RunQuery', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
def _make_next_rpc_call(self, config, req):
"""Makes a Next call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
return self._batch_shared.conn._make_rpc_call(config, 'Next', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
_need_index_header = 'The suggested index for this query is:'
def __query_result_hook(self, rpc):
"""Internal method used as get_result_hook for RunQuery/Next operation."""
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError as exc:
if isinstance(rpc.request, datastore_pb.Query):
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(
rpc.request)
props = datastore_index.GetRecommendedIndexProperties(props)
yaml = datastore_index.IndexYamlForQuery(kind, ancestor, props)
xml = datastore_index.IndexXmlForQuery(kind, ancestor, props)
raise datastore_errors.NeedIndexError(
'\n'.join([str(exc), self._need_index_header, yaml]),
original_message=str(exc), header=self._need_index_header,
yaml_index=yaml, xml_index=xml)
raise
query_result = rpc.response
self._batch_shared.process_batch(query_result)
if query_result.HasField('skipped_results_compiled_cursor'):
self.__skipped_cursor = Cursor(
_cursor_bytes=query_result.skipped_results_compiled_cursor
.SerializeToString())
self.__result_cursors = [
Cursor(_cursor_bytes=result.SerializeToString())
for result in query_result.result_compiled_cursor
]
if query_result.HasField('compiled_cursor'):
self.__end_cursor = Cursor(
_cursor_bytes=query_result.compiled_cursor.SerializeToString())
self._skipped_results = query_result.skipped_results
if query_result.more_results:
self.__datastore_cursor = query_result.cursor
self.__more_results = True
else:
self._end()
self.__results = self._process_results(query_result.result)
return self
def _end(self):
"""Changes the internal state so that no more batches can be produced."""
self.__datastore_cursor = None
self.__more_results = False
def _make_next_batch(self, fetch_options):
"""Creates the object to store the next batch.
Args:
fetch_options: The datastore_query.FetchOptions passed in by the user or
None.
Returns:
A tuple containing the fetch options that should be used internally and
the object that should be used to contain the next batch.
"""
return fetch_options, Batch(self._batch_shared,
start_cursor=self.__end_cursor)
def _process_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of entity_pb2.EntityProto's returned by the datastore
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_to_query_result
return [converter(result, self._batch_shared.query_options)
for result in results]
def _process_v1_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of googledatastore.EntityResults.
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_v1_to_query_result
return [converter(result.entity, self._batch_shared.query_options)
for result in results]
class _AugmentedBatch(Batch):
"""A batch produced by a datastore_query._AugmentedQuery."""
def cursor(self, index):
raise NotImplementedError
def _extend(self, next_batch):
super(_AugmentedBatch, self)._extend(next_batch)
self.__in_memory_limit = next_batch.__in_memory_limit
self.__in_memory_offset = next_batch.__in_memory_offset
self.__next_index = next_batch.__next_index
def _process_v1_results(self, results):
"""Process V4 results by converting to V3 and calling _process_results."""
v3_results = []
is_projection = bool(self.query_options.projection)
for v1_result in results:
v3_entity = entity_pb2.EntityProto()
self._batch_shared.conn.adapter.get_entity_converter().v1_to_v3_entity(
v1_result.entity, v3_entity, is_projection)
v3_results.append(v3_entity)
return self._process_results(v3_results)
def _process_results(self, results):
in_memory_filter = self._batch_shared.augmented_query._in_memory_filter
if in_memory_filter:
results = list(filter(in_memory_filter, results))
in_memory_results = self._batch_shared.augmented_query._in_memory_results
if in_memory_results and self.__next_index < len(in_memory_results):
original_query = super(_AugmentedBatch, self).query
if original_query._order:
if results:
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i = 0
while i < len(results):
result = results[i]
result_key = original_query._order.key(result)
while next_key <= result_key:
results.insert(i, next_result)
i += 1
self.__next_index += 1
if self.__next_index >= len(in_memory_results):
break
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i += 1
elif results or not super(_AugmentedBatch, self).more_results:
results = in_memory_results + results
self.__next_index = len(in_memory_results)
if self.__in_memory_offset:
assert not self._skipped_results
offset = min(self.__in_memory_offset, len(results))
if offset:
self._skipped_results += offset
self.__in_memory_offset -= offset
results = results[offset:]
if self.__in_memory_limit is not None:
results = results[:self.__in_memory_limit]
self.__in_memory_limit -= len(results)
if self.__in_memory_limit <= 0:
self._end()
return super(_AugmentedBatch, self)._process_results(results)
def _make_next_batch(self, fetch_options):
in_memory_offset = FetchOptions.offset(fetch_options)
augmented_query = self._batch_shared.augmented_query
if in_memory_offset and (augmented_query._in_memory_filter or
augmented_query._in_memory_results):
fetch_options = FetchOptions(offset=0)
else:
in_memory_offset = None
return (fetch_options,
_AugmentedBatch(self._batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=self.__in_memory_limit,
start_cursor=self.end_cursor,
next_index=self.__next_index))
class Batcher(object):
"""A class that implements the Iterator interface for Batches.
Typically constructed by a call to Query.run().
The class hides the "best effort" nature of the datastore by potentially
making multiple requests to the datastore and merging the resulting batches.
This is accomplished efficiently by prefetching results and mixing both
non-blocking and blocking calls to the datastore as needed.
Iterating through batches is almost always more efficient than pulling all
results at once as RPC latency is hidden by asynchronously prefetching
results.
The batches produced by this class cannot be used to fetch the next batch
(through Batch.next_batch()), as the request for the next batch has already
been sent before the current batch is returned.
"""
ASYNC_ONLY = None
AT_LEAST_OFFSET = 0
AT_LEAST_ONE = object()
def __init__(self, query_options, first_async_batch):
"""Constructor.
Although this class can be manually constructed, it is preferable to use
Query.run(query_options).
Args:
query_options: The QueryOptions used to create the first batch.
first_async_batch: The first batch produced by
Query.run_async(query_options).
"""
self.__next_batch = first_async_batch
self.__initial_offset = QueryOptions.offset(query_options) or 0
self.__skipped_results = 0
def next(self):
"""Get the next batch. See .next_batch()."""
return self.next_batch(self.AT_LEAST_ONE)
def next_batch(self, min_batch_size):
"""Get the next batch.
The batch returned by this function cannot be used to fetch the next batch
(through Batch.next_batch()); on such batches that call will always return
None. To retrieve the next batch use .next() or .next_batch(N).
This function may return a batch larger than min_batch_size, but will never
return a smaller one unless there are no more results.
Special values can be used for min_batch_size:
ASYNC_ONLY - Do not perform any synchronous fetches from the datastore
even if this produces a batch with no results.
AT_LEAST_OFFSET - Only pull enough results to satisfy the offset.
AT_LEAST_ONE - Pull batches until at least one result is returned.
Args:
min_batch_size: The minimum number of results to retrieve or one of
(ASYNC_ONLY, AT_LEAST_OFFSET, AT_LEAST_ONE)
Returns:
The next Batch of results.
"""
if min_batch_size in (Batcher.ASYNC_ONLY, Batcher.AT_LEAST_OFFSET,
Batcher.AT_LEAST_ONE):
exact = False
else:
exact = True
datastore_types.ValidateInteger(min_batch_size,
'min_batch_size',
datastore_errors.BadArgumentError)
if not self.__next_batch:
raise StopIteration
batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += batch.skipped_results
if min_batch_size is not Batcher.ASYNC_ONLY:
if min_batch_size is Batcher.AT_LEAST_ONE:
min_batch_size = 1
needed_results = min_batch_size - len(batch.results)
while (batch.more_results and
(self.__skipped_results < self.__initial_offset or
needed_results > 0)):
if batch.query_options.batch_size:
batch_size = max(batch.query_options.batch_size, needed_results)
elif exact:
batch_size = needed_results
else:
batch_size = None
self.__next_batch = batch.next_batch_async(FetchOptions(
offset=max(0, self.__initial_offset - self.__skipped_results),
batch_size=batch_size))
next_batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += next_batch.skipped_results
needed_results = max(0, needed_results - len(next_batch.results))
batch._extend(next_batch)
self.__next_batch = batch.next_batch_async()
return batch
class ResultsIterator(six.Iterator):
"""An iterator over the results from Batches obtained from a Batcher.
ResultsIterator implements Python's iterator protocol, so results can be
accessed with the for-statement:
> it = ResultsIterator(Query(kind='Person').run())
> for person in it:
> print 'Hi, %s!' % person['name']
At any time ResultsIterator.cursor() can be used to grab the Cursor that
points just after the last result returned by the iterator.
"""
__current_batch = None
__current_pos = 0
__last_cursor = None
def __init__(self, batcher):
"""Constructor.
Args:
batcher: A datastore_query.Batcher
"""
if not isinstance(batcher, Batcher):
raise datastore_errors.BadArgumentError(
'batcher argument should be datastore_query.Batcher (%r)' %
(batcher,))
self.__batcher = batcher
def index_list(self):
"""Returns the list of indexes used to perform the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._ensure_current_batch().index_list
def cursor(self):
"""Returns a cursor that points just after the last result returned.
If next() throws an exception, this function returns the end_cursor from
the last successful batch or throws the same exception if no batch was
successful.
"""
return (self.__last_cursor or
self._ensure_current_batch().cursor(self.__current_pos))
def _compiled_query(self):
"""Returns the compiled query associated with the iterator.
Internal only, do not use.
"""
return self._ensure_current_batch()._compiled_query()
def __next__(self):
"""Returns the next query result."""
while (not self.__current_batch or
self.__current_pos >= len(self.__current_batch.results)):
try:
next_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
except:
if self.__current_batch:
self.__last_cursor = self.__current_batch.end_cursor
raise
self.__current_pos = 0
self.__current_batch = next_batch
result = self.__current_batch.results[self.__current_pos]
self.__current_pos += 1
return result
| 32.482311 | 81 | 0.690634 |
f479e4c7564b46ceb9cbf0369fdeb3cac10260f7 | 4,274 | py | Python | tests/Metrics/test_recall.py | Neklaustares-tPtwP/torchflare | 7af6b01ef7c26f0277a041619081f6df4eb1e42c | [
"Apache-2.0"
] | 1 | 2021-09-14T08:38:05.000Z | 2021-09-14T08:38:05.000Z | tests/Metrics/test_recall.py | weidao-Shi/torchflare | 3c55b5a0761f2e85dd6da95767c6ec03f0f5baad | [
"Apache-2.0"
] | null | null | null | tests/Metrics/test_recall.py | weidao-Shi/torchflare | 3c55b5a0761f2e85dd6da95767c6ec03f0f5baad | [
"Apache-2.0"
] | 1 | 2021-08-06T19:24:43.000Z | 2021-08-06T19:24:43.000Z | # flake8: noqa
import warnings
import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import recall_score
from torchflare.metrics.recall_meter import Recall
from torchflare.metrics.meters import _BaseInputHandler
torch.manual_seed(42)
| 33.920635 | 111 | 0.654422 |
f47a1ea7f8990d7f8f0d9190441ddb6344e10412 | 1,785 | py | Python | parsing/tests/test_utils.py | davesque/parsing.py | ff8b20e53b94e79571971ef23f0e5091e2786566 | [
"MIT"
] | 1 | 2020-11-14T13:06:42.000Z | 2020-11-14T13:06:42.000Z | parsing/tests/test_utils.py | davesque/parsing.py | ff8b20e53b94e79571971ef23f0e5091e2786566 | [
"MIT"
] | null | null | null | parsing/tests/test_utils.py | davesque/parsing.py | ff8b20e53b94e79571971ef23f0e5091e2786566 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import unittest
from ..utils import compose, flatten, truncate, join, unary, equals
| 30.775862 | 87 | 0.652101 |
f47b6c51761fb432f29fb2e6eb1f0ea2e885172e | 1,807 | py | Python | Array/Final450/Move_Negative_Nums_To_One_End/relative_order_matters/move_negative_nums_to_one_end--insertion_sort_modified.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | Array/Final450/Move_Negative_Nums_To_One_End/relative_order_matters/move_negative_nums_to_one_end--insertion_sort_modified.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | Array/Final450/Move_Negative_Nums_To_One_End/relative_order_matters/move_negative_nums_to_one_end--insertion_sort_modified.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | from Utils.Array import input_array
# Time : O(n^2)
# Space : O(1) Constant space
"""
I'll be having 2 pointers here:
one of them will move through the array looking for -ve numbers to operate on,
and the other will point to the correct location where I can put the -ve elements after I find them.
This same location also marks the start of the 1st +ve number in the array,
--> as we are going to move those forward.
Finally, when you find a -ve number, store it temporarily,
do the swapping to move all the +ve numbers forward by one step to make place for the stored -ve number,
then put that number in its correct position and advance the pointer for future -ve numbers.
"""
if __name__ == "__main__":
arr = input_array()
rearrange_via_modified_insertion_sort(arr)
print(arr)
"""
12 11 -13 -5 6 -7 5 -3 -6
-1 2 -3 4 5 6 -7 8 9
2 3 -1 -4 -6 # Reverse
4 3 2 1 0 -1 -2 -3 # Reverse containing 0
"""
| 34.09434 | 105 | 0.646375 |
f47c09e34304fe10a016d16f624d1fb84ab59f99 | 2,786 | py | Python | python_test/test_epoll/test_epoll.py | zhtsh/test-examples | ed5a45bf8546a9bd7fc35e38f9679be385d0d9e6 | [
"Apache-2.0"
] | null | null | null | python_test/test_epoll/test_epoll.py | zhtsh/test-examples | ed5a45bf8546a9bd7fc35e38f9679be385d0d9e6 | [
"Apache-2.0"
] | null | null | null | python_test/test_epoll/test_epoll.py | zhtsh/test-examples | ed5a45bf8546a9bd7fc35e38f9679be385d0d9e6 | [
"Apache-2.0"
] | null | null | null | # coding=utf8
import socket
import select
from datetime import datetime
from datetime import timedelta
EOL = b'\n\n'
response = b'HTTP/1.0 200 OK\nDate: Mon, 1 Jan 1996 01:01:01 GMT\n'
response += b'Content-Type: text/plain\nContent-Length: 13\n\n'
response += b'Hello, world!\n'
# Create a listening server socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(('0.0.0.0', 8080))
serversocket.listen(1)
serversocket.setblocking(0)
# Create an epoll object and register the server socket for read (new connection) events
epoll = select.epoll()
epoll.register(serversocket.fileno(), select.EPOLLIN)
try:
connections = {}
requests = {}
responses = {}
while True:
# Wait for I/O events; epoll is a scalable I/O-multiplexing alternative to poll/select
events = epoll.poll(1)
# Dispatch on each ready file descriptor
for fileno, event in events:
# A new connection is pending on the listening socket
if fileno == serversocket.fileno():
connection, address = serversocket.accept()
connection.setblocking(0)
epoll.register(connection.fileno(), select.EPOLLIN)
connections[connection.fileno()] = connection
requests[connection.fileno()] = b''
responses[connection.fileno()] = response
elif event & select.EPOLLIN:
# Client socket is readable: receive request data
try:
requests[fileno] += connections[fileno].recv(1024)
if EOL in requests[fileno]:
epoll.modify(fileno, event | select.EPOLLOUT)
print(requests[fileno])
except Exception as e:
print(e)
epoll.unregister(fileno)
del connections[fileno]
elif event & select.EPOLLOUT:
# Client socket is writable: send the response
try:
byteswritten = connections[fileno].send(responses[fileno])
# responses[fileno] = responses[fileno][byteswritten:]
# if len(responses[fileno]) == 0:
# epoll.modify(fileno, 0)
# connections[fileno].shutdown(socket.SHUT_RDWR)
except Exception as e:
print(e)
# epoll.modify(fileno, 0)
epoll.unregister(fileno)
del connections[fileno]
elif event & select.EPOLLHUP:
epoll.unregister(fileno)
connections[fileno].close()
del connections[fileno]
finally:
epoll.unregister(serversocket.fileno())
epoll.close()
serversocket.close()
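# Illustrative usage note (editor's addition): with the server running, a
# request terminated by the blank line the EOL marker expects can be sent
# with, e.g.:
#   printf 'GET / HTTP/1.0\n\n' | nc localhost 8080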
| 38.164384 | 79 | 0.561378 |
f47cd9858ae9886cfca8b27e46c09a635662d571 | 2,771 | py | Python | 20.2-Donut/Donut2.py | Kehvarl/AdventOfCode2019 | f72cfeefdfbde365bc9a5b722d5875d556379cf2 | [
"MIT"
] | 1 | 2020-09-27T23:02:46.000Z | 2020-09-27T23:02:46.000Z | 20.2-Donut/Donut2.py | Kehvarl/AdventOfCode2019 | f72cfeefdfbde365bc9a5b722d5875d556379cf2 | [
"MIT"
] | null | null | null | 20.2-Donut/Donut2.py | Kehvarl/AdventOfCode2019 | f72cfeefdfbde365bc9a5b722d5875d556379cf2 | [
"MIT"
] | 1 | 2019-12-09T17:10:48.000Z | 2019-12-09T17:10:48.000Z | import collections
from pprint import pprint
example1 = open("input.txt", "r").read()
# grid = [[val for val in line] for line in example1.split("\n")]
grid = example1.split("\n")
length = 0
for line in grid:
length = max(len(line), length)
out = []
for line in grid:
out.append(line[::-1].zfill(length)[::-1])
grid = out
scanned = []
neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]
# Find portals
# For each portal:
# Inner edge: recurse
# Outer edge: return
portals = {}
portal_links = {}
height = len(grid) - 1
width = len(grid[0]) - 1
for y in range(len(grid)):
for x in range(len(grid[0])):
if grid[y][x].isalpha():
portal = find_dot(x, y)
if portal:
dot, (tag_x, tag_y) = portal
dot_x, dot_y = dot
edge = dot_x == 2 or dot_x == width - 2 or dot_y == 2 or dot_y == height - 2
tag = "".join(sorted(grid[y][x] + grid[tag_y][tag_x]))
if not portals.get(tag):
portals[tag] = []
portals[tag].append(((x, y), dot, edge))
gx, gy, sx, sy = (0, 0, 0, 0)
for link in portals:
ends = portals[link]
if len(ends) == 2:
(a, (a_x, a_y), a_edge), (b, (b_x, b_y), b_edge) = ends
portal_links[a] = (b_x, b_y, a_edge, link)
portal_links[b] = (a_x, a_y, b_edge, link)
elif link == "ZZ":
goal, (gx, gy), ge = ends[0]
elif link == "AA":
start, (sx, sy), se = ends[0]
pprint(portals)
print(portal_links)
bfs = collections.deque([((sx, sy), 0, 0)])
seen = {(sx, sy, 0)}
running = True
while running:
pos, level, dist = bfs.popleft()
if pos == (gx, gy) and level == 0:
print(dist)
running = False
break
for neighbor in neighbors:
dx, dy = neighbor
tx, ty = pos
tx, ty = tx + dx, ty + dy
t_level = level
if (tx, ty) in portal_links:
px, py, p_edge, link = portal_links[(tx, ty)]
# print(link, (tx, ty), (px, py), p_edge)
if p_edge and t_level > 0:
t_level -= 1
tx, ty = px, py
elif not p_edge:
t_level += 1
tx, ty = px, py
if (tx, ty, t_level) in seen:
continue
seen.add((tx, ty, t_level))
if grid[ty][tx] == '.':
p = (tx, ty)
s = (p, t_level, dist + 1)
bfs.append(s)
print("complete")
| 24.741071 | 93 | 0.498015 |
f47e72619d39a8c165d31a3169ddc7283ecd466a | 845 | py | Python | OR_Client_Library/openrefine_client/tests/test_history.py | idaks/OpenRefine-Provenance-Tools | cc469c3eb8e56c8b0f4616cc501546db3c4176ea | [
"MIT"
] | null | null | null | OR_Client_Library/openrefine_client/tests/test_history.py | idaks/OpenRefine-Provenance-Tools | cc469c3eb8e56c8b0f4616cc501546db3c4176ea | [
"MIT"
] | null | null | null | OR_Client_Library/openrefine_client/tests/test_history.py | idaks/OpenRefine-Provenance-Tools | cc469c3eb8e56c8b0f4616cc501546db3c4176ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
test_history.py
"""
# Copyright (c) 2011 Paul Makepeace, Real Programmers. All rights reserved.
import unittest
from OR_Client_Library.openrefine_client.google.refine.history import *
if __name__ == '__main__':
unittest.main()
| 26.40625 | 75 | 0.60355 |
f47f72a41b188aa9caae89718d01a31bf276031b | 6,160 | py | Python | tests/batch/test_get_batch.py | Remmeauth/remme-core-cli | 94cc09fe9d2e718b45273dde68d6c672c4773f6a | [
"MIT"
] | null | null | null | tests/batch/test_get_batch.py | Remmeauth/remme-core-cli | 94cc09fe9d2e718b45273dde68d6c672c4773f6a | [
"MIT"
] | 94 | 2019-03-27T09:34:28.000Z | 2019-08-27T05:32:33.000Z | tests/batch/test_get_batch.py | Remmeauth/remme-core-cli | 94cc09fe9d2e718b45273dde68d6c672c4773f6a | [
"MIT"
] | 6 | 2019-06-06T15:16:38.000Z | 2020-02-24T12:55:55.000Z | """
Provide tests for command line interface's get batch command.
"""
import json
import pytest
from click.testing import CliRunner
from cli.constants import (
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
FAILED_EXIT_FROM_COMMAND_CODE,
PASSED_EXIT_FROM_COMMAND_CODE,
)
from cli.entrypoint import cli
from cli.utils import dict_to_pretty_json
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE = 'ccb529856e538325b435c6a75261702d1bdb52d3873b29189a722330cda628a6' \
'62028a7b39d1f5475cb78f5fc12efb986a35553ce8f1b63580b97fc6ab9e9655'
def test_get_batch():
"""
Case: get a batch by identifier.
Expect: batch is returned.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert isinstance(json.loads(result.output), dict)
def test_get_batch_with_invalid_id():
"""
Case: get a batch by its invalid identifier.
Expect: the following identifier is invalid error message.
"""
invalid_batch_id = 'abcefg'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
invalid_batch_id,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
expected_error_message = {
'errors': {
'id': [
f'The following identifier `{invalid_batch_id}` is invalid.',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
def test_get_batch_without_node_url(mocker):
"""
Case: get a batch by its identifier without passing node URL.
Expect: batch is returned from a node on localhost.
"""
batch_id = '6f200995e766da7218ec2a3d0aeabbe1151128063cdf4e954cd08390a879b28e' \
'085a06f8708d2e6bb34f6501e8ddc981f0353627c1d4f90c80a656a8090c8751'
expected_result = {
"data": {
"header": {
"signer_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f",
"transaction_ids": [
"5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
],
},
"header_signature": "57692f2bcc9be7fe2b59c052d5938eb92bd7be8a36487c1c7efc2c5758bf108e"
"232892987e898071e5ea13b4cbe283e96ac45d8f63cd9065522df7b85b050977",
"transactions": [
{
"header": {
"batcher_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f",
"family_name": "sawtooth_settings",
"family_version": "1.0",
"inputs": [
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c1c0cbf0fbcaf64c0b",
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c12840f169a04216b7",
],
"outputs": [
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c1c0cbf0fbcaf64c0b",
],
"signer_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f",
},
"header_signature": "5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
"payload": "CAESgAEKJnNhd3Rvb3RoLnNldHRpbmdzLnZvdGUuYyaXplZF9rZXlzEkIwM2Q0MjVkMmQxN2I2NGUzZWY4Zm"
"VlMDI4MDg5YTU2N2ZiYjA1YmQ1NTZmOThjMGI2ZmIJjNMGVhNjJiOGYaEjB4ZDU0NzJhOTY1NWJkYTNmNg==",
},
],
},
}
mock_get_batch_by_id = mocker.patch('cli.batch.service.loop.run_until_complete')
mock_get_batch_by_id.return_value = expected_result
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
batch_id,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert expected_result.get('data') == json.loads(result.output).get('result')
def test_get_batch_with_invalid_node_url():
"""
Case: get a batch by its identifier by passing an invalid node URL.
Expect: the following node URL is invalid error message.
"""
invalid_node_url = 'my-node-url.com'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
invalid_node_url,
])
expected_error_message = {
'errors': f'Please check if your node running at http://{invalid_node_url}:8080.',
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
| 34.606742 | 118 | 0.63961 |
f480097de648b87f17c2df8fc143686ff51cd136 | 364 | py | Python | experiments/scripts/preprocess_dataset.py | pbielak/graph-barlow-twins | f8e20134afed4f17ffcecf8f48764df362ffdcad | [
"MIT"
] | 9 | 2021-06-11T13:23:50.000Z | 2022-03-23T19:45:54.000Z | experiments/scripts/preprocess_dataset.py | pbielak/graph-barlow-twins | f8e20134afed4f17ffcecf8f48764df362ffdcad | [
"MIT"
] | 2 | 2021-09-22T13:58:39.000Z | 2021-11-23T02:26:50.000Z | experiments/scripts/preprocess_dataset.py | pbielak/graph-barlow-twins | f8e20134afed4f17ffcecf8f48764df362ffdcad | [
"MIT"
] | 2 | 2021-06-10T06:05:47.000Z | 2021-09-27T15:13:23.000Z | import sys
from gssl.datasets import load_dataset
from gssl.inductive.datasets import load_ppi
from gssl.utils import seed
if __name__ == "__main__":
main()
| 15.826087 | 44 | 0.662088 |
f48259ce6371a22b92ea0a936d7be4886d4013dc | 4,030 | py | Python | agro_site/orders/migrations/0001_initial.py | LukoninDmitryPy/agro_site-2 | eab7694d42104774e5ce6db05a79f11215db6ae3 | [
"MIT"
] | null | null | null | agro_site/orders/migrations/0001_initial.py | LukoninDmitryPy/agro_site-2 | eab7694d42104774e5ce6db05a79f11215db6ae3 | [
"MIT"
] | null | null | null | agro_site/orders/migrations/0001_initial.py | LukoninDmitryPy/agro_site-2 | eab7694d42104774e5ce6db05a79f11215db6ae3 | [
"MIT"
] | 1 | 2022-03-13T11:32:48.000Z | 2022-03-13T11:32:48.000Z | # Generated by Django 2.2.16 on 2022-04-12 13:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
import django.utils.timezone
| 52.337662 | 205 | 0.6134 |
f482b268cafb6a5a7b275bd6f15025933187f73e | 881 | py | Python | app/forms.py | FakeYou/flask-microblog | 021b786417a2ae1aaa957661beb25d381a7efdb2 | [
"MIT"
] | null | null | null | app/forms.py | FakeYou/flask-microblog | 021b786417a2ae1aaa957661beb25d381a7efdb2 | [
"MIT"
] | null | null | null | app/forms.py | FakeYou/flask-microblog | 021b786417a2ae1aaa957661beb25d381a7efdb2 | [
"MIT"
] | null | null | null | from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, PasswordField
from wtforms.validators import InputRequired, Email, EqualTo, Length
| 48.944444 | 104 | 0.704881 |
f482d9773506167246440d9307b62395f61caa1a | 2,353 | py | Python | ais3-pre-exam-2022-writeup/Misc/JeetQode/chall/problems/astmath.py | Jimmy01240397/balsn-2021-writeup | 91b71dfbddc1c214552280b12979a82ee1c3cb7e | [
"MIT"
] | null | null | null | ais3-pre-exam-2022-writeup/Misc/JeetQode/chall/problems/astmath.py | Jimmy01240397/balsn-2021-writeup | 91b71dfbddc1c214552280b12979a82ee1c3cb7e | [
"MIT"
] | null | null | null | ais3-pre-exam-2022-writeup/Misc/JeetQode/chall/problems/astmath.py | Jimmy01240397/balsn-2021-writeup | 91b71dfbddc1c214552280b12979a82ee1c3cb7e | [
"MIT"
] | null | null | null | from problem import Problem
from typing import Any, Tuple
from random import randint
import ast
import json
| 37.349206 | 800 | 0.592435 |
f4838193c2db95eaa11b6561ddf47a01a31acc59 | 690 | py | Python | pyllusion/movement/movement_circles.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | null | null | null | pyllusion/movement/movement_circles.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | null | null | null | pyllusion/movement/movement_circles.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | null | null | null | import numpy as np
from .movement_matrix import movement_matrix
from ..image import image_circles
def movement_circles(n=50, duration=2, fps=30, width=500, height=500, **kwargs):
"""
>>> import pyllusion as ill
>>>
>>> images = ill.movement_circles(n=50, duration=4, fps=30, color="black", size=0.05)
>>> #ill.images_to_gif(images, path="mygif.gif", fps=30)
"""
n_frames = int(duration * fps)
x, y = movement_matrix(n_frames=n_frames, **kwargs)
# Generate PIL images
images = []
for i in range(n_frames):
images.append(
image_circles(width=width, height=height, n=n, x=x[i], y=y[i], **kwargs)
)
return images
| 27.6 | 89 | 0.631884 |
f484180dc11ca61b16fecb37c23ed96a63de8738 | 6,853 | py | Python | sce.py | hzwfl2/Semantic-consistent-Embedding | d3712cc6f27febbf654e1eb8c43c0b48376a9be1 | [
"MIT"
] | 2 | 2021-12-22T07:39:30.000Z | 2022-01-02T14:45:39.000Z | sce.py | hch-xmu/Semantic-consistent-Embedding | 2e408267095079d70daff6b391209aabb3d9acd3 | [
"MIT"
] | null | null | null | sce.py | hch-xmu/Semantic-consistent-Embedding | 2e408267095079d70daff6b391209aabb3d9acd3 | [
"MIT"
] | 3 | 2021-12-16T12:56:10.000Z | 2022-01-18T02:03:31.000Z | #%%
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC,LinearSVC
from torch import device
from torch.optim import optimizer
from torch.utils.data import DataLoader, Dataset
from read_data import create_data
#%%
#%%
device=torch.device('cuda')
np.random.seed(904)
#%%
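# Hedged sketch (editor's assumption): pre_model is referenced below but its
# definition is omitted from this snippet. From the call sites it appears to
# train one classifier per attribute (mode 'NB' -> GaussianNB, otherwise a
# random forest), predict test attributes, and label each test sample with the
# nearest row of the attribute matrix; the label/row-index mapping is assumed.
def pre_model(mode, traindata, train_attributelabel, testdata, testlabel, attribute_matrix):
    attribute_matrix = np.asarray(attribute_matrix)
    pred_cols = []
    for col in range(train_attributelabel.shape[1]):
        if len(set(train_attributelabel[:, col])) > 1:
            clf = GaussianNB() if mode == 'NB' else RandomForestClassifier()
            clf.fit(traindata, train_attributelabel[:, col])
            pred_cols.append(clf.predict(testdata))
        else:
            # constant attribute on the training set: keep it constant
            pred_cols.append(np.full(testdata.shape[0], train_attributelabel[0, col]))
    test_pre = np.column_stack(pred_cols)
    dists = ((attribute_matrix[None, :, :] - test_pre[:, None, :]) ** 2).sum(axis=2)
    label_lis = dists.argmin(axis=1)
    return test_pre, label_lis, np.asarray(testlabel).squeeze()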
#%%
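# Hedged sketch (editor's assumption): the my_dataset class used below is not
# part of this snippet; from its usage it is a minimal paired-tensor Dataset.
class my_dataset(Dataset):
    def __init__(self, data, attribute_label):
        self.data = data
        self.attribute_label = attribute_label
    def __len__(self):
        return self.data.shape[0]
    def __getitem__(self, index):
        return self.data[index], self.attribute_label[index]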
#%%
datapath='data/classData.csv'
modes=['NB'] #'rf'
test_classes={'test_class':[2,3]}
for key,value in test_classes.items():
print('========================================{}:[{}:{}]========================================='.format(modes,key,value))
df = pd.read_csv(datapath)
df['fault_type'] = df['G'].astype('str') + df['C'].astype('str') + df['B'].astype('str') + df['A'].astype('str')
traindata,trainlabel,train_attributelabel, train_attributematrix,testdata,testlabel,test_attributelabel,test_attributematrix,attribute_matrix=create_data(df,value)
_,y_pre,y_true=pre_model(modes[0], traindata, train_attributelabel, testdata, testlabel, test_attributematrix)
original_acc=accuracy_score(y_pre,y_true)
traindata=torch.from_numpy(traindata).float().to(device)
label=torch.from_numpy(trainlabel.squeeze()).long().to(device)
testdata=torch.from_numpy(testdata).float().to(device)
batch_size=400
trainset=my_dataset(traindata,torch.from_numpy(train_attributelabel).float().to(device))
train_loader=DataLoader(trainset,batch_size=batch_size,shuffle=True)
lambda_=[1,1e-5,1,0.25]
dim=[6,12]
model=Embedding_Net(dim,lambda_=lambda_)
model.to(device)
optimizer=optim.RMSprop(model.parameters(),lr=1e-2)
L1,L2,L3,L=[],[],[],[]
model.train()
accs=[]
best_acc=0
for epoch in range(200):
model.train()
for batch,(batch_data,batch_label) in enumerate(train_loader):
optimizer.zero_grad()
package=model(batch_data,batch_label)
loss_R1,loss_R2,loss_CM,loss=package['r1'],package['r2'],package['cm'],package['loss']
loss.backward()
optimizer.step()
L1.append(loss_R1.item())
L2.append(loss_R2.item())
L3.append(loss_CM.item())
L.append(loss.item())
model.eval()
with torch.no_grad():
train_package=model(traindata,torch.from_numpy(train_attributelabel).float().to(device))
f_train=train_package['z1']
f_train=torch.cat([f_train,traindata],dim=1).detach().cpu().numpy()
test_package=model(testdata,torch.from_numpy(test_attributelabel).float().to(device))
f_test=test_package['z1']
f_test=torch.cat([f_test,testdata],dim=1).detach().cpu().numpy()
test_preattribute,label_lis, testlabel=pre_model(modes[0], f_train, train_attributelabel, f_test, testlabel, test_attributematrix)
acc=accuracy_score(label_lis, testlabel)
accs.append(acc)
if acc>best_acc:
best_acc=acc
print('epoch:{:d}, best_acc:{:.4f}'.format(epoch,best_acc))
print('finished! FDAT:{:.4f}, SCE:{:.4f}'.format(original_acc,best_acc))
# %%
| 33.758621 | 168 | 0.618707 |
f484cdb74eddcab3519034cf17a9751d9384ce4d | 1,876 | py | Python | graphsage/partition_predict.py | colirain/GraphSAGE | a63145ff18f87cb69340c7b457c34839e9124086 | [
"MIT"
] | null | null | null | graphsage/partition_predict.py | colirain/GraphSAGE | a63145ff18f87cb69340c7b457c34839e9124086 | [
"MIT"
] | null | null | null | graphsage/partition_predict.py | colirain/GraphSAGE | a63145ff18f87cb69340c7b457c34839e9124086 | [
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
from graphsage.models import FCPartition
from graphsage.partition_train import construct_placeholders
from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap
flags = tf.app.flags
FLAGS = flags.FLAGS
# flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')
# DIR = 'trained_models'
# MODEL = 'partition'
# with tf.Session() as sess:
# new_saver = tf.train.import_meta_graph(DIR+'/'+MODEL+'.ckpt.meta')
# new_saver.restore(sess, tf.train.latest_checkpoint(DIR + '/./'))
# new_saver.run()
# print(new_saver)
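# Hedged sketch (editor's assumption): main() is invoked below but not shown
# in this snippet. Mirroring the commented-out restore code above, it would
# roughly rebuild the partition model, restore the checkpoint and predict.
# The FCPartition/construct_placeholders signatures, the 'feats' feed key and
# the model.preds attribute are assumptions here.
def main():
    embeds = load_embedded_data('trained_models')
    placeholders = construct_placeholders(embeds.shape[1])
    model = FCPartition(placeholders, [FLAGS.dim_1])
    with tf.Session() as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('trained_models'))
        preds = sess.run(model.preds,
                         feed_dict={placeholders['feats']: embeds})
        print(preds)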
if __name__ == '__main__':
main()
| 30.754098 | 95 | 0.678038 |
f484e0eafc21497bc2d0dc913be6480e2eceab78 | 13,307 | py | Python | scripts/generate_XML_files/DS1/annotatedsen_to_xml.py | AmmarQaseem/CPI-Pipeline-test | 3866883c54d7bd77753ee4b72997949bdcf76359 | [
"PostgreSQL",
"ISC",
"Intel"
] | null | null | null | scripts/generate_XML_files/DS1/annotatedsen_to_xml.py | AmmarQaseem/CPI-Pipeline-test | 3866883c54d7bd77753ee4b72997949bdcf76359 | [
"PostgreSQL",
"ISC",
"Intel"
] | null | null | null | scripts/generate_XML_files/DS1/annotatedsen_to_xml.py | AmmarQaseem/CPI-Pipeline-test | 3866883c54d7bd77753ee4b72997949bdcf76359 | [
"PostgreSQL",
"ISC",
"Intel"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2015, Elham Abbasian <e_abbasian@yahoo.com>, Kersten Doering <kersten.doering@gmail.com>
This parser reads annotated sentences (output from get_relations.py) in a tab-separated format to generate a unified XML format (Tikk et al., 2010. A comprehensive benchmark of kernel methods to extract protein-protein interactions from literature. PLoS Comput. Biol).
"""
# module to make use of regular expressions
import re
# set the default encoding to utf8 and ignore all decoding/encoding steps.
# (ToDo: check whether the encoding command is needed - debug)
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# optparse - Parser for command-line options
from optparse import OptionParser
# import this function to add quotation arround the input text and ignore the extra quotations inside the sentence text
#from xml.sax.saxutils import escape # (ToDo: not needed - debug)
from xml.sax.saxutils import quoteattr
### MAIN PART OF THE SCRIPT ###
if __name__=="__main__":
# configure parsing of command-line arguments
parser= OptionParser()
parser.add_option("-i", "--input", dest="i", help='name of the input file',default="training_dataset_sorted.csv")
parser.add_option("-o", "--output", dest="o", help='name of the output file',default="DS1.xml")
(options,args)=parser.parse_args()
# save parameters in an extra variable
input_file= options.i
output_file = options.o
# open input file with annotated sentences
infile = open(input_file,"r")
# open output file
outfile = open(output_file,"w")
#example for the input format:
#18227838-359 The mood stabilizers <compound-id="28486,3028194">lithium</compound-id> and <compound-id="3121">valproate</compound-id> activate the <protein-id="P29323">ERK</protein-id> pathway in prefrontal cortex and hippocampus and potentiate <protein-id="P29323">ERK</protein-id> pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis. lithium__ERK__no_interaction valproate__ERK__interaction
#example for the output format
"""
    <?xml version="1.0" encoding="UTF-8"?>
<corpus source="DS1">
<document id="DS1.d0" origId="18227838">
<sentence id="DS1.d0.s0" origId="18227838-359" text="The mood stabilizers lithium and valproate activate the ERK pathway in prefrontal cortex and hippocampus and potentiate ERK pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis."/>
<entity id="DS1.d0.s0.e0" origId="28486,3028194" charOffset="x1-y1" type="compound" text="lithium"/>
<entity id="DS1.d0.s0.e1" origId="3121" charOffset="x2-y2" type="compound" text="valproate"/>
<entity id="DS1.d0.s0.e2" origId="P29323" charOffset="x3-y3" type="protein" text="ERK"/>
    <interaction id="DS1.d0.s0.i0" e1="DS1.d0.s0.e0" e2="DS1.d0.s0.e2" type="no_interaction" directed="False" />
    <interaction id="DS1.d0.s0.i1" e1="DS1.d0.s0.e1" e2="DS1.d0.s0.e2" type="interaction" directed="False" />
</sentence>
[...]
</document>
[...]
</corpus>
"""
# add XML header and define corpus source
outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"+"\n")
outfile.write("<corpus source=\"DS1\">"+"\n")
# variable to store and compare the last read PubMed ID to notice whether there are multiple sentences with the same PubMed ID or not
# the document ID refers to the PubMed ID (origID)
pre_pmid=""
# doc_num counts the number of created documents
doc_num =0
# read lines in CSV file
for line in infile :
# tab-separated format
temp = line.strip().split("\t")
# get PubMed ID, sentences ID, and the sentence itself
# (ToDo: use a split command instead of this regular expression - debug)
curr_pmid = re.match('(\d{8})',temp[0]).group(0)
pmid_sent_num = temp[0]
sentence_text = temp[1]
# find all annotated proteins and compounds by matching their tags
pro_positions= [(a.start(), a.end()) for a in list(re.finditer('<protein-id="(.*?)">(.*?)</protein-id>',sentence_text))]
cmp_positions = [(a.start(), a.end()) for a in list(re.finditer('<compound-id="(.*?)">(.*?)</compound-id>',sentence_text))]
# join the two lists
positions = pro_positions + cmp_positions
positions.sort()
#Initialize the list with the number of identified tags
entity_list =[]
entity_list=[0]*len(positions)
# iterate over all identified positions of the identified tags
for i in range(len(positions)):
        # initialize the second dimension of the list with a length of four (entity_type,entity_id,entity_text,entity_charoffset)
entity_list[i]=[0]*4
# store these four elements with grouping in the regular expression
obj = re.match('<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>',sentence_text[positions[i][0]:positions[i][1]])
entity_list[i][0]=obj.group(1) #entity_type
entity_list[i][1]=obj.group(2) #entity_id
entity_list[i][2]=obj.group(3) #entity_text
entity_list[i][2]=entity_list[i][2].replace("[","(").replace("]",")")
        # the entity_charoffset will be assigned later, after the pure sentence text (without any tags) has been generated
# the sentence without any tags will be generated by deleting all tags via text concatenation
# initialize (ToDo: initialization like this not needed - debug)
pur_sent_text = sentence_text
# enumerate over the list of positions (index, value)
for i,e in reversed(list(enumerate(positions))):
pur_sent_text = pur_sent_text[0:positions[i][0]]+entity_list[i][2]+pur_sent_text[positions[i][1]:]
# get the character offset of all identified synonyms
# decode the sentences to UTF8 to prevent the usage of more than one character for special letters, symbols, etc.
# make use of a list of repeated synonyms and synonym positions
repeated_syn_pos =[]
rep_syn =[]
for i in range(len(entity_list)) :
        # check whether this is the first occurrence of the current synonym
if not entity_list[i][2] in rep_syn :
            # get the list of positions of all occurrences of the current synonym
u_pur_sent_text = pur_sent_text.decode("utf8")
charoffset_value = [(a.start(), a.end()) for a in list(re.finditer(re.escape(entity_list[i][2]),u_pur_sent_text))]
            # check whether it occurs only once, in which case the charoffset can be assigned directly
if len(charoffset_value) == 1 :
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
else:
                # if it occurs more than once, the charoffset has to be assigned according to the first pair of positions
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
# append this synonym to the rep_syn list to store all repeated synonyms in this sentence
rep_syn.append(entity_list[i][2])
                # delete the first pair of positions from the list
charoffset_value = charoffset_value[1:]
                # add the remaining position pairs for the current synonym to another list
for j in range(len(charoffset_value)):
repeated_syn_pos.append([entity_list[i][2],charoffset_value[j][0],charoffset_value[j][1]])
else:
# this case refers to at least the second occurrence of the synonym
# for each repeated synonym, assign the first position pair from the repeated_syn_pos list
for k in range(len(repeated_syn_pos)):
if repeated_syn_pos[k][0] == entity_list[i][2]:
break
entity_list[i][3] = str(repeated_syn_pos[k][1])+"-"+str(repeated_syn_pos[k][2])
# get pairs and their interaction status (separated by a double underscore)
listof_int_noint = temp[2:]
interaction_list=[0]*len(listof_int_noint)
for i in range(len(listof_int_noint)):
interaction_list[i]=listof_int_noint[i].split('__')
# interaction/no_interaction corresponds to True/False
TF_int_list=[0]*len(interaction_list)
for intid in range(len(interaction_list)) :
if interaction_list[intid][2]=="interaction" :
TF_int_list[intid]="True"
else :
TF_int_list[intid]="False"
# debug:
# print TF_int_list
# build XML structure
    # check whether the PubMed ID changed in comparison to the last parsed sentence
if curr_pmid == pre_pmid :
# if this is the case, only the sentence ID has to be increased
sent_num +=1
# add sentence ID using the current document number
# (doc_num has to be decreased by one, because this index is automatically increased after each sentence)
        # all opening and closing square brackets ([,]) should be replaced with round brackets, because they cause problems in the tokenization step of the (preprocessing) pipeline
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
        # build entity tags according to the list of identified tags from the CSV file (entity_list)
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# insert types of interaction for each pair of entities
# get the index of the synonym interactions in entity_list
origId = "DS1.d"+str(doc_num-1)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# if the current PubMed ID changed in comparison to the last parsed sentences
else :
if not doc_num == 0 :
outfile.write(" </document>\n")
sent_num =0
# a new document tag has to be opened and the sentences can be added
outfile.write(" <document id=\"DS1.d"+str(doc_num)+"\" origId=\""+str(curr_pmid)+"\">"+"\n")
        # replace square brackets ([,]) with round brackets
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
        # now build entity tags according to the entity_list data
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# build entity tags
origId = "DS1.d"+str(doc_num)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# set new PubMed ID as the last parsed document ID and increase document index
pre_pmid = curr_pmid
doc_num+=1
# close document tag
outfile.write("</document>\n")
# close corpus tag
outfile.write("</corpus>\n")
# close files
infile.close()
outfile.close()
| 58.364035 | 425 | 0.618622 |
f485580fbee3d8993b0b04b4d71777a8883725b7 | 1,182 | py | Python | website/members/urls.py | eamanu/asoc_members | bf2e99e9c63c60a59bdfd10ca1812d78851cbde6 | [
"MIT"
] | null | null | null | website/members/urls.py | eamanu/asoc_members | bf2e99e9c63c60a59bdfd10ca1812d78851cbde6 | [
"MIT"
] | null | null | null | website/members/urls.py | eamanu/asoc_members | bf2e99e9c63c60a59bdfd10ca1812d78851cbde6 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from members import views
urlpatterns = [
path('solicitud-alta/', views.signup_initial, name='signup'),
path('solicitud-alta/persona/', views.signup_form_person, name='signup_person'),
path('solicitud-alta/organizacion',
views.signup_form_organization, name='signup_organization'),
path('solicitud-alta/gracias', views.signup_thankyou, name='signup_thankyou'),
path('reportes/', views.reports_main, name='reports_main'),
path('reportes/deudas', views.report_debts, name='report_debts'),
path('reportes/completos', views.report_complete, name='report_complete'),
path('reportes/incompletos', views.report_missing, name='report_missing'),
path('reportes/ingcuotas', views.report_income_quotas, name='report_income_quotas'),
path('reportes/ingdinero', views.report_income_money, name='report_income_money'),
path('reportes/miembros', views.members_list, name="members_list"),
path('reportes/miembros/<pk>/', views.member_detail, name='member_detail'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 47.28 | 88 | 0.755499 |
f485c8b7834281c5e46b0be30ec91fef7f0a76cd | 2,482 | py | Python | Benchmarking/Keras/Tensorflow/TF_dataforcomparisongraphss.py | vais-ral/CCPi-ML | ca9baeb0dd5db3a97ac8ab9e33e03aeae42ebfa4 | [
"Apache-2.0"
] | null | null | null | Benchmarking/Keras/Tensorflow/TF_dataforcomparisongraphss.py | vais-ral/CCPi-ML | ca9baeb0dd5db3a97ac8ab9e33e03aeae42ebfa4 | [
"Apache-2.0"
] | null | null | null | Benchmarking/Keras/Tensorflow/TF_dataforcomparisongraphss.py | vais-ral/CCPi-ML | ca9baeb0dd5db3a97ac8ab9e33e03aeae42ebfa4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 14:04:03 2018
@author: zyv57124
"""
import scipy.io as sio
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.python.training import gradient_descent
from time import time
#Load data ------------------------------------------------------
def loadMATData(file1):
return sio.loadmat(file1)
#Load Data-------------------------------------------------------
data = loadMATData('ex3data1.mat')
features = data['X']
labels = data['y']
filter = labels ==10
labels[filter] = 0
#shuffle data---------------------------------------------------
ran = np.arange(features.shape[0])
np.random.shuffle(ran)
features = features[ran]
labels = labels[ran]
training_features = features[:3500]
training_labels = labels[:3500]
test_features = features[3501:]
test_labels = labels[3501:]
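# TimingCallback is instantiated below but was not defined in this snippet; a
# minimal sketch (assumption): a Keras callback that records the wall-clock
# duration of every epoch in `logs`, matching the cb.logs[...] lookups below.
class TimingCallback(keras.callbacks.Callback):
    def __init__(self):
        self.logs = []
    def on_epoch_begin(self, epoch, logs=None):
        self.starttime = time()
    def on_epoch_end(self, epoch, logs=None):
        self.logs.append(time() - self.starttime)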
for i in np.arange(0,500, 10):
#TF Neaural Network Builder--------------------------------------
model = keras.Sequential([
keras.layers.Dense(400, activation=tf.nn.relu),
keras.layers.Dense(25, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
predictions = model.predict(test_features)
    cb = TimingCallback()
history = model.fit(training_features, training_labels, batch_size=i+1, epochs=100, verbose=2, callbacks=[cb])
#Store eoch number and loss values in .txt file
loss_data = (history.history['loss'])
f = open("TF_loss_data_batchnum_"+str(i+1)+".txt","w")
for xx in range(1,len(loss_data)+1):
if xx==1:
delta_loss = 'Nan'
else:
delta_loss = (loss_data[xx-2] - loss_data[xx-1])
#Epoch #Loss #Batch size #Time #Change in loss
f.write(str(xx) + "," + str(loss_data[xx-1]) + "," + str(i+1) + "," + str(cb.logs[xx-1]) + "," + str(delta_loss) + "\n" )
f.close() | 17.236111 | 144 | 0.580983 |
f485d7305ea8da6e0bb04315c8cf68b15f093141 | 496 | py | Python | Exercise_8.py | aurimas13/Python-stuff | a6e89e9f6088a6ab29da5b57830e4b7750427454 | [
"MIT"
] | 1 | 2021-06-30T09:31:52.000Z | 2021-06-30T09:31:52.000Z | Exercise_8.py | aurimas13/Python-stuff | a6e89e9f6088a6ab29da5b57830e4b7750427454 | [
"MIT"
] | null | null | null | Exercise_8.py | aurimas13/Python-stuff | a6e89e9f6088a6ab29da5b57830e4b7750427454 | [
"MIT"
] | null | null | null | # Solution of Exercise 8 - Exercise_8.py
#
# Uploaded by Aurimas A. Nausedas on 11/23/20.
# Updated by Aurimas A. Nausedas on 11/06/21.
formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
"I had this thing.",
"That you could type up right.",
"But it didn't sing.",
"So I said goodnight."
)
| 27.555556 | 62 | 0.645161 |
f485da5cf70dcae9f004e6210259cc3b9e4d5254 | 402 | py | Python | Easy/two-numbers-sum/solution-1.py | MCFrank16/python-algo | dd48f6c5b9f4a941a18fc4620164c807c0e1d35e | [
"MIT"
] | null | null | null | Easy/two-numbers-sum/solution-1.py | MCFrank16/python-algo | dd48f6c5b9f4a941a18fc4620164c807c0e1d35e | [
"MIT"
] | null | null | null | Easy/two-numbers-sum/solution-1.py | MCFrank16/python-algo | dd48f6c5b9f4a941a18fc4620164c807c0e1d35e | [
"MIT"
] | null | null | null | # solution 1: Brute Force
# time complexity: O(n^2)
# space complexity: O(1)
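# twoNumberSum is called below but was not defined in this snippet; a minimal
# brute-force sketch (assumption) matching the stated O(n^2) time / O(1) space:
# check every pair and return the first one that adds up to the target sum.
def twoNumberSum(array, targetSum):
    for i in range(len(array) - 1):
        for j in range(i + 1, len(array)):
            if array[i] + array[j] == targetSum:
                return [array[i], array[j]]
    return []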
print(twoNumberSum([3,5,-4,8,11,1,-1,6], 10))
| 23.647059 | 45 | 0.524876 |
f488b9695ea3d93d4ce613f2ebb45a1be83ca949 | 1,631 | py | Python | python/cac_tripplanner/destinations/migrations/0021_event.py | maurizi/cac-tripplanner | 3f4f1f1edc9be9e52c74eb3e124b6697429a79d6 | [
"Apache-2.0"
] | null | null | null | python/cac_tripplanner/destinations/migrations/0021_event.py | maurizi/cac-tripplanner | 3f4f1f1edc9be9e52c74eb3e124b6697429a79d6 | [
"Apache-2.0"
] | null | null | null | python/cac_tripplanner/destinations/migrations/0021_event.py | maurizi/cac-tripplanner | 3f4f1f1edc9be9e52c74eb3e124b6697429a79d6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-28 17:32
from __future__ import unicode_literals
import ckeditor.fields
import destinations.models
from django.db import migrations, models
import django.db.models.deletion
| 42.921053 | 171 | 0.624157 |
f488b98251360b04f0d4a4065b27efc58a8ffeb9 | 8,448 | py | Python | data_extraction/scripts/bnf_adr_extraction.py | elpidakon/CRESCENDDI | ab9e65621d331689f4aaeeb08902f29d90b7d1b9 | [
"MIT"
] | null | null | null | data_extraction/scripts/bnf_adr_extraction.py | elpidakon/CRESCENDDI | ab9e65621d331689f4aaeeb08902f29d90b7d1b9 | [
"MIT"
] | null | null | null | data_extraction/scripts/bnf_adr_extraction.py | elpidakon/CRESCENDDI | ab9e65621d331689f4aaeeb08902f29d90b7d1b9 | [
"MIT"
] | null | null | null | # Kontsioti, Maskell, Dutta & Pirmohamed, A reference set of clinically relevant
# adverse drug-drug interactions (2021)
# Code to extract single-drug side effect data from the BNF website
from bs4 import BeautifulSoup
import urllib
import os, csv
import numpy as np
import pandas as pd
import re
from tqdm import tqdm
URL_BEGINNING = 'https://bnf.nice.org.uk/drug/'
print('beginning scrape for individual drugs...')
# Fetch the HTML containing the full list of APIs.
r = urllib.request.urlopen(URL_BEGINNING).read()
soup1 = BeautifulSoup(r, 'lxml')
# Extract the full URL list.
URL_list = []
for s in soup1.find_all('div', {'class': 'span11'}):
for ai in s(href=True):
temp = URL_BEGINNING + ai['href']
URL_list.append(temp)
print(URL_list)
# Create an empty dataframe for storing the extracted data for APIs.
scraped_API_count = 0
scraped_API = pd.DataFrame(np.nan, index = range(0,160000), columns = ['API', 'AE', 'Frequency'], dtype = str)
row_count = 0
# Empty list to store API mappings to their drug class (if applicable).
API_to_drugclass = []
# Scrape individual drug (API) side effects.
HIGHEST_API_ID = len(URL_list)
for id in tqdm(range(0, HIGHEST_API_ID)):
# Try to fetch the HTML for each API.
try:
l = urllib.request.urlopen(URL_list[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped products.
scraped_API_count += 1
soup2 = BeautifulSoup(l, 'lxml')
API = soup2.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
# In case the API contains a side effect section.
if soup2.find('section', {'id':'sideEffects'}):
ae_list = soup2.find_all('span', {'class': 'sideEffect'})
for a in ae_list:
adv_event = a.getText()
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_API.at[row_count, 'Frequency'] = freq
row_count += 1
# Check if the drug belongs to a specific drug class. If yes, extract
# the drug class name and the link to the corresponding webpage.
if soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*')):
temp = []
temp.append(API)
drug_class = soup2.find('a', href = re.compile(r'.*/drug-class/.*')).span.getText()
temp.append(drug_class)
li = soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*'))['href']
drug_class_link = 'https://bnf.nice.org.uk' + str(li)
temp.append(drug_class_link)
API_to_drugclass.append(temp)
# In case the API does not contain a side effect section.
else:
adv_event = 'NO AEs MENTIONED'
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
scraped_API.at[row_count,'Frequency'] = ''
row_count += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_API_dropna = scraped_API[~scraped_API.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_API_dropna['API'] = scraped_API_dropna['API'].str.strip()
scraped_API_dropna['AE'] = scraped_API_dropna['AE'].str.strip()
scraped_API_dropna['Frequency'] = scraped_API_dropna['Frequency'].str.strip()
print('BNF individual side effects successfully scraped.')
print('beginning scrape for drug classes...')
# Create a dataframe with drug names, drug classes and related URLs (where applicable).
API_class_df = pd.DataFrame(API_to_drugclass, columns = ['API','Drug_Class','Link'])
# Create a list with all the links for the drug class webpages.
class_links = API_class_df['Link'].unique().tolist()
# Scrape drug class side effects.
HIGHEST_DRUG_CLASS_ID = len(class_links)
scraped_class_count = 0
# Create an empty dataframe for storing the extracted data for drug classes.
scraped_class = pd.DataFrame(np.nan, index = range(0,160000), columns = ['Drug_Class', 'AE', 'Frequency'], dtype = str)
row_count_2 = 0
for id in tqdm(range(0, HIGHEST_DRUG_CLASS_ID)):
# Try to fetch the HTML for each drug class.
try:
l = urllib.request.urlopen(class_links[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped drug classes.
scraped_class_count += 1
soup3 = BeautifulSoup(l, 'lxml')
# Extract the drug class name.
class_name = soup3.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
class_ae_list = soup3.find_all('span', {'class': 'sideEffect'})
for a in class_ae_list:
adv_event = a.getText()
scraped_class.at[row_count_2, 'Drug_Class'] = class_name
scraped_class.at[row_count_2,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_class.at[row_count_2, 'Frequency'] = freq
row_count_2 += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_class_dropna = scraped_class[~scraped_class.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_class_dropna['Drug_Class'] = scraped_class_dropna['Drug_Class'].str.strip()
scraped_class_dropna['AE'] = scraped_class_dropna['AE'].str.strip()
scraped_class_dropna['Frequency'] = scraped_class_dropna['Frequency'].str.strip()
print('BNF drug class side effects successfully scraped.')
print('combine extracted data...')
## Combine both tables by adding drug class side effects to the individual
## ingredients of each drug class.
# Create a dictionary that contains all drug classes as keys and side effects
# with associated frequencies as values.
AEs_by_class_dict = scraped_class_dropna.groupby('Drug_Class')[['AE', 'Frequency']].apply(lambda g: list(map(tuple, g.values.tolist()))).to_dict()
# Remove URL column
API_class_df.drop(columns = 'Link', inplace = True)
# Create a dataframe with drug class as the index of APIs (if available)
# and add their drug class side effects and associated frequencies.
API_class_df['Drug_Class'] = API_class_df['Drug_Class'].str.strip()
API_class_df.set_index('Drug_Class', inplace = True)
API_class_df['AE_freq_tuple'] = API_class_df.index.to_series().map(AEs_by_class_dict)
API_class_df.reset_index(inplace=True)
# Create a new dataframe to store drug class side effect data for each API.
AEs_from_class_df = API_class_df.explode('AE_freq_tuple').reset_index(drop=True)
AEs_from_class_df[['AE', 'Frequency']] = pd.DataFrame(AEs_from_class_df['AE_freq_tuple'].tolist(), index = AEs_from_class_df.index)
AEs_from_class_df['from_drug_class'] = 'Yes'
AEs_from_class_df.drop(columns = ['AE_freq_tuple','Drug_Class'], inplace = True)
# Fill NAs in Frequency column if no side effects are mentioned.
scraped_API_dropna.loc[scraped_API_dropna.AE == 'NO AEs MENTIONED', 'Frequency'] = 'N/A'
# Fill NAs in drug class indicator if no side effects are mentioned. Otherwise, put 'No'.
scraped_API_dropna['from_drug_class'] = np.where(scraped_API_dropna['AE'] == 'NO AEs MENTIONED', 'N/A', 'No')
# Concatenate the two dataframes to get a final one.
final_df = pd.concat([scraped_API_dropna, AEs_from_class_df])
# Remove any rows that do not contain side effects.
final_df = final_df[final_df.AE != 'NO AEs MENTIONED']
# Convert dataframe to lowercase.
final_df = final_df.apply(lambda x: x.astype(str).str.lower())
# Sort alphabetically.
final_df = final_df.sort_values(by=['API', 'from_drug_class'])
# Remove any duplicates.
final_df.drop_duplicates(subset = ['API', 'AE', 'Frequency'], keep = 'first', inplace = True)
# Rename columns.
final_df.columns = ['Drug_name', 'AE', 'Frequency', 'from_drug_class']
FILE_NAME = 'data_extraction/output/bnf_single_data.csv'
print('saving to file...')
# Save the dataset to a csv file.
final_df.to_csv(FILE_NAME, index=False, encoding = "utf-8")
| 43.546392 | 147 | 0.68608 |
f489d029eb3e215d049f6f2f3cc368f56d30226f | 1,080 | py | Python | core/forms.py | nicoknoll/howimetcorona | c55198118b2c31ee8b76c023b5a9fc4454cc1e08 | [
"Apache-2.0"
] | 1 | 2020-03-21T09:47:17.000Z | 2020-03-21T09:47:17.000Z | core/forms.py | nicoknoll/howimetcorona | c55198118b2c31ee8b76c023b5a9fc4454cc1e08 | [
"Apache-2.0"
] | 5 | 2020-03-20T20:12:16.000Z | 2021-09-22T18:46:48.000Z | core/forms.py | nicoknoll/howimetcorona | c55198118b2c31ee8b76c023b5a9fc4454cc1e08 | [
"Apache-2.0"
] | null | null | null | from django import forms
| 28.421053 | 77 | 0.662963 |
f48be2ac89c37ef219c2ad00751eceeb8e3e514f | 270 | py | Python | bartender/drinks/generators.py | autiwg/bartender | 1c26aefb777a01ce527745c543e60b11a972fe5d | [
"Unlicense",
"MIT"
] | null | null | null | bartender/drinks/generators.py | autiwg/bartender | 1c26aefb777a01ce527745c543e60b11a972fe5d | [
"Unlicense",
"MIT"
] | null | null | null | bartender/drinks/generators.py | autiwg/bartender | 1c26aefb777a01ce527745c543e60b11a972fe5d | [
"Unlicense",
"MIT"
] | null | null | null | from django.utils import timezone
from django.utils.text import slugify
| 30 | 109 | 0.725926 |
f48bfbdf82f8ea69c9578103bcb880d230cfe368 | 718 | py | Python | papers/wdmerger_I/plots/sponge.py | AMReX-Astro/wdmerger | 9f575efacc8d373b6d2961f731e30bf59ee15ffd | [
"MIT"
] | 2 | 2019-01-23T21:12:02.000Z | 2021-12-14T07:34:38.000Z | papers/wdmerger_I/plots/sponge.py | AMReX-Astro/wdmerger | 9f575efacc8d373b6d2961f731e30bf59ee15ffd | [
"MIT"
] | 1 | 2017-08-05T06:25:41.000Z | 2017-08-05T06:25:41.000Z | papers/wdmerger_I/plots/sponge.py | AMReX-Astro/wdmerger | 9f575efacc8d373b6d2961f731e30bf59ee15ffd | [
"MIT"
] | 2 | 2018-12-25T01:05:59.000Z | 2020-12-28T10:01:59.000Z | # This Python program is used to create a plot displaying the sponge
# function we use in the CASTRO hydrodynamics for the wdmerger problem.
import numpy as np
import matplotlib.pyplot as plt
rs = 0.75
rt = 0.85
r = np.linspace(0.0, 1.0, 1000)
f = np.zeros(len(r))
idx = np.where(r < rs)
f[idx] = 0.0
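# transition region rs <= r < rt: smooth cosine ramp of f from 0 up to 1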
idx = np.where(r < rt)
idx = np.where(r[idx] >= rs)
f[idx] = 0.5 * (1.0 - np.cos(np.pi * (r[idx] - rs) / (rt - rs)))
idx = np.where(r >= rt)
f[idx] = 1.0
plt.plot(r, 1.0 - f, linewidth=4.0)
plt.xlabel('Radius', fontsize=20)
plt.ylabel(r'$1 - f_S$', fontsize=20)
plt.xlim([0.0, 1.0])
plt.ylim([-0.05, 1.05])
plt.tick_params(labelsize=16)
plt.tight_layout()
plt.savefig('sponge.eps')
| 18.894737 | 71 | 0.635097 |
f48c4c17d15169f83e1e0f82eed8e69642feb9a8 | 753 | py | Python | Python/110-1/Midterm Additional HW/005.py | JenFuChen/NKUST | bd80a449eddfdaf75709379d2e904ff70d409666 | [
"MIT"
] | 3 | 2021-11-07T17:33:54.000Z | 2021-12-28T08:31:20.000Z | Python/110-1/Midterm Additional HW/005.py | JenFuChen/NKUST | bd80a449eddfdaf75709379d2e904ff70d409666 | [
"MIT"
] | null | null | null | Python/110-1/Midterm Additional HW/005.py | JenFuChen/NKUST | bd80a449eddfdaf75709379d2e904ff70d409666 | [
"MIT"
] | null | null | null | # 005
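# Read an integer level per line (a non-positive value stops the loop); for
# each level N, print a hollow diamond of asterisks spanning 2N-1 rows.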
while(1):
level = int(input())
if(level <= 0):
break
L = 2*level-1
mid = int((L - 1) / 2)
inspa = mid * 2 - 1
for i in range(L):
spa = level - i - 1
if spa >= 0:
print(" " * spa, end='')
print('*', end='')
if spa < 0:
spa = -spa
print(" " * spa, end='')
print('*', end='')
if(i > 0 and i <= mid):
for j in range(i*2-1):
print(" ", end='')
print('*', end='')
if(i > 0 and i > mid and i != L-1):
inspa = inspa - 2
for j in range(inspa):
print(" ", end='')
print('*', end='')
print()
| 25.965517 | 44 | 0.332005 |
f48c7224abe2e2f0a451d9341ea395ac8a419de0 | 1,978 | py | Python | dynamo/plot/pseudotime.py | davisidarta/dynamo-release | 0dbd769f52ea07f3cdaa8fb31022ceb89938c382 | [
"BSD-3-Clause"
] | null | null | null | dynamo/plot/pseudotime.py | davisidarta/dynamo-release | 0dbd769f52ea07f3cdaa8fb31022ceb89938c382 | [
"BSD-3-Clause"
] | null | null | null | dynamo/plot/pseudotime.py | davisidarta/dynamo-release | 0dbd769f52ea07f3cdaa8fb31022ceb89938c382 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from ..tools.utils import update_dict
from .utils import save_fig
| 28.257143 | 86 | 0.532356 |
f48d18e383286d35c87dd89bd5701bc78cbbbad7 | 4,327 | py | Python | ocean_lib/web3_internal/utils.py | joshualyguessennd/ocean.py | 23274698df4aae078d53b12d768c721af16f6e80 | [
"Apache-2.0"
] | null | null | null | ocean_lib/web3_internal/utils.py | joshualyguessennd/ocean.py | 23274698df4aae078d53b12d768c721af16f6e80 | [
"Apache-2.0"
] | 1 | 2021-02-16T18:31:53.000Z | 2021-02-16T18:31:53.000Z | ocean_lib/web3_internal/utils.py | joshualyguessennd/ocean.py | 23274698df4aae078d53b12d768c721af16f6e80 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
from collections import namedtuple
import eth_account
import eth_keys
import eth_utils
from eth_keys import KeyAPI
from eth_utils import big_endian_to_int
from ocean_lib.web3_internal.web3_provider import Web3Provider
from web3 import Web3
Signature = namedtuple("Signature", ("v", "r", "s"))
logger = logging.getLogger(__name__)
def generate_multi_value_hash(types, values):
"""
Return the hash of the given list of values.
This is equivalent to packing and hashing values in a solidity smart contract
hence the use of `soliditySha3`.
:param types: list of solidity types expressed as strings
:param values: list of values matching the `types` list
:return: bytes
"""
assert len(types) == len(values)
return Web3.soliditySha3(types, values)
def prepare_prefixed_hash(msg_hash):
"""
:param msg_hash:
:return:
"""
return generate_multi_value_hash(
["string", "bytes32"], ["\x19Ethereum Signed Message:\n32", msg_hash]
)
def add_ethereum_prefix_and_hash_msg(text):
"""
This method of adding the ethereum prefix seems to be used in web3.personal.sign/ecRecover.
:param text: str any str to be signed / used in recovering address from a signature
:return: hash of prefixed text according to the recommended ethereum prefix
"""
prefixed_msg = f"\x19Ethereum Signed Message:\n{len(text)}{text}"
return Web3.sha3(text=prefixed_msg)
def get_public_key_from_address(web3, account):
"""
:param web3:
:param account:
:return:
"""
_hash = web3.sha3(text="verify signature.")
signature = web3.personal.sign(_hash, account.address, account.password)
signature = split_signature(web3, web3.toBytes(hexstr=signature))
signature_vrs = Signature(
signature.v % 27, big_endian_to_int(signature.r), big_endian_to_int(signature.s)
)
prefixed_hash = prepare_prefixed_hash(_hash)
pub_key = KeyAPI.PublicKey.recover_from_msg_hash(
prefixed_hash, KeyAPI.Signature(vrs=signature_vrs)
)
assert (
pub_key.to_checksum_address() == account.address
), "recovered address does not match signing address."
return pub_key
def to_32byte_hex(web3, val):
"""
:param web3:
:param val:
:return:
"""
return web3.toBytes(val).rjust(32, b"\0")
def split_signature(web3, signature):
"""
:param web3:
:param signature: signed message hash, hex str
:return:
"""
assert len(signature) == 65, (
f"invalid signature, " f"expecting bytes of length 65, got {len(signature)}"
)
v = web3.toInt(signature[-1])
r = to_32byte_hex(web3, int.from_bytes(signature[:32], "big"))
s = to_32byte_hex(web3, int.from_bytes(signature[32:64], "big"))
if v != 27 and v != 28:
v = 27 + v % 2
return Signature(v, r, s)
| 29.040268 | 95 | 0.697712 |
f48e4de60f001ef56a4fbd661495b8d069dc740f | 192 | py | Python | autofront/__init__.py | JimmyLamothe/autofront | d179e54411f5d53046a5fa52b4430e09b01ebaca | [
"BSD-3-Clause"
] | 1 | 2020-11-16T22:18:03.000Z | 2020-11-16T22:18:03.000Z | autofront/__init__.py | JimmyLamothe/autofront | d179e54411f5d53046a5fa52b4430e09b01ebaca | [
"BSD-3-Clause"
] | null | null | null | autofront/__init__.py | JimmyLamothe/autofront | d179e54411f5d53046a5fa52b4430e09b01ebaca | [
"BSD-3-Clause"
] | null | null | null | import autofront.autofront as autofront
import autofront.utilities as utilities
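# Re-export the package's public entry points at the top level.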
initialize = autofront.initialize
add = autofront.add
run = autofront.run
get_display = utilities.get_display
| 21.333333 | 39 | 0.833333 |
f48e86cd3da483fb8b0fe253866faf1ceee934c8 | 8,444 | py | Python | src/main.py | ketsonroberto/PBDO | cdc1c5275bc17753be5c06a216f92391b6f1f1ab | [
"MIT"
] | null | null | null | src/main.py | ketsonroberto/PBDO | cdc1c5275bc17753be5c06a216f92391b6f1f1ab | [
"MIT"
] | null | null | null | src/main.py | ketsonroberto/PBDO | cdc1c5275bc17753be5c06a216f92391b6f1f1ab | [
"MIT"
] | null | null | null | # THIS IS A FILE TO TEST THE CODE. DO NOT USE IT AS PART OF THE CODE.
import matplotlib.pyplot as plt
import numpy as np
from StochasticMechanics import Stochastic
from scipy.optimize import minimize
from Performance import PerformanceOpt
from Hazards import Stationary
from Building import *
from BuildingProperties import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import optimize
freq = np.linspace(0.00001, 20, 500)
gamma = np.ones((ndof)) * [0.5]
nu = np.ones((ndof)) * [0.5]
alpha = np.ones((ndof)) * [1]
m = np.ones((ndof)) * [1]
c = np.ones((ndof)) * [1]
k = np.ones((ndof)) * [200]
a = np.ones((ndof)) * [0.8] #0.01
ksi = np.ones((ndof)) * [0.05]
# ksi = [0.05, 0.05]
im_max = 30
B_max = 1
# S1 = np.ones(ndof)
# Ps = Stationary(power_spectrum_object='white_noise', ndof=ndof)
# power_spectrum = Ps.power_spectrum_excitation(freq=freq, S0=S1)
# Von Karman
Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
power_spectrum, U = Ps.power_spectrum_excitation(u10=6.2371, freq=freq, z=z)
# plt.semilogy(freq/(2*np.pi), power_spectrum[:,0])
# plt.show()
# columns["area"] = 0.001
# columns.update({"area": 0.001})
ks = []
ms = []
msf = []
#cost = []
nlc = 100
lc = np.linspace(0.05, 2, nlc)
# fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
# fig.suptitle('Mass and Stiffness')
# ax1.plot(lc,ms)
# ax1.plot(lc,msf)
# ax2.plot(lc,ks)
# ax3.plot(ks,cost)
# plt.show()
columns = update_columns(columns=columns, lx=0.4, ly=0.4)
Building = Structure(building, columns, slabs, core, concrete, steel)
k_story = Building.stiffness_story()
m_story = Building.mass_storey(top_story=False)
m_story_f = Building.mass_storey(top_story=True)
k = np.ones(ndof) * [k_story]
m = np.ones(ndof) * [m_story]
m[-1] = m_story_f
length = 0.3
size_col = np.ones(ndof) * [length]
Sto = Stochastic(power_spectrum=power_spectrum, model='bouc_wen', ndof=ndof, freq=freq)
#Opt = PerformanceOpt(power_spectrum=power_spectrum, model='bouc_wen', freq=freq, tol=1e-5, maxiter=100,
# design_life=1) # design_life = 50
# total_cost = Opt.objective_function(size_col=size_col, ksi=ksi, im_max=im_max, B_max=B_max, gamma=gamma, nu=nu,
# alpha=alpha, a=a)
#CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete,
# steel=steel, cost=cost)
#size_col = np.ones(ndof) * [0.5]
#size_col = np.array([1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
#size_col = np.array([0.1, 0.2, 0.3])
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
sizea = 0.1
sizeb = 1
wa = 0.1
wb=100
npar = 10
nw = 10
X = np.zeros((npar * nw, 3 * ndof + 1))
y = np.zeros((npar * nw, 2 * ndof))
ct=0
ct1=0
for kk in range(npar):
size_col = sizea+(sizeb-sizea)*np.random.rand(ndof)
M, C, K, m, c, k = Sto.get_MCK(size_col=size_col, args=args, columns=columns)
for i in range(nw):
im = wa + (wb - wa) * np.random.rand(1)[0]
idd = 0
for j in np.arange(0, 3 * ndof, 3):
X[ct, j] = m[idd]
X[ct, j + 1] = c[idd]
X[ct, j + 2] = k[idd]
idd = idd + 1
X[ct, -1] = im
ct = ct + 1
Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=freq, z=z)
Var, Vard = Sto.statistical_linearization(M=M, C=C, K=K, power_sp=power_spectrum, tol=0.01, maxiter=100,
gamma=gamma, nu=nu, alpha=alpha, a=a)
idd = 0
for j in np.arange(0, 2 * ndof, 2):
y[ct1, j] = Var[idd][0]
y[ct1, j + 1] = Vard[idd][0]
idd = idd + 1
ct1 = ct1 + 1
print(np.shape(y))
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels_U = [None,
ConstantKernel(1.0, (1e-4, 1e4)) * RBF(1, (1e-4, 1e4)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=1,
length_scale_bounds=(1.0e-5, 100.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, nu=1.5)]
gp = GaussianProcessRegressor(kernel=kernels_U[0], n_restarts_optimizer=10, normalize_y=False)
gp.fit(X, y)
r2 = gp.score(X, y)
print(r2)
yp = gp.predict(np.array(X[2].reshape(1, -1)))
val = X[2]
val[-1]=100.0
print(val)
yp = gp.predict(val.reshape(1, -1))
print(yp)
#print(np.shape(X))
#print(np.shape(y))
#nn_architecture = [
# {"input_dim": 10, "output_dim": 25, "activation": "relu"},
# {"input_dim": 25, "output_dim": 50, "activation": "relu"},
# {"input_dim": 50, "output_dim": 50, "activation": "relu"},
# {"input_dim": 50, "output_dim": 25, "activation": "relu"},
# {"input_dim": 25, "output_dim": 6, "activation": "relu"},
#]
#from neural import NeuralNets
#from sklearn.model_selection import train_test_split
#NN = NeuralNets(nn_architecture)
#TEST_SIZE = 0.1
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=132)
##print(X_train)
#params_values, cost_history = NN.train(X=np.transpose(X_train), Y=np.transpose(y_train), epochs=1000,
# learning_rate=1, verbose=True)
"""
b0 = np.linspace(0.1, 0.5, 20)
cost_f = []
cost_i = []
cost_t = []
mm = []
pp = []
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
for i in range(len(b0)):
Cf = CostFailure.cost_damage(b=b0[i], col_size=size_col[0], L=columns["height"], ncolumns=columns["quantity"],
dry_wall_area=dry_wall_area)
Ci = CostFailure.initial_cost_stiffness(col_size=b0[i], par0=25.55133, par1=0.33127)
scol = np.array([b0[i], b0[i]])
Ct = Opt.objective_function(size_col=scol, args=args)
#mom, phi = Building.compression(col_size=b0[i], L=columns["height"])
cost_f.append(Cf)
cost_i.append(Ci)
cost_t.append(Ct)
fig = plt.figure()
plt.plot(b0, cost_t,'-o')
plt.show()
#fig = plt.figure()
#plt.plot(phi, mom,'-o')
#plt.show()
"""
"""
b0 = np.linspace(0.05,0.5,5)
b1 = np.linspace(0.05,0.5,5)
B0, B1 = np.meshgrid(b0, b1)
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
tc = np.zeros((5, 5))
for i in range(len(b0)):
print(i)
for j in range(len(b1)):
size_col = np.array([b0[i], b1[j]])
resp = Opt.objective_function(size_col=size_col, args=args)
tc[i,j] = resp
Z = tc.reshape(B0.shape)
Z = np.array(Z)
nd = np.unravel_index(np.argmin(Z, axis=None), Z.shape)
print([B0[nd], B1[nd]])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(B0, B1, np.log(Z), cmap=plt.cm.get_cmap('plasma'),linewidth=0, antialiased=False)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
"""
#size_col = np.ones(ndof) * [0.2]
#args=[ksi, im_max, B_max, gamma, nu, alpha, a]
##args = {"ksi": ksi, "im_max": im_max, "B_max": B_max, "gamma": gamma, "nu": nu, "alpha": alpha, "a": a}
#bnds = []
#for i in range(ndof):
# bnds.append((0.1, 1))
#bnds=tuple(bnds)
###from scipy import optimize
###res = optimize.fmin(Opt.objective_function, x0=size_col)
#res = minimize(Opt.objective_function, x0=size_col, args=args, bounds=bnds)
###from scipy.optimize import basinhopping
###minimizer_kwargs = {"method": "BFGS", "args": args}
###ret = basinhopping(Opt.objective_function, x0=size_col, minimizer_kwargs=minimizer_kwargs, niter=200)
#print(res)
### Global methods.
###from scipy.optimize import rosen, shgo
###from scipy.optimize import dual_annealing
###ret = dual_annealing(Opt.objective_function, bounds=bnds)
###print((ret.x, ret.fun))
#c = Opt.linear_damping(m=m, k=k, ksi=ksi)
#M, C, K = Opt.create_mck(m=m, c=c, k=k, gamma=gamma, nu=nu, alpha=alpha, a=a)
#financial_loss_rate = Opt.stochastic_financial_loss(M=M, C=C, K=K, stiff=k, im_max=im_max,
# B_max=B_max, size_col=size_col, Nim=1, NB=1, gamma=gamma, nu=nu,
# alpha=alpha, a=a)
| 30.157143 | 114 | 0.620441 |
f48e9c0665ea9a8d85811305b04f10d8aba4b991 | 777 | py | Python | categorical_embedder/embedders/core/aux/custom_object_handler.py | erelcan/categorical-embedder | 376b8779500af2aa459c879f8e525f2ef25d6b31 | [
"Apache-2.0"
] | 3 | 2020-12-19T10:52:58.000Z | 2021-06-08T09:06:44.000Z | categorical_embedder/embedders/core/aux/custom_object_handler.py | erelcan/categorical-embedder | 376b8779500af2aa459c879f8e525f2ef25d6b31 | [
"Apache-2.0"
] | null | null | null | categorical_embedder/embedders/core/aux/custom_object_handler.py | erelcan/categorical-embedder | 376b8779500af2aa459c879f8e525f2ef25d6b31 | [
"Apache-2.0"
] | null | null | null | from categorical_embedder.embedders.core.aux.custom_layers import get_custom_layer_class
from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function
| 35.318182 | 88 | 0.804376 |
f48f23b7a5506d60b9ac1a5607df61a337660101 | 10,406 | py | Python | osprofiler/cmd/shell.py | charliebr30/osprofiler | cffca4e29e373e3f09f2ffdd458761183a851569 | [
"Apache-2.0"
] | null | null | null | osprofiler/cmd/shell.py | charliebr30/osprofiler | cffca4e29e373e3f09f2ffdd458761183a851569 | [
"Apache-2.0"
] | 1 | 2017-04-15T22:16:06.000Z | 2017-04-15T22:16:06.000Z | osprofiler/cmd/shell.py | shwsun/osprofiler | 46d29fc5ab8a4068217e399883f39cdd443a7500 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Profiler.
"""
import argparse
import inspect
import sys
from oslo_config import cfg
import osprofiler
from osprofiler.cmd import cliutils
from osprofiler.cmd import commands
from osprofiler import exc
from osprofiler import opts
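# main() is invoked below, but its definition (and the shell class it drives)
# was not included in this snippet; a placeholder sketch (assumption) so the
# entry point is at least well-defined:
def main(args=None):
    args = sys.argv[1:] if args is None else args
    # The real CLI parses `args` and dispatches to the command classes in
    # osprofiler.cmd.commands; that dispatch logic is elided here.
    raise NotImplementedError("command dispatch elided from this snippet")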
if __name__ == "__main__":
main()
| 42.129555 | 79 | 0.548818 |
f48f3252e9a2f94d57cf6c129396083ea3b2d577 | 3,695 | py | Python | bmt/util.py | patrickkwang/bmt-lite | bf97f6155702a8eb38daf5a45df34b0ce1cb1a4b | [
"MIT"
] | null | null | null | bmt/util.py | patrickkwang/bmt-lite | bf97f6155702a8eb38daf5a45df34b0ce1cb1a4b | [
"MIT"
] | null | null | null | bmt/util.py | patrickkwang/bmt-lite | bf97f6155702a8eb38daf5a45df34b0ce1cb1a4b | [
"MIT"
] | null | null | null | """Utilities."""
from functools import wraps
import re
from typing import Callable, List, Optional, TypeVar, Union
from .data import (
all_classes, all_slots,
)
def pascal_to_snake(s: str, sep: str = "_") -> str:
"""Convert Pascal case to snake case.
Assumes that
a) all words are either all-lowercase or all-uppercase
b) all 1-letter words are lowercase
c) there are no adjacent 1-letter words
d) there are no adjacent uppercase words
Examples:
PhenotypicFeature -> phenotypic_feature
RNAProduct -> RNA_product
FeedACamel -> feed_a_camel
Optionally specify `sep` (default "_").
"""
# add an underscore before each capital letter
underscored = re.sub(
r"(?<!^)(?=[A-Z])",
sep,
s,
)
# collapse any adjacent one-letter words
collapsed = re.sub(
r"(?<![a-zA-Z])[A-Z](?:_[A-Z](?=$|_))+",
lambda match: match.group(0).replace("_", ""),
underscored,
)
# lower-case any words containing only one uppercase letter
lowercased = re.sub(
r"(?<![A-Z])[A-Z](?![A-Z])",
lambda match: match.group(0).lower(),
collapsed,
)
return lowercased
def snake_to_pascal(s: str, sep: str = "_") -> str:
"""Convert snake case to Pascal case.
This is the inverse of pascal_to_snake() when its assumptions
are true.
Optionally specify `sep` (default "_").
"""
return re.sub(
fr"(?:^|{sep})([a-zA-Z])",
lambda match: match.group(1).upper(),
s
)
def guess_casing(s: str) -> str:
"""Guess snake case or Pascal case."""
if "_" in s:
return "snake"
if any(c.isupper() for c in s):
return "pascal"
return "snake"
def normalize(s: str) -> str:
"""Normalize string input."""
if s.startswith("biolink:"):
s = s[8:]
if "_" in s:
# it's snake case
return s.replace("_", " ")
if " " in s:
return s
return pascal_to_snake(s, " ")
T = TypeVar("T")
def listify(func: Callable) -> Callable:
    """Expand function to take list of arguments."""
    @wraps(func)  # inner wrapper was missing here; minimal sketch (assumption)
    def wrapper(arg: Union[List[T], T], **kwargs):
        # apply func element-wise to a list, directly to a single value
        if isinstance(arg, list):
            return [func(el, **kwargs) for el in arg]
        return func(arg, **kwargs)
    return wrapper
def with_formatting():
    """Add format conversions to method."""
    def decorator(func: Callable) -> Callable:
        """Generate decorator."""
        @wraps(func)  # inner wrapper was missing here; minimal sketch (assumption)
        def wrapper(arg: str, **kwargs):
            # normalize snake-/Pascal-case input before delegating to the method
            return func(normalize(arg), **kwargs)
        return wrapper
    return decorator
| 27.781955 | 83 | 0.558863 |
f48f57744939caba5685c9a4b651a9c481a371aa | 657 | py | Python | src/py_to_json/__init__.py | jlevitt/py-to-json | 26bb68926f5ada601e965f42980e438c9718be73 | [
"MIT"
] | null | null | null | src/py_to_json/__init__.py | jlevitt/py-to-json | 26bb68926f5ada601e965f42980e438c9718be73 | [
"MIT"
] | null | null | null | src/py_to_json/__init__.py | jlevitt/py-to-json | 26bb68926f5ada601e965f42980e438c9718be73 | [
"MIT"
] | null | null | null | #
# OMNIVORE CONFIDENTIAL
# __________________
#
# [2013] - [2019] Omnivore Technologies
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Omnivore Technologies and its suppliers,
# if any. The intellectual and technical concepts contained
# herein are proprietary to Omnivore Technologies
# and its suppliers and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Omnivore Technologies.
#
| 36.5 | 73 | 0.78691 |
be2e1617c4a15afe6886703b261c4b500fdae5e3 | 7,960 | py | Python | sktime/utils/time_series.py | brettkoonce/sktime | 6336247bad0dac8692aa4b911c267f401dea4163 | [
"BSD-3-Clause"
] | 1 | 2020-09-11T06:26:08.000Z | 2020-09-11T06:26:08.000Z | sktime/utils/time_series.py | brettkoonce/sktime | 6336247bad0dac8692aa4b911c267f401dea4163 | [
"BSD-3-Clause"
] | 2 | 2020-04-20T12:26:42.000Z | 2020-04-22T17:09:14.000Z | sktime/utils/time_series.py | brettkoonce/sktime | 6336247bad0dac8692aa4b911c267f401dea4163 | [
"BSD-3-Clause"
] | 1 | 2022-02-14T18:19:01.000Z | 2022-02-14T18:19:01.000Z | __author__ = ["Markus Lning"]
__all__ = [
"compute_relative_to_n_timepoints",
"time_series_slope",
"fit_trend",
"remove_trend",
"add_trend"
]
import numpy as np
from sklearn.utils import check_array
from sktime.utils.validation.forecasting import check_time_index
def compute_relative_to_n_timepoints(n_timepoints, n="sqrt"):
"""
Get number of intervals from number of time points for various allowed
input arguments.
Helpful to compute number of intervals relative to time series length,
e.g. using floats or functions.
Parameters
----------
n_timepoints : int
n : {int, float, str, callable}
Returns
-------
n_intervals_ : int
Computed number of intervals
"""
# check input: n_timepoints
if not np.issubdtype(type(n_timepoints), np.dtype(int).type):
raise ValueError(
f"`n_timepoints` must be an integer, but found: "
f"{type(n_timepoints)}")
if not n_timepoints >= 1:
raise ValueError(
f"`n_timepoints` must be >= 1, but found: {n_timepoints}")
# compute number of splits
allowed_strings = ["sqrt", "log"]
# integer
if np.issubdtype(type(n), np.dtype(int).type):
if not n <= n_timepoints:
raise ValueError(
f"If `n_intervals` is an integer, it must be smaller "
f"than `n_timepoints`, but found: `n_intervals`={n} "
f"and `n_timepoints`={n_timepoints}")
if n < 1:
raise ValueError(f"If `n_intervals` is an integer, "
f"`n_intervals` must be >= 1, but found: {n}")
n_intervals_ = n
# function
elif callable(n):
n_intervals_ = n(n_timepoints)
# string
elif isinstance(n, str):
if n not in allowed_strings:
raise ValueError(
f"If `n_intervals` is a string, `n_intervals` must be "
f"in {allowed_strings}, but found: {n}")
str_func_map = {
"sqrt": np.sqrt,
"log": np.log
}
func = str_func_map[n]
n_intervals_ = func(n_timepoints)
# float
elif isinstance(n, float):
if not (0 < n <= 1):
raise ValueError(
f"If `n_intervals` is a float, `n_intervals` must be > 0 "
f"and <= 1, but found: {n}")
n_intervals_ = n * n_timepoints
else:
raise ValueError(
f"`n_intervals` must be either one of the allowed string options "
f"in "
f"{allowed_strings}, an integer or a float number.")
# make sure n_intervals is an integer and there is at least one interval
n_intervals_ = np.maximum(1, np.int(n_intervals_))
return n_intervals_
def time_series_slope(y):
"""
Compute slope of time series (y) using ordinary least squares.
Parameters
----------
y : array_like
Time-series.
Returns
-------
slope : float
Slope of time-series.
"""
y = np.asarray(y).ravel()
len_series = len(y)
if len_series < 2:
return 0
else:
x = np.arange(len_series) # time index
x_mean = (len_series - 1) / 2 # faster than x.mean()
return (np.mean(x * y) - x_mean * np.mean(y)) / (
np.mean(x ** 2) - x_mean ** 2)
def fit_trend(x, order=0):
"""Fit linear regression with polynomial terms of given order
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is fitted separately
order : int
The polynomial order of the trend, zero is constant (mean), one is
linear trend, two is quadratic trend, and so on.
Returns
-------
coefs : ndarray, shape=[n_samples, order + 1]
Fitted coefficients of polynomial order for each sample, one column
means order zero, two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
See Also
-------
add_trend
remove_trend
"""
x = check_array(x)
if order == 0:
coefs = np.mean(x, axis=1).reshape(-1, 1)
else:
n_obs = x.shape[1]
index = np.arange(n_obs)
poly_terms = np.vander(index, N=order + 1)
# linear least squares fitting using numpy's optimised routine,
# assuming samples in columns
# coefs = np.linalg.pinv(poly_terms).dot(x.T).T
coefs, _, _, _ = np.linalg.lstsq(poly_terms, x.T, rcond=None)
# returning fitted coefficients in expected format with samples in rows
coefs = coefs.T
return coefs
def remove_trend(x, coefs, time_index=None):
"""Remove trend from an array with a trend of given order along axis 0 or 1
Parameters
----------
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is de-trended separately
coefs : ndarray, shape=[n_samples, order + 1]
Fitted coefficients for each sample, single column means order zero,
two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
time_index : array-like, shape=[n_obs], optional (default=None)
Time series index for which to add the trend components
Returns
-------
xt : ndarray
The de-trended series is the residual of the linear regression of the
data on the trend of given order.
See Also
--------
fit_trend
add_trend
References
----------
Adapted from statsmodels (0.9.0), see
https://www.statsmodels.org/dev/_modules/statsmodels/tsa/tsatools.html
#detrend
"""
x = check_array(x)
# infer order from shape of given coefficients
order = coefs.shape[1] - 1
# special case, remove mean
if order == 0:
xt = x - coefs
return xt
else:
if time_index is None:
# if no time index is given, create range index
n_obs = x.shape[1]
time_index = np.arange(n_obs)
else:
# validate given time index
time_index = check_time_index(time_index)
if not len(time_index) == x.shape[1]:
raise ValueError(
'Length of passed index does not match length of passed x')
poly_terms = np.vander(time_index, N=order + 1)
xt = x - np.dot(poly_terms, coefs.T).T
return xt
def add_trend(x, coefs, time_index=None):
"""Add trend to array for given fitted coefficients along axis 0 or 1,
inverse function to `remove_trend()`
Parameters
----------
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is treated separately
coefs : array-like, shape=[n_samples, order + 1]
fitted coefficients of polynomial order for each sample, one column
means order zero, two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
time_index : array-like, shape=[n_obs], optional (default=None)
Time series index for which to add the trend components
Returns
-------
xt : ndarray
The series with added trend.
See Also
-------
fit_trend
remove_trend
"""
x = check_array(x)
# infer order from shape of given coefficients
order = coefs.shape[1] - 1
# special case, add mean
if order == 0:
xt = x + coefs
else:
if time_index is None:
n_obs = x.shape[1]
time_index = np.arange(n_obs)
else:
# validate given time index
time_index = check_time_index(time_index)
if not len(time_index) == x.shape[1]:
raise ValueError(
'Length of passed index does not match length of passed x')
poly_terms = np.vander(time_index, N=order + 1)
xt = x + np.dot(poly_terms, coefs.T).T
return xt
| 29.157509 | 79 | 0.593593 |
be2e7ef040dc5a54cf6259bfaf5348f1c97d85ac | 2,061 | py | Python | prog_vae/prog_encoder/prog_encoder.py | Hanjun-Dai/sdvae | bd26ea949c496419634fd2cf4802fc8e19a9194c | [
"MIT"
] | 70 | 2018-02-24T07:50:59.000Z | 2021-12-27T02:42:37.000Z | prog_vae/prog_encoder/prog_encoder.py | Hanjun-Dai/sdvae | bd26ea949c496419634fd2cf4802fc8e19a9194c | [
"MIT"
] | 7 | 2018-05-31T00:50:19.000Z | 2021-09-28T11:58:22.000Z | prog_vae/prog_encoder/prog_encoder.py | Hanjun-Dai/sdvae | bd26ea949c496419634fd2cf4802fc8e19a9194c | [
"MIT"
] | 19 | 2019-01-11T10:56:00.000Z | 2022-03-23T23:09:39.000Z | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import csv
import numpy as np
import math
import random
from collections import defaultdict
import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append( '%s/../prog_common' % os.path.dirname(os.path.realpath(__file__)) )
from prog_util import DECISION_DIM
from cmd_args import cmd_args
from pytorch_initializer import weights_init
sys.path.append( '%s/../cfg_parser' % os.path.dirname(os.path.realpath(__file__)) )
import cfg_parser as parser
if __name__ == '__main__':
pass
| 30.308824 | 91 | 0.6623 |
be30c6f12931ff680481e45af1a532c7eab58cb2 | 1,089 | py | Python | pyvmu/messages.py | JosephRedfern/VarienseVMU | e27c05a83124e024cd049b10f7d682f7f41a5c73 | [
"MIT"
] | 5 | 2017-10-23T13:13:09.000Z | 2018-05-07T14:38:47.000Z | pyvmu/messages.py | JosephRedfern/VarienseVMU | e27c05a83124e024cd049b10f7d682f7f41a5c73 | [
"MIT"
] | 2 | 2018-04-18T08:15:52.000Z | 2018-05-17T11:32:47.000Z | pyvmu/messages.py | JosephRedfern/VarienseVMU | e27c05a83124e024cd049b10f7d682f7f41a5c73 | [
"MIT"
] | 3 | 2017-09-06T18:05:21.000Z | 2018-11-21T13:08:16.000Z | from collections import namedtuple
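# Immutable message containers for parsed sensor packets: a timestamp plus
# axis values or device status flags.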
Accelerometer = namedtuple('Accelerometer', ["timestamp", "x", "y", "z"])
Magnetometer = namedtuple('Magnetometer', ['timestamp', 'x', 'y', 'z'])
Gyroscope = namedtuple('Gyroscope', ['timestamp', 'x', 'y', 'z'])
Euler = namedtuple('Euler', ['timestamp', 'x', 'y', 'z'])
Quaternion = namedtuple('Quaternion', ['timestamp', 'w', 'x', 'y', 'z'])
Heading = namedtuple('Heading', ['timestamp', 'h'])
Status = namedtuple('Status', ['magnetometer_enabled',
'gyroscope_enabled',
'accelerometer_enabled',
'gyroscope_resolution',
'accelerometer_resolution',
'low_output_rate',
'heading_streaming',
'euler_streaming',
'magnetometer_streaming',
'quaternions_streaming',
'gyroscope_streaming',
'accelerometer_streaming'])
| 45.375 | 73 | 0.486685 |
be313f1e475a00f009ff53d9286703681a5859de | 2,847 | py | Python | scripts/Caesar-Cipher/CaesarCipher.py | Pythobit/python-projects | 1a6ee3f0f417846626dfa021af49c999771a0199 | [
"MIT"
] | 2 | 2021-10-19T06:17:33.000Z | 2021-10-19T06:17:37.000Z | scripts/Caesar-Cipher/CaesarCipher.py | Pythobit/Python-Projects | 1a6ee3f0f417846626dfa021af49c999771a0199 | [
"MIT"
] | 4 | 2021-10-19T06:04:36.000Z | 2021-10-19T11:42:57.000Z | scripts/Caesar-Cipher/CaesarCipher.py | Pythobit/Python-Projects | 1a6ee3f0f417846626dfa021af49c999771a0199 | [
"MIT"
] | 1 | 2021-10-19T06:55:26.000Z | 2021-10-19T06:55:26.000Z | from __future__ import print_function
import os
import string
import argparse
try:
maketrans = string.maketrans # python2
except AttributeError:
maketrans = str.maketrans # python3
def caeser_cipher(string_: str, offset: int, decode: bool, file_: string) -> None:
"""Caeser Cipher implementation, reads file or string. Also decodes.
Default implementation is ROT13 encoding.
To decode, specify the same offset you used to encode and your ciphertext / file.
:param string_: string to encode / decode
:param offset: # of chars to rotate by
:param decode: decode instead of encode
:param file_: file to read in then encode/decode
"""
if file_ and os.path.exists(file_):
with open(file_, "r") as f:
string_ = f.read()
if decode:
offset *= -1
lower_offset_alphabet = (
string.ascii_lowercase[offset:] + string.ascii_lowercase[:offset]
)
lower_translation_table = maketrans(string.ascii_lowercase, lower_offset_alphabet)
upper_offset_alphabet = (
string.ascii_uppercase[offset:] + string.ascii_uppercase[:offset]
)
upper_translation_table = maketrans(string.ascii_uppercase, upper_offset_alphabet)
lower_converted = string_.translate(lower_translation_table)
final_converted = lower_converted.translate(upper_translation_table)
if file_:
extension = "dec" if decode else "enc"
with open("{}.{}".format(file_, extension), "w") as f:
print(final_converted, file=f)
else:
print(final_converted)
def check_offset_range(value: int) -> int:
"""Validates that value is in the allowable range.
:param value: integer to validate
:return: valid integer
:raises: argparse.ArgumentTypeError
"""
value = int(value)
if value < -25 or value > 25:
raise argparse.ArgumentTypeError("{} is an invalid offset".format(value))
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Simple Caeser Cipher Encoder and Decoder"
)
parser.add_argument(
"-d",
"--decode",
action="store_true",
dest="decode",
help="decode ciphertext (offset should equal what was used to encode)",
default=False,
)
parser.add_argument(
"-o",
"--offset",
dest="offset",
default=13,
type=check_offset_range,
help="number of characters to shift",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-f", "--file", dest="file", help="file to encode", default=None)
group.add_argument(
"-s", "--string", dest="string", help="string to encode", default=None
)
args = parser.parse_args()
caeser_cipher(args.string, args.offset, args.decode, args.file)
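# Hypothetical command-line examples (assuming this script is saved as
# CaesarCipher.py; these lines are illustrative, not part of the original):
#   python CaesarCipher.py -s "Hello, World!"        # ROT13 -> "Uryyb, Jbeyq!"
#   python CaesarCipher.py -s "Uryyb, Jbeyq!" -d     # decodes back
#   python CaesarCipher.py -f notes.txt -o 5         # writes notes.txt.enc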
| 30.945652 | 88 | 0.663505 |
be31bc2fba335d1b861c92be573990bfd80133fd | 8,217 | py | Python | onadata/libs/permissions.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | [
"BSD-2-Clause"
] | 38 | 2017-02-28T05:39:40.000Z | 2019-01-16T04:39:04.000Z | onadata/libs/permissions.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | [
"BSD-2-Clause"
] | 20 | 2017-04-27T09:14:27.000Z | 2019-01-17T06:35:52.000Z | onadata/libs/permissions.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | [
"BSD-2-Clause"
] | 5 | 2017-02-22T12:25:19.000Z | 2019-01-15T11:16:40.000Z | from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from guardian.shortcuts import (
assign_perm,
remove_perm,
get_perms,
get_users_with_perms)
from onadata.apps.api.models import OrganizationProfile
from onadata.apps.main.models.user_profile import UserProfile
from onadata.apps.logger.models import XForm
from onadata.apps.api.models import Project
# Userprofile Permissions
CAN_ADD_USERPROFILE = 'add_userprofile'
CAN_CHANGE_USERPROFILE = 'change_userprofile'
CAN_DELETE_USERPROFILE = 'delete_userprofile'
CAN_ADD_XFORM_TO_PROFILE = 'can_add_xform'
CAN_VIEW_PROFILE = 'view_profile'
# Organization Permissions
CAN_VIEW_ORGANIZATION_PROFILE = 'view_organizationprofile'
CAN_ADD_ORGANIZATION_PROFILE = 'add_organizationprofile'
CAN_ADD_ORGANIZATION_XFORM = 'can_add_xform'
CAN_CHANGE_ORGANIZATION_PROFILE = 'change_organizationprofile'
CAN_DELETE_ORGANIZATION_PROFILE = 'delete_organizationprofile'
IS_ORGANIZATION_OWNER = 'is_org_owner'
# Xform Permissions
CAN_CHANGE_XFORM = 'change_xform'
CAN_ADD_XFORM = 'add_xform'
CAN_DELETE_XFORM = 'delete_xform'
CAN_VIEW_XFORM = 'view_xform'
CAN_ADD_SUBMISSIONS = 'report_xform'
CAN_TRANSFER_OWNERSHIP = 'transfer_xform'
CAN_MOVE_TO_FOLDER = 'move_xform'
# Project Permissions
CAN_VIEW_PROJECT = 'view_project'
CAN_CHANGE_PROJECT = 'change_project'
CAN_TRANSFER_PROJECT_OWNERSHIP = 'transfer_project'
CAN_DELETE_PROJECT = 'delete_project'
CAN_ADD_DATADICTIONARY = 'add_datadictionary'
CAN_CHANGE_DATADICTIONARY = 'change_datadictionary'
CAN_DELETE_DATADICTIONARY = 'delete_datadictionary'
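# NOTE: the Role class hierarchy is not present in this extract. The sketch
# below is a hypothetical reconstruction (an assumption, not the verbatim
# onadata code) of the minimal interface that ROLES_ORDERED, the memoization
# loop and get_role() below rely on: each role exposes a `name` and a
# `permissions` tuple of (permission_codename, model_class) pairs.
class Role(object):
    class_to_permissions = None
    name = None
    permissions = tuple()
    @classmethod
    def _permissions_for(cls, obj):
        # permission codenames this role grants on objects of obj's class
        return [p for p, klass in cls.permissions if isinstance(obj, klass)]
    @classmethod
    def add(cls, user, obj):
        for codename in cls._permissions_for(obj):
            assign_perm(codename, user, obj)
    @classmethod
    def remove_obj_permissions(cls, user, obj):
        for codename in get_perms(user, obj):
            remove_perm(codename, user, obj)
class ReadOnlyRole(Role):
    name = 'readonly'
    permissions = (
        (CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
        (CAN_VIEW_XFORM, XForm),
        (CAN_VIEW_PROJECT, Project),
    )
class DataEntryRole(ReadOnlyRole):
    name = 'dataentry'
    permissions = ReadOnlyRole.permissions + (
        (CAN_ADD_SUBMISSIONS, XForm),
    )
class EditorRole(DataEntryRole):
    name = 'editor'
    permissions = DataEntryRole.permissions + (
        (CAN_CHANGE_XFORM, XForm),
        (CAN_CHANGE_PROJECT, Project),
    )
class ManagerRole(EditorRole):
    name = 'manager'
    permissions = EditorRole.permissions + (
        (CAN_ADD_XFORM, XForm),
        (CAN_DELETE_XFORM, XForm),
    )
class OwnerRole(ManagerRole):
    name = 'owner'
    permissions = ManagerRole.permissions + (
        (CAN_TRANSFER_OWNERSHIP, XForm),
        (CAN_TRANSFER_PROJECT_OWNERSHIP, Project),
        (CAN_DELETE_PROJECT, Project),
    )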
ROLES_ORDERED = [ReadOnlyRole,
DataEntryRole,
EditorRole,
ManagerRole,
OwnerRole]
ROLES = {role.name: role for role in ROLES_ORDERED}
# Memoize a class-to-permissions dict on each role.
for role in ROLES.values():
    role.class_to_permissions = defaultdict(list)
    for p, k in role.permissions:
        role.class_to_permissions[k].append(p)
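# `get_role` and `is_organization` are referenced below but their bodies are
# missing from this extract; the following are hypothetical stand-ins
# (assumptions, not the original implementations):
def get_role(permissions, obj):
    """Return the name of the highest role covered by the permission list."""
    for role in reversed(ROLES_ORDERED):
        role_perms = role.class_to_permissions.get(type(obj), [])
        if role_perms and set(role_perms).issubset(set(permissions)):
            return role.name
    return None
def is_organization(profile):
    """Return True if the given user profile belongs to an organization."""
    try:
        profile.user.organizationprofile
        return True
    except OrganizationProfile.DoesNotExist:
        return False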
def get_object_users_with_permissions(obj, exclude=None, serializable=False):
"""Returns users, roles and permissions for a object.
When called with with `serializable=True`, return usernames (strings)
instead of User objects, which cannot be serialized by REST Framework.
"""
result = []
if obj:
users_with_perms = get_users_with_perms(
obj, attach_perms=True, with_group_users=False).items()
result = [{
'user': user if not serializable else user.username,
'role': get_role(permissions, obj),
'permissions': permissions} for user, permissions in
users_with_perms if not is_organization(
UserProfile.objects.get_or_create(user=user)[0]
)
]
return result
| 31.848837 | 77 | 0.69271 |
be330b0c9754c05467f2b02c3762c1390226f3d3 | 10,078 | py | Python | lanelines.py | gauborg/lane-finding-gborgaonkar | 466313a0da7c245e25f0987afa953300501d5322 | [
"MIT"
] | null | null | null | lanelines.py | gauborg/lane-finding-gborgaonkar | 466313a0da7c245e25f0987afa953300501d5322 | [
"MIT"
] | null | null | null | lanelines.py | gauborg/lane-finding-gborgaonkar | 466313a0da7c245e25f0987afa953300501d5322 | [
"MIT"
] | null | null | null | # Self-Driving Car Engineer Nanodegree
#
# ## Project: **Finding Lane Lines on the Road**
# ## Import Packages
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import moviepy
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
import math
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=5):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
# lists to store the slopes of lines which match our criteria
left_slope = []
right_slope = []
# lists to store the calculate b intercepts of these lines
left_b = []
right_b = []
for line in lines:
for x1,y1,x2,y2 in line:
slope = ((y2-y1)/(x2-x1))
# only select lines with specific slope range
if(((slope < 0.8) and (slope > 0.5)) or ((slope > -0.8) and (slope < -0.5))):
# check where the endpoints lie on the image...
if (x1 < (img.shape[1]/2) and x2 < (img.shape[1]/2)):
left_slope.append(slope)
left_b.append(y1-slope*x1)
left_b.append(y2-slope*x2)
else:
right_slope.append(slope)
right_b.append(y1-slope*x1)
right_b.append(y2-slope*x2)
try:
# we calculate average slope to draw the line
avg_left_slope = sum(left_slope)/len(left_slope)
avg_right_slope = sum(right_slope)/len(right_slope)
avg_left_b = sum(left_b)/len(left_b)
avg_right_b = sum(right_b)/len(right_b)
# Y co-ordinate of the lane line will definitely be at the bottom of the image
y1 = img.shape[0]
y2 = 320
y3 = 320
y4 = img.shape[0]
# X co-ordinate can be calculated by using the eqn of the line and y co-ordinate
x1 = (y1 - avg_left_b)/avg_left_slope
x2 = (y2 - avg_left_b)/avg_left_slope
x3 = (y3 - avg_right_b)/avg_right_slope
x4 = (y4 - avg_right_b)/avg_right_slope
# draw the lines, converting values to integer for pixels
cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
cv2.line(img, (int(x3), int(y3)), (int(x4), int(y4)), color, thickness)
except ZeroDivisionError as error:
pass
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    `img` is the output of hough_lines(): an image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.
    `initial_img` should be the image before any processing.
    The result image is computed as follows:
    initial_img * α + img * β + γ
    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
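# NOTE: `lanelines()` and `process_image()` are called below but their
# defining cell is missing from this extract. The two functions here are a
# hypothetical reconstruction (an assumption, not the author's original)
# that chains the helper functions above; the Canny/Hough numbers are
# illustrative guesses, not tuned values.
def lanelines(image):
    gray = grayscale(image)
    blurred = gaussian_blur(gray, kernel_size=5)
    edges = canny(blurred, low_threshold=50, high_threshold=150)
    # trapezoidal region of interest ahead of the car; y=320 matches the
    # horizon hard-coded in draw_lines()
    imshape = image.shape
    vertices = np.array([[(0, imshape[0]), (450, 320), (490, 320),
                          (imshape[1], imshape[0])]], dtype=np.int32)
    masked = region_of_interest(edges, vertices)
    line_img = hough_lines(masked, rho=2, theta=np.pi / 180, threshold=15,
                           min_line_len=40, max_line_gap=20)
    return weighted_img(line_img, image)
def process_image(image):
    # the video pipeline applies the still-image pipeline to every frame
    return lanelines(image)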
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
import os
directory = os.listdir("test_images/")
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
for i in directory:
image = mpimg.imread(os.path.join("test_images/", i))
weighted_image = lanelines(image)
    mpimg.imsave(os.path.join("test_images_output", "output+" + i), weighted_image)
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
# `solidWhiteRight.mp4`
# `solidYellowLeft.mp4`
#
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# Import everything needed to edit/save/watch video clips
import imageio
from moviepy.editor import VideoFileClip
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) # NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
challenge_output = 'test_videos_output/challenge.mp4'
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
| 36.514493 | 137 | 0.682675 |
be3353aec22fa60209490a5516f7d6ee7289c13d | 1,873 | py | Python | zict/zip.py | phobson/zict | 666c7cd9fd4667cc8831a35cf958fd51788acd3e | [
"BSD-3-Clause"
] | null | null | null | zict/zip.py | phobson/zict | 666c7cd9fd4667cc8831a35cf958fd51788acd3e | [
"BSD-3-Clause"
] | null | null | null | zict/zip.py | phobson/zict | 666c7cd9fd4667cc8831a35cf958fd51788acd3e | [
"BSD-3-Clause"
] | null | null | null | try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
import zipfile
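# NOTE: only the imports of this module survive in this extract; the Zip
# class below is a hypothetical minimal sketch (an assumption, not the
# verbatim zict source) of a MutableMapping backed by a zip archive.
class Zip(MutableMapping):
    """Mutable Mapping interface to a Zip file.
    Keys are member filenames (strings); values are bytes."""
    def __init__(self, filename, mode='a'):
        self.filename = filename
        self.mode = mode
        self._file = None
    @property
    def file(self):
        # lazily (re)open the underlying archive
        if self.mode == 'closed':
            raise OSError("File closed")
        if self._file is None or not self._file.fp:
            self._file = zipfile.ZipFile(self.filename, mode=self.mode)
        return self._file
    def __getitem__(self, key):
        return self.file.read(key)
    def __setitem__(self, key, value):
        self.file.writestr(key, value)
    def __iter__(self):
        return iter(self.file.namelist())
    def __delitem__(self, key):
        # the stdlib zipfile module cannot delete archive members in place
        raise NotImplementedError("zipfile does not support deletion")
    def __len__(self):
        return len(self.file.namelist())
    def close(self):
        if self._file is not None:
            self._file.close()
        self.mode = 'closed'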
| 24.324675 | 88 | 0.595302 |
be357d6f3c1ddf5962bf29bb44f0430102e3f1c8 | 7,741 | py | Python | neutron_lbaas/drivers/driver_mixins.py | containers-kraken/neutron-lbaas | 43fbc34cc90512e33202bc4187ccf712dda6a782 | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/drivers/driver_mixins.py | containers-kraken/neutron-lbaas | 43fbc34cc90512e33202bc4187ccf712dda6a782 | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/drivers/driver_mixins.py | containers-kraken/neutron-lbaas | 43fbc34cc90512e33202bc4187ccf712dda6a782 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.plugins.common import constants
from oslo_log import log as logging
import six
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
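# NOTE: the mixin classes themselves are missing from this extract. Below is
# a hypothetical minimal sketch (an assumption, not the original
# neutron_lbaas source) of the abstract manager-mixin pattern that the
# imports above (abc, six, oslo logging) set up: each entity manager wraps a
# driver and must implement create/update/delete.
@six.add_metaclass(abc.ABCMeta)
class BaseManagerMixin(object):
    def __init__(self, driver):
        self.driver = driver
    @abc.abstractmethod
    def create(self, context, obj):
        pass
    @abc.abstractmethod
    def update(self, context, obj_old, obj):
        pass
    @abc.abstractmethod
    def delete(self, context, obj):
        pass
    def successful_completion(self, context, obj, delete=False):
        # a real implementation would flip the provisioning status of the
        # matching models.* row to constants.ACTIVE (or remove it on delete)
        LOG.debug("Completing %s for %s",
                  'delete' if delete else 'create/update', obj)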
| 42.070652 | 79 | 0.628472 |
be365df64e401dd1c966469e8ae1e80392fa4b62 | 1,207 | py | Python | Lib/hTools2/modules/ftp.py | miguelsousa/hTools2 | eab400677c1b21bb2519a7354a142e167c2b39ba | [
"BSD-3-Clause"
] | null | null | null | Lib/hTools2/modules/ftp.py | miguelsousa/hTools2 | eab400677c1b21bb2519a7354a142e167c2b39ba | [
"BSD-3-Clause"
] | null | null | null | Lib/hTools2/modules/ftp.py | miguelsousa/hTools2 | eab400677c1b21bb2519a7354a142e167c2b39ba | [
"BSD-3-Clause"
] | null | null | null | # [h] hTools2.modules.ftp
"""Tools to connect to a FTP server, upload files etc."""
# This module uses the `ftplib` library to handle FTP connection and upload.
# http://docs.python.org/library/ftplib.html
import os
from ftplib import FTP
def connect_to_server(url, login, password, folder, verbose=False):
"""Connects to the FTP server using the given connection settings.
    Use the given ``url``, ``login`` and ``password`` information to make a connection. Move to the given ``folder`` (if it exists), and return an ``FTP`` object.
To get to the lower level details about the FTP connection, use the optional parameter ``verbose=True``.
"""
# create FTP connection
ftp = FTP(url, login, password)
if verbose == True:
print "%s" % ftp.getwelcome()
# move to folder
ftp.cwd(folder)
if verbose == True:
ftp.retrlines('LIST')
print
return ftp
def upload_file(filePath, FTPconnection):
"""Upload the file at ``file_path`` to a FTP server, using the given ``ftp_connection``."""
file = open(filePath, 'rb')
fileName = os.path.split(filePath)[1]
FTPconnection.storbinary('STOR ' + fileName, file)
file.close()
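# A hypothetical usage sketch (server address and credentials are made-up
# placeholders, not real settings):
#
# ftp = connect_to_server('ftp.example.com', 'login', 'password', '/fonts', verbose=True)
# upload_file('/fonts/MyFont.otf', ftp)
# ftp.quit()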
| 30.175 | 161 | 0.6686 |
be368e6b255149306c28292dd49ca28ab1a75535 | 553 | py | Python | network/pytorch2onnx.py | MRsoymilk/toy-car | 5bd51bf231781a17e1d7acb4654c3d4b6adbed41 | [
"MIT"
] | null | null | null | network/pytorch2onnx.py | MRsoymilk/toy-car | 5bd51bf231781a17e1d7acb4654c3d4b6adbed41 | [
"MIT"
] | null | null | null | network/pytorch2onnx.py | MRsoymilk/toy-car | 5bd51bf231781a17e1d7acb4654c3d4b6adbed41 | [
"MIT"
] | null | null | null | import Net
import configparser
import torch
from PIL import Image
config = configparser.ConfigParser()
config.read('./config.ini')
MODEL = config.get("Network", "Model")
transformations = Net.transformations
net = Net.Net()
net.eval()
net.load_state_dict(torch.load(MODEL))
image = Image.open("./html/rwby.jpg")
image = transformations(image).float()
image = torch.autograd.Variable(image[None, ...])
torch.onnx.export(
net,
image,
MODEL.split('pth')[0] + 'onnx',
export_params=True,
output_names=['toy-car']
)
print("finish")
| 19.068966 | 49 | 0.703436 |
be385b749f1c26b913c643d471ca79a2fd89e72b | 724 | py | Python | var/spack/repos/builtin/packages/r-gridextra/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/r-gridextra/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/r-gridextra/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
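# NOTE: the package class itself is missing from this extract. The sketch
# below is a hypothetical reconstruction (an assumption, not the verbatim
# Spack recipe); concrete version()/sha256 directives are omitted because
# they cannot be recovered from this extract.
class RGridextra(RPackage):
    """Miscellaneous Functions for "Grid" Graphics.
    Provides a number of user-level functions to work with "grid" graphics,
    notably to arrange multiple grid-based plots on a page, and draw
    tables."""
    cran = "gridExtra"
    depends_on("r-gtable", type=("build", "run"))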
| 34.47619 | 95 | 0.754144 |
be387cca53cfcab985ce1dca7b42033320d21418 | 2,707 | py | Python | tuframework/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | tuframework/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | tuframework/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | import torch
from tuframework.network_architecture.generic_UNet import Generic_UNet
from tuframework.network_architecture.initialization import InitWeights_He
from tuframework.training.network_training.tuframework_variants.data_augmentation.tuframeworkTrainerV2_insaneDA import \
tuframeworkTrainerV2_insaneDA
from tuframework.utilities.nd_softmax import softmax_helper
from torch import nn
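# NOTE: the trainer class body is missing from this extract. Below is a
# hypothetical minimal sketch (an assumption, not the original tuframework
# code) of a dataset-specific trainer variant consistent with the imports
# above: it inherits the aggressive augmentation of the insaneDA parent and
# tweaks a few augmentation probabilities (the numbers are illustrative).
class tuframeworkTrainerV2_MMS(tuframeworkTrainerV2_insaneDA):
    def setup_DA_params(self):
        super().setup_DA_params()
        self.data_aug_params["p_rot"] = 0.7
        self.data_aug_params["p_eldef"] = 0.1
        self.data_aug_params["p_scale"] = 0.3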
| 44.377049 | 120 | 0.663465 |
be3baf27f812f65c9b958afcfa252dbaf8d5e093 | 3,088 | py | Python | ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py | romsok24/epiphany | f058984939561fc8d51288765976118ae12e6c32 | [
"Apache-2.0"
] | null | null | null | ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py | romsok24/epiphany | f058984939561fc8d51288765976118ae12e6c32 | [
"Apache-2.0"
] | null | null | null | ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py | romsok24/epiphany | f058984939561fc8d51288765976118ae12e6c32 | [
"Apache-2.0"
] | null | null | null | from typing import List
from src.command.command import Command
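# NOTE: the class body is missing from this extract. A hypothetical minimal
# sketch (an assumption, not the original source) of a yum wrapper built on
# the Command base class imported above; the method names and the Command
# API (`run([...])` returning an object with `.output`) are assumptions.
class Yum(Command):
    def __init__(self, retries: int):
        super().__init__('yum', retries)
    def install(self, package: str, assume_yes: bool = True):
        """Run `yum install <package>`."""
        no_ask: str = '-y' if assume_yes else ''
        self.run(['install', no_ask, package])
    def remove(self, package: str, assume_yes: bool = True):
        """Run `yum remove <package>`."""
        no_ask: str = '-y' if assume_yes else ''
        self.run(['remove', no_ask, package])
    def find_rhel_repo_id(self, patterns: List[str]) -> List[str]:
        """Return the repo ids from `yum repolist all` matching any pattern."""
        output = self.run(['repolist', 'all']).output
        return [repo for repo in patterns if repo in output]
    def is_repo_enabled(self, repo: str) -> bool:
        output = self.run(['repolist', 'enabled']).output
        return repo in output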
| 27.81982 | 66 | 0.51943 |
be3e44160e188056687e999ee1b846a80b373896 | 1,819 | py | Python | build/generate_confirmed_cases_by_counties.py | jtagcat/koroonakaart | 16a6eb24a19b286589b063742b03a123315feefc | [
"CC0-1.0",
"MIT"
] | 1 | 2021-12-20T23:05:58.000Z | 2021-12-20T23:05:58.000Z | build/generate_confirmed_cases_by_counties.py | jtagcat/koroonakaart | 16a6eb24a19b286589b063742b03a123315feefc | [
"CC0-1.0",
"MIT"
] | null | null | null | build/generate_confirmed_cases_by_counties.py | jtagcat/koroonakaart | 16a6eb24a19b286589b063742b03a123315feefc | [
"CC0-1.0",
"MIT"
] | 1 | 2021-12-20T23:05:47.000Z | 2021-12-20T23:05:47.000Z | from build.chart_data_functions import get_confirmed_cases_by_county
from build.chart_data_functions import get_county_by_day
from build.constants import CONFIRMED_CASES_BY_COUNTIES_PATH
from build.constants import COUNTY_MAPPING
from build.constants import COUNTY_POPULATION
from build.constants import DATE_SETTINGS
from build.constants import TEST_RESULTS_PATH
from build.constants import TODAY_DMYHM
from build.constants import YESTERDAY_YMD
from build.utils import analyze_memory
from build.utils import analyze_time
from build.utils import logger
from build.utils import read_json_from_file
from build.utils import save_as_json
import pandas as pd
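# NOTE: main() is invoked below but its definition is missing from this
# extract. The reconstruction here is hypothetical (an assumption, not the
# original): the exact signatures of the imported chart-data helpers are not
# recoverable, so the calls below only illustrate the intended flow.
@analyze_time
@analyze_memory
def main():
    logger.info("Loading test results up to %s", YESTERDAY_YMD)
    test_results = read_json_from_file(TEST_RESULTS_PATH)
    logger.info("Calculating county-level chart data")
    county_by_day = get_county_by_day(
        test_results, DATE_SETTINGS, COUNTY_MAPPING, COUNTY_POPULATION)
    confirmed_cases = get_confirmed_cases_by_county(test_results, COUNTY_MAPPING)
    data = {
        "updatedOn": TODAY_DMYHM,
        "dataConfirmedCasesByCounties": confirmed_cases,
        "countyByDay": county_by_day,
    }
    save_as_json(CONFIRMED_CASES_BY_COUNTIES_PATH, data)
    logger.info("Chart data saved to %s", CONFIRMED_CASES_BY_COUNTIES_PATH)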
if __name__ == "__main__":
main()
| 30.316667 | 87 | 0.774601 |
be4037367a1afa83a7501ca75f082c616c63c62c | 625 | py | Python | ros_tf_publisher.py | BrightLamp/PyLearningCodes | ed237528c41ab2a9832b88806732097ffae0a0ed | [
"MIT"
] | null | null | null | ros_tf_publisher.py | BrightLamp/PyLearningCodes | ed237528c41ab2a9832b88806732097ffae0a0ed | [
"MIT"
] | null | null | null | ros_tf_publisher.py | BrightLamp/PyLearningCodes | ed237528c41ab2a9832b88806732097ffae0a0ed | [
"MIT"
] | null | null | null | # encoding=utf-8
import rospy
import tf
if __name__ == '__main__':
rospy.init_node('py_tf_broadcaster')
br = tf.TransformBroadcaster()
x = 0.0
y = 0.0
z = 0.0
roll = 0
pitch = 0
yaw = 1.57
rate = rospy.Rate(1)
while not rospy.is_shutdown():
yaw = yaw + 0.1
roll = roll + 0.1
br.sendTransform((x, y, z),
tf.transformations.quaternion_from_euler(roll, pitch, yaw),
rospy.Time.now(),
"base_link",
"front_caster") # base_linklink1
rate.sleep()
| 24.038462 | 84 | 0.5104 |
be40e740adf7c24c5c205687723b024d4eaf9752 | 2,674 | py | Python | dataset_manager/technical_indicators.py | NightingaleV/bakalarska_prace-ann-algotrading | 07866e092cb527a7e1d9d7050790d9ffd611dc83 | [
"MIT"
] | null | null | null | dataset_manager/technical_indicators.py | NightingaleV/bakalarska_prace-ann-algotrading | 07866e092cb527a7e1d9d7050790d9ffd611dc83 | [
"MIT"
] | null | null | null | dataset_manager/technical_indicators.py | NightingaleV/bakalarska_prace-ann-algotrading | 07866e092cb527a7e1d9d7050790d9ffd611dc83 | [
"MIT"
] | null | null | null | # Imports
import numpy as np
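# NOTE: the indicator class itself is missing from this extract. Below is a
# hypothetical minimal sketch (an assumption, not the original) of the usual
# mixin pattern for such modules: methods that append indicator columns to a
# pandas DataFrame stored on `self.df` with a 'close' price column.
class TechnicalIndicators:
    # Simple Moving Average of the close over `periods` bars
    def sma(self, periods):
        self.df['sma{}'.format(periods)] = self.df['close'].rolling(
            window=periods).mean()
        return self
    # Exponential Moving Average of the close
    def ema(self, periods):
        self.df['ema{}'.format(periods)] = self.df['close'].ewm(
            span=periods, adjust=False).mean()
        return self
    # Relative Strength Index (Wilder's smoothing via an equivalent EMA)
    def rsi(self, periods=14):
        delta = self.df['close'].diff()
        gain = delta.clip(lower=0).ewm(alpha=1 / periods, adjust=False).mean()
        loss = (-delta.clip(upper=0)).ewm(alpha=1 / periods, adjust=False).mean()
        rs = gain / loss
        self.df['rsi{}'.format(periods)] = 100 - (100 / (1 + rs))
        return self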
| 31.093023 | 71 | 0.5819 |