hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3fd22b642b9f6e7837ab39991845866acb71bd9d | 929 | py | Python | txt/test/teste.py | juliano777/apostila_python | 521c05c1579a52d22d6b670af92e3763366b6301 | [
"BSD-3-Clause"
] | 3 | 2020-04-18T20:07:39.000Z | 2021-06-17T18:41:34.000Z | txt/test/teste.py | juliano777/apostila_python | 521c05c1579a52d22d6b670af92e3763366b6301 | [
"BSD-3-Clause"
] | null | null | null | txt/test/teste.py | juliano777/apostila_python | 521c05c1579a52d22d6b670af92e3763366b6301 | [
"BSD-3-Clause"
] | 1 | 2020-04-18T20:07:46.000Z | 2020-04-18T20:07:46.000Z | #_*_ encoding: utf-8 _*_
import time
''' Fibonacci function '''
''' Memoize function '''
# Start time
t1 = time.time()
# Loop
for i in range(35):
print('fib(%s) = %s' % (i, fibo(i)))
# End time
t2 = time.time()
# Total time
print('Tempo de execuo: %.3fs' % (t2 - t1))
# Take a pause
raw_input('Pressione <ENTER> para continuar\n')
# Memoization of fibo (closure)
fibo = memoize(fibo)
# Start time
t1 = time.time()
# loop after memoization
for i in range(40):
print('fib(%s) = %s' % (i, fibo(i)))
# End time
t2 = time.time()
# Total time
print('Tempo de execuo: %.3fs' % (t2 - t1))
| 16.298246 | 47 | 0.568353 |
3fd2e3175b855481fd32ee5d4ebc2f50e3468d9a | 4,101 | py | Python | Tests/Methods/Mesh/test_get_field.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | null | null | null | Tests/Methods/Mesh/test_get_field.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | null | null | null | Tests/Methods/Mesh/test_get_field.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from unittest import TestCase
from SciDataTool import DataTime, Data1D, DataLinspace, VectorField
from pyleecan.Classes.SolutionData import SolutionData
from pyleecan.Classes.SolutionMat import SolutionMat
from pyleecan.Classes.SolutionVector import SolutionVector
| 32.039063 | 87 | 0.566935 |
3fd35632335b7013aa84b5d96f778f88b22e2bbe | 17,026 | py | Python | python/services/compute/beta/reservation.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | 16 | 2021-01-08T19:35:22.000Z | 2022-03-23T16:23:49.000Z | python/services/compute/beta/reservation.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | 1 | 2021-08-18T19:12:20.000Z | 2021-08-18T19:12:20.000Z | python/services/compute/beta/reservation.py | LaudateCorpus1/declarative-resource-client-library | a559c4333587fe9531cef150532e6fcafff153e4 | [
"Apache-2.0"
] | 11 | 2021-03-18T11:27:28.000Z | 2022-03-12T06:49:14.000Z | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import reservation_pb2
from google3.cloud.graphite.mmv2.services.google.compute import reservation_pb2_grpc
from typing import List
| 36.615054 | 119 | 0.681017 |
3fd4a59a910de7324648c153841dea6bd5328a4e | 4,682 | py | Python | Examples/cimpleCraft/cimple4.py | shaesaert/TuLiPXML | 56cf4d58a9d7e17b6f6aebe6de8d5a1231035671 | [
"BSD-3-Clause"
] | 1 | 2021-05-28T23:44:28.000Z | 2021-05-28T23:44:28.000Z | Examples/cimpleCraft/cimple4.py | shaesaert/TuLiPXML | 56cf4d58a9d7e17b6f6aebe6de8d5a1231035671 | [
"BSD-3-Clause"
] | 2 | 2017-10-03T18:54:08.000Z | 2018-08-21T09:50:09.000Z | Examples/cimpleCraft/cimple4.py | shaesaert/TuLiPXML | 56cf4d58a9d7e17b6f6aebe6de8d5a1231035671 | [
"BSD-3-Clause"
] | 1 | 2018-10-06T12:58:52.000Z | 2018-10-06T12:58:52.000Z | # Import modules
from __future__ import print_function
import sys
import numpy as np
from polytope import box2poly
from tulip import hybrid
from tulip.abstract import prop2part, discretize
import Interface.DSL as DSL
from Interface import Statechart as dumpsmach
from Interface.Reduce import *
from Interface.Transform import *
print("----------------------------------\n Script options \n----------------------------------")
verbose = 1 # Decrease printed output = 0, increase= 1
print("""----------------------------------\n System Definition \n----------------------------------
-- System Constants
-- System Label State Space & partition
""")
# System constants
input_bound = 1.0
disturbance_bound = 0.1
# The system dynamics
A = np.array([[1., 0, 2., 0], [0, 1., 0, 2], [0, 0, 0.5, 0], [0, 0, 0, 0.5]])
B = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [5, -5, 0, 0], [0, 0, 5, -5]])
E = np.array([[1., 0, 0, 0], [0, 1., 0, 0], [0, 0, 1., 0], [0, 0, 0, 1.]])
# $x^+=Ax+Bu+E W$
# Size of the sets
X = box2poly([[0, 100.], [0, 100.], [-5, 5.], [-5, 5.]])
U = box2poly(input_bound*np.array([[0, 1], [0, 1], [0, 1], [0, 1]]))
W = box2poly(disturbance_bound*np.array([[0, 10], [0, 10], [-0.1, 0.1], [-0.1, 0.1]]))
print("----------------------------------\n Define system\n----------------------------------")
# Intermezzo polytope tutorial
# https://github.com/tulip-control/polytope/blob/master/doc/tutorial.md
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, X)
print(str(sys_dyn))
print("----------------------------------\n Define labelling \n----------------------------------")
cprops ={}
cprops["inA"] = box2poly([[0, 10], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])
cprops["inB"] = box2poly([[90, 100], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])
cprops["inObj1"] = box2poly([[15, 35], [30, 70], [-5, 5], [-5, 5]])
cprops["inObj2"] = box2poly([[65, 85], [30, 70], [-5, 5], [-5, 5]])
cpartition = prop2part(X, cprops)
if verbose == 1:
print("partition before refinement")
print(cpartition)
print("---------------------------------\n System partition State Space \n----------------------------------")
disc_dynamics = discretize(cpartition, sys_dyn, N=5, min_cell_volume=1, closed_loop=True, conservative=True)
states=[state for (state, label) in disc_dynamics.ts.states.find(with_attr_dict={'ap': {'inA'}})]
disc_dynamics.ts.states.initial|=states
print("----------------------------------\n Define specification \n----------------------------------")
# Specifications
# Environment variables and assumptions
env_vars = list()
env_init = list()
env_safe = list()
env_prog = list()
# System variables and requirements
sys_vars = ['inA', 'inB']
sys_init = ['inA']
sys_safe = ['!inObj1', '!inObj2']
sys_prog = ['inA', 'inB']
(ctrl_modes, grspec) = transform2control(disc_dynamics.ts, statevar='ctrl')
print("----------------------------------\n Combine sys and spec \n----------------------------------")
phi = grspec | spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
env_safe, sys_safe, env_prog, sys_prog)
phi.qinit = '\A \E'
phi.moore = False
phi.plus_one = False
ctrl = synth.synthesize(phi,ignore_sys_init=True)
#
# print("----------------------------------\n Reduce states \n----------------------------------")
#
# Events_init = {('fullGas', True)}
#
#
# ctrl_red=reduce_mealy(ctrl,relabel=False,outputs={'ctrl'}, prune_set=Events_init, combine_trans=False)
#
print("----------------------------------\n Output results \n----------------------------------")
if verbose == 1:
print(" (Verbose) ")
try:
disc_dynamics.ts.save("cimple_aircraft_orig.png")
ctrl_modes.save("cimple_aircraft_modes.png")
# ctrl_red.save('cimple_aircraft_ctrl_red.png')
ctrl.save("cimple_aircraft_ctrl_orig.png")
print(" (Verbose): saved all Finite State Transition Systems ")
except Exception:
pass
print('nodes in ctrl:')
print(len(ctrl.nodes()))
print(len(ctrl.transitions()))
print('\n')
#
# print('nodes in ctrl_red:')
# print(len(ctrl_red.nodes()))
# print(len(ctrl_red.transitions()))
# print('\n')
#
#
print("----------------------------------\n Convert controller to Xmi \n----------------------------------")
sys.stdout.flush()
# --------------- Writing the statechart -----------
try:
filename = str(__file__)
filename = filename[0:-3] + "_gen"
except NameError:
filename = "test_gen"
# write strategy plus control modes at the same time to a statechart
with open(filename+".xml", "w") as f:
# f.write(dumpsmach.tulip_to_xmi(ctrl_red,ctrl_modes))
f.write(dumpsmach.tulip_to_xmi(ctrl, ctrl_modes)) | 32.971831 | 110 | 0.548056 |
3fd50d9f4c976d633be6e56345cbe4edfe16b20b | 561 | py | Python | CableClub/cable_club_colosseum.py | V-FEXrt/Pokemon-Spoof-Plus | d397d680742496b7f64b401511da7eb57f63c973 | [
"MIT"
] | 2 | 2017-05-04T20:24:19.000Z | 2017-05-04T20:58:07.000Z | CableClub/cable_club_colosseum.py | V-FEXrt/Pokemon-Spoof-Plus | d397d680742496b7f64b401511da7eb57f63c973 | [
"MIT"
] | null | null | null | CableClub/cable_club_colosseum.py | V-FEXrt/Pokemon-Spoof-Plus | d397d680742496b7f64b401511da7eb57f63c973 | [
"MIT"
] | null | null | null | from AI.team_manager import TeamManager
from CableClub.cable_club_constants import Com
out_byte = 0
last_recieved = 0
count = 0 | 24.391304 | 66 | 0.654189 |
3fd6f8b99302959fd856c0174a84ad3698e8de10 | 931 | py | Python | workflow/wrappers/bio/popoolation2/indel_filtering_identify_indel_regions/wrapper.py | NBISweden/manticore-smk | fd0b4ccd4239dc91dac423d0ea13478d36702561 | [
"MIT"
] | null | null | null | workflow/wrappers/bio/popoolation2/indel_filtering_identify_indel_regions/wrapper.py | NBISweden/manticore-smk | fd0b4ccd4239dc91dac423d0ea13478d36702561 | [
"MIT"
] | null | null | null | workflow/wrappers/bio/popoolation2/indel_filtering_identify_indel_regions/wrapper.py | NBISweden/manticore-smk | fd0b4ccd4239dc91dac423d0ea13478d36702561 | [
"MIT"
] | 2 | 2021-08-23T16:09:51.000Z | 2021-11-12T21:35:56.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Per Unneberg"
__copyright__ = "Copyright 2020, Per Unneberg"
__email__ = "per.unneberg@scilifelab.se"
__license__ = "MIT"
import os
import re
import tempfile
from snakemake.shell import shell
from snakemake.utils import logger
log = snakemake.log_fmt_shell(stdout=True, stderr=True)
conda_prefix = os.getenv("CONDA_PREFIX")
script = os.path.join(
conda_prefix, "opt/popoolation2-code/indel_filtering/identify-indel-regions.pl"
)
options = snakemake.params.get("options", "")
mpileup = snakemake.input.mpileup
tmp = os.path.basename(tempfile.mkstemp()[1])
fifo = f"{mpileup}{tmp}.fifo"
if os.path.exists(fifo):
os.unlink(fifo)
shell("mkfifo {fifo}")
shell("zcat {mpileup} > {fifo} &")
shell(
"perl "
"{script} "
"{options} "
"--input {fifo} "
"--output {snakemake.output.gtf} "
"{log}"
)
if os.path.exists(fifo):
os.unlink(fifo)
| 22.707317 | 83 | 0.692803 |
3fd71b6b624dc062e8df4e8fc57377ace10d329d | 6,741 | py | Python | open_publishing/provision/progression_rule.py | open-publishing/open-publishing-api | 0d1646bb2460c6f35cba610a355941d2e07bfefd | [
"BSD-3-Clause"
] | null | null | null | open_publishing/provision/progression_rule.py | open-publishing/open-publishing-api | 0d1646bb2460c6f35cba610a355941d2e07bfefd | [
"BSD-3-Clause"
] | null | null | null | open_publishing/provision/progression_rule.py | open-publishing/open-publishing-api | 0d1646bb2460c6f35cba610a355941d2e07bfefd | [
"BSD-3-Clause"
] | null | null | null | from open_publishing.core import SequenceItem, SequenceField, SequenceItemProperty
from open_publishing.core import FieldDescriptor, DatabaseObjectField, SimpleField
from open_publishing.user import User
from open_publishing.core.enums import ValueStatus
from open_publishing.core.enums import ProvisionRuleRole, ProvisionChannelType, ProvisionChannelBase
from open_publishing.core.enums import ProvisionRuleAlgorithm
from .rule import ProvisionRule
from .filter_list import ProvisionFilterList
class ProgressionChannelsList(SequenceField):
_item_type = ProgressionChannel
class ProgressionRule(ProvisionRule):
recipient = FieldDescriptor('recipient')
role = FieldDescriptor('role')
channels = FieldDescriptor('channels')
class ProgressionList(ProvisionFilterList):
_filter = ProvisionRuleAlgorithm.progression
| 35.856383 | 100 | 0.558374 |
3fd8a3a7cd4b29135af9a933907e8e7ce8de084c | 2,746 | py | Python | forms/utils.py | braceio/forms | deb12f37447d6167ad284ae68085a02454c8f649 | [
"MIT"
] | 36 | 2015-01-02T05:15:02.000Z | 2018-03-06T11:36:41.000Z | forms/utils.py | braceio/forms | deb12f37447d6167ad284ae68085a02454c8f649 | [
"MIT"
] | 1 | 2015-02-16T20:03:41.000Z | 2016-01-01T23:42:25.000Z | forms/utils.py | braceio/forms | deb12f37447d6167ad284ae68085a02454c8f649 | [
"MIT"
] | 20 | 2015-01-04T21:38:12.000Z | 2021-01-17T12:59:10.000Z | from datetime import timedelta
from functools import update_wrapper
from flask import make_response, current_app, request, url_for, jsonify
import uuid
# decorators
def get_url(endpoint, secure=False, **values):
''' protocol preserving url_for '''
path = url_for(endpoint, **values)
if secure:
url_parts = request.url.split('/', 3)
path = "https://" + url_parts[2] + path
return path | 31.563218 | 81 | 0.621267 |
3fd8c6ef2dca4f5f0372db69829883a2a443d40b | 4,536 | py | Python | tests/ref_test.py | lykme516/pykka | d66b0c49658fc0e7c4e1ae46a0f9c50c7e964ca5 | [
"Apache-2.0"
] | 1 | 2021-01-03T09:25:23.000Z | 2021-01-03T09:25:23.000Z | tests/ref_test.py | hujunxianligong/pykka | d66b0c49658fc0e7c4e1ae46a0f9c50c7e964ca5 | [
"Apache-2.0"
] | null | null | null | tests/ref_test.py | hujunxianligong/pykka | d66b0c49658fc0e7c4e1ae46a0f9c50c7e964ca5 | [
"Apache-2.0"
] | null | null | null | import time
import unittest
from pykka import ActorDeadError, ThreadingActor, ThreadingFuture, Timeout
def ConcreteRefTest(actor_class, future_class, sleep_function):
C.__name__ = '%sRefTest' % (actor_class.__name__,)
C.future_class = future_class
return C
ThreadingActorRefTest = ConcreteRefTest(
ThreadingActor, ThreadingFuture, time.sleep)
try:
import gevent
from pykka.gevent import GeventActor, GeventFuture
GeventActorRefTest = ConcreteRefTest(
GeventActor, GeventFuture, gevent.sleep)
except ImportError:
pass
try:
import eventlet
from pykka.eventlet import EventletActor, EventletFuture
EventletActorRefTest = ConcreteRefTest(
EventletActor, EventletFuture, eventlet.sleep)
except ImportError:
pass
| 31.068493 | 74 | 0.668651 |
3fda3cc0af3e5e42cd6c1e11390f1713cf4c09d1 | 3,365 | py | Python | tests/unit/test_baseObject.py | asaranprasad/nvda | e9609694acbfb06398eb6552067a0dcd532d67af | [
"bzip2-1.0.6"
] | 1 | 2018-11-16T10:15:59.000Z | 2018-11-16T10:15:59.000Z | tests/unit/test_baseObject.py | asaranprasad/nvda | e9609694acbfb06398eb6552067a0dcd532d67af | [
"bzip2-1.0.6"
] | 3 | 2017-09-29T17:14:18.000Z | 2019-05-20T16:13:39.000Z | tests/unit/test_baseObject.py | asaranprasad/nvda | e9609694acbfb06398eb6552067a0dcd532d67af | [
"bzip2-1.0.6"
] | 1 | 2017-09-29T08:53:52.000Z | 2017-09-29T08:53:52.000Z | #tests/unit/test_baseObject.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2018 NV Access Limited, Babbage B.V.
"""Unit tests for the baseObject module, its classes and their derivatives."""
import unittest
from baseObject import ScriptableObject
from objectProvider import PlaceholderNVDAObject
from scriptHandler import script
| 30.044643 | 94 | 0.744428 |
3fda75ffd417e01dfff80ddf791281704e021a18 | 3,960 | py | Python | querybook/server/lib/query_executor/connection_string/hive.py | shivammmmm/querybook | 71263eb7db79e56235ea752f2cf3339ca9b3a092 | [
"Apache-2.0"
] | 1,144 | 2021-03-30T05:06:16.000Z | 2022-03-31T10:40:31.000Z | querybook/server/lib/query_executor/connection_string/hive.py | shivammmmm/querybook | 71263eb7db79e56235ea752f2cf3339ca9b3a092 | [
"Apache-2.0"
] | 593 | 2021-07-01T10:34:25.000Z | 2022-03-31T23:24:40.000Z | querybook/server/lib/query_executor/connection_string/hive.py | shivammmmm/querybook | 71263eb7db79e56235ea752f2cf3339ca9b3a092 | [
"Apache-2.0"
] | 113 | 2021-03-30T00:07:20.000Z | 2022-03-31T07:18:43.000Z | import re
from typing import Dict, Tuple, List, NamedTuple, Optional
from lib.utils.decorators import with_exception_retry
from .helpers.common import (
split_hostport,
get_parsed_variables,
merge_hostport,
random_choice,
)
from .helpers.zookeeper import get_hostname_and_port_from_zk
# TODO: make these configurable?
MAX_URI_FETCH_ATTEMPTS = 10
MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC = 5
def _extract_connection_url(connection_string: str) -> RawHiveConnectionConf:
# Parser for Hive JDBC string
# Loosely based on https://cwiki.apache.org/confluence/display/Hive/HiveServer2+Clients#HiveServer2Clients-JDBC
match = re.search(
r"^(?:jdbc:)?hive2:\/\/([\w.-]+(?:\:\d+)?(?:,[\w.-]+(?:\:\d+)?)*)\/(\w*)((?:;[\w.-]+=[\w.-]+)*)(\?[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?(\#[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?$", # noqa: E501
connection_string,
)
hosts = match.group(1)
default_db = match.group(2) or "default"
session_variables = match.group(3) or ""
conf_list = match.group(4) or ""
var_list = match.group(5) or ""
parsed_hosts = []
for hostport in hosts.split(","):
parsed_hosts.append(split_hostport(hostport))
parsed_session_variables = get_parsed_variables(session_variables[1:])
parsed_conf_list = get_parsed_variables(conf_list[1:])
parsed_var_list = get_parsed_variables(var_list[1:])
return RawHiveConnectionConf(
hosts=parsed_hosts,
default_db=default_db,
session_variables=parsed_session_variables,
conf_list=parsed_conf_list,
var_list=parsed_var_list,
)
| 33.846154 | 202 | 0.689899 |
3fdb9c34cb8887a4abfe9945968ed8dd70631d27 | 137 | py | Python | flopz/__init__.py | Flopz-Project/flopz | eb470811e4a8be5e5d625209b0f8eb7ccd1d5da3 | [
"Apache-2.0"
] | 7 | 2021-11-19T15:53:58.000Z | 2022-03-28T03:38:52.000Z | flopz/__init__.py | Flopz-Project/flopz | eb470811e4a8be5e5d625209b0f8eb7ccd1d5da3 | [
"Apache-2.0"
] | null | null | null | flopz/__init__.py | Flopz-Project/flopz | eb470811e4a8be5e5d625209b0f8eb7ccd1d5da3 | [
"Apache-2.0"
] | 1 | 2022-03-25T12:44:01.000Z | 2022-03-25T12:44:01.000Z | """
flopz.
Low Level Assembler and Firmware Instrumentation Toolkit
"""
__version__ = "0.2.0"
__author__ = "Noelscher Consulting GmbH"
| 15.222222 | 56 | 0.744526 |
3fdde609468413e798c5347a27251969395c0fce | 2,294 | py | Python | OracleCASB_API_Client/occs.py | ftnt-cse/Oracle_CASB_API_Client | 00c92c7383d62d029736481f079773253e05589c | [
"Apache-2.0"
] | null | null | null | OracleCASB_API_Client/occs.py | ftnt-cse/Oracle_CASB_API_Client | 00c92c7383d62d029736481f079773253e05589c | [
"Apache-2.0"
] | null | null | null | OracleCASB_API_Client/occs.py | ftnt-cse/Oracle_CASB_API_Client | 00c92c7383d62d029736481f079773253e05589c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys, logging
import requests, json, argparse, textwrap
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from oracle_casb_api import *
parser = argparse.ArgumentParser(
prog='Oracle CASB API Client',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
./OCCS_api_client.py: an API client impelmentation to fetch Oracle CASB Events and reports. It then parses it and sends it as syslog to a Syslog server/SIEM Solution
'''))
parser.add_argument('-s', '--syslog-server',type=str, required=True, help="Syslog Server where to send the fetched OCCS data as syslog")
parser.add_argument('-b', '--base-url',type=str, required=True, help="Oracle CASB base url, typically https://XXXXXXXX.palerra.net")
parser.add_argument('-k', '--access-key',type=str, required=True, help="Oracle CASB Access Key")
parser.add_argument('-a', '--access-secret',type=str, required=True, help='Oracle CASB Access Secret')
parser.add_argument('-t', '--time-period',type=int, required=True, help='time period of the events expressed as number of hours')
args = parser.parse_args()
logger = logging.getLogger('OCCS_Logger')
logger.setLevel(logging.ERROR)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
occs_object=occs_init()
start_date = arrow.now().shift(hours=(-1 * args.time_period)).format('YYYY-MM-DDTHH:mm:ss.SSS')
end_date = arrow.now().format('YYYY-MM-DDTHH:mm:ss.SSS')
res = occs_object.get_risk_events(start_date)
send_syslog(args.syslog_server,(prepare_risk_events_for_syslog(res)))
res = occs_object.get_user_risk_score_report('userrisk',start_date,end_date,'100')
send_syslog(args.syslog_server,(prepare_users_risk_scores_for_syslog(res)))
| 37.606557 | 170 | 0.732781 |
3fde07047223da1d88704610e639913da4a2c4f4 | 1,787 | py | Python | src/classification_metrics.py | crmauceri/ReferringExpressions | d2ca43bf6df88f83fbe6dfba99b1105dd14592f4 | [
"Apache-2.0"
] | 6 | 2020-06-05T06:52:59.000Z | 2021-05-27T11:38:16.000Z | src/classification_metrics.py | crmauceri/ReferringExpressions | d2ca43bf6df88f83fbe6dfba99b1105dd14592f4 | [
"Apache-2.0"
] | 1 | 2021-03-28T13:27:21.000Z | 2021-04-29T17:58:28.000Z | src/classification_metrics.py | crmauceri/ReferringExpressions | d2ca43bf6df88f83fbe6dfba99b1105dd14592f4 | [
"Apache-2.0"
] | 2 | 2019-12-09T09:14:47.000Z | 2019-12-22T13:57:08.000Z | import argparse
import json
from data_management.DatasetFactory import datasetFactory
from config import cfg
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculates metrics from output of a Classification network.' +
' Run `run_network.py <config> test` first.')
parser.add_argument('config_file', help='config file path')
parser.add_argument('results_file', help='results file path')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
refer = datasetFactory(cfg)
hamming_loss = 0.0
TP = np.zeros((cfg.IMG_NET.N_LABELS+1,))
FP = np.zeros((cfg.IMG_NET.N_LABELS+1,))
FN = np.zeros((cfg.IMG_NET.N_LABELS+1,))
total = 0.0
# load generation outputs
with open(args.results_file, 'r') as f:
genData = json.load(f)
for row in genData:
total += 1.0
hamming_loss += row['Hamming_Loss']
TP[row['TP_classes']] += 1
FP[row['FP_classes']] += 1
FN[row['FN_classes']] += 1
print("Mean Hamming Loss: %3.3f" % (hamming_loss/total))
print("Mean precision: %3.3f" % (np.sum(TP)/(np.sum(TP)+np.sum(FP))))
print("Mean recall: %3.3f" % (np.sum(TP)/(np.sum(TP)+np.sum(FN))))
print("Class\tPrecision\tRecall")
for idx in range(cfg.IMG_NET.N_LABELS):
label = refer[0].coco.cats[refer[0].coco_cat_map[idx]]
print("%s\t%3.3f\t%3.3f" % (label['name'].ljust(20), TP[idx]/(TP[idx]+FP[idx]), TP[idx]/(TP[idx]+FN[idx]))) | 34.365385 | 115 | 0.614997 |
3fe076a26915fb3a8a0df4e110f97d0bbe198980 | 6,448 | py | Python | base_model.py | Unmesh-Kumar/DMRM | f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02 | [
"MIT"
] | 23 | 2019-12-19T02:46:33.000Z | 2022-03-22T07:52:28.000Z | base_model.py | Unmesh-Kumar/DMRM | f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02 | [
"MIT"
] | 5 | 2020-07-28T14:25:45.000Z | 2022-03-08T14:30:21.000Z | base_model.py | Unmesh-Kumar/DMRM | f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02 | [
"MIT"
] | 5 | 2019-12-20T15:46:08.000Z | 2021-11-23T01:15:32.000Z | import torch
import torch.nn as nn
from attention import Attention, NewAttention
from language_model import WordEmbedding, QuestionEmbedding, QuestionEmbedding2
from classifier import SimpleClassifier
from fc import FCNet
from Decoders.decoder1 import _netG as netG
import torch.nn.functional as F
from torch.autograd import Variable
from misc.utils import LayerNorm
| 40.049689 | 114 | 0.618021 |
3fe105950fe7c097a0cf82f9fd41aa14438e8996 | 66 | py | Python | qymel/core/__init__.py | hal1932/QyMEL | 4fdf2409aaa34516f021a37aac0f011fe6ea6073 | [
"MIT"
] | 6 | 2019-12-23T05:20:29.000Z | 2021-01-30T21:17:32.000Z | qymel/core/__init__.py | hal1932/QyMEL | 4fdf2409aaa34516f021a37aac0f011fe6ea6073 | [
"MIT"
] | null | null | null | qymel/core/__init__.py | hal1932/QyMEL | 4fdf2409aaa34516f021a37aac0f011fe6ea6073 | [
"MIT"
] | 1 | 2020-03-05T08:17:44.000Z | 2020-03-05T08:17:44.000Z | # coding: utf-8
from .force_reload import *
from .scopes import *
| 16.5 | 27 | 0.727273 |
3fe18d763d2aae257f541fc27bf3a672136ac390 | 5,244 | py | Python | lambda/nodemanager.py | twosdai/cloud-enablement-aws | 145bf88acc1781cdd696e2d77a5c2d3b796e16c3 | [
"Apache-2.0"
] | 11 | 2018-05-25T18:48:30.000Z | 2018-11-30T22:06:58.000Z | lambda/nodemanager.py | twosdai/cloud-enablement-aws | 145bf88acc1781cdd696e2d77a5c2d3b796e16c3 | [
"Apache-2.0"
] | 10 | 2019-01-29T19:39:46.000Z | 2020-07-01T07:37:08.000Z | lambda/nodemanager.py | twosdai/cloud-enablement-aws | 145bf88acc1781cdd696e2d77a5c2d3b796e16c3 | [
"Apache-2.0"
] | 18 | 2019-01-29T05:31:23.000Z | 2021-09-16T20:04:24.000Z | # Copyright 2002-2018 MarkLogic Corporation. All Rights Reserved.
import boto3
import botocore
import logging
import hashlib
import json
import time
from botocore.exceptions import ClientError
log = logging.getLogger()
log.setLevel(logging.INFO)
# global variables
ec2_client = boto3.client('ec2')
asg_client = boto3.client('autoscaling')
ec2_resource = boto3.resource('ec2') | 35.432432 | 101 | 0.54939 |
3fe27cb210e5f440aba20265f1b60a9554e9c206 | 5,724 | py | Python | pyABC/0.10.14/petab/amici.py | ICB-DCM/lookahead-study | b9849ce2b0cebbe55d6c9f7a248a5f4dff191007 | [
"MIT"
] | 3 | 2021-01-20T14:14:04.000Z | 2022-02-23T21:21:18.000Z | pyABC/0.10.14/petab/amici.py | ICB-DCM/lookahead-study | b9849ce2b0cebbe55d6c9f7a248a5f4dff191007 | [
"MIT"
] | 3 | 2021-01-20T23:11:20.000Z | 2021-02-15T14:36:39.000Z | pyABC/Modified/petab/amici.py | ICB-DCM/lookahead-study | b9849ce2b0cebbe55d6c9f7a248a5f4dff191007 | [
"MIT"
] | null | null | null | import logging
from collections.abc import Sequence, Mapping
from typing import Callable, Union
import copy
import pyabc
from .base import PetabImporter, rescale
logger = logging.getLogger(__name__)
try:
import petab
import petab.C as C
except ImportError:
petab = C = None
logger.error("Install petab (see https://github.com/icb-dcm/petab) to use "
"the petab functionality.")
try:
import amici
import amici.petab_import
from amici.petab_objective import simulate_petab, LLH, RDATAS
except ImportError:
amici = amici.petab_import = simulate_petab = LLH = RDATAS = None
logger.error("Install amici (see https://github.com/icb-dcm/amici) to use "
"the amici functionality.")
| 32.338983 | 79 | 0.601328 |
3fe32adbae6d30f0649147cee237cf1904d94533 | 99 | py | Python | ui_automation_core/helpers/browser/alert_action_type.py | Harshavardhanchowdary/python-ui-testing-automation | a624c6b945276c05722be2919d95aa9e5539d0d0 | [
"MIT"
] | null | null | null | ui_automation_core/helpers/browser/alert_action_type.py | Harshavardhanchowdary/python-ui-testing-automation | a624c6b945276c05722be2919d95aa9e5539d0d0 | [
"MIT"
] | null | null | null | ui_automation_core/helpers/browser/alert_action_type.py | Harshavardhanchowdary/python-ui-testing-automation | a624c6b945276c05722be2919d95aa9e5539d0d0 | [
"MIT"
] | null | null | null | from enum import Enum, auto
| 16.5 | 28 | 0.676768 |
3fe331ae497b79a61bbb73e932ba9991e96f0b3f | 18,769 | py | Python | xsertion/test_layers.py | karazijal/xsertion | 102c1a4f07b049647064a968257d56b00a064d6c | [
"MIT"
] | null | null | null | xsertion/test_layers.py | karazijal/xsertion | 102c1a4f07b049647064a968257d56b00a064d6c | [
"MIT"
] | null | null | null | xsertion/test_layers.py | karazijal/xsertion | 102c1a4f07b049647064a968257d56b00a064d6c | [
"MIT"
] | 1 | 2021-11-09T09:06:48.000Z | 2021-11-09T09:06:48.000Z | import unittest
from xsertion.layers import *
from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten
from keras.models import Model
import json
if __name__=="__main__":
unittest.main() | 42.656818 | 121 | 0.553146 |
3fe371c906222e31026634c1cd2e9e52427c680b | 151 | py | Python | language/python/modules/websocket/websocket_module.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | 1 | 2019-11-25T07:23:42.000Z | 2019-11-25T07:23:42.000Z | language/python/modules/websocket/websocket_module.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | 13 | 2020-01-07T16:09:47.000Z | 2022-03-02T12:51:44.000Z | language/python/modules/websocket/websocket_module.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
web socket
"""
import websocket
if __name__ == '__main__':
pass
| 10.066667 | 26 | 0.629139 |
3fe5513bca482d43a59c15049895c9303427b971 | 85 | py | Python | engines/__init__.py | mukeran/simple_sandbox | a2a97d13d814548f313871f0bd5c48f65b1a6180 | [
"MIT"
] | null | null | null | engines/__init__.py | mukeran/simple_sandbox | a2a97d13d814548f313871f0bd5c48f65b1a6180 | [
"MIT"
] | null | null | null | engines/__init__.py | mukeran/simple_sandbox | a2a97d13d814548f313871f0bd5c48f65b1a6180 | [
"MIT"
] | null | null | null | from .watcher import FileWatcher
from .fpm_sniffer import FPMSniffer, FPMSnifferMode
| 28.333333 | 51 | 0.858824 |
3fe6078d322f58b763a2e00d815b964e8911f9bf | 885 | py | Python | PyTrinamic/modules/TMC_EvalShield.py | trinamic-AA/PyTrinamic | b054f4baae8eb6d3f5d2574cf69c232f66abb4ee | [
"MIT"
] | 37 | 2019-01-13T11:08:45.000Z | 2022-03-25T07:18:15.000Z | PyTrinamic/modules/TMC_EvalShield.py | AprDec/PyTrinamic | a9db10071f8fbeebafecb55c619e5893757dd0ce | [
"MIT"
] | 56 | 2019-02-25T02:48:27.000Z | 2022-03-31T08:45:34.000Z | PyTrinamic/modules/TMC_EvalShield.py | AprDec/PyTrinamic | a9db10071f8fbeebafecb55c619e5893757dd0ce | [
"MIT"
] | 26 | 2019-01-14T05:20:16.000Z | 2022-03-08T13:27:35.000Z | '''
Created on 18.03.2020
@author: LK
'''
| 26.818182 | 85 | 0.615819 |
3fe68b75dfeb56985a424ac16b45a678c22019cc | 285 | py | Python | kattis/rollcall.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | 2 | 2021-04-05T14:26:37.000Z | 2021-06-10T04:22:01.000Z | kattis/rollcall.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | null | null | null | kattis/rollcall.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | null | null | null | import sys
from collections import Counter


def roll_call(pairs):
    """Return the roll-call lines for a list of (first, last) name pairs.

    Students are called in order of last name, then first name.  A student
    whose first name is unique in the class is called by first name only;
    otherwise the last name is added to disambiguate.

    Args:
        pairs: list of (first_name, last_name) tuples

    Returns:
        list of str: one line per student, in calling order
    """
    first_count = Counter(first for first, _ in pairs)
    lines = []
    for first, last in sorted(pairs, key=lambda name: (name[1], name[0])):
        if first_count[first] > 1:
            lines.append("{} {}".format(first, last))
        else:
            lines.append(first)
    return lines


if __name__ == "__main__":
    # Read "<first> <last>" pairs from stdin until a blank line (or EOF).
    # Guarding the I/O under __main__ keeps the logic importable/testable
    # without changing behavior when run as a script.
    pairs = []
    for line in sys.stdin:
        if line.rstrip() == "":
            break
        first, last = line.split()
        pairs.append((first, last))
    for out_line in roll_call(pairs):
        print(out_line)
| 14.25 | 41 | 0.45614 |
3fe82d5a85daeba3d97651074742e05e1165543c | 1,697 | py | Python | test/vizier/test_nodes.py | robotarium/vizier | 6ce2be4fc0edcdaf5ba246094c2e79bff32e219d | [
"MIT"
] | 11 | 2016-08-18T20:37:06.000Z | 2019-11-24T17:34:27.000Z | test/vizier/test_nodes.py | robotarium/vizier | 6ce2be4fc0edcdaf5ba246094c2e79bff32e219d | [
"MIT"
] | 6 | 2018-10-07T17:01:40.000Z | 2019-11-24T17:41:16.000Z | test/vizier/test_nodes.py | robotarium/vizier | 6ce2be4fc0edcdaf5ba246094c2e79bff32e219d | [
"MIT"
] | 3 | 2016-08-22T13:58:24.000Z | 2018-06-07T21:06:35.000Z | import json
import vizier.node as node
import unittest
| 30.854545 | 69 | 0.614614 |
3fe84decaa2c4b931f2c3a8a70e6c95473baf73c | 457 | py | Python | tests/not_test_basics.py | kipfer/simple_modbus_server | f16caea62311e1946498392ab4cb5f3d2e1306cb | [
"MIT"
] | 1 | 2021-03-11T13:04:00.000Z | 2021-03-11T13:04:00.000Z | tests/not_test_basics.py | kipfer/simple_modbus_server | f16caea62311e1946498392ab4cb5f3d2e1306cb | [
"MIT"
] | null | null | null | tests/not_test_basics.py | kipfer/simple_modbus_server | f16caea62311e1946498392ab4cb5f3d2e1306cb | [
"MIT"
] | null | null | null | import modbus_server
# Manual smoke test for the third-party ``modbus_server`` package: start a
# local Modbus/TCP server, write every supported register type once, then
# shut it down.
s = modbus_server.Server(
    host="localhost", port=5020, daemon=True, loglevel="WARNING", autostart=False
)
s.start()
# Coils and discrete inputs hold single bits, set individually or as runs.
s.set_coil(1, True)
s.set_coils(2, [True, False, True])
s.set_discrete_input(1, True)
s.set_discrete_inputs(2, [True, False, True])
# Input/holding registers take a type code; "h" presumably selects a 16-bit
# (short) encoding — TODO confirm against the library's documentation.
s.set_input_register(1, 1234, "h")
s.set_input_registers(2, [1, 2, 3, 4, 5], "h")
s.set_holding_register(1, 1234, "h")
s.set_holding_registers(2, [1, 2, 3, 4, 5], "h")
s.stop()
| 20.772727 | 81 | 0.68709 |
3fe8ccedd5919a259d55f873b8eeacc8ac42d24a | 5,417 | py | Python | cuda/rrnn_semiring.py | Noahs-ARK/rational-recurrences | 3b7ef54520bcaa2b24551cf42a125c9251124229 | [
"MIT"
] | 27 | 2018-09-28T02:17:07.000Z | 2020-10-15T14:57:16.000Z | cuda/rrnn_semiring.py | Noahs-ARK/rational-recurrences | 3b7ef54520bcaa2b24551cf42a125c9251124229 | [
"MIT"
] | 1 | 2021-03-25T22:08:35.000Z | 2021-03-25T22:08:35.000Z | cuda/rrnn_semiring.py | Noahs-ARK/rational-recurrences | 3b7ef54520bcaa2b24551cf42a125c9251124229 | [
"MIT"
] | 5 | 2018-11-06T05:49:51.000Z | 2019-10-26T03:36:43.000Z | RRNN_SEMIRING = """
extern "C" {
__global__ void rrnn_semiring_fwd(
const float * __restrict__ u,
const float * __restrict__ eps,
const float * __restrict__ c1_init,
const float * __restrict__ c2_init,
const int len,
const int batch,
const int dim,
const int k,
float * __restrict__ c1,
float * __restrict__ c2,
int semiring_type) {
assert (k == K);
int ncols = batch*dim;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
const float *up = u + (col*k);
float *c1p = c1 + col;
float *c2p = c2 + col;
float cur_c1 = *(c1_init + col);
float cur_c2 = *(c2_init + col);
const float eps_val = *(eps + (col%dim));
for (int row = 0; row < len; ++row) {
float u1 = *(up);
float u2 = *(up+1);
float forget1 = *(up+2);
float forget2 = *(up+3);
float prev_c1 = cur_c1;
float op1 = times_forward(semiring_type, cur_c1, forget1);
cur_c1 = plus_forward(semiring_type, op1, u1);
float op2 = times_forward(semiring_type, cur_c2, forget2);
float op3_ = plus_forward(semiring_type, eps_val, prev_c1);
float op3 = times_forward(semiring_type, op3_, u2);
cur_c2 = plus_forward(semiring_type, op2, op3);
*c1p = cur_c1;
*c2p = cur_c2;
up += ncols_u;
c1p += ncols;
c2p += ncols;
}
}
__global__ void rrnn_semiring_bwd(
const float * __restrict__ u,
const float * __restrict__ eps,
const float * __restrict__ c1_init,
const float * __restrict__ c2_init,
const float * __restrict__ c1,
const float * __restrict__ c2,
const float * __restrict__ grad_c1,
const float * __restrict__ grad_c2,
const float * __restrict__ grad_last_c1,
const float * __restrict__ grad_last_c2,
const int len,
const int batch,
const int dim,
const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_eps,
float * __restrict__ grad_c1_init,
float * __restrict__ grad_c2_init,
int semiring_type) {
assert (k == K);
int ncols = batch*dim;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
float cur_c1 = *(grad_last_c1 + col);
float cur_c2 = *(grad_last_c2 + col);
const float eps_val = *(eps + (col%dim));
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *c1p = c1 + col + (len-1)*ncols;
const float *c2p = c2 + col + (len-1)*ncols;
const float *gc1p = grad_c1 + col + (len-1)*ncols;
const float *gc2p = grad_c2 + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float geps = 0.f;
for (int row = len-1; row >= 0; --row) {
float u1 = *(up);
float u2 = *(up+1);
float forget1 = *(up+2);
float forget2 = *(up+3);
const float c1_val = *c1p;
const float c2_val = *c2p;
const float prev_c1 = (row>0) ? (*(c1p-ncols)) : (*(c1_init+col));
const float prev_c2 = (row>0) ? (*(c2p-ncols)) : (*(c2_init+col));
const float gc1 = *(gc1p) + cur_c1;
const float gc2 = *(gc2p) + cur_c2;
cur_c1 = cur_c2 = 0.f;
float op1 = times_forward(semiring_type, prev_c1, forget1);
float gop1 = 0.f, gu1 = 0.f;
plus_backward(semiring_type, op1, u1, gc1, gop1, gu1);
float gprev_c1 = 0.f, gprev_c2 = 0.f, gforget1=0.f;
times_backward(semiring_type, prev_c1, forget1, gop1, gprev_c1, gforget1);
*(gup) = gu1;
*(gup+2) = gforget1;
cur_c1 += gprev_c1;
float op2 = times_forward(semiring_type, prev_c2, forget2);
float op3_ = plus_forward(semiring_type, eps_val, prev_c1);
float op3 = times_forward(semiring_type, op3_, u2);
float gop2 = 0.f, gop3 = 0.f;
plus_backward(semiring_type, op2, op3, gc2, gop2, gop3);
float gop3_ = 0.f, gu2 = 0.f, gforget2 = 0.f, cur_geps=0.f;
times_backward(semiring_type, prev_c2, forget2, gop2, gprev_c2, gforget2);
times_backward(semiring_type, op3_, u2, gop3, gop3_, gu2);
plus_backward(semiring_type, eps_val, prev_c1, gop3_, cur_geps, gprev_c1);
*(gup+1) = gu2;
*(gup+3) = gforget2;
geps += cur_geps;
cur_c1 += gprev_c1;
cur_c2 += gprev_c2;
up -= ncols_u;
c1p -= ncols;
c2p -= ncols;
gup -= ncols_u;
gc1p -= ncols;
gc2p -= ncols;
}
*(grad_c1_init + col) = cur_c1;
*(grad_c2_init + col) = cur_c2;
*(grad_eps + col%dim) = geps;
}
}
"""
| 36.601351 | 86 | 0.502677 |
3fe8e4411ff091a355fe9346309f0659c9b08983 | 1,841 | py | Python | tests.py | c-okelly/movie_script_analytics | 6fee40c0378921199ab14ca0b4db447b9f4e7bcf | [
"MIT"
] | 1 | 2017-11-09T13:24:47.000Z | 2017-11-09T13:24:47.000Z | tests.py | c-okelly/movie_script_analytics | 6fee40c0378921199ab14ca0b4db447b9f4e7bcf | [
"MIT"
] | null | null | null | tests.py | c-okelly/movie_script_analytics | 6fee40c0378921199ab14ca0b4db447b9f4e7bcf | [
"MIT"
] | null | null | null | import re
import text_objects
import numpy as np
import pickle
# f = open("Data/scripts_text/17-Again.txt", 'r')
# text = f.read()
# text = text[900:1500]
# print(text)
# count = len(re.findall("\W+",text))
# print(count)
#
# lines = text.split('\n')
# lines_on_empty = re.split("\n\s+\n", text)
# print(len(lines))
# print(len(lines_on_empty))
#
# # Find empty lines
# count = 0
# for item in lines:
# if re.search("\A\s+\Z", item):
# print(count)
# count += 1
#
# # Search for character names in list
# for item in lines:
# if re.search("\A\s*Name_character\s*(\(.*\))?\s*\Z", item):
# print(item)
# # Generate list of characters from the script
# characters = dict()
#
#
# for line in lines:
# #Strip whitespace and check if whole line is in capital letters
# line = line.strip()
# if (line.isupper()):
#
# # Exclude lines with EXT / INT in them
# s1 = re.search('EXT\.', line)
# s2 = re.search('INT\.', line)
#
# # Select correct lines and strip out and elements within parathenses. Normally continued
# if (not(s1 or s2)):
# line = re.sub("\s*\(.*\)","",line)
# # If character no in dict add them. If a already in increase count by 1
# if line in characters:
# characters[line] = characters[line] + 1
# else:
# characters[line] = 1
#
# print(characters)
# Get description lines
if __name__ == '__main__':
    # Quick manual check: load the pickled list of analysis objects produced
    # elsewhere in this project and print the metadata of the first entry.
    #
    # Fix: the file handle returned by open() was never closed; use a context
    # manager so it is released even if unpickling fails.
    # NOTE(review): pickle.load must only ever be pointed at data we produced
    # ourselves — it executes arbitrary code on untrusted input.
    with open("Data/Pickled_objects/400.dat", "rb") as f:
        var = pickle.load(f)
    object_1 = var[0]
    print(object_1.info_dict)
| 23.303797 | 98 | 0.558935 |
3fea883542666ba0f05267690f8d99f2d06892ea | 1,945 | py | Python | malcolm/modules/demo/parts/countermovepart.py | dinojugosloven/pymalcolm | 0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a | [
"Apache-2.0"
] | null | null | null | malcolm/modules/demo/parts/countermovepart.py | dinojugosloven/pymalcolm | 0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a | [
"Apache-2.0"
] | null | null | null | malcolm/modules/demo/parts/countermovepart.py | dinojugosloven/pymalcolm | 0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a | [
"Apache-2.0"
] | null | null | null | import time
from annotypes import Anno, add_call_types
from malcolm.core import PartRegistrar
from malcolm.modules import builtin
# Pull re-used annotypes into our namespace in case we are subclassed
APartName = builtin.parts.APartName
AMri = builtin.parts.AMri
with Anno("The demand value to move our counter motor to"):
ADemand = float
with Anno("The amount of time to get to the demand position"):
ADuration = float
# How long between ticks of the "motor" position while moving
UPDATE_TICK = 0.1
# We will set these attributes on the child block, so don't save them
| 36.018519 | 77 | 0.679177 |
3fea9db35ea3c9741fed546bd70ab750ac964bbd | 12,740 | py | Python | scripts/run_temporal_averaging.py | alexkaiser/heart_valves | 53f30ec3680503542890a84949b7fb51d1734272 | [
"BSD-3-Clause"
] | null | null | null | scripts/run_temporal_averaging.py | alexkaiser/heart_valves | 53f30ec3680503542890a84949b7fb51d1734272 | [
"BSD-3-Clause"
] | null | null | null | scripts/run_temporal_averaging.py | alexkaiser/heart_valves | 53f30ec3680503542890a84949b7fb51d1734272 | [
"BSD-3-Clause"
] | null | null | null | import pyvista
import os, sys, glob
import subprocess
import math
from natsort import natsorted
import multiprocessing
if __name__ == '__main__':
    # Post-processing driver: average per-step simulation output onto the MRI
    # acquisition schedule (10 frames per cardiac cycle), then linearly
    # interpolate the averaged frames up to the 20-frame output rate.
    # Assumes it is run from the directory holding the "eulerian_vars####"
    # outputs, with the .vertex files one directory up.
    if len(sys.argv) >= 2:
        nprocs_sim = int(sys.argv[1]) # number of procs in the sim, which determines how many files go into the decomposed data
    else:
        print("using default nprocs_sim = 1")
        nprocs_sim = 1

    # first make sure there is a times file
    if not os.path.isfile('times.txt'):
        subprocess.call('visit -cli -nowin -s ~/copies_scripts/write_times_file_visit.py', shell=True)

    # One simulation time per visualization step, one float per line.
    times = []
    times_file = open('times.txt', 'r')
    for line in times_file:
        times.append(float(line))

    # Which halves of the pipeline to run.
    eulerian = True
    lagrangian = True

    # Select which cardiac cycles contribute to the averages.
    first_cycle = True
    second_cycle = False

    if first_cycle:
        cycles_to_output = [0] # zero indexed
        # set up some directories
        base_dir = "vis_data_averaged_cycle_1"
    elif second_cycle:
        cycles_to_output = [1] # zero indexed
        # set up some directories
        base_dir = "vis_data_averaged_cycle_2"
    else:
        cycles_to_output = [1,2,3] # zero indexed
        # set up some directories
        base_dir = "vis_data_averaged_cycle_2_3_4"

    # Cardiac cycle length (seconds — TODO confirm units) and the derived
    # frame spacings for the MRI and output schedules.
    cycle_duration = 8.3250000000000002e-01
    mri_read_times_per_cycle = 10
    dt_mri_read = cycle_duration / mri_read_times_per_cycle
    output_times_per_cycle = 20
    dt_output = cycle_duration / output_times_per_cycle

    if not os.path.exists(base_dir):
        os.mkdir(base_dir)

    if eulerian:
        eulerian_var_names = ['P','Omega', 'U']

        # output file extension
        extension = 'vtu'
        suffix = "_averaged"
        base_name_out = "eulerian_vars_mri_freq"

        # average all the Eulerian files here
        # for idx_mri_read in range(mri_read_times_per_cycle):
        #     average_eulerian_mesh_one_step(idx_mri_read, eulerian_var_names, times, cycle_duration, cycles_to_output, dt_mri_read, base_dir, base_name_out, extension)

        # One worker process per MRI frame; average_eulerian_mesh_one_step is
        # defined elsewhere in this file (not visible in this excerpt) and
        # presumably writes "eulerian_vars_mri_freq####.vtu" into base_dir.
        jobs = []
        for idx_mri_read in range(mri_read_times_per_cycle):
            p = multiprocessing.Process(target=average_eulerian_mesh_one_step, args=(idx_mri_read, eulerian_var_names, times, cycle_duration, cycles_to_output, dt_mri_read, base_dir, base_name_out, extension))
            jobs.append(p)
            p.start()
        for p in jobs:
            p.join()

        # for idx_output in range(output_times_per_cycle):
        #     eulerian_dir_name = base_dir + '/' + 'eulerian_vars' + suffix + str(idx_output).zfill(4)
        #     if not os.path.exists(eulerian_dir_name):
        #         os.mkdir(eulerian_dir_name)

        # only average cycle 2
        # cycles_to_include = [2]

        # loops over parallel data structure as outer loop
        # for proc_num in range(nprocs_sim):

        # read and zero meshes to use to accumulate from first mesh
        dir_name = "eulerian_vars" + str(0).zfill(4)

        # read all time zero meshes
        # meshes_mri_read = []
        # n_to_average = []
        # for idx_mri_read in range(mri_read_times_per_cycle):
        #     meshes_mri_read.append(read_distributed_vtr(dir_name))
        #     n_to_average.append(0)
        #     for var_name in eulerian_var_names:
        #         meshes_mri_read[idx_mri_read][var_name] *= 0.0

        # Reload the per-MRI-frame averages written by the workers above.
        meshes_mri_read = []
        for idx_mri_read in range(mri_read_times_per_cycle):
            fname = base_name_out + str(idx_mri_read).zfill(4) + '.' + extension
            meshes_mri_read.append( pyvista.read(base_dir + "/" + fname) )

        # Output meshes reuse the step-0 grid with all fields zeroed.
        meshes_output = []
        for idx_output in range(output_times_per_cycle):
            meshes_output.append(read_distributed_vtr(dir_name))
            for var_name in eulerian_var_names:
                meshes_output[idx_output][var_name] *= 0.0

        # # average over times
        # for idx, t in enumerate(times):
        #     # check if time in range
        #     cycle_num = math.floor(t / cycle_duration)
        #     # skip cycle one
        #     if cycle_num in cycles_to_output:
        #         print("processing step ", idx)
        #         dir_name = "eulerian_vars" + str(idx).zfill(4)
        #         # time since start of this cycle
        #         t_reduced = t % cycle_duration
        #         idx_mri_read = math.floor(t_reduced / dt_mri_read)
        #         mesh_tmp = read_distributed_vtr(dir_name)
        #         for var_name in eulerian_var_names:
        #             meshes_mri_read[idx_mri_read][var_name] += mesh_tmp[var_name]
        #         n_to_average[idx_mri_read] += 1.0
        #         # print("t = ", t, "t_reduced = ", t_reduced, "idx_mri_read = ", idx_mri_read)
        # print("n_to_average = ", n_to_average)

        # # convert sums to averages
        # for idx_mri_read in range(mri_read_times_per_cycle):
        #     for var_name in eulerian_var_names:
        #         meshes_mri_read[idx_mri_read][var_name] /= float(n_to_average[idx_mri_read])

        # linearly interpolate before output: even output frames coincide with
        # MRI frames...
        for idx_mri_read in range(mri_read_times_per_cycle):
            for var_name in eulerian_var_names:
                meshes_output[2*idx_mri_read][var_name] = meshes_mri_read[idx_mri_read][var_name]
        # ...odd output frames are midpoints between consecutive MRI frames,
        # wrapping back to frame 0 at the end of the cycle.
        for idx_mri_read in range(mri_read_times_per_cycle):
            idx_mri_read_next = (idx_mri_read + 1) % mri_read_times_per_cycle
            for var_name in eulerian_var_names:
                meshes_output[2*idx_mri_read + 1][var_name] = 0.5 * (meshes_mri_read[idx_mri_read][var_name] + meshes_mri_read[idx_mri_read_next][var_name])

        for idx_output in range(output_times_per_cycle):
            eulerian_dir_name = base_dir
            fname = "eulerian_vars" + suffix + str(idx_output).zfill(4) + '.' + extension
            meshes_output[idx_output].save(eulerian_dir_name + "/" + fname)

        # summary file (write_pvd is defined elsewhere in this file)
        nprocs_output = 1
        write_pvd("eulerian_vars" + suffix, dt_output, output_times_per_cycle, extension, nprocs_output)
        os.rename("eulerian_vars" + suffix + '.pvd', base_dir + "/eulerian_vars" + suffix + '.pvd')

    if lagrangian:
        suffix = "_averaged"

        # Every *.vertex file in the parent directory names a Lagrangian mesh
        # whose per-step positions live in "<base>####.vtu" files here.
        for lag_file in os.listdir('..'):
            if lag_file.endswith('.vertex'):
                print("found lag file ", lag_file, ", processing ")
                base_name_lag = lag_file.rsplit('.', 1)[0]
                print("base_name_lag = ", base_name_lag)

                # read and zero meshes to use to accumulate from first mesh
                fname = base_name_lag + str(0).zfill(4) + '.vtu'
                if not os.path.isfile(fname):
                    print("vtu file not found, cannot process this file, continuing")
                    continue

                meshes_mri_read = []
                n_to_average = []
                for idx_mri_read in range(mri_read_times_per_cycle):
                    meshes_mri_read.append(pyvista.read(fname))
                    n_to_average.append(0)
                    meshes_mri_read[idx_mri_read].points *= 0.0

                meshes_output = []
                for idx_output in range(output_times_per_cycle):
                    meshes_output.append(pyvista.read(fname))
                    meshes_output[idx_output].points *= 0.0

                # average over times: accumulate positions per MRI frame, then
                # divide by the number of contributing steps.
                for idx, t in enumerate(times):
                    # check if time in range
                    cycle_num = math.floor(t / cycle_duration)
                    # skip cycle one
                    if cycle_num in cycles_to_output:
                        fname = base_name_lag + str(idx).zfill(4) + '.vtu'
                        # time since start of this cycle
                        t_reduced = t % cycle_duration
                        idx_mri_read = math.floor(t_reduced / dt_mri_read)
                        mesh_tmp = pyvista.read(fname)
                        meshes_mri_read[idx_mri_read].points += mesh_tmp.points
                        n_to_average[idx_mri_read] += 1.0
                        # print("t = ", t, "t_reduced = ", t_reduced, "idx_mri_read = ", idx_mri_read)
                print("n_to_average = ", n_to_average)

                # convert sums to averages
                for idx_mri_read in range(mri_read_times_per_cycle):
                    meshes_mri_read[idx_mri_read].points /= float(n_to_average[idx_mri_read])

                # linearly interpolate before output (same even/odd scheme as
                # the Eulerian section above).
                for idx_mri_read in range(mri_read_times_per_cycle):
                    meshes_output[2*idx_mri_read].points = meshes_mri_read[idx_mri_read].points
                for idx_mri_read in range(mri_read_times_per_cycle):
                    idx_mri_read_next = (idx_mri_read + 1) % mri_read_times_per_cycle
                    meshes_output[2*idx_mri_read + 1].points = 0.5 * (meshes_mri_read[idx_mri_read].points + meshes_mri_read[idx_mri_read_next].points)

                for idx_output in range(output_times_per_cycle):
                    fname = base_name_lag + suffix + str(idx_output).zfill(4) + '.vtu'
                    meshes_output[idx_output].save(base_dir + "/" + fname)
                # os.rename(fname, base_dir + "/" + base_name_lag + suffix + '.pvd')

                # summary file
                extension = 'vtu'
                write_pvd(base_name_lag + suffix, dt_output, output_times_per_cycle, extension, 1)
                os.rename(base_name_lag + suffix + '.pvd', base_dir + "/" + base_name_lag + suffix + '.pvd')
| 33.882979 | 209 | 0.586499 |
3fec010889ccbdbd07b4bb7fe68a11cde75d9565 | 3,641 | py | Python | server.py | MVHSiot/yelperhelper | a94dc9e80e301241da58b678770338e3fa9b642e | [
"MIT"
] | null | null | null | server.py | MVHSiot/yelperhelper | a94dc9e80e301241da58b678770338e3fa9b642e | [
"MIT"
] | null | null | null | server.py | MVHSiot/yelperhelper | a94dc9e80e301241da58b678770338e3fa9b642e | [
"MIT"
] | null | null | null | import sys
# Make the production box's system-wide site-packages importable.  The
# hard-coded path only exists on the deployment machine, so failure is
# tolerated.
try:
    sys.path.append('/opt/python3/lib/python3.4/site-packages')
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; this stays a best-effort path tweak.
    pass
import yelp_api
import pickle
import calc

# PubNub credentials for the demo channel.
# NOTE(review): publish/subscribe keys are committed in source — acceptable
# for a class project, but real deployments should load them from the
# environment.
pub_key = 'pub-c-2c436bc0-666e-4975-baaf-63f16a61558d'
sub_key = 'sub-c-0442432a-3312-11e7-bae3-02ee2ddab7fe'

from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub

# Wire a PubNub client to 'secondary_channel'.  subscribeCallback is defined
# elsewhere in this file (not visible in this excerpt) and handles incoming
# messages — TODO confirm its exact definition.
pnconfig = PNConfiguration()
pnconfig.subscribe_key = sub_key
pnconfig.publish_key = pub_key
pubnub = PubNub(pnconfig)
pubnub.add_listener(subscribeCallback())
pubnub.subscribe().channels('secondary_channel').execute()

# PubNub dispatches on background threads; spin the main thread forever so
# the listener stays alive.
while True:
    pass
| 44.402439 | 198 | 0.611096 |
3fed58a2f0d55e3c995e8a4ab026bd1e2fa3c343 | 59 | py | Python | gmaploader/__init__.py | cormac-rynne/gmaploader | eec679af9a5d36b691bde05ffd6043bfef7e1acf | [
"MIT"
] | 2 | 2022-02-02T16:41:17.000Z | 2022-03-16T08:43:18.000Z | gmaploader/__init__.py | cormac-rynne/gmaploader | eec679af9a5d36b691bde05ffd6043bfef7e1acf | [
"MIT"
] | null | null | null | gmaploader/__init__.py | cormac-rynne/gmaploader | eec679af9a5d36b691bde05ffd6043bfef7e1acf | [
"MIT"
] | null | null | null | __version__ = '0.1.1'
from .gmaploader import GMapLoader
| 11.8 | 34 | 0.745763 |
3feef5a3e0cc27bf16fbab36a842bb9bb4ecc2cd | 643 | py | Python | machina/templatetags/forum_tracking_tags.py | jujinesy/initdjango-machina | 93c24877f546521867b3ef77fa278237af932d42 | [
"BSD-3-Clause"
] | 1 | 2021-10-08T03:31:24.000Z | 2021-10-08T03:31:24.000Z | machina/templatetags/forum_tracking_tags.py | jujinesy/initdjango-machina | 93c24877f546521867b3ef77fa278237af932d42 | [
"BSD-3-Clause"
] | 7 | 2020-02-12T01:11:13.000Z | 2022-03-11T23:26:32.000Z | machina/templatetags/forum_tracking_tags.py | jujinesy/initdjango-machina | 93c24877f546521867b3ef77fa278237af932d42 | [
"BSD-3-Clause"
] | 1 | 2019-04-20T05:26:27.000Z | 2019-04-20T05:26:27.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import template
from machina.core.loading import get_class
TrackingHandler = get_class('forum_tracking.handler', 'TrackingHandler')
register = template.Library()
| 24.730769 | 91 | 0.738725 |
3fef44aadd222f045efc994567ce2c00bef12f97 | 1,194 | py | Python | xmodaler/modeling/layers/attention_pooler.py | cclauss/xmodaler | 1368fba6c550e97008628edbf01b59a0a6c8fde5 | [
"Apache-2.0"
] | 830 | 2021-06-26T07:16:33.000Z | 2022-03-25T10:31:32.000Z | xmodaler/modeling/layers/attention_pooler.py | kevinjunwei/xmodaler | 3e128a816876988c5fb07d842fde4a140e699dde | [
"Apache-2.0"
] | 28 | 2021-08-19T12:39:02.000Z | 2022-03-14T13:04:19.000Z | xmodaler/modeling/layers/attention_pooler.py | kevinjunwei/xmodaler | 3e128a816876988c5fb07d842fde4a140e699dde | [
"Apache-2.0"
] | 85 | 2021-08-15T06:58:29.000Z | 2022-02-19T07:30:56.000Z | # Copyright 2021 JD.com, Inc., JD AI
"""
@author: Yehao Li
@contact: yehaoli.sysu@gmail.com
"""
import torch
import torch.nn as nn
__all__ = ["AttentionPooler"] | 29.85 | 68 | 0.593802 |
3fefc1a6bf75d8c0151f7c8fa8710346285e3ae9 | 281 | py | Python | aas_core_meta/__init__.py | aas-core-works/aas-core3-meta | 88b618c82f78392a47ee58cf2657ae6df8e5a418 | [
"MIT"
] | null | null | null | aas_core_meta/__init__.py | aas-core-works/aas-core3-meta | 88b618c82f78392a47ee58cf2657ae6df8e5a418 | [
"MIT"
] | null | null | null | aas_core_meta/__init__.py | aas-core-works/aas-core3-meta | 88b618c82f78392a47ee58cf2657ae6df8e5a418 | [
"MIT"
] | null | null | null | """Provide meta-models for Asset Administration Shell information model."""
__version__ = "2021.11.20a2"
__author__ = (
"Nico Braunisch, Marko Ristin, Robert Lehmann, Marcin Sadurski, Manuel Sauer"
)
__license__ = "License :: OSI Approved :: MIT License"
__status__ = "Alpha"
| 31.222222 | 81 | 0.736655 |
3ff189fdd25a003504ca018c6776d007950e9fc2 | 2,937 | py | Python | arxivmail/web.py | dfm/ArXivMailer | f217466b83ae3009330683d1c53ba5a44b4bab29 | [
"MIT"
] | 1 | 2020-09-15T11:59:44.000Z | 2020-09-15T11:59:44.000Z | arxivmail/web.py | dfm/ArXivMailer | f217466b83ae3009330683d1c53ba5a44b4bab29 | [
"MIT"
] | null | null | null | arxivmail/web.py | dfm/ArXivMailer | f217466b83ae3009330683d1c53ba5a44b4bab29 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import flask
from .mail import send_email
from .models import db, Subscriber, Category
__all__ = ["web"]
web = flask.Blueprint("web", __name__)
| 34.964286 | 76 | 0.606742 |
3ff244c8c0c0b1265e61249a530b3e42331c5fc4 | 13,794 | py | Python | qiskit/pulse/timeslots.py | lerongil/qiskit-terra | a25af2a2378bc3d4f5ec73b948d048d1b707454c | [
"Apache-2.0"
] | 3 | 2019-11-20T08:15:28.000Z | 2020-11-01T15:32:57.000Z | qiskit/pulse/timeslots.py | lerongil/qiskit-terra | a25af2a2378bc3d4f5ec73b948d048d1b707454c | [
"Apache-2.0"
] | null | null | null | qiskit/pulse/timeslots.py | lerongil/qiskit-terra | a25af2a2378bc3d4f5ec73b948d048d1b707454c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Timeslots for channels.
"""
from collections import defaultdict
import itertools
from typing import Tuple, Union, Optional
from .channels import Channel
from .exceptions import PulseError
# pylint: disable=missing-return-doc
def shift(self, time: int) -> 'Interval':
"""Return a new interval shifted by `time` from self
Args:
time: time to be shifted
Returns:
Interval: interval shifted by `time`
"""
return Interval(self.start + time, self.stop + time)
def __eq__(self, other):
"""Two intervals are the same if they have the same starting and stopping values.
Args:
other (Interval): other Interval
Returns:
bool: are self and other equal.
"""
return self.start == other.start and self.stop == other.stop
def stops_before(self, other):
"""Whether intervals stops at value less than or equal to the
other interval's starting time.
Args:
other (Interval): other Interval
Returns:
bool: are self and other equal.
"""
return self.stop <= other.start
def starts_after(self, other):
"""Whether intervals starts at value greater than or equal to the
other interval's stopping time.
Args:
other (Interval): other Interval
Returns:
bool: are self and other equal.
"""
return self.start >= other.stop
def __repr__(self):
"""Return a readable representation of Interval Object"""
return "{}({}, {})".format(self.__class__.__name__, self.start, self.stop)
class Timeslot:
"""Named tuple of (Interval, Channel)."""
def shift(self, time: int) -> 'Timeslot':
"""Return a new Timeslot shifted by `time`.
Args:
time: time to be shifted
"""
return Timeslot(self.interval.shift(time), self.channel)
def has_overlap(self, other: 'Timeslot') -> bool:
"""Check if self has overlap with `interval`.
Args:
other: Other Timeslot to check for overlap with
Returns:
bool: True if intervals overlap and are on the same channel
"""
return self.interval.has_overlap(other) and self.channel == other.channel
def __eq__(self, other) -> bool:
"""Two time-slots are the same if they have the same interval and channel.
Args:
other (Timeslot): other Timeslot
"""
return self.interval == other.interval and self.channel == other.channel
def __repr__(self):
"""Return a readable representation of Timeslot Object"""
return "{}({}, {})".format(self.__class__.__name__,
self.channel,
(self.interval.start, self.interval.stop))
class TimeslotCollection:
"""Collection of `Timeslot`s."""
def __init__(self, *timeslots: Union[Timeslot, 'TimeslotCollection']):
"""Create a new time-slot collection.
Args:
*timeslots: list of time slots
Raises:
PulseError: when overlapped time slots are specified
"""
self._table = defaultdict(list)
for timeslot in timeslots:
if isinstance(timeslot, TimeslotCollection):
self._merge_timeslot_collection(timeslot)
else:
self._merge_timeslot(timeslot)
    def _merge_timeslot_collection(self, other: 'TimeslotCollection'):
        """Mutably merge timeslot collections into this TimeslotCollection.

        Args:
            other: TimeSlotCollection to merge
        """
        for channel, other_ch_timeslots in other._table.items():
            if channel not in self._table:
                # Channel is new to self: no overlap possible, just copy.
                self._table[channel] += other_ch_timeslots  # extend to copy items
            else:
                # if channel is in self there might be an overlap
                for idx, other_ch_timeslot in enumerate(other_ch_timeslots):
                    insert_idx = self._merge_timeslot(other_ch_timeslot)
                    if insert_idx == len(self._table[channel]) - 1:
                        # Timeslot was inserted at end of list. The rest can be appended.
                        # NOTE(review): this fast path assumes the remaining
                        # (sorted, non-overlapping) slots of `other` cannot
                        # collide with existing slots once one lands at the
                        # end — confirm against _merge_timeslot's invariants.
                        self._table[channel] += other_ch_timeslots[idx + 1:]
                        break
def _merge_timeslot(self, timeslot: Timeslot) -> int:
"""Mutably merge timeslots into this TimeslotCollection.
Note timeslots are sorted internally on their respective channel
Args:
timeslot: Timeslot to merge
Returns:
int: Return the index in which timeslot was inserted
Raises:
PulseError: If timeslots overlap
"""
interval = timeslot.interval
ch_timeslots = self._table[timeslot.channel]
insert_idx = len(ch_timeslots)
# merge timeslots by insertion sort.
# Worst case O(n_channels), O(1) for append
# could be improved by implementing an interval tree
for ch_timeslot in reversed(ch_timeslots):
ch_interval = ch_timeslot.interval
if interval.start >= ch_interval.stop:
break
elif interval.has_overlap(ch_interval):
raise PulseError("Timeslot: {0} overlaps with existing"
"Timeslot: {1}".format(timeslot, ch_timeslot))
insert_idx -= 1
ch_timeslots.insert(insert_idx, timeslot)
return insert_idx
def ch_timeslots(self, channel: Channel) -> Tuple[Timeslot]:
"""Sorted tuple of `Timeslot`s for channel in this TimeslotCollection."""
if channel in self._table:
return tuple(self._table[channel])
return tuple()
def ch_start_time(self, *channels: Channel) -> int:
"""Return earliest start time in this collection.
Args:
*channels: Channels over which to obtain start_time.
"""
timeslots = list(itertools.chain(*(self._table[chan] for chan in channels
if chan in self._table)))
if timeslots:
return min(timeslot.start for timeslot in timeslots)
return 0
def ch_stop_time(self, *channels: Channel) -> int:
"""Return maximum time of timeslots over all channels.
Args:
*channels: Channels over which to obtain stop time.
"""
timeslots = list(itertools.chain(*(self._table[chan] for chan in channels
if chan in self._table)))
if timeslots:
return max(timeslot.stop for timeslot in timeslots)
return 0
    def ch_duration(self, *channels: Channel) -> int:
        """Return maximum duration of timeslots over all channels.

        Durations are measured from t=0, so this is simply the latest stop
        time on the requested channels (0 when none of them hold timeslots).

        Args:
            *channels: Channels over which to obtain the duration.
        """
        return self.ch_stop_time(*channels)
    def is_mergeable_with(self, other: 'TimeslotCollection') -> bool:
        """Return if self is mergeable with `other`, i.e. no timeslot of one
        collection overlaps a timeslot of the other on any shared channel.

        Args:
            other: TimeslotCollection to be checked for mergeability
        """
        common_channels = set(self.channels) & set(other.channels)
        for channel in common_channels:
            ch_timeslots = self.ch_timeslots(channel)
            other_ch_timeslots = other.ch_timeslots(channel)

            if ch_timeslots[-1].stop < other_ch_timeslots[0].start:
                continue  # We are appending along this channel

            # Both tuples are sorted by start time, so a two-pointer sweep
            # finds any overlap in linear time: advance whichever side's
            # current slot ends first.
            i = 0  # iterate through this
            j = 0  # iterate through other
            while i < len(ch_timeslots) and j < len(other_ch_timeslots):
                if ch_timeslots[i].interval.has_overlap(other_ch_timeslots[j].interval):
                    return False
                if ch_timeslots[i].stop <= other_ch_timeslots[j].start:
                    i += 1
                else:
                    j += 1
        return True
def merge(self, timeslots: 'TimeslotCollection') -> 'TimeslotCollection':
"""Return a new TimeslotCollection with `timeslots` merged into it.
Args:
timeslots: TimeslotCollection to be merged
"""
return TimeslotCollection(self, timeslots)
def shift(self, time: int) -> 'TimeslotCollection':
"""Return a new TimeslotCollection shifted by `time`.
Args:
time: time to be shifted by
"""
slots = [slot.shift(time) for slot in self.timeslots]
return TimeslotCollection(*slots)
    def complement(self, stop_time: Optional[int] = None) -> 'TimeslotCollection':
        """Return a complement TimeSlotCollection containing all unoccupied
        Timeslots within this TimeSlotCollection.

        Args:
            stop_time: Final time up to which complement Timeslots will be
                returned. If not set, defaults to last time in this
                TimeSlotCollection
        """
        timeslots = []
        # NOTE(review): `or` also treats an explicit stop_time of 0 as "not
        # set"; harmless here since a zero-length complement is empty anyway.
        stop_time = stop_time or self.stop_time
        for channel in self.channels:
            curr_time = 0
            # Walk the (sorted) occupied slots, emitting a gap slot for each
            # hole between consecutive occupied intervals.
            for timeslot in self.ch_timeslots(channel):
                next_time = timeslot.interval.start
                if next_time-curr_time > 0:
                    timeslots.append(Timeslot(Interval(curr_time, next_time), channel))
                curr_time = timeslot.interval.stop
            # pad out channel to stop_time
            if stop_time-curr_time > 0:
                timeslots.append(Timeslot(Interval(curr_time, stop_time), channel))
        return TimeslotCollection(*timeslots)
def __eq__(self, other) -> bool:
"""Two time-slot collections are the same if they have the same time-slots.
Args:
other (TimeslotCollection): other TimeslotCollection
"""
if set(self.channels) != set(other.channels):
return False
for channel in self.channels:
if self.ch_timeslots(channel) != self.ch_timeslots(channel):
return False
return True
def __repr__(self):
    """Return a readable representation of TimeslotCollection Object"""
    # Render each table entry as a list of (start, stop) pairs.
    rep = {
        key: [(timeslot.start, timeslot.stop) for timeslot in val]
        for key, val in self._table.items()
    }
    return self.__class__.__name__ + str(rep)
| 32.456471 | 89 | 0.603741 |
3ff2f2040265231a2d5824e04f8c8d39faec1ec0 | 22,499 | py | Python | core/assembler.py | iahuang/scratch-gcc | bc4989f3dc54f0cdc3098f66078d17750c111bec | [
"MIT"
] | null | null | null | core/assembler.py | iahuang/scratch-gcc | bc4989f3dc54f0cdc3098f66078d17750c111bec | [
"MIT"
] | null | null | null | core/assembler.py | iahuang/scratch-gcc | bc4989f3dc54f0cdc3098f66078d17750c111bec | [
"MIT"
] | null | null | null | """ A basic two-pass MIPS assembler. Outputs a binary file in a custom format that can then be loaded into Scratch """
import struct
import re
import json
import os
"""
Diagram of the Scratch MIPS VM memory space
+--------------------- <- 0x0000000
| i/o space (see below)
+--------------------- <- 0x0000100
| data segment
+---------------------
| program
|
+--------------------- <- everything from here up ^^^ is included in the scratch binary file
|
| stack ^^^^
+--------------------- <- stack_pointer
| uninitialized/heap
|
|
+--------------------- <- mem_end
Static memory segment for interfacing with the Scratch VM (256 bytes wide)
Definitions for interfacing with this part of memory can be found in "lib/sys.h"
io {
0x00000 char stdout_buffer - write to this address to print to the "console"
0x00004 uint32 mem_end - pointer to the last address in memory
0x00008 uint32 stack_start - pointer to the bottom of the stack
0x0000C uint8 halt - set this byte to halt execution of the program
for whatever reason
}
...
Scratch executable binary format (the file outputted by Assembly.outputBinaryFile() )
header (100 bytes) {
char[4] identifier - set to "SBIN"
uint32 program_counter - the location in memory to begin execution
uint32 stack_pointer - initial location of the stack pointer
uint32 alloc_size - total amount of system memory to allocate
}
vvvv to be loaded in starting at address 0x00000000
program_data (n bytes) {
byte[256] - i/o segment data (zero initialized)
byte[n] - program data
}
"""
a
| 34.089394 | 126 | 0.586382 |
3ff3b22779c14ce17a4d6563f15286360782e0ac | 3,237 | py | Python | qvdfile/tests/test_qvdfile.py | cosmocracy/qvdfile | c1f92ec153c07f607fd57c6f6679e3c7269d643e | [
"Apache-2.0"
] | 17 | 2019-07-18T12:50:33.000Z | 2021-05-25T06:26:45.000Z | qvdfile/tests/test_qvdfile.py | cosmocracy/qvdfile | c1f92ec153c07f607fd57c6f6679e3c7269d643e | [
"Apache-2.0"
] | 2 | 2021-05-15T03:53:08.000Z | 2021-07-22T14:31:15.000Z | qvdfile/tests/test_qvdfile.py | cosmocracy/qvdfile | c1f92ec153c07f607fd57c6f6679e3c7269d643e | [
"Apache-2.0"
] | 5 | 2019-07-18T12:55:31.000Z | 2021-12-21T15:09:37.000Z | import pytest
import errno
import os
import glob
import shutil
import xml.etree.ElementTree as ET
from qvdfile.qvdfile import QvdFile, BadFormat
# READING QVD ==================================================================
# init
def test_init_smoke(qvd):
    """Basic sanity checks on a freshly parsed QVD fixture."""
    # table-level metadata lands in .attribs
    assert "TableName" in qvd.attribs.keys()
    assert qvd.attribs["TableName"] == "tab1"
    # per-field metadata lands in .fields
    assert len(qvd.fields) == 3
    assert "ID" in {f["FieldName"] for f in qvd.fields}
def test_init_no_file():
    """Opening a path that does not exist must raise FileNotFoundError."""
    with pytest.raises(FileNotFoundError):
        QvdFile("data/no_such_file.qvd")
def test_init_not_qvd_or_bad_file():
    """Feeding a non-QVD file (this test module itself) must raise BadFormat."""
    with pytest.raises(BadFormat):
        QvdFile(__file__)
# getFieldVal
# fieldsInRow
# createMask
# getRow
# WRITING QVD ===================================================================
# code and tests will follow....
| 22.957447 | 81 | 0.611369 |
3ff5387e0936b375509e91f2742e4bc5ae6feee1 | 4,221 | py | Python | app/__init__.py | i2nes/app-engine-blog | 94cdc25674c946ad643f7f140cbedf095773de3f | [
"MIT"
] | null | null | null | app/__init__.py | i2nes/app-engine-blog | 94cdc25674c946ad643f7f140cbedf095773de3f | [
"MIT"
] | null | null | null | app/__init__.py | i2nes/app-engine-blog | 94cdc25674c946ad643f7f140cbedf095773de3f | [
"MIT"
] | null | null | null | from flask import Flask
from app.models import Article, Feature
import logging
def create_app(config, blog_config):
    """Create and configure the Flask app for this App Engine instance.

    Startup steps:
    1. Instantiate the Flask app with the config settings.
    2. Register blueprints.
    3. Create the Contact and About pages in the datastore if they don't exist yet.
    4. Load the blog_config settings from the datastore, or add them if they don't exist yet.

    Args:
        config: dict-like Flask configuration, applied via app.config.update().
        blog_config: dict of blog feature settings; mutated in place with the
            values found in the datastore (datastore values take precedence).

    Returns:
        The configured Flask application object.
    """
    logging.info('STARTUP: Getting ready to launch the Flask App')
    app = Flask(__name__)
    app.config.update(config)
    # Register blueprints
    logging.info('STARTUP: Register Blueprints')
    # Imported here rather than at module level — presumably to avoid
    # circular imports at startup; TODO confirm.
    from .main import app as main_blueprint
    app.register_blueprint(main_blueprint, url_prefix='/')
    from .editor import app as editor_blueprint
    app.register_blueprint(editor_blueprint, url_prefix='/editor')
    # Add Contact and About pages to the datastore when first launching the blog
    logging.info('STARTUP: Set up Contact and About pages')
    # Contact page creation: look the page up by its fixed slug and only
    # create it when the query comes back empty.
    query = Article.query(Article.slug == 'contact-page')
    result = query.fetch(1)
    if result:
        logging.info('STARTUP: Contact page exists')
    else:
        logging.info('STARTUP: Creating a contact page')
        contact_page = Article()
        contact_page.title1 = 'Contact Me'
        contact_page.title2 = 'Have questions? I have answers (maybe).'
        contact_page.slug = 'contact-page'
        contact_page.author = ''
        contact_page.content = 'Want to get in touch with me? Fill out the form below to send me a message and I ' \
                               'will try to get back to you within 24 hours! '
        contact_page.published = False
        contact_page.put()
    # About page creation: same create-if-missing pattern as the contact page.
    query = Article.query(Article.slug == 'about-page')
    result = query.fetch(1)
    if result:
        logging.info('STARTUP: About page exists')
    else:
        logging.info('STARTUP: Creating an about page')
        about_page = Article()
        about_page.title1 = 'About Me'
        about_page.title2 = 'This is what I do.'
        about_page.slug = 'about-page'
        about_page.author = ''
        about_page.content = ''
        about_page.published = False
        about_page.put()
    # Register blog configurations.
    # The blog is initially configured with the blog_config settings passed in.
    # The settings are added to the datastore and will take precedence from now on.
    # You can change the settings in the datastore; they are only read at
    # startup, so instances must be restarted to apply changes.
    logging.info('STARTUP: Register Blog Configurations')
    query = Feature.query()
    for feature in blog_config:
        # TODO: Add the accesslist to the datastore. The access list is still read only from the config file.
        if feature == 'EDITOR_ACCESS_LIST':
            pass
        # TODO: The posts limit is an int and needs to be converted. Find a better way of doing this.
        # Special case: datastore values are strings, so this setting is
        # stored via str() and converted back with int() on load.
        elif feature == 'POSTS_LIST_LIMIT':
            result = query.filter(Feature.title == feature).fetch()
            if result:
                logging.info('STARTUP: Loading {}'.format(result[0].title))
                blog_config['POSTS_LIST_LIMIT'] = int(result[0].value)
            else:
                logging.info('STARTUP: Adding to datastore: {}'.format(feature))
                f = Feature()
                f.title = feature
                f.value = str(blog_config[feature])
                f.put()
        # Load the configs or add them to the datastore if they don't exist yet
        else:
            result = query.filter(Feature.title == feature).fetch()
            if result:
                logging.info('STARTUP: Loading {}'.format(result[0].title))
                blog_config[result[0].title] = result[0].value
            else:
                logging.info('STARTUP: Adding to datastore: {}'.format(feature))
                f = Feature()
                f.title = feature
                f.value = blog_config[feature]
                f.put()
    # Startup complete
    logging.info('STARTUP: READY TO ROCK!!!')
    return app
| 33.768 | 116 | 0.630419 |
3ff664299cdf95218a7f9411379521d7b5cdbaa4 | 430 | py | Python | libs/msfpayload.py | darkoperator/SideStep | 2c75af77ee2241595de4c65d7e4f8342dcc0bb50 | [
"BSL-1.0"
] | 3 | 2015-09-16T16:09:14.000Z | 2017-01-14T21:53:08.000Z | libs/msfpayload.py | darkoperator/SideStep | 2c75af77ee2241595de4c65d7e4f8342dcc0bb50 | [
"BSL-1.0"
] | null | null | null | libs/msfpayload.py | darkoperator/SideStep | 2c75af77ee2241595de4c65d7e4f8342dcc0bb50 | [
"BSL-1.0"
] | 2 | 2016-04-22T04:44:50.000Z | 2021-12-18T15:12:22.000Z | """
Generates the Meterpreter payload from msfvenom
"""
import subprocess | 53.75 | 275 | 0.727907 |
3ff6b1161dba69f783ae2e124e780852ea91eaaa | 9,689 | py | Python | RevitPythonShell_Scripts/GoogleTools.extension/GoogleTools.tab/Ontologies.Panel/BOS_SetValues.pushbutton/script.py | arupiot/create_revit_families | 9beab3c7e242426b2dca99ca5477fdb433e39db2 | [
"MIT"
] | 1 | 2021-02-04T18:20:58.000Z | 2021-02-04T18:20:58.000Z | RevitPythonShell_Scripts/GoogleTools.extension/GoogleTools.tab/Ontologies.Panel/BOS_SetValues.pushbutton/script.py | arupiot/DBOTools | 9beab3c7e242426b2dca99ca5477fdb433e39db2 | [
"MIT"
] | null | null | null | RevitPythonShell_Scripts/GoogleTools.extension/GoogleTools.tab/Ontologies.Panel/BOS_SetValues.pushbutton/script.py | arupiot/DBOTools | 9beab3c7e242426b2dca99ca5477fdb433e39db2 | [
"MIT"
] | null | null | null | # Select an element
# Open yaml file with entity types
# If parameters are already present, set values according to yaml input
import sys
import clr
import System
import rpw
import yaml
import pprint
from System.Collections.Generic import *
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
from rpw.ui.forms import *
from Autodesk.Revit.UI.Selection import ObjectType
# Handles to the active Revit session. `__revit__` is injected by the
# RevitPythonShell / pyRevit host environment.
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
app = doc.Application
# Pretty-printer used for all diagnostic output below.
pp = pprint.PrettyPrinter(indent=1)
# Shared parameter file configured in this Revit session
# (presumably may be None when no shared parameter file is set -- TODO confirm).
shared_param_file = app.OpenSharedParameterFile()
# Elements currently selected in the active document.
selection = [doc.GetElement(element_Id) for element_Id in uidoc.Selection.GetElementIds()]
def parameterName2ExternalDefinition(sharedParamFile, definitionName):
    """Look up *definitionName* in the shared parameter file.

    Scans every definition of every group and returns the matching external
    definition (if the name occurs more than once, the last occurrence wins),
    or None when no definition carries that name.
    """
    found = None
    for group in sharedParamFile.Groups:
        matches = [d for d in group.Definitions if d.Name == definitionName]
        if matches:
            found = matches[-1]
    return found
# Partition the current selection: only FamilyInstance elements can receive
# the ontology parameters; everything else is reported and dropped.
family_instances = []
not_family_instances = []
print("Selected {} items".format(len(selection)))
for item in selection:
    if type(item).__name__ == "FamilyInstance":
        family_instances.append(item)
    else:
        not_family_instances.append(item)
print("The following elements are family instances and will receive the parameter values from the ontology:")
if family_instances == []:
    print("None")
else:
    print([item.Id.ToString() for item in family_instances])
print("The following elements are not family instances and will be dropped from the selection:")
if not_family_instances == []:
    print("None")
else:
    print([item.Id.ToString() for item in not_family_instances])
# Ask the user for the ontology file (select_file comes from rpw.ui.forms).
yaml_path = select_file("Yaml File (*.yaml)|*.yaml", "Select the yaml file with the parameters", multiple = False, restore_directory = True)
if yaml_path:
    with open(yaml_path, "r") as stream:
        ontology_yaml = yaml.safe_load(stream)
# NOTE(review): the code below assumes the dialog returned a path; if it was
# cancelled, ontology_yaml is undefined and the next lines raise NameError --
# TODO confirm intended behaviour.
# Derive the ontology "group" name from the file name without its extension.
file_name_split = yaml_path.split("\\")
file_name_with_ext = file_name_split[-1]
file_name_with_ext_split = file_name_with_ext.split(".")
group_name = file_name_with_ext_split[0]
# Keep only the canonical entity types from the ontology.
canonical_types = dict(filter(lambda elem : elem[1].get("is_canonical") == True, ontology_yaml.items()))
# Collect the union of every canonical type's "implements" entries...
parameter_names = []
for canonical_type in canonical_types.items():
    implements_params = canonical_type[1]["implements"]
    for implement_param in implements_params:
        parameter_names.append(implement_param)
# ...de-duplicated while preserving first-seen order.
parameter_names = list(dict.fromkeys(parameter_names))
# Revit parameter names carry an "Implements_" prefix; "Entity_Type" is
# always required in addition.
param_names_with_prefix = []
for pn in parameter_names:
    param_name_with_prefix = "Implements_" + pn
    param_names_with_prefix.append(param_name_with_prefix)
param_names_with_prefix.append("Entity_Type")
#print(param_names_with_prefix)
# Check that every selected family instance carries the required parameters;
# instances missing any parameter are removed from the work list.
print("Checking if family instances have the required parameters...")
# Bug fix: iterate over a copy -- the original code removed items from the
# list it was iterating, which silently skips the element after each removal.
for family_instance in list(family_instances):
    all_params = family_instance.Parameters
    all_params_names = [param.Definition.Name for param in all_params]
    # Required parameter names that this instance does not carry.
    missing_params = [param_name for param_name in param_names_with_prefix
                      if param_name not in all_params_names]
    if not missing_params:
        print("Family instance {} has all required parameters.".format(family_instance.Id.ToString()))
    else:
        print("Family instance {} is missing the following parameters".format(family_instance.Id))
        pp.pprint(missing_params)
        family_instances.remove(family_instance)
        # Bug fix: the original print left the "{}" placeholder unfilled
        # (no .format() call), so it printed a literal "{}".
        print("Family instance {} removed from the list of objects to modify".format(family_instance.Id.ToString()))
# ADD SELECTION OF TYPE THROUGH MENU
print("Please select an entity type from the yaml ontology...")
form_title = "Select an entity type:"
# Canonical entity types only; their names become the selectable options.
canonical_types = dict(filter(lambda elem : elem[1].get("is_canonical") == True, ontology_yaml.items()))
options = canonical_types.keys()
# Modal list dialog (rpw); exit_on_close presumably aborts the script when
# the dialog is dismissed -- TODO confirm rpw behaviour.
entity_type_name = rpw.ui.forms.SelectFromList(form_title,options,description=None,sort=True,exit_on_close=True)
entity_type_dict = (dict(filter(lambda elem: elem [0] == entity_type_name, canonical_types.items())))
print("Printing selected entity type:")
pp.pprint(entity_type_dict)
# The selected type's "implements" list drives which Implements_* parameters
# get set to True below.
implements = entity_type_dict[entity_type_name]["implements"]
params_to_edit_names = []
for i in implements:
    params_to_edit_names.append("Implements_"+i)
print(params_to_edit_names)
print("The following instances will be modified according to Entity Type: {}".format(entity_type_name))
pp.pprint(family_instances)
# Apply the selected entity type inside a single Revit transaction.
# Failures are collected as warnings and reported at the end.
warnings = []
t = Transaction(doc, "Populate BOS parameters")
t.Start()
for family_instance in family_instances:
    print("Editing family instance {}...".format(family_instance.Id.ToString()))
    # MODIFY ENTITY TYPE
    try:
        p_entity_type = family_instance.LookupParameter("Entity_Type")
        p_entity_type.Set(entity_type_name)
        print("Entity_Type parameter successfully edited for family instance {}.".format(family_instance.Id.ToString()))
    # Bug fix: narrowed from a bare `except:` so that system-exiting
    # exceptions (KeyboardInterrupt, SystemExit) are no longer swallowed.
    except Exception:
        message = "Couldn't edit parameter Entity_Type for family instance {}.".format(family_instance.Id.ToString())
        warnings.append(message)
    # MODIFY YESNO PARAMETERS
    # Collect every Implements_* parameter, then set it True when the selected
    # entity type implements it and False otherwise.
    all_implements_params = []
    for p in family_instance.Parameters:
        if "Implements_" in p.Definition.Name:
            all_implements_params.append(p)
    for p in all_implements_params:
        try:
            if p.Definition.Name in params_to_edit_names:
                p.Set(True)
            else:
                p.Set(False)
            print("{} parameter successfully edited for family instance {}.".format(p.Definition.Name, family_instance.Id.ToString()))
        except Exception:
            message = "Couldn't edit parameter {} for family instance {}.".format(p.Definition.Name, family_instance.Id.ToString())
            warnings.append(message)
t.Commit()
print("Script has ended")
if warnings == []:
    print("Warnings: None")
else:
    print("Warnings:")
    for w in warnings:
        print(w)
| 38.601594 | 158 | 0.680462 |
3ff6bad744395c2228278988f9b9886b23c17ebf | 8,110 | py | Python | Code/src/models/optim/SimCLR_trainer.py | antoine-spahr/X-ray-Anomaly-Detection | 850b6195d6290a50eee865b4d5a66f5db5260e8f | [
"MIT"
] | 2 | 2020-10-12T08:25:13.000Z | 2021-08-16T08:43:43.000Z | Code/src/models/optim/SimCLR_trainer.py | antoine-spahr/X-ray-Anomaly-Detection | 850b6195d6290a50eee865b4d5a66f5db5260e8f | [
"MIT"
] | null | null | null | Code/src/models/optim/SimCLR_trainer.py | antoine-spahr/X-ray-Anomaly-Detection | 850b6195d6290a50eee865b4d5a66f5db5260e8f | [
"MIT"
] | 1 | 2020-06-17T07:40:17.000Z | 2020-06-17T07:40:17.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.manifold import TSNE
from src.models.optim.CustomLosses import NT_Xent_loss, SupervisedContrastiveLoss
from src.utils.utils import print_progessbar
| 38.990385 | 181 | 0.561159 |
3ff6e816cd8b898e3be215d0d77841e6ad25c848 | 543 | py | Python | patients/migrations/0008_alter_patient_age.py | Curewell-Homeo-Clinic/admin-system | c8ce56a2bdbccfe1e6bec09068932f1943498b9f | [
"MIT"
] | 1 | 2021-11-29T15:24:41.000Z | 2021-11-29T15:24:41.000Z | patients/migrations/0008_alter_patient_age.py | Curewell-Homeo-Clinic/admin-system | c8ce56a2bdbccfe1e6bec09068932f1943498b9f | [
"MIT"
] | 46 | 2021-11-29T16:05:55.000Z | 2022-03-01T13:04:45.000Z | patients/migrations/0008_alter_patient_age.py | Curewell-Homeo-Clinic/admin-system | c8ce56a2bdbccfe1e6bec09068932f1943498b9f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-20 16:13
import django.core.validators
from django.db import migrations, models
| 27.15 | 179 | 0.67035 |
3ff70f0f8e53ee1c511ea409b894a75564f6138d | 4,348 | py | Python | kitsune/questions/tests/test_utils.py | AndrewDVXI/kitsune | 84bd4fa60346681c3fc5a03b0b1540fd1335cee2 | [
"BSD-3-Clause"
] | 1 | 2021-07-18T00:41:16.000Z | 2021-07-18T00:41:16.000Z | kitsune/questions/tests/test_utils.py | AndrewDVXI/kitsune | 84bd4fa60346681c3fc5a03b0b1540fd1335cee2 | [
"BSD-3-Clause"
] | 9 | 2021-04-08T22:05:53.000Z | 2022-03-12T00:54:11.000Z | kitsune/questions/tests/test_utils.py | AndrewDVXI/kitsune | 84bd4fa60346681c3fc5a03b0b1540fd1335cee2 | [
"BSD-3-Clause"
] | 1 | 2020-07-28T15:53:02.000Z | 2020-07-28T15:53:02.000Z | from kitsune.questions.models import Answer, Question
from kitsune.questions.tests import AnswerFactory, QuestionFactory
from kitsune.questions.utils import (
get_mobile_product_from_ua,
mark_content_as_spam,
num_answers,
num_questions,
num_solutions,
)
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import UserFactory
from nose.tools import eq_
from parameterized import parameterized
| 34.507937 | 207 | 0.606486 |
3ff942a422edefd4743417af8a01150a5a71f98a | 10,122 | py | Python | scripts/create_fluseverity_figs_v2/export_zOR_classif_swap.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | 3 | 2018-03-29T23:02:43.000Z | 2020-08-10T12:01:50.000Z | scripts/create_fluseverity_figs_v2/export_zOR_classif_swap.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | scripts/create_fluseverity_figs_v2/export_zOR_classif_swap.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 10/14/14
###Function: Export zOR retrospective and early warning classifications into csv file format (SDI and ILINet, national and regional for SDI)
### Use nation-level peak-based retrospective classification for SDI region analysis
# 10/14/14 swap OR age groups
###Import data: R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
#### These data were cleaned with data_extraction/clean_OR_hhsreg_week_outpatient.R and exported with OR_zip3_week.sql
#### allpopstat_zip3_season_cl.csv includes child, adult, and other populations; popstat_zip3_season_cl.csv includes only child and adult populations
###Command Line: python export_zOR_classif_swap.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
## local modules ##
import functions_v2 as fxn
### data structures ###
### called/local plotting parameters ###
nw = fxn.gp_normweeks # number of normalization weeks in baseline period
### functions ###
##############################################
# SDI NATIONAL
# national files
# NOTE: file handles are opened and never closed; the script relies on
# interpreter exit to release them (Python 2 script).
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# ##############################################
# # ILINet NATIONAL
# # national files
# incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
# incidin.readline() # remove header
# incid = csv.reader(incidin, delimiter=',')
# popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
# pop = csv.reader(popin, delimiter=',')
# thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
# thanksin.readline() # remove header
# thanks=csv.reader(thanksin, delimiter=',')
# # dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
# d_wk, d_incid, d_OR = fxn.ILINet_week_OR_processing(incid, pop)
# d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# # d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# # d_ILINet_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
# d_ILINet_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
##############################################
# SDI REGION: region-level peak-based retrospective classification
# regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files (re-opened because the earlier readers were exhausted above)
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_reg, d_OR_reg = fxn.week_OR_processing_region(regincid, regpop)
# dict_zOR_reg[(week, hhsreg)] = zOR
d_zOR_reg = fxn.week_zOR_processing_region(d_wk, d_OR_reg)
# dict_incid53ls_reg[(seasonnum, region)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, region)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_reg[(seasonnum, region)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_reg, d_OR53ls_reg, d_zOR53ls_reg = fxn.week_plotting_dicts_region(d_wk, d_incid_reg, d_OR_reg, d_zOR_reg)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index(d_wk, d_incid53ls, d_incid53ls_reg, 'region', thanks)
# d_classifzOR_reg[(seasonnum, region)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_reg = fxn.classif_zOR_region_processing(d_classifindex, d_wk, d_zOR53ls_reg)
##############################################
# SDI STATE: state-level peak-based retrospective classification
# import same files as regional files (readers are re-opened because the
# ones above were exhausted by the region pass)
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_state, d_OR_state = fxn.week_OR_processing_state(regincid, regpop)
# dict_zOR_state[(week, state)] = zOR
d_zOR_state = fxn.week_zOR_processing_state(d_wk, d_OR_state)
# dict_incid53ls_state[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_state[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_state, d_OR53ls_state, d_zOR53ls_state = fxn.week_plotting_dicts_state(d_wk, d_incid_state, d_OR_state, d_zOR_state)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index_state(d_wk, d_incid53ls, d_incid53ls_state, 'state', thanks)
# d_classifzOR_state[(seasonnum, state)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_state = fxn.classif_zOR_state_processing(d_classifindex, d_wk, d_zOR53ls_state)
##############################################
# Echo the classification dicts, then export them to CSV (Python 2 prints).
print d_classifzOR
print d_classifzOR_reg
# NOTE(review): print_dict_to_file / print_dict_to_file2 / print_dict_to_file3
# are not defined in this module and are not referenced via `fxn.`; presumably
# they live in functions_v2 -- as written these calls would raise NameError.
# TODO confirm where these helpers are defined.
fn1 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_%s_swap.csv' %(nw)
print_dict_to_file(d_classifzOR, fn1)
# fn2 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/ILINet_national_classifications_%s_swap.csv' %(nw)
# print_dict_to_file(d_ILINet_classifzOR, fn2)
fn3 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_regional_classifications_%sreg_swap.csv' %(nw)
print_dict_to_file2(d_classifzOR_reg, fn3)
fn4 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_%sst_swap.csv' %(nw)
print_dict_to_file3(d_classifzOR_state, fn4)
3ff99e7156481e3c6520089236ad30d435cc64ca | 3,346 | py | Python | sonar/endpoints.py | sharm294/sonar | 99de16dd16d0aa77734584e67263c78a37abef86 | [
"MIT"
] | 5 | 2018-11-21T02:33:38.000Z | 2020-10-30T12:22:05.000Z | sonar/endpoints.py | sharm294/sonar | 99de16dd16d0aa77734584e67263c78a37abef86 | [
"MIT"
] | 2 | 2018-12-28T18:31:45.000Z | 2020-06-12T19:24:57.000Z | sonar/endpoints.py | sharm294/sonar | 99de16dd16d0aa77734584e67263c78a37abef86 | [
"MIT"
] | 1 | 2019-03-10T13:48:50.000Z | 2019-03-10T13:48:50.000Z | """
Signal endpoints that can be used in testbenches
"""
import textwrap
from typing import Dict
import sonar.base_types as base
| 22.456376 | 104 | 0.545129 |
3ff9c9e147dda16eeaf022e601e081b35faea86c | 15,400 | py | Python | minemeld/ft/condition/BoolExprParser.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | 1 | 2021-01-02T07:25:04.000Z | 2021-01-02T07:25:04.000Z | minemeld/ft/condition/BoolExprParser.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | null | null | null | minemeld/ft/condition/BoolExprParser.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | 1 | 2019-03-14T06:52:52.000Z | 2019-03-14T06:52:52.000Z | # Generated from BoolExpr.g4 by ANTLR 4.5.1
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
# flake8: noqa
| 33.04721 | 241 | 0.586104 |
3ffb7c0442cbda7e7c873ec775ef33cdb0c000d2 | 398 | py | Python | nodes/networkedSingleStepper/temporaryURLNode.py | imoyer/pygestalt | d332df64264cce4a2bec8a73d698c386f1eaca7b | [
"MIT"
] | 1 | 2017-07-03T08:34:39.000Z | 2017-07-03T08:34:39.000Z | nodes/networkedSingleStepper/temporaryURLNode.py | imoyer/pygestalt | d332df64264cce4a2bec8a73d698c386f1eaca7b | [
"MIT"
] | 3 | 2015-12-04T23:14:50.000Z | 2016-11-08T16:24:32.000Z | nodes/networkedSingleStepper/temporaryURLNode.py | imnp/pygestalt | d332df64264cce4a2bec8a73d698c386f1eaca7b | [
"MIT"
] | 1 | 2017-09-13T00:17:39.000Z | 2017-09-13T00:17:39.000Z | <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>404 Not Found</title>
</head><body>
<h1>Not Found</h1>
<p>The requested URL /vn/testNode.py was not found on this server.</p>
<p>Additionally, a 404 Not Found
error was encountered while trying to use an ErrorDocument to handle the request.</p>
<hr>
<address>Apache Server at www.pygestalt.org Port 80</address>
</body></html>
| 33.166667 | 85 | 0.718593 |
3ffbaac7ded264cd662d18071c85e8138b2662eb | 4,761 | py | Python | cppwg/writers/header_collection_writer.py | josephsnyder/cppwg | 265117455ed57eb250643a28ea6029c2bccf3ab3 | [
"MIT"
] | 21 | 2017-10-03T14:29:36.000Z | 2021-12-07T08:54:43.000Z | cppwg/writers/header_collection_writer.py | josephsnyder/cppwg | 265117455ed57eb250643a28ea6029c2bccf3ab3 | [
"MIT"
] | 2 | 2017-12-29T19:17:44.000Z | 2020-03-27T14:59:27.000Z | cppwg/writers/header_collection_writer.py | josephsnyder/cppwg | 265117455ed57eb250643a28ea6029c2bccf3ab3 | [
"MIT"
] | 6 | 2019-03-21T11:55:52.000Z | 2021-07-13T20:49:50.000Z | #!/usr/bin/env python
"""
Generate the file classes_to_be_wrapped.hpp, which contains includes,
instantiation and naming typedefs for all classes that are to be
automatically wrapped.
"""
import os
import ntpath
| 36.068182 | 91 | 0.603235 |
3ffbd01add7dfacc772a2751a5811b5cb60b641e | 6,590 | py | Python | 22-crab-combat/solution22_2.py | johntelforduk/advent-of-code-2020 | 138df3a7b12e418f371f641fed02e57a98a7392e | [
"MIT"
] | 1 | 2020-12-03T13:20:49.000Z | 2020-12-03T13:20:49.000Z | 22-crab-combat/solution22_2.py | johntelforduk/advent-of-code-2020 | 138df3a7b12e418f371f641fed02e57a98a7392e | [
"MIT"
] | null | null | null | 22-crab-combat/solution22_2.py | johntelforduk/advent-of-code-2020 | 138df3a7b12e418f371f641fed02e57a98a7392e | [
"MIT"
] | null | null | null | # Solution to part 2 of day 22 of AOC 2020, Crab Combat.
# https://adventofcode.com/2020/day/22
import sys
VERBOSE = ('-v' in sys.argv)
def text_to_cards(text: str) -> list:
    """Parse one player's section of the input file into a list of card values.

    The first line of *text* is the player header ("Player 1:") and is
    skipped; every remaining non-blank line is one card.

    Args:
        text: One player's block of the puzzle input.

    Returns:
        List of ints, top of the deck first.
    """
    cards = []
    # Each card starts on a new line. Ignore the first line, as it is the player number.
    for card in text.split('\n')[1:]:
        # Robustness: tolerate a trailing newline / blank lines, which would
        # previously crash int('').
        if card.strip():
            cards.append(int(card))
    return cards
def main():
    """Read the puzzle input named on the command line, play a game of
    Crab Combat (part 2), and print the winning score."""
    with open(sys.argv[1]) as handle:
        whole_text = handle.read()
    # The two players' decks are separated by a blank line.
    p1_text, p2_text = whole_text.split('\n\n')
    game = Combat(game=1,
                  p1_cards=text_to_cards(p1_text),
                  p2_cards=text_to_cards(p2_text))
    print('== Post-game results ==')
    game.p1_deck.display()
    game.p2_deck.display()
    print('Part 2:', game.calculate_winning_score())
if __name__ == "__main__":
main()
| 35.621622 | 119 | 0.582398 |
3ffc66c1a55abdcb165f5612bc7ea3c265086406 | 246 | py | Python | consts.py | mauroreisvieira/sublime-tailwindcss-intellisense | 140edc90c59c045fc8a9d7f6bcff0b727660ee64 | [
"MIT"
] | null | null | null | consts.py | mauroreisvieira/sublime-tailwindcss-intellisense | 140edc90c59c045fc8a9d7f6bcff0b727660ee64 | [
"MIT"
] | null | null | null | consts.py | mauroreisvieira/sublime-tailwindcss-intellisense | 140edc90c59c045fc8a9d7f6bcff0b727660ee64 | [
"MIT"
] | null | null | null | import os
# @see https://marketplace.visualstudio.com/items?itemName=bradlc.vscode-tailwindcss
EXTENSION_UID = "bradlc.vscode-tailwindcss"
EXTENSION_VERSION = "0.5.2"
SERVER_BINARY_PATH = os.path.join("extension", "dist", "server", "index.js")
| 30.75 | 84 | 0.764228 |
3ffe70804c74668d12ccd199fbcd96d4fb1cfb92 | 2,426 | py | Python | backend/app/alembic/versions/491383f70589_add_separate_reported_and_deleted_tables.py | Pinafore/Karl-flashcards-web-app | 2f4d9925c545f83eb3289dfef85d9b0bf9bfeb8c | [
"Apache-2.0"
] | 7 | 2020-09-13T06:06:32.000Z | 2021-11-15T11:37:16.000Z | backend/app/alembic/versions/491383f70589_add_separate_reported_and_deleted_tables.py | Pinafore/Karl-flashcards-web-app | 2f4d9925c545f83eb3289dfef85d9b0bf9bfeb8c | [
"Apache-2.0"
] | 16 | 2020-08-28T20:38:27.000Z | 2021-03-18T04:03:00.000Z | backend/app/alembic/versions/491383f70589_add_separate_reported_and_deleted_tables.py | Pinafore/Karl-flashcards-web-app | 2f4d9925c545f83eb3289dfef85d9b0bf9bfeb8c | [
"Apache-2.0"
] | null | null | null | """add separate reported and deleted tables
Revision ID: 491383f70589
Revises: 9afc4e3a9bf3
Create Date: 2020-06-26 05:23:30.267933
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '491383f70589'
down_revision = '9afc4e3a9bf3'
branch_labels = None
depends_on = None
| 41.827586 | 162 | 0.694559 |
3fffb39e0047b218b9939ad4a6b88417807e3ce7 | 17,935 | py | Python | test/test_viscous.py | nchristensen/mirgecom | f27285d1fc7e077e0b1ac6872712d88517588e33 | [
"MIT"
] | null | null | null | test/test_viscous.py | nchristensen/mirgecom | f27285d1fc7e077e0b1ac6872712d88517588e33 | [
"MIT"
] | null | null | null | test/test_viscous.py | nchristensen/mirgecom | f27285d1fc7e077e0b1ac6872712d88517588e33 | [
"MIT"
] | null | null | null | """Test the viscous fluid helper functions."""
__copyright__ = """
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import numpy.random
import numpy.linalg as la # noqa
import pyopencl.clmath # noqa
import logging
import pytest # noqa
from pytools.obj_array import make_obj_array
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL
import grudge.op as op
from grudge.eager import (
EagerDGDiscretization,
interior_trace_pair
)
from meshmode.array_context import ( # noqa
pytest_generate_tests_for_pyopencl_array_context
as pytest_generate_tests)
from mirgecom.fluid import make_conserved
from mirgecom.transport import (
SimpleTransport,
PowerLawTransport
)
from mirgecom.eos import IdealSingleGas
logger = logging.getLogger(__name__)
# Box grid generator widget lifted from @majosm and slightly bent
def _get_box_mesh(dim, a, b, n, t=None):
    """Build a regular box mesh with boundary faces tagged -x/+x, -y/+y, -z/+z."""
    axis_names = ["x", "y", "z"]
    boundary_tags = {}
    for axis in range(dim):
        # Tag faces by signed axis number, mapped to signed axis name.
        boundary_tags[f"-{axis + 1}"] = [f"-{axis_names[axis]}"]
        boundary_tags[f"+{axis + 1}"] = [f"+{axis_names[axis]}"]
    from meshmode.mesh.generation import generate_regular_rect_mesh as gen
    return gen(a=a, b=b, npoints_per_axis=n, boundary_tag_to_face=boundary_tags,
               mesh_type=t)
def test_species_diffusive_flux(actx_factory):
    """Test species diffusive flux and values against exact.

    Builds linear velocity and species mass-fraction fields on a box mesh so
    that the exact gradients — and therefore the exact diffusive fluxes
    j_alpha = -rho * d_alpha * grad(Y_alpha) — are known in closed form.
    """
    actx = actx_factory()
    dim = 3
    nel_1d = 4
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
        a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
    )
    order = 1
    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())
    zeros = discr.zeros(actx)
    ones = zeros + 1.0
    # assemble velocities for simple, unique grad components
    velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
    velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
    velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
    velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
    # assemble y so that each one has simple, but unique grad components
    nspecies = 2*dim
    y = make_obj_array([ones for _ in range(nspecies)])
    for idim in range(dim):
        ispec = 2*idim
        # Species 2*i is linear in the coordinates; species 2*i+1 is its
        # negative, so the two fluxes of each pair differ only in sign.
        y[ispec] = (ispec+1)*(idim*dim+1)*sum([(iidim+1)*nodes[iidim]
                                               for iidim in range(dim)])
        y[ispec+1] = -y[ispec]
    massval = 2
    mass = massval*ones
    energy = zeros + 2.5
    mom = mass * velocity
    species_mass = mass*y
    cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
                        species_mass=species_mass)
    grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
    mu_b = 1.0
    mu = 0.5
    kappa = 5.0
    # assemble d_alpha so that every species has a unique j
    d_alpha = np.array([(ispec+1) for ispec in range(nspecies)])
    tv_model = SimpleTransport(bulk_viscosity=mu_b, viscosity=mu,
                               thermal_conductivity=kappa,
                               species_diffusivity=d_alpha)
    eos = IdealSingleGas(transport_model=tv_model)
    from mirgecom.viscous import diffusive_flux
    j = diffusive_flux(discr, eos, cv, grad_cv)
    tol = 1e-10
    for idim in range(dim):
        ispec = 2*idim
        # Exact gradient of the linear species-fraction field built above.
        exact_dy = np.array([((ispec+1)*(idim*dim+1))*(iidim+1)
                             for iidim in range(dim)])
        exact_j = -massval * d_alpha[ispec] * exact_dy
        assert discr.norm(j[ispec] - exact_j, np.inf) < tol
        exact_j = massval * d_alpha[ispec+1] * exact_dy
        assert discr.norm(j[ispec+1] - exact_j, np.inf) < tol
def test_diffusive_heat_flux(actx_factory):
    """Test diffusive heat flux and values against exact.

    NOTE(review): this body is a verbatim copy of test_species_diffusive_flux
    above — it calls mirgecom.viscous.diffusive_flux and checks species
    diffusive fluxes only; no heat-flux quantity is actually exercised.
    TODO: replace with a genuine conductive/diffusive heat-flux check.
    """
    actx = actx_factory()
    dim = 3
    nel_1d = 4
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
        a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
    )
    order = 1
    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())
    zeros = discr.zeros(actx)
    ones = zeros + 1.0
    # assemble velocities for simple, unique grad components
    velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
    velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
    velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
    velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
    # assemble y so that each one has simple, but unique grad components
    nspecies = 2*dim
    y = make_obj_array([ones for _ in range(nspecies)])
    for idim in range(dim):
        ispec = 2*idim
        # Linear species fields in +/- pairs, as in the species-flux test.
        y[ispec] = (ispec+1)*(idim*dim+1)*sum([(iidim+1)*nodes[iidim]
                                               for iidim in range(dim)])
        y[ispec+1] = -y[ispec]
    massval = 2
    mass = massval*ones
    energy = zeros + 2.5
    mom = mass * velocity
    species_mass = mass*y
    cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
                        species_mass=species_mass)
    grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
    mu_b = 1.0
    mu = 0.5
    kappa = 5.0
    # assemble d_alpha so that every species has a unique j
    d_alpha = np.array([(ispec+1) for ispec in range(nspecies)])
    tv_model = SimpleTransport(bulk_viscosity=mu_b, viscosity=mu,
                               thermal_conductivity=kappa,
                               species_diffusivity=d_alpha)
    eos = IdealSingleGas(transport_model=tv_model)
    from mirgecom.viscous import diffusive_flux
    j = diffusive_flux(discr, eos, cv, grad_cv)
    tol = 1e-10
    for idim in range(dim):
        ispec = 2*idim
        exact_dy = np.array([((ispec+1)*(idim*dim+1))*(iidim+1)
                             for iidim in range(dim)])
        exact_j = -massval * d_alpha[ispec] * exact_dy
        assert discr.norm(j[ispec] - exact_j, np.inf) < tol
        exact_j = massval * d_alpha[ispec+1] * exact_dy
        assert discr.norm(j[ispec+1] - exact_j, np.inf) < tol
| 34.292543 | 84 | 0.652188 |
b200470663bb7eee02e9c82ffb877d8af91ad93e | 216 | py | Python | aiobotocore_refreshable_credentials/__init__.py | aweber/aiobotocore-refreshable-credentials | 3310d3fa29ac657f7cd5f64829da5f9b12c7a86d | [
"BSD-3-Clause"
] | null | null | null | aiobotocore_refreshable_credentials/__init__.py | aweber/aiobotocore-refreshable-credentials | 3310d3fa29ac657f7cd5f64829da5f9b12c7a86d | [
"BSD-3-Clause"
] | 2 | 2021-05-21T14:18:52.000Z | 2022-03-15T12:34:45.000Z | aiobotocore_refreshable_credentials/__init__.py | aweber/aiobotocore-refreshable-credentials | 3310d3fa29ac657f7cd5f64829da5f9b12c7a86d | [
"BSD-3-Clause"
] | 1 | 2021-06-18T18:37:15.000Z | 2021-06-18T18:37:15.000Z | """
aiobotocore-refreshable-credentials
===================================
"""
from aiobotocore_refreshable_credentials.session import get_session
version = '1.0.3'
__all__ = [
'get_session',
'version'
]
| 15.428571 | 67 | 0.606481 |
b2021676535704ccb7bbd4b21a330bdfa74bae2e | 702 | py | Python | g13gui/bitwidgets/label_tests.py | jtgans/g13gui | aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc | [
"MIT"
] | 3 | 2021-10-16T01:28:24.000Z | 2021-12-07T21:49:54.000Z | g13gui/bitwidgets/label_tests.py | jtgans/g13gui | aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc | [
"MIT"
] | 12 | 2021-05-09T16:57:18.000Z | 2021-06-16T19:20:57.000Z | g13gui/bitwidgets/label_tests.py | jtgans/g13gui | aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc | [
"MIT"
] | null | null | null | import unittest
import time
from g13gui.bitwidgets.display import Display
from g13gui.bitwidgets.x11displaydevice import X11DisplayDevice
from g13gui.bitwidgets.label import Label
if __name__ == '__main__':
unittest.main()
| 22.645161 | 63 | 0.64245 |
b206b349123d73fd230c868195f898309f10c8ec | 7,772 | py | Python | padre/git_utils.py | krislindgren/padre | 56e3342a953fdc472adc11ce301acabf6c595760 | [
"MIT"
] | null | null | null | padre/git_utils.py | krislindgren/padre | 56e3342a953fdc472adc11ce301acabf6c595760 | [
"MIT"
] | null | null | null | padre/git_utils.py | krislindgren/padre | 56e3342a953fdc472adc11ce301acabf6c595760 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# MIT License
#
# Modified from https://github.com/wzpan/git-repo-sync/
import os
import subprocess
import sys
| 34.237885 | 76 | 0.556356 |
b2080b7d4050889b2b37d9d988f89eaa6cb3c1e8 | 11,358 | py | Python | domain_clf_analysis.py | xiaoleihuang/Domain_Adaptation_ACL2018 | c077ceb7f67f1836043df88ac16ffed53cd3a9cb | [
"Apache-2.0"
] | 3 | 2018-06-12T01:43:18.000Z | 2019-10-01T16:21:43.000Z | domain_clf_analysis.py | xiaoleihuang/Domain_Adaptation_ACL2018 | c077ceb7f67f1836043df88ac16ffed53cd3a9cb | [
"Apache-2.0"
] | null | null | null | domain_clf_analysis.py | xiaoleihuang/Domain_Adaptation_ACL2018 | c077ceb7f67f1836043df88ac16ffed53cd3a9cb | [
"Apache-2.0"
] | null | null | null | """
Test on one domain, and train on the other domains,
Output f1 scores and visualize them by heat map
"""
from utils import data_helper, model_helper
from sklearn.metrics import f1_score
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import cross_val_score
import numpy as np
np.random.seed(0)
from pandas import DataFrame
import seaborn as sns
import matplotlib.pyplot as plt
import argparse
def cross_test_domain_clf(dataset, domain2label, data_name=None, balance=False, binary=False):
    """Train a logistic-regression classifier per domain, test on the others.

    The diagonal of the result matrix holds the 5-fold cross-validation score
    within the training domain itself; off-diagonal cells hold the weighted-F1
    score of the model trained on the row domain and tested on the column
    domain.

    Args:
        dataset: iterable of rows shaped (text, ..., domain, label); the text
            is item[0], the domain item[-2], and the label item[-1].
        domain2label: callable mapping (raw_domain, data_name) to the display
            label used for the result matrix axes.
        data_name: data-source name forwarded to domain2label.
        balance: if True, random-oversample the training data.
        binary: if True, use binary term presence instead of tf-idf weights.

    Returns:
        pandas.DataFrame of float weighted-F1 scores, indexed by training
        domain (rows) and test domain (columns).
    """
    uniq_domains = sorted(list(set([item[-2] for item in dataset])))
    axis_labels = [domain2label(item, data_name) for item in uniq_domains]
    results = DataFrame([[0.0] * len(uniq_domains)] * len(uniq_domains),
                        index=axis_labels, columns=axis_labels)
    print(uniq_domains)
    # loop through each domain, treating it as the training domain
    for domain in uniq_domains:
        train_x = []
        train_y = []
        for item in dataset:
            if domain == item[-2]:
                train_x.append(item[0])
                train_y.append(item[-1])
        label_encoder = LabelEncoder()
        # Large corpora are pre-tokenized (whitespace split, unigrams only);
        # smaller ones get 1-3 grams. The threshold is the length of the
        # largest sampled corpus ("./yelp/yelp_Hotels_year_sample.tsv" - 1000).
        if len(dataset) > 15469:
            if not binary:
                vectorizer = TfidfVectorizer(min_df=2, tokenizer=lambda x: x.split())
            else:
                vectorizer = TfidfVectorizer(min_df=2, tokenizer=lambda x: x.split(),
                                             binary=True, use_idf=False, smooth_idf=False)
        else:
            if not binary:
                vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=2)
            else:
                vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 3),
                                             binary=True, use_idf=False, smooth_idf=False)
        # encode the data
        train_y = label_encoder.fit_transform(train_y)
        train_x = vectorizer.fit_transform(train_x)
        if balance:
            random_sampler = RandomOverSampler(random_state=0)
            train_x, train_y = random_sampler.fit_sample(train_x, train_y)
        clf = model_helper.build_lr_clf()
        clf.fit(train_x, train_y)
        # Diagonal cell: 5-fold cross-validation within the training domain.
        train_label = domain2label(domain, data_name)
        results.loc[train_label, train_label] = np.mean(
            cross_val_score(model_helper.build_lr_clf(),
                            train_x, train_y, cv=5,
                            scoring='f1_weighted')
        )
        train_x = None
        train_y = None
        # test and evaluation on every other domain
        for test_domain in [item for item in uniq_domains if item != domain]:
            if int(test_domain) == int(domain):
                continue
            test_x = []
            test_y = []
            for item in dataset:
                if test_domain == item[-2]:
                    test_x.append(item[0])
                    test_y.append(item[-1])
            # encode with the *training* domain's encoder/vectorizer
            test_y = label_encoder.transform(test_y)
            test_x = vectorizer.transform(test_x)
            # Fix: store the score as a float. The original wrapped it in
            # str(), producing an object-dtype frame that breaks numeric
            # formatting and vmin computation downstream (e.g. in heatmaps).
            score = f1_score(y_true=test_y, y_pred=clf.predict(test_x),
                             average='weighted')
            results.loc[domain2label(domain, data_name),
                        domain2label(test_domain, data_name)] = score
            test_x = None
            test_y = None
    print(results)
    return results
def viz_perform(df, title, outpath='./image/output.pdf'):
    """Render *df* as an annotated heatmap and save it as a PDF.

    Args:
        df: pandas DataFrame of scores (train domains vs test domains).
        title: plot title.
        outpath: destination file path (saved in PDF format).
    """
    a4_dims = (11.7, 11.27)
    figure, axis = plt.subplots(figsize=a4_dims)
    sns.set(font_scale=1.2)
    heatmap = sns.heatmap(df, annot=True, cbar=False, ax=axis,
                          annot_kws={"size": 24}, cmap="YlGnBu",
                          vmin=df.values.min(), fmt='.3f')
    plt.xticks(rotation=20, fontsize=25)
    plt.xlabel('Train', fontsize=25)
    plt.ylabel('Test', fontsize=25)
    plt.title(title, fontsize=25)
    heatmap.get_figure().savefig(outpath, format='pdf')
    plt.close()
if __name__ == '__main__':
    # One entry per corpus:
    # (month-sampled file, year-sampled file, output name, plot title).
    # A None month file means the corpus only has a year-level sample.
    file_list = [
        ('./data/vaccine/vaccine_month_sample.tsv', './data/vaccine/vaccine_year_sample.tsv', 'vaccine', 'Twitter data - vaccine'),
        ('./data/amazon/amazon_month_sample.tsv', './data/amazon/amazon_year_sample.tsv', 'amazon', 'Reviews data - music'),
        ('./data/yelp/yelp_Hotels_month_sample.tsv', './data/yelp/yelp_Hotels_year_sample.tsv', 'yelp_hotel', 'Reviews data - hotels'),
        (None, './data/parties/parties_year_sample.tsv', 'parties', 'Politics - US political data'),
        ('./data/economy/economy_month_sample.tsv', './data/economy/economy_year_sample.tsv', 'economy', 'News data - economy'),
        ('./data/yelp/yelp_Restaurants_month_sample.tsv', './data/yelp/yelp_Restaurants_year_sample.tsv', 'yelp_rest', 'Reviews data - restaurants'),
    ]
    for pair in file_list:
        print(pair)
        # Binary (term-presence) features are currently disabled; only tf-idf
        # is run. Add True back to the list to re-enable them.
        for is_binary in [False]:
            month_file = pair[0]
            year_file = pair[1]
            output = pair[2]
            # Evaluate domain transfer across months (balanced training data).
            if month_file:
                dataset = data_helper.load_data(month_file)
                print('Test on balanced data')
                test_balance = cross_test_domain_clf(dataset, domain2month, data_name=None,
                                                     balance=True, binary=is_binary)
                test_balance.to_csv('./tmp/' + output + '_month.tsv', sep='\t')
                viz_perform(test_balance, pair[3],
                            './image/' + output + '/cross_clf_balance_month_' + str(is_binary) + '.pdf')
                test_balance = None
            # Evaluate domain transfer across years (balanced training data).
            if year_file:
                dataset = data_helper.load_data(year_file)
                print('Test on balanced data')
                test_balance = cross_test_domain_clf(dataset, domain2year, data_name=output,
                                                     balance=True, binary=is_binary)
                test_balance.to_csv('./tmp/' + output + '_year.tsv', sep='\t')
                viz_perform(test_balance, pair[3],
                            './image/' + output + '/cross_clf_balance_year_' + str(is_binary) + '.pdf')
                test_balance = None
| 39.992958 | 199 | 0.588044 |
b209d756a7a9dd9b0a6aa608dc616fb5501e9ff4 | 219 | py | Python | 01 - Expressions, variables and assignments/exercises/perimeter-of-rectangle.py | PableraShow/python-exercises | e1648fd42f3009ec6fb1e2096852b6d399e91d5b | [
"MIT"
] | 8 | 2018-10-01T17:35:57.000Z | 2022-02-01T08:12:12.000Z | 01 - Expressions, variables and assignments/exercises/perimeter-of-rectangle.py | PableraShow/python-exercises | e1648fd42f3009ec6fb1e2096852b6d399e91d5b | [
"MIT"
] | null | null | null | 01 - Expressions, variables and assignments/exercises/perimeter-of-rectangle.py | PableraShow/python-exercises | e1648fd42f3009ec6fb1e2096852b6d399e91d5b | [
"MIT"
] | 6 | 2018-07-22T19:15:21.000Z | 2022-02-05T07:54:58.000Z | """
Prints the length in inches of the perimeter of a rectangle
with sides of length 4 and 7 inches.
"""
# Rectangle perimeter formula
length = 4
inches = 7
perimeter = 2 * length + 2 * inches
# Output
print perimeter | 18.25 | 59 | 0.726027 |
b20aba712b1ab01e3fb65465b63bc20687698132 | 123 | py | Python | x_3_4.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | null | null | null | x_3_4.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | 1 | 2021-11-13T08:03:04.000Z | 2021-11-13T08:03:04.000Z | x_3_4.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | null | null | null | # x_3_4
#
# mathfloor
from statistics import mean
# Sample values; statistics.mean prints their arithmetic mean (5.75).
data = [7, 4, 3, 9]
print(mean(data))
| 12.3 | 43 | 0.739837 |
b20c24ef9d6d64b2c1eb48b70a055569f3cf0291 | 690 | py | Python | 2018/21/reverse_engineered.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | 2018/21/reverse_engineered.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | 2018/21/reverse_engineered.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
print(simulate(7041048, True))
print(simulate(7041048, False))
| 22.258065 | 65 | 0.401449 |
b20ca1a11af5328342bece8c8b28ae8ca5c425a2 | 7,025 | py | Python | pybilt/lipid_grid/lipid_grid_curv.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 11 | 2019-07-29T16:21:53.000Z | 2022-02-02T11:44:57.000Z | pybilt/lipid_grid/lipid_grid_curv.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 11 | 2019-05-15T09:30:05.000Z | 2021-07-19T16:49:59.000Z | pybilt/lipid_grid/lipid_grid_curv.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 9 | 2019-08-12T11:14:45.000Z | 2020-12-22T18:22:55.000Z | '''
Classes and functions to implement gridding and curvature correlation analysis for lipid bilayers.
The gridding and anlaysis procedures are based on
the decription given in section "Correlation between bilayer surface curvature and the
clustering of lipid molecules" of Koldso H, Shorthouse D, He lie J, Sansom MSP (2014)
Lipid Clustering Correlates with Membrane Curvature as Revealed by Molecular Simulations of
Complex Lipid Bilayers. PLoS Comput Biol 10(10): e1003911. doi:10.1371/journal.pcbi.1003911
However, this implementation currently uses the z position (or normal position) of the lipids' centers of mass, while
their implementaion uses "the z coordinate of the interface between the head groups of the
lipids (excluding the current species being calculated and tails in
that box."
'''
import numpy as np
from six.moves import range
| 39.914773 | 117 | 0.540641 |
b20cc44e10c5f1d7b1d539469ba4792e3e3334fc | 492 | py | Python | security.py | Raghav714/intruder-alarm | c27825e5b483b6dc18704e0da76500b348174432 | [
"MIT"
] | 4 | 2018-10-02T06:37:50.000Z | 2021-10-31T16:41:59.000Z | security.py | Raghav714/intruder-alarm | c27825e5b483b6dc18704e0da76500b348174432 | [
"MIT"
] | null | null | null | security.py | Raghav714/intruder-alarm | c27825e5b483b6dc18704e0da76500b348174432 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import pygame
# Motion-detection alarm: capture from the default webcam, model the
# background with MOG2, and play an alarm sound when the foreground mask
# shows substantial change. Press ESC to stop.
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
pygame.mixer.init()
pygame.mixer.music.load("1.mp3")
while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    # High std-dev of the foreground mask => many changed pixels => motion.
    # The threshold 50 is empirical — tune per camera/lighting.
    flag = np.std(fgmask)
    if flag>50:
        print("some one came")
        pygame.mixer.music.play()
    # NOTE(review): the window titles appear swapped — the window named
    # 'fgmask' shows the raw frame and 'frame' shows the mask.
    cv2.imshow('fgmask',frame)
    cv2.imshow('frame',fgmask)
    # Wait 30 ms for a key; 27 is the ESC key code.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        pygame.mixer.music.stop()
        break
cap.release()
cv2.destroyAllWindows()
b20cdd9f8c550b03afcaa9236a4a608b7379d8bd | 453 | py | Python | pygeos/measurements.py | jorisvandenbossche/pygeos | 0a25af4ae1c96d11752318d2755f4f3342611b17 | [
"BSD-3-Clause"
] | null | null | null | pygeos/measurements.py | jorisvandenbossche/pygeos | 0a25af4ae1c96d11752318d2755f4f3342611b17 | [
"BSD-3-Clause"
] | null | null | null | pygeos/measurements.py | jorisvandenbossche/pygeos | 0a25af4ae1c96d11752318d2755f4f3342611b17 | [
"BSD-3-Clause"
] | null | null | null | from . import ufuncs
__all__ = ["area", "distance", "length", "hausdorff_distance"]
| 19.695652 | 63 | 0.697572 |
b20ea0b58e52db3ee0246fdb58558d2834cf2129 | 9,539 | py | Python | naff/models/naff/extension.py | Discord-Snake-Pit/dis_snek | 45748467838b31d871a7166dbeb3aaa238ad94e3 | [
"MIT"
] | 64 | 2021-10-12T15:31:36.000Z | 2022-03-29T18:25:47.000Z | naff/models/naff/extension.py | Discord-Snake-Pit/dis_snek | 45748467838b31d871a7166dbeb3aaa238ad94e3 | [
"MIT"
] | 166 | 2021-10-10T16:27:52.000Z | 2022-03-30T09:04:54.000Z | naff/models/naff/extension.py | Discord-Snake-Pit/dis_snek | 45748467838b31d871a7166dbeb3aaa238ad94e3 | [
"MIT"
] | 34 | 2021-10-10T13:26:41.000Z | 2022-03-23T13:59:35.000Z | import asyncio
import inspect
import logging
from typing import Awaitable, List, TYPE_CHECKING, Callable, Coroutine, Optional
import naff.models.naff as naff
from naff.client.const import logger_name, MISSING
from naff.client.utils.misc_utils import wrap_partial
from naff.models.naff.tasks import Task
if TYPE_CHECKING:
from naff.client import Client
from naff.models.naff import AutoDefer, BaseCommand, Listener
from naff.models.naff import Context
log = logging.getLogger(logger_name)
__all__ = ("Extension",)
def add_ext_auto_defer(self, ephemeral: bool = False, time_until_defer: float = 0.0) -> None:
    """
    Enable an auto-defer for all commands in this extension.
    Note:
        This replaces any previously configured extension-level auto defer.
    Args:
        ephemeral: Should the command be deferred as ephemeral
        time_until_defer: How long to wait before deferring automatically
            (presumably seconds — confirm against naff.AutoDefer)
    """
    self.auto_defer = naff.AutoDefer(enabled=True, ephemeral=ephemeral, time_until_defer=time_until_defer)
def add_ext_check(self, coroutine: Callable[["Context"], Awaitable[bool]]) -> None:
    """
    Register a coroutine that every command in this extension must pass
    before it is allowed to run.

    The callable receives only the invocation ``Context`` and should return
    a truthy value to let the command proceed.

    Args:
        coroutine: The coroutine to use as a check

    Raises:
        TypeError: If the supplied callable is not a coroutine function.
    """
    if not asyncio.iscoroutinefunction(coroutine):
        raise TypeError("Check must be a coroutine")
    # Lazily create the check list on first registration.
    checks = self.extension_checks or []
    checks.append(coroutine)
    self.extension_checks = checks
def add_extension_prerun(self, coroutine: Callable[..., Coroutine]) -> None:
    """
    Register a coroutine to run **before** every command in this Extension.

    Note:
        Pre-runs execute only when the command's checks have passed.

    Args:
        coroutine: The coroutine to run

    Raises:
        TypeError: If the supplied callable is not a coroutine function.
    """
    if not asyncio.iscoroutinefunction(coroutine):
        raise TypeError("Callback must be a coroutine")
    # Lazily create the hook list on first registration.
    hooks = self.extension_prerun or []
    hooks.append(coroutine)
    self.extension_prerun = hooks
def add_extension_postrun(self, coroutine: Callable[..., Coroutine]) -> None:
    """
    Register a coroutine to run **after** every command in this Extension.

    Args:
        coroutine: The coroutine to run

    Raises:
        TypeError: If the supplied callable is not a coroutine function.
    """
    if not asyncio.iscoroutinefunction(coroutine):
        raise TypeError("Callback must be a coroutine")
    # Lazily create the hook list on first registration.
    hooks = self.extension_postrun or []
    hooks.append(coroutine)
    self.extension_postrun = hooks
def set_extension_error(self, coroutine: Callable[..., Coroutine]) -> None:
    """
    Set the coroutine used to handle any exceptions raised in this extension.
    ??? Hint "Example Usage:"
        ```python
        def __init__(self, bot):
            self.set_extension_error(self.example)
        ```
    Note:
        Only one error callback can be registered; setting a new one
        replaces the previous callback and logs a warning.
    Args:
        coroutine: The coroutine to run
    Raises:
        TypeError: If the supplied callable is not a coroutine function.
    """
    if not asyncio.iscoroutinefunction(coroutine):
        raise TypeError("Callback must be a coroutine")
    if self.extension_error:
        log.warning("Extension error callback has been overridden!")
    self.extension_error = coroutine
| 35.726592 | 200 | 0.605095 |
b20eabd7816b307c80c7a57deaf784b914a0c831 | 2,619 | py | Python | model/State.py | BrandonTheBuilder/thermawesome | b2f2cb95e1181f05a112193be11baa18e10d39b1 | [
"MIT"
] | null | null | null | model/State.py | BrandonTheBuilder/thermawesome | b2f2cb95e1181f05a112193be11baa18e10d39b1 | [
"MIT"
] | null | null | null | model/State.py | BrandonTheBuilder/thermawesome | b2f2cb95e1181f05a112193be11baa18e10d39b1 | [
"MIT"
] | null | null | null | from CoolProp import CoolProp as CP
| 33.576923 | 82 | 0.544101 |
b20ed9c65d8b7c88f2047aafe3f3e3d7c3016629 | 2,401 | py | Python | dashboard_api/widget_def/migrations/0059_auto_20160701_0929.py | data61/Openboard | aaf7ef49e05c0771094efc6be811c6ae88055252 | [
"Apache-2.0"
] | 2 | 2017-08-29T23:05:51.000Z | 2019-04-02T21:11:35.000Z | dashboard_api/widget_def/migrations/0059_auto_20160701_0929.py | data61/Openboard | aaf7ef49e05c0771094efc6be811c6ae88055252 | [
"Apache-2.0"
] | 1 | 2019-04-02T21:11:26.000Z | 2019-04-03T15:12:57.000Z | dashboard_api/widget_def/migrations/0059_auto_20160701_0929.py | data61/Openboard | aaf7ef49e05c0771094efc6be811c6ae88055252 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-30 23:29
from __future__ import unicode_literals
from django.db import migrations, models
| 28.247059 | 110 | 0.549354 |
b74658fcd0b086ae391a31278701946a2e7748a0 | 7,649 | py | Python | ngraph/python/tests/test_ngraph/test_ops_reshape.py | mnosov/openvino | c52c4916be0369f092f7da6c162b6c61c37c08d7 | [
"Apache-2.0"
] | null | null | null | ngraph/python/tests/test_ngraph/test_ops_reshape.py | mnosov/openvino | c52c4916be0369f092f7da6c162b6c61c37c08d7 | [
"Apache-2.0"
] | 21 | 2021-02-16T13:02:05.000Z | 2022-02-21T13:05:06.000Z | ngraph/python/tests/test_ngraph/test_ops_reshape.py | mmakridi/openvino | 769bb7709597c14debdaa356dd60c5a78bdfa97e | [
"Apache-2.0"
] | null | null | null | # ******************************************************************************
# Copyright 2017-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node, run_op_numeric_data
from tests import xfail_issue_40957
def test_broadcast_numpy():
    """Default (NUMPY-mode) broadcast yields a single-output Broadcast node."""
    data = ng.parameter([16, 1, 1], name="Data", dtype=np.float32)
    target_shape = ng.parameter([4], name="Target_shape", dtype=np.int64)
    node = ng.broadcast(data, target_shape)
    assert node.get_type_name() == "Broadcast"
    assert node.get_output_size() == 1
def test_broadcast_bidirectional():
    """Broadcast in BIDIRECTIONAL mode also yields a single-output Broadcast node."""
    data = ng.parameter([16, 1, 1], name="Data", dtype=np.float32)
    target_shape = ng.parameter([4], name="Target_shape", dtype=np.int64)
    node = ng.broadcast(data, target_shape, "BIDIRECTIONAL")
    assert node.get_type_name() == "Broadcast"
    assert node.get_output_size() == 1
def test_gather():
    """Gathering columns 0 and 2 along axis 1 of a 3x3 matrix matches the expected slice."""
    data = np.array(
        [[1.0, 1.1, 1.2], [2.0, 2.1, 2.2], [3.0, 3.1, 3.2]], dtype=np.float32
    )
    indices = np.array([[0, 2]], dtype=np.int32)
    axes = np.array([1], dtype=np.int32)
    expected = np.array(
        [[[1.0, 1.2]], [[2.0, 2.2]], [[3.0, 3.2]]], dtype=np.float32
    )
    result = run_op_node([data], ng.gather, indices, axes)
    assert np.allclose(result, expected)
def test_transpose():
    """Transposing with axis order (0, 2, 3, 1) must agree with numpy.transpose."""
    tensor = np.arange(3 * 3 * 224 * 224, dtype=np.int32).reshape(3, 3, 224, 224)
    axes_order = np.array([0, 2, 3, 1], dtype=np.int32)
    result = run_op_node([tensor], ng.transpose, axes_order)
    assert np.allclose(result, np.transpose(tensor, axes_order))
def test_reshape_v1():
    """Reshape with special_zero=True: a 0 keeps the input dim, -1 is inferred."""
    tensor = np.arange(1200, dtype=np.float32).reshape(2, 5, 5, 24)
    target_shape = np.array([0, -1, 4], dtype=np.int32)
    # with input (2, 5, 5, 24): 0 -> 2, -1 -> 150 inferred, 4 kept
    expected = tensor.reshape(2, 150, 4)
    result = run_op_node([tensor], ng.reshape, target_shape, True)
    assert np.allclose(result, expected)
def test_shape_of():
    """shape_of on a 3x3 matrix yields [3, 3]."""
    tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
    assert np.allclose(run_op_node([tensor], ng.shape_of), [3, 3])
| 32.969828 | 109 | 0.651458 |
b746b8cda074f334edc7ccba71a84d7a2cd55be1 | 1,980 | py | Python | malwarescan/wsclient.py | lbahtarliev/MalwareScan | 495e2fd3ceb3498c651ddd360a4cc2eb9571a10b | [
"Unlicense"
] | 3 | 2018-12-06T03:09:16.000Z | 2021-02-25T01:13:05.000Z | malwarescan/wsclient.py | lbahtarliev/MalwareScan | 495e2fd3ceb3498c651ddd360a4cc2eb9571a10b | [
"Unlicense"
] | 9 | 2018-12-10T18:44:14.000Z | 2019-02-06T21:13:31.000Z | malwarescan/wsclient.py | lbahtarliev/MalwareScan | 495e2fd3ceb3498c651ddd360a4cc2eb9571a10b | [
"Unlicense"
] | 4 | 2019-06-04T13:46:24.000Z | 2021-02-25T02:23:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import ssl
import click
from json.decoder import JSONDecodeError
from websocket import WebSocketException
from websocket import WebSocketConnectionClosedException
from websocket import create_connection
from datetime import datetime as dtime
from .app import create_app
flask_app = create_app()
| 33 | 92 | 0.59899 |
b748129a257264ee78fbb33c2f52b2552698dcea | 2,418 | py | Python | CalibTracker/SiStripCommon/python/theBigNtuple_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CalibTracker/SiStripCommon/python/theBigNtuple_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CalibTracker/SiStripCommon/python/theBigNtuple_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from CalibTracker.SiStripCommon.ShallowEventDataProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowDigisProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowTrackClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowRechitClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowTracksProducer_cfi import *
from RecoVertex.BeamSpotProducer.BeamSpot_cff import *
from RecoTracker.TrackProducer.TrackRefitters_cff import *
bigNtupleTrackCollectionTag = cms.InputTag("bigNtupleTracksRefit")
bigNtupleClusterCollectionTag = cms.InputTag("siStripClusters")
bigNtupleTracksRefit = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone(src = "generalTracks")
bigNtupleEventRun = shallowEventRun.clone()
bigNtupleDigis = shallowDigis.clone()
bigNtupleClusters = shallowClusters.clone(Clusters=bigNtupleClusterCollectionTag)
bigNtupleRecHits = shallowRechitClusters.clone(Clusters=bigNtupleClusterCollectionTag)
bigNtupleTrackClusters = shallowTrackClusters.clone(Tracks = bigNtupleTrackCollectionTag,Clusters=bigNtupleClusterCollectionTag)
bigNtupleTracks = shallowTracks.clone(Tracks = bigNtupleTrackCollectionTag)
bigShallowTree = cms.EDAnalyzer("ShallowTree",
outputCommands = cms.untracked.vstring(
'drop *',
'keep *_bigNtupleEventRun_*_*',
'keep *_bigNtupleDigis_*_*',
'keep *_bigNtupleClusters_*_*' ,
'keep *_bigNtupleRechits_*_*',
'keep *_bigNtupleTracks_*_*',
'keep *_bigNtupleTrackClusters_*_*'
)
)
from Configuration.StandardSequences.RawToDigi_Data_cff import *
from Configuration.StandardSequences.Reconstruction_cff import *
theBigNtuple = cms.Sequence( ( siPixelRecHits+siStripMatchedRecHits +
offlineBeamSpot +
bigNtupleTracksRefit)
* (bigNtupleEventRun +
bigNtupleClusters +
bigNtupleRecHits +
bigNtupleTracks +
bigNtupleTrackClusters
)
)
theBigNtupleDigi = cms.Sequence( siStripDigis + bigNtupleDigis )
| 43.178571 | 130 | 0.700165 |
b748865dafd57226e01bad7504ce06ab355e363a | 75 | py | Python | anti_freeze/__main__.py | Donluigimx/anti-freeze | 03699e5c4f82ccd06f37b4e8b51da22cc5841b57 | [
"MIT"
] | null | null | null | anti_freeze/__main__.py | Donluigimx/anti-freeze | 03699e5c4f82ccd06f37b4e8b51da22cc5841b57 | [
"MIT"
] | null | null | null | anti_freeze/__main__.py | Donluigimx/anti-freeze | 03699e5c4f82ccd06f37b4e8b51da22cc5841b57 | [
"MIT"
] | null | null | null | if __name__ == '__main__':
from .system import MyApp
MyApp().run()
| 18.75 | 29 | 0.626667 |
b74908cfbdafb8fdf6ed4e638d485501633fe75d | 18,656 | py | Python | classic_NN/nn.py | disooqi/learning-machine-learning | 5fcef0a18f0c2e9aeab4abf45b968eb6ca5ba463 | [
"MIT"
] | 1 | 2020-09-30T18:09:51.000Z | 2020-09-30T18:09:51.000Z | classic_NN/nn.py | disooqi/learning-machine-learning | 5fcef0a18f0c2e9aeab4abf45b968eb6ca5ba463 | [
"MIT"
] | null | null | null | classic_NN/nn.py | disooqi/learning-machine-learning | 5fcef0a18f0c2e9aeab4abf45b968eb6ca5ba463 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.special import expit, logit
import time
import logging
np.random.seed(4) # 4
logger = logging.getLogger(__name__)
fr = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
sh = logging.StreamHandler()
# sh.setFormatter(fr)
logger.addHandler(sh)
logger.setLevel(logging.DEBUG)
logger2 = logging.getLogger('other')
file_handler = logging.FileHandler('run.log')
file_handler.setFormatter(fr)
file_handler.setLevel(logging.INFO)
logger2.addHandler(file_handler)
logger2.setLevel(logging.INFO)
class NN:
def __init__(self, n_features, n_classes):
self.n = n_features
self.n_classes = n_classes
self.layers = list()
class Optimization:
def __init__(self, loss='cross_entropy', method='gradient-descent'):
self.method = method
self.VsnSs = list()
if loss == 'cross_entropy':
self.loss = self.cross_entropy_loss
self.activation_prime = self.cross_entropy_loss_prime
if method == 'gradient-descent':
self.optimizer = self.gradient_descent
elif method == 'gd-with-momentum':
self.optimizer = self.gradient_descent_with_momentum
elif method == 'rmsprop':
self.optimizer = self.RMSprop
elif method == 'adam':
self.optimizer = self.adam
def discrete_staircase_learning_rate_decay(self):
pass
def cost(self, network, X, y, lmbda=0):
A = X
for layer in network.layers:
Z = np.dot(layer.W, A) + layer.b
A = layer.activation(Z)
else:
loss_matrix = self.loss(y, A)
sum_over_all_examples = np.sum(loss_matrix, axis=1) / loss_matrix.shape[1]
return (np.sum(sum_over_all_examples) / sum_over_all_examples.size) + self.regularization_term(network,
X.shape[1],
lmbda=lmbda)
def _update_weights(self, X, y, network, alpha, lmbda, t, beta1, beta2, decay_rate, epoch_num):
A = X
for layer in network.layers:
layer.A_l_1 = A # this is A-1 from last loop step
Z = np.dot(layer.W, A) + layer.b # (called "logits" in ML folklore)
A = layer.activation(Z)
# NB! we don't not apply dropout to the input layer or output layer.
D = np.random.rand(*A.shape) <= layer.keep_prob # dropout
A = np.multiply(A, D) / layer.keep_prob # inverted dropout
layer.D = D
layer.A = A
with np.errstate(invalid='raise'):
try:
dLdA = self.activation_prime(y, A)
except FloatingPointError:
raise
# To avoid the confusion: reversed() doesn't modify the list. reversed() doesn't make a copy of the list
# (otherwise it would require O(N) additional memory). If you need to modify the list use alist.reverse(); if
# you need a copy of the list in reversed order use alist[::-1]
for l, layer, VsnSs in zip(range(len(network.layers), 0, -1), reversed(network.layers), reversed(self.VsnSs)):
dLdA, dJdW, dJdb = network._calculate_single_layer_gradients(dLdA, layer, compute_dLdA_1=(l > 1))
layer.W, layer.b = self.optimizer(dJdW, dJdb, layer.W, layer.b, X.shape[1], alpha=alpha, lmbda=lmbda,
VS=VsnSs, beta1=beta1, beta2=beta2, t=t, decay_rate=decay_rate, epoch=epoch_num)
if __name__ == '__main__':
pass
| 39.609342 | 187 | 0.582547 |
b749f4714d0c5e5ad919fdd5ae7b07a02ccd8628 | 71 | py | Python | sensorAtlas/__init__.py | iosefa/pyMatau | 7b3f768db578771ba55a912bc4a9b8be58619070 | [
"MIT"
] | 2 | 2021-05-28T10:26:17.000Z | 2021-07-03T03:11:22.000Z | sensorAtlas/__init__.py | iosefa/pyMatau | 7b3f768db578771ba55a912bc4a9b8be58619070 | [
"MIT"
] | 2 | 2020-11-19T00:51:19.000Z | 2020-11-19T01:18:03.000Z | sensorAtlas/__init__.py | sensoratlas/sensoratlas | 7b3f768db578771ba55a912bc4a9b8be58619070 | [
"MIT"
] | 1 | 2019-10-10T14:03:42.000Z | 2019-10-10T14:03:42.000Z | # app config
default_app_config = 'sensorAtlas.apps.sensorAtlasConfig'
| 23.666667 | 57 | 0.830986 |
b74a328698a70e0b159b7d2e8ddf8ec1e64183ed | 376 | py | Python | api/urls.py | yasminfarza/country-state-address-api | 39c8d349095dcca4f2411f7097497d6a8f39c1e1 | [
"MIT"
] | 4 | 2021-06-06T14:16:33.000Z | 2021-06-09T03:42:11.000Z | api/urls.py | yasminfarza/country-state-address-api | 39c8d349095dcca4f2411f7097497d6a8f39c1e1 | [
"MIT"
] | null | null | null | api/urls.py | yasminfarza/country-state-address-api | 39c8d349095dcca4f2411f7097497d6a8f39c1e1 | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from api import views
router = DefaultRouter()
router.register('countries', views.CountryViewSet)
router.register('states/(?P<country>[^/.]+)', views.StateViewSet)
router.register('addresses', views.AddressViewSet)
app_name = 'api'
urlpatterns = [
path('', include(router.urls))
]
| 23.5 | 65 | 0.755319 |
b74a946738ed6712ecf1be81551ad79c1bd928a1 | 1,401 | py | Python | tests/test_protocol.py | gimbas/openinput | 9cbb4b22aebe46dfc33ae9c56b164baa6c1fe693 | [
"MIT"
] | 38 | 2020-05-11T10:54:15.000Z | 2022-03-30T13:19:09.000Z | tests/test_protocol.py | gimbas/openinput | 9cbb4b22aebe46dfc33ae9c56b164baa6c1fe693 | [
"MIT"
] | 45 | 2020-04-21T23:52:22.000Z | 2022-02-19T20:29:27.000Z | tests/test_protocol.py | gimbas/openinput | 9cbb4b22aebe46dfc33ae9c56b164baa6c1fe693 | [
"MIT"
] | 5 | 2020-08-29T02:10:42.000Z | 2021-08-31T03:12:15.000Z | # SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Filipe Lans <lains@riseup.net>
| 32.581395 | 95 | 0.712348 |
b74acbae89490d10494c82735b42d81274199ebb | 4,314 | py | Python | zaqar-8.0.0/zaqar/storage/sqlalchemy/driver.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 97 | 2015-01-02T09:35:23.000Z | 2022-03-25T00:38:45.000Z | zaqar-8.0.0/zaqar/storage/sqlalchemy/driver.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | zaqar-8.0.0/zaqar/storage/sqlalchemy/driver.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 44 | 2015-01-28T03:01:28.000Z | 2021-05-13T18:55:19.000Z | # Copyright (c) 2013 Red Hat, Inc.
# Copyright 2014 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from oslo_db.sqlalchemy import engines
from osprofiler import profiler
from osprofiler import sqlalchemy as sa_tracer
import sqlalchemy as sa
from zaqar.common import decorators
from zaqar.conf import drivers_management_store_sqlalchemy
from zaqar import storage
from zaqar.storage.sqlalchemy import controllers
| 36.871795 | 79 | 0.653454 |
b74c264ab951da49d482e8b5b2b953e6b1285a3b | 792 | py | Python | tests/explainers/test_explainer.py | zduey/shap | 1bb8203f2d43f7552396a5f26167a258cbdc505c | [
"MIT"
] | 1 | 2021-03-03T11:00:32.000Z | 2021-03-03T11:00:32.000Z | tests/explainers/test_explainer.py | zduey/shap | 1bb8203f2d43f7552396a5f26167a258cbdc505c | [
"MIT"
] | null | null | null | tests/explainers/test_explainer.py | zduey/shap | 1bb8203f2d43f7552396a5f26167a258cbdc505c | [
"MIT"
] | null | null | null | """ Tests for Explainer class.
"""
import pytest
import shap
def test_wrapping_for_text_to_text_teacher_forcing_logits_model():
""" This tests using the Explainer class to auto choose a text to text setup.
"""
transformers = pytest.importorskip("transformers")
tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
model = transformers.AutoModelForCausalLM.from_pretrained("gpt2")
wrapped_model = shap.models.TeacherForcingLogits(f, similarity_model=model, similarity_tokenizer=tokenizer)
masker = shap.maskers.Text(tokenizer, mask_token="...")
explainer = shap.Explainer(wrapped_model, masker)
assert shap.utils.safe_isinstance(explainer.masker, "shap.maskers.FixedComposite")
| 31.68 | 111 | 0.753788 |
b74d8e9763f51be71d9332444a4477006848a8de | 1,301 | py | Python | main/urls.py | guinslym/django-Django-Code-Review-CodeEntrepreneurs | 2ad9bd3d352f7eba46e16a7bf24e06b809049d62 | [
"BSD-3-Clause"
] | 2 | 2017-07-31T13:52:40.000Z | 2017-09-19T15:07:09.000Z | main/urls.py | guinslym/Django-Code-Review-CodeEntrepreneurs | 2ad9bd3d352f7eba46e16a7bf24e06b809049d62 | [
"BSD-3-Clause"
] | null | null | null | main/urls.py | guinslym/Django-Code-Review-CodeEntrepreneurs | 2ad9bd3d352f7eba46e16a7bf24e06b809049d62 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib import admin
from django.conf.urls.static import static
from django.conf.urls.i18n import i18n_patterns
from django.views.decorators.cache import cache_page
from django.conf.urls import url, include, handler404, handler500
admin.autodiscover()
from applications.elearning.views.general import robot_files
urlpatterns = [
#Robot and Humans.txt
url(
r'^(?P<filename>(robots.txt)|(humans.txt))$',
robot_files,
name='home-files'
),
#Main application
url(
r'^elearning/',
include(
'applications.elearning.urls',
namespace="elearning"
)
),
url(r'^', include('applications.elearning.urls')),
#admin
url(r'^admin/',
include('admin_honeypot.urls',
namespace='admin_honeypot')
),
url(r'^ilovemyself/', include(admin.site.urls)),
url(r'^accounts/', include('allauth.urls')),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
#handler404 = 'applications.elearning.views.views_general.handler404'
#handler500 = 'applications.elearning.views.views_general.handler500'
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
] | 27.104167 | 69 | 0.687164 |
b74eef5240ddb793f5798e460265805a101c2233 | 486 | py | Python | examples/simpleform/app/forms.py | ezeev/Flask-AppBuilder | d95f0ed934272629ee44ad3241646fa7ba09cdf8 | [
"BSD-3-Clause"
] | 71 | 2016-11-02T06:45:42.000Z | 2021-11-15T12:33:48.000Z | examples/simpleform/app/forms.py | ezeev/Flask-AppBuilder | d95f0ed934272629ee44ad3241646fa7ba09cdf8 | [
"BSD-3-Clause"
] | 3 | 2021-06-08T23:39:54.000Z | 2022-03-12T00:50:13.000Z | examples/simpleform/app/forms.py | ezeev/Flask-AppBuilder | d95f0ed934272629ee44ad3241646fa7ba09cdf8 | [
"BSD-3-Clause"
] | 23 | 2016-11-02T06:45:44.000Z | 2022-02-08T14:55:13.000Z | from wtforms import Form, StringField
from wtforms.validators import DataRequired
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_appbuilder.forms import DynamicForm
| 37.384615 | 76 | 0.751029 |
b74ef20d1f5294557f6193fe99adc3a01e0224ec | 403 | py | Python | comms.py | kajusz/ufscreenadsclient | 0151edec0117161c522a87643eef2f7be214210c | [
"MIT"
] | null | null | null | comms.py | kajusz/ufscreenadsclient | 0151edec0117161c522a87643eef2f7be214210c | [
"MIT"
] | null | null | null | comms.py | kajusz/ufscreenadsclient | 0151edec0117161c522a87643eef2f7be214210c | [
"MIT"
] | null | null | null | import zmq
context = zmq.Context()
socket = context.socket(zmq.PAIR)
address = "tcp://127.0.0.1:5000"
# print("No message received yet")
| 18.318182 | 45 | 0.66005 |
b74f2a4a74090ecd5db981f0f8052fb5379e118a | 410 | py | Python | runtime/python/Lib/site-packages/numpy/typing/tests/data/fail/datasource.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | null | null | null | runtime/python/Lib/site-packages/numpy/typing/tests/data/fail/datasource.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | null | null | null | runtime/python/Lib/site-packages/numpy/typing/tests/data/fail/datasource.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | null | null | null | from pathlib import Path
import numpy as np
path: Path
d1: np.DataSource
d1.abspath(path) # E: incompatible type
d1.abspath(b"...") # E: incompatible type
d1.exists(path) # E: incompatible type
d1.exists(b"...") # E: incompatible type
d1.open(path, "r") # E: incompatible type
d1.open(b"...", encoding="utf8") # E: incompatible type
d1.open(None, newline="/n") # E: incompatible type
| 25.625 | 57 | 0.656098 |
b7509767f47f312767bff162702df8fc8da90b4c | 2,821 | py | Python | applications/admin/controllers/gae.py | otaviocarvalho/forca-inf | 93b61f1d6988d4fb00a1736633d85b4f99a2f259 | [
"BSD-3-Clause"
] | 1 | 2017-03-28T21:31:51.000Z | 2017-03-28T21:31:51.000Z | applications/admin/controllers/gae.py | murray3/augmi-a | 9f8cff457fa3966d67d3752ccd86876b08bb19b1 | [
"BSD-3-Clause"
] | null | null | null | applications/admin/controllers/gae.py | murray3/augmi-a | 9f8cff457fa3966d67d3752ccd86876b08bb19b1 | [
"BSD-3-Clause"
] | 1 | 2022-03-10T19:53:44.000Z | 2022-03-10T19:53:44.000Z | ### this works on linux only
try:
import fcntl
import subprocess
import signal
import os
except:
session.flash='sorry, only on Unix systems'
redirect(URL(request.application,'default','site'))
forever=10**8
| 36.636364 | 90 | 0.515066 |
b751a3b9de29d209e3c48a06bc158c7966ca65b5 | 1,110 | py | Python | basicts/archs/AGCRN_arch/AGCRNCell.py | zezhishao/GuanCang_BasicTS | bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c | [
"Apache-2.0"
] | 3 | 2022-02-22T12:50:08.000Z | 2022-03-13T03:38:46.000Z | basicts/archs/AGCRN_arch/AGCRNCell.py | zezhishao/GuanCang_BasicTS | bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c | [
"Apache-2.0"
] | null | null | null | basicts/archs/AGCRN_arch/AGCRNCell.py | zezhishao/GuanCang_BasicTS | bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from basicts.archs.AGCRN_arch.AGCN import AVWGCN
| 42.692308 | 81 | 0.648649 |
b7529f85e20a09a7d94f12902a504b82d6d2f333 | 1,763 | py | Python | lib/python2.7/site-packages/openopt/kernel/iterPrint.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | lib/python2.7/site-packages/openopt/kernel/iterPrint.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | lib/python2.7/site-packages/openopt/kernel/iterPrint.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | from numpy import log10, isnan
textOutputDict = {\
'objFunVal': lambda p: p.iterObjFunTextFormat % (-p.Fk if p.invertObjFunc else p.Fk),
'log10(maxResidual)': lambda p: '%0.2f' % log10(p.rk+1e-100),
'log10(MaxResidual/ConTol)':lambda p: '%0.2f' % log10(max((p.rk/p.contol, 1e-100))),
'residual':lambda p: '%0.1e' % p._Residual,
'isFeasible': signOfFeasible,
'nSolutions': lambda p: '%d' % p._nObtainedSolutions,
'front length':lambda p: '%d' % p._frontLength,
'outcome': lambda p: ('%+d' % -p._nOutcome if p._nOutcome != 0 else ''),
'income': lambda p: ('%+d' % p._nIncome if p._nIncome != 0 else ''),
'f*_distance_estim': lambda p: ('%0.1g' % p.f_bound_distance if not isnan(p.f_bound_distance) else 'N/A'),
'f*_bound_estim': lambda p: (p.iterObjFunTextFormat % \
p.f_bound_estimation) if not isnan(p.f_bound_estimation) else 'N/A',
}
delimiter = ' '
| 37.510638 | 123 | 0.604651 |
b752f435d4eed268979210bf9a7cb3d5c6b5fde1 | 1,833 | py | Python | src/cli.py | blu3r4y/ccc-linz-mar2019 | a012a8e8d0cbf01c495385c62f2571bfb1b01962 | [
"MIT"
] | null | null | null | src/cli.py | blu3r4y/ccc-linz-mar2019 | a012a8e8d0cbf01c495385c62f2571bfb1b01962 | [
"MIT"
] | null | null | null | src/cli.py | blu3r4y/ccc-linz-mar2019 | a012a8e8d0cbf01c495385c62f2571bfb1b01962 | [
"MIT"
] | null | null | null | import os
from main import main
from pprint import pprint
if __name__ == "__main__":
level, quests = 4, 5
for i in range(1, quests + 1):
input_file = r'..\data\level{0}\level{0}_{1}.in'.format(level, i)
output_file = os.path.splitext(input_file)[0] + ".out"
with open(input_file, 'r') as fi:
data = parse(fi.readlines())
# pprint(data)
print("=== Output {}".format(i))
print("======================")
result = main(data)
pprint(result)
with open(output_file, 'w+') as fo:
fo.write(result)
| 24.118421 | 73 | 0.505728 |
b75565cf56b991351466f79c8a9946c1474351a6 | 5,749 | py | Python | card_utils/games/gin/ricky/utils.py | cdrappi/card_utils | dd12d3be22774cf35d7a6ce6b5f05ff6ee527929 | [
"MIT"
] | null | null | null | card_utils/games/gin/ricky/utils.py | cdrappi/card_utils | dd12d3be22774cf35d7a6ce6b5f05ff6ee527929 | [
"MIT"
] | null | null | null | card_utils/games/gin/ricky/utils.py | cdrappi/card_utils | dd12d3be22774cf35d7a6ce6b5f05ff6ee527929 | [
"MIT"
] | null | null | null | import itertools
from typing import List, Tuple
from card_utils import deck
from card_utils.deck.utils import (
rank_partition,
suit_partition,
ranks_to_sorted_values
)
from card_utils.games.gin.deal import new_game
def deal_new_game():
""" shuffle up and deal each player 7 cards,
put one card in the discard list,
and put remaining cards in deck
:return: (dict)
{
'p1_hand': [str],
'p2_hand': [str],
'discard': [str],
'deck': [str]
}
"""
return new_game(n_cards=7)
def sorted_hand_points(hand):
"""
:param hand: ([str]) list of cards
:return: ([str], int)
"""
runs_3, runs_4 = get_runs(hand)
sets_3, sets_4 = get_sets(hand)
melds_3 = runs_3 + sets_3
melds_4 = runs_4 + sets_4
sorted_hand = sort_cards_by_rank(hand)
hand_points_ = sum_points_by_ranks(hand)
if len(hand) == 8:
hand_points_ -= max(deck.rank_to_value[r] for r, _ in hand)
if len(melds_3 + melds_4) == 0:
return sorted_hand, hand_points_
for meld_3, meld_4 in itertools.product(melds_3, melds_4):
cards_in_meld = {*meld_3, *meld_4}
if len(cards_in_meld) == 7:
# if there is a non-intersecting 3-meld and 4-meld,
# then you have 0 points and win
remaining_cards = list(set(hand) - set(cards_in_meld))
return meld_4 + meld_3 + remaining_cards, 0
for meld in melds_3 + melds_4:
hand_without_meld = [card for card in hand if card not in meld]
# print(hand, hand_without_meld, meld)
meld_points = sum_points_by_ranks(hand_without_meld)
if len(hand) == 8:
meld_points -= max(deck.rank_to_value[r] for r, _ in hand_without_meld)
if meld_points < hand_points_:
sorted_hand = meld + sort_cards_by_rank(hand_without_meld)
hand_points_ = min(hand_points_, meld_points)
return sorted_hand, hand_points_
def rank_straights(ranks, straight_length, aces_high=True, aces_low=True, suit=''):
"""
:param ranks: ([str])
e.g. ['A', '2', '7', 'T', 'J', 'Q', 'K']
:param straight_length: (int) e.g. 5
:param aces_high: (bool)
:param aces_low: (bool)
:param suit: (str) optional: inject a suit in the final returned value
:return: ([[str]]) list of list of straights,
each with length straight_length
e.g. [['T','J','Q','K','A']]
or [['Th', 'Jh', 'Qh', 'Kh', 'Ah']]
"""
if len(ranks) < straight_length:
# don't waste our time if its impossible to make a straight
return []
if suit not in {'', *deck.suits}:
raise ValueError(
f'rank_straights: suit parameter must either be '
f'the empty string "" or one of {deck.suits}'
)
values = ranks_to_sorted_values(ranks, aces_high=aces_high, aces_low=aces_low)
values_in_a_row = 0
num_values = len(values)
last_value = values[0]
straights = []
for ii, value in enumerate(values[1:]):
if last_value + 1 == value:
values_in_a_row += 1
else:
values_in_a_row = 0
if values_in_a_row >= straight_length - 1:
straights.append([
f'{deck.value_to_rank[v]}{suit}'
for v in range(value - straight_length + 1, value + 1)
])
if num_values + values_in_a_row < straight_length + ii:
# exit early if there aren't enough cards left
# to complete a straight
return straights
last_value = value
return straights
def get_runs(hand):
""" cleaner but slower (!?) method to get runs
:param hand: ([str])
:return: ([[str]], [[str]])
"""
suit_to_ranks = suit_partition(hand)
runs_3, runs_4 = [], []
for suit, ranks in suit_to_ranks.items():
runs_3.extend(rank_straights(ranks, 3, True, True, suit=suit))
runs_4.extend(rank_straights(ranks, 4, True, True, suit=suit))
return runs_3, runs_4
def get_sets(hand):
"""
:param hand: ([str])
:return: ([[str]], [[str]])
"""
rank_to_suits = rank_partition(hand)
sets_3, sets_4 = [], []
for rank, suits in rank_to_suits.items():
if len(suits) == 4:
sets_4.append([f'{rank}{s}' for s in suits])
sets_3.extend([
[f'{rank}{s}' for s in suit_combo]
for suit_combo in itertools.combinations(suits, 3)
])
elif len(suits) == 3:
sets_3.append([f'{rank}{s}' for s in suits])
return sets_3, sets_4
def get_melds(hand) -> Tuple:
"""
:param hand: ([str])
:return: ([[str], [str]])
"""
runs_3, runs_4 = get_runs(hand)
sets_3, sets_4 = get_sets(hand)
return runs_3 + sets_3, runs_4 + sets_4
def are_two_distinct_3_melds(melds_3: List[List]):
"""
:param melds_3: ([[str]])
:return: (bool)
"""
if len(melds_3) < 2:
return False
for m1, m2 in itertools.combinations(melds_3, 2):
if len({*m1, *m2}) == 6:
return True
return False
def sum_points_by_ranks(hand):
"""
:param hand: ([str])
:return: (int)
"""
return sum(deck.rank_to_value[r] for r, _ in hand)
def sort_cards_by_rank(cards):
"""
:param cards: ([str])
:return: ([str])
"""
return sorted(cards, key=lambda c: deck.rank_to_value[c[0]])
def sort_hand(hand):
"""
:param hand: ([str])
:return: ([str])
"""
sorted_hand, _ = sorted_hand_points(hand)
return sorted_hand
def hand_points(hand):
"""
:param hand: ([str])
:return: (int)
"""
_, points = sorted_hand_points(hand)
return points
| 27.117925 | 83 | 0.584623 |
b7569ffd8bee128efc51f5bcf493cd00aa1b2d94 | 899 | py | Python | evennia/contrib/rpg/dice/tests.py | davidrideout/evennia | 879eea55acdf4fe5cdc96ba8fd0ab5ccca4ae84b | [
"BSD-3-Clause"
] | null | null | null | evennia/contrib/rpg/dice/tests.py | davidrideout/evennia | 879eea55acdf4fe5cdc96ba8fd0ab5ccca4ae84b | [
"BSD-3-Clause"
] | null | null | null | evennia/contrib/rpg/dice/tests.py | davidrideout/evennia | 879eea55acdf4fe5cdc96ba8fd0ab5ccca4ae84b | [
"BSD-3-Clause"
] | null | null | null | """
Testing of TestDice.
"""
from evennia.commands.default.tests import BaseEvenniaCommandTest
from mock import patch
from . import dice
| 37.458333 | 100 | 0.657397 |
b75755658b51065a953a59f32b666762d1790a50 | 9,247 | py | Python | ardour_tally_relay.py | Jajcus/ardour_tally_relay | aa69035a86bd282238f70ef17c427068249efd59 | [
"BSD-2-Clause"
] | null | null | null | ardour_tally_relay.py | Jajcus/ardour_tally_relay | aa69035a86bd282238f70ef17c427068249efd59 | [
"BSD-2-Clause"
] | null | null | null | ardour_tally_relay.py | Jajcus/ardour_tally_relay | aa69035a86bd282238f70ef17c427068249efd59 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
import argparse
import logging
import signal
import time
from logging import debug, error, info, warning
import pythonosc.osc_server
import pythonosc.udp_client
from pythonosc.dispatcher import Dispatcher
import hid
LOG_FORMAT = '%(message)s'
POLL_INTERVAL = 1
# Supported USB relay vendor-id and product-id
USB_VID = 0x16c0
USB_PID = 0x05df
ON_COMMAND = [0x00,0xff,0x01,0x00,0x00,0x00,0x00,0x00,0x00]
OFF_COMMAND = [0x00,0xfd,0x01,0x00,0x00,0x00,0x00,0x00,0x00]
if __name__ == "__main__":
osc_relay = OSCRelay()
osc_relay.main()
| 37.589431 | 106 | 0.54861 |
b757a3fb8db3b96f5cc0d1f1dd19f7847059351f | 1,408 | py | Python | python second semester working scripts/electrode_fcn.py | pm2111/Heart-Defibrillation-Project | 48ea3570c360aac7c3ff46354891998f4f364fab | [
"MIT"
] | null | null | null | python second semester working scripts/electrode_fcn.py | pm2111/Heart-Defibrillation-Project | 48ea3570c360aac7c3ff46354891998f4f364fab | [
"MIT"
] | null | null | null | python second semester working scripts/electrode_fcn.py | pm2111/Heart-Defibrillation-Project | 48ea3570c360aac7c3ff46354891998f4f364fab | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import os
path = "/Users/petermarinov/msci project/electrode data/test data/data/"
filenames = []
for f in os.listdir(path):
if not f.startswith('.'):
filenames.append(f)
i=-12
data = np.genfromtxt(path + filenames[i])
V = np.zeros((200,200))
for i in range (0,200):
for j in range (0,200):
if data[j+200*i][0] == 0:
V[i,j] = -90.0
if data[j+200*i][0] >1:
V[i,j] = 20.-(110./data[j+200*i][1])*(data[j+200*i][0]-1)
if data[j+200*i][0] ==1:
V[i,j] = 20.
i1 = 50
k= 3
total = []
x=0 #dummy
elec = np.zeros((200,200,200))
for j1 in range(0,200):
for i in range (1,200):
for j in range (1,200):
#elec[j1,i,j] = np.divide(float((i-i1)*(V[i,j]-V[i-1,j])+(j-j1)*(V[i,j]-V[i,j-1])),float(((i-i1)**2+ (j-j1)**2 +k**2)**(3/2)))
#x +=((i-i1)*(V[i,j]-V[i-1,j])+(j-j1)*(V[i,j]-V[i,j-1]))/((i-i1)**2+ (j-j1)**2 +k**2)**(3/2)
x += np.float((i-i1)*(V[i,j]-V[i-1,j])+(j-j1)*(V[i,j]-V[i,j-1]))/np.float(((i-i1)**2+(j-j1)**2+k**2)**3/2)
total.append(x)
x=0
plt.plot(total)
plt.xlabel("time [dimentionless]", fontsize = 18)
plt.ylabel("Voltage [mV]" , fontsize = 18)
plt.title("Electrode measurement for a healthy pacing heart")
plt.grid()
plt.show() | 31.288889 | 139 | 0.496449 |
b757a454248faaffeb488872e86cf07d801bf71c | 1,355 | py | Python | resources/lib/IMDbPY/bin/get_first_movie.py | bopopescu/ServerStatus | a883598248ad6f5273eb3be498e3b04a1fab6510 | [
"MIT"
] | 1 | 2017-11-02T06:06:39.000Z | 2017-11-02T06:06:39.000Z | resources/lib/IMDbPY/bin/get_first_movie.py | bopopescu/ServerStatus | a883598248ad6f5273eb3be498e3b04a1fab6510 | [
"MIT"
] | 1 | 2015-04-21T22:05:02.000Z | 2015-04-22T22:27:15.000Z | resources/lib/IMDbPY/bin/get_first_movie.py | GetSomeBlocks/Score_Soccer | a883598248ad6f5273eb3be498e3b04a1fab6510 | [
"MIT"
] | 4 | 2017-11-01T19:24:31.000Z | 2018-09-13T00:05:41.000Z | #!/usr/bin/env python
"""
get_first_movie.py
Usage: get_first_movie "movie title"
Search for the given title and print the best matching result.
"""
import sys
# Import the IMDbPY package.
try:
import imdb
except ImportError:
print 'You bad boy! You need to install the IMDbPY package!'
sys.exit(1)
if len(sys.argv) != 2:
print 'Only one argument is required:'
print ' %s "movie title"' % sys.argv[0]
sys.exit(2)
title = sys.argv[1]
i = imdb.IMDb()
in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
title = unicode(title, in_encoding, 'replace')
try:
# Do the search, and get the results (a list of Movie objects).
results = i.search_movie(title)
except imdb.IMDbError, e:
print "Probably you're not connected to Internet. Complete error report:"
print e
sys.exit(3)
if not results:
print 'No matches for "%s", sorry.' % title.encode(out_encoding, 'replace')
sys.exit(0)
# Print only the first result.
print ' Best match for "%s"' % title.encode(out_encoding, 'replace')
# This is a Movie instance.
movie = results[0]
# So far the Movie object only contains basic information like the
# title and the year; retrieve main information:
i.update(movie)
print movie.summary().encode(out_encoding, 'replace')
| 22.583333 | 79 | 0.702583 |
b7592e3ec4b70120c5e12cf12590570b289d59a3 | 14,079 | py | Python | ID3.py | idiomatic/id3.py | 574b2a6bd52897e07c220198d451e5971577fc02 | [
"MIT"
] | null | null | null | ID3.py | idiomatic/id3.py | 574b2a6bd52897e07c220198d451e5971577fc02 | [
"MIT"
] | null | null | null | ID3.py | idiomatic/id3.py | 574b2a6bd52897e07c220198d451e5971577fc02 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- mode: python -*-
import re
import struct
import types
def items_in_order(dict, order=[]):
"""return all items of dict, but starting in the specified order."""
done = { }
items = [ ]
for key in order + dict.keys():
if not done.has_key(key) and dict.has_key(key):
done[key] = None
items.append((key, dict[key]))
return items
genres = [
'Blues', 'Classic Rock', 'Country', 'Dance', 'Disco', 'Funk',
'Grunge', 'Hip-Hop', 'Jazz', 'Metal', 'New Age', 'Oldies', 'Other',
'Pop', 'R&B', 'Rap', 'Reggae', 'Rock', 'Techno', 'Industrial',
'Alternative', 'Ska', 'Death Metal', 'Pranks', 'Soundtrack',
'Euro-Techno', 'Ambient', 'Trip-Hop', 'Vocal', 'Jazz+Funk', 'Fusion',
'Trance', 'Classical', 'Instrumental', 'Acid', 'House', 'Game',
'Sound Clip', 'Gospel', 'Noise', 'Alt. Rock', 'Bass', 'Soul',
'Punk', 'Space', 'Meditative', 'Instrum. Pop', 'Instrum. Rock',
'Ethnic', 'Gothic', 'Darkwave', 'Techno-Indust.', 'Electronic',
'Pop-Folk', 'Eurodance', 'Dream', 'Southern Rock', 'Comedy',
'Cult', 'Gangsta', 'Top 40', 'Christian Rap', 'Pop/Funk', 'Jungle',
'Native American', 'Cabaret', 'New Wave', 'Psychadelic', 'Rave',
'Showtunes', 'Trailer', 'Lo-Fi', 'Tribal', 'Acid Punk', 'Acid Jazz',
'Polka', 'Retro', 'Musical', 'Rock & Roll', 'Hard Rock', 'Folk',
'Folk/Rock', 'National Folk', 'Swing', 'Fusion', 'Bebob', 'Latin',
'Revival', 'Celtic', 'Bluegrass', 'Avantgarde', 'Gothic Rock',
'Progress. Rock', 'Psychadel. Rock', 'Symphonic Rock', 'Slow Rock',
'Big Band', 'Chorus', 'Easy Listening', 'Acoustic', 'Humour',
'Speech', 'Chanson', 'Opera', 'Chamber Music', 'Sonata', 'Symphony',
'Booty Bass', 'Primus', 'Porn Groove', 'Satire', 'Slow Jam',
'Club', 'Tango', 'Samba', 'Folklore', 'Ballad', 'Power Ballad',
'Rhythmic Soul', 'Freestyle', 'Duet', 'Punk Rock', 'Drum Solo',
'A Capella', 'Euro-House', 'Dance Hall', 'Goa', 'Drum & Bass',
'Club-House', 'Hardcore', 'Terror', 'Indie', 'BritPop', 'Negerpunk',
'Polsk Punk', 'Beat', 'Christian Gangsta Rap', 'Heavy Metal',
'Black Metal', 'Crossover', 'Contemporary Christian', 'Christian Rock',
'Merengue', 'Salsa', 'Thrash Metal', 'Anime', 'Jpop', 'Synthpop',
]
frame_id_names = {
'BUF' : 'Recommended buffer size',
'CNT' : 'Play counter',
'COM' : 'Comments',
'CRA' : 'Audio encryption',
'CRM' : 'Encrypted meta frame',
'ETC' : 'Event timing codes',
'EQU' : 'Equalization',
'GEO' : 'General encapsulated object',
'IPL' : 'Involved people list',
'LNK' : 'Linked information',
'MCI' : 'Music CD Identifier',
'MLL' : 'MPEG location lookup table',
'PIC' : 'Attached picture',
'POP' : 'Popularimeter',
'REV' : 'Reverb',
'RVA' : 'Relative volume adjustment',
'SLT' : 'Synchronized lyric/text',
'STC' : 'Synced tempo codes',
'TAL' : 'Title',
'TBP' : 'Beats per minute',
'TCM' : 'Composer',
'TCO' : 'Content type',
'TCR' : 'Copyright message',
'TDA' : 'Date',
'TDY' : 'Playlist delay',
'TEN' : 'Encoded by',
'TFT' : 'File type',
'TIM' : 'Time',
'TKE' : 'Initial key',
'TLA' : 'Language(s)',
'TLE' : 'Length',
'TMT' : 'Media type',
'TOA' : 'Original artist(s)/performer(s)',
'TOF' : 'Original filename',
'TOL' : 'Original Lyricist(s)/text writer(s)',
'TOR' : 'Original release year',
'TOT' : 'Original album/Movie/Show title',
'TP1' : 'Lead artist(s)/Lead performer(s)/Soloist(s)/Performing group',
'TP2' : 'Band/Orchestra/Accompaniment',
'TP3' : 'Conductor/Performer refinement',
'TP4' : 'Interpreted, remixed, or otherwise modified by',
'TPA' : 'Part of a set',
'TPB' : 'Publisher',
'TRC' : 'ISRC (International Standard Recording Code)',
'TRD' : 'Recording dates',
'TRK' : 'Track number/Position in set',
'TSI' : 'Size',
'TSS' : 'Software/hardware and settings used for encoding',
'TT1' : 'Content group description',
'TT2' : 'Title/Songname/Content description',
'TT3' : 'Subtitle/Description refinement',
'TXT' : 'Lyricist/text writer',
'TXX' : 'User defined text information frame',
'TYE' : 'Year',
'UFI' : 'Unique file identifier',
'ULT' : 'Unsychronized lyric/text transcription',
'WAF' : 'Official audio file webpage',
'WAR' : 'Official artist/performer webpage',
'WAS' : 'Official audio source webpage',
'WCM' : 'Commercial information',
'WCP' : 'Copyright/Legal information',
'WPB' : 'Publishers official webpage',
'WXX' : 'User defined URL link frame',
}
text_frame_ids = ( 'TT1', 'TT2', 'TT3', 'TP1', 'TP2', 'TP3', 'TP4',
'TCM', 'TXT', 'TLA', 'TCO', 'TAL', 'TPA', 'TRK',
'TRC', 'TYE', 'TDA', 'TIM', 'TRD', 'TMT', 'TFT',
'TBP', 'TCR', 'TPB', 'TEN', 'TSS', 'TOF', 'TLE',
'TSI', 'TDY', 'TKE', 'TOT', 'TOA', 'TOL', 'TOR',
'IPL' )
_genre_number_re = re.compile("^\((\d+)\)$")
_track_re = re.compile("^(\d+)/(\d+)$")
#def info(filename):
# f = open(filename, 'rb')
# try:
# return id3().read(f).attributes()
# finally:
# f.close()
# composer
# disc
# part_of_a_compilation
# volume_adjustment
# equalizer_preset
# my_rating
# start_time
# stop_time
def test(filename="2_3.mp3"):
import StringIO
f = open(filename)
i = id3_file(f)
i.read()
i._f = StringIO.StringIO()
i.write()
v = i._f.getvalue()
f.seek(0)
v2 = f.read(len(v))
f.close()
return v == v2
def scan():
import os
os.path.walk('.', walkfn, 0)
if __name__ == '__main__':
scan()
| 32.291284 | 79 | 0.539882 |
b75a00768c2cceed8ca46774029ad378bc7cc2e6 | 1,180 | py | Python | workflow/pnmlpy/pmnl_model.py | SODALITE-EU/verification | 584e3c61bc20e65944e34b875eb5ed0ec02d6fa9 | [
"Apache-2.0"
] | null | null | null | workflow/pnmlpy/pmnl_model.py | SODALITE-EU/verification | 584e3c61bc20e65944e34b875eb5ed0ec02d6fa9 | [
"Apache-2.0"
] | 2 | 2020-03-30T12:02:32.000Z | 2021-04-20T19:09:25.000Z | workflow/pnmlpy/pmnl_model.py | SODALITE-EU/verification | 584e3c61bc20e65944e34b875eb5ed0ec02d6fa9 | [
"Apache-2.0"
] | null | null | null | from xml.dom import minidom
from xml.etree import ElementTree
from xml.etree.cElementTree import Element, SubElement, ElementTree, tostring
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
| 31.891892 | 115 | 0.582203 |
b75be5ebe9cb0ad6772b99405564c425be4f2dda | 969 | py | Python | examples/truss/truss_01.py | ofgod2/Analisis-matricial-nusa-python | 7cea329ba00449b97711a0c67725053a0d194335 | [
"MIT"
] | 92 | 2016-11-14T01:39:55.000Z | 2022-03-27T17:23:41.000Z | examples/truss/truss_01.py | ofgod2/Analisis-matricial-nusa-python | 7cea329ba00449b97711a0c67725053a0d194335 | [
"MIT"
] | 1 | 2017-11-30T05:04:02.000Z | 2018-08-29T04:31:39.000Z | examples/truss/truss_01.py | ofgod2/Analisis-matricial-nusa-python | 7cea329ba00449b97711a0c67725053a0d194335 | [
"MIT"
] | 31 | 2017-05-17T18:50:18.000Z | 2022-03-12T03:08:00.000Z | # -*- coding: utf-8 -*-
# ***********************************
# Author: Pedro Jorge De Los Santos
# E-mail: delossantosmfq@gmail.com
# Blog: numython.github.io
# License: MIT License
# ***********************************
from nusa import *
"""
Logan, D. (2007). A first course in the finite element analysis.
Example 3.1, pp. 70.
"""
# Input data
E = 30e6 # psi
A = 2.0 # in^2
P = 10e3 # lbf
# Model
m = TrussModel("Truss Model")
# Nodes
n1 = Node((0,0))
n2 = Node((0,120))
n3 = Node((120,120))
n4 = Node((120,0))
# Elements
kdg = np.pi/180.0
e1 = Truss((n1,n2),E,A)
e2 = Truss((n1,n3),E,A)
e3 = Truss((n1,n4),E,A)
# Add elements
for nd in (n1,n2,n3,n4):
m.add_node(nd)
for el in (e1,e2,e3):
m.add_element(el)
m.add_force(n1,(0,-P))
m.add_constraint(n2,ux=0,uy=0) # fixed
m.add_constraint(n3,ux=0,uy=0) # fixed
m.add_constraint(n4,ux=0,uy=0) # fixed
m.plot_model()
m.solve() # Solve model
m.plot_deformed_shape() # plot deformed shape
m.show()
| 21.065217 | 64 | 0.585139 |
b75dd73022d3840c6328953902299b38ebc5ba18 | 2,919 | py | Python | Profiles/Mahmoud Higazy/logistic_regression.py | AhmedHani/FCIS-Machine-Learning-2017 | f241d989fdccfabfe351cd9c01f5de4da8df6ef3 | [
"Apache-2.0"
] | 13 | 2017-07-02T06:45:46.000Z | 2020-12-26T16:35:24.000Z | Profiles/Mahmoud Higazy/logistic_regression.py | AhmedHani/FCIS-Machine-Learning-2017 | f241d989fdccfabfe351cd9c01f5de4da8df6ef3 | [
"Apache-2.0"
] | 4 | 2017-07-22T00:09:41.000Z | 2017-12-15T15:54:33.000Z | Profiles/Mahmoud Higazy/logistic_regression.py | AhmedHani/FCIS-Machine-Learning-2017 | f241d989fdccfabfe351cd9c01f5de4da8df6ef3 | [
"Apache-2.0"
] | 25 | 2017-07-01T23:07:08.000Z | 2019-01-24T09:45:08.000Z | from data_reader.reader import CsvReader
from util import *
import numpy as np
import matplotlib.pyplot as plt
reader = CsvReader("./data/Iris.csv")
iris_features, iris_labels = reader.get_iris_data()
ignore_verginica = [i for i, v in enumerate(iris_labels) if v == 'Iris-virginica']
iris_features = [v for i, v in enumerate(iris_features) if i not in ignore_verginica]
iris_labels = [v for i, v in enumerate(iris_labels) if i not in ignore_verginica]
print(len(iris_features))
print(len(iris_labels))
iris_features, iris_labels = shuffle(iris_features, iris_labels)
iris_labels = to_onehot(iris_labels)
iris_labels = list(map(lambda v: v.index(max(v)), iris_labels))
train_x, train_y, test_x, test_y = iris_features[0:89], iris_labels[0:89], iris_features[89:], iris_labels[89:]
train_x, train_y, test_x, test_y = np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
train_x, means, stds = standardize(train_x)
test_x = standardize(test_x, means, stds)
lr = LogisticRegression(learning_rate=0.1, epochs=50)
lr.fit(train_x, train_y)
plt.plot(range(1, len(lr.cost_) + 1), np.log10(lr.cost_))
plt.xlabel('Epochs')
plt.ylabel('Cost')
plt.title('Logistic Regression - Learning rate 0.1')
plt.tight_layout()
plt.show()
predicted_test = lr.predict(test_x)
print("Test Accuracy: " + str(((sum([predicted_test[i] == test_y[i] for i in range(0, len(predicted_test))]) / len(predicted_test)) * 100.0)) + "%")
| 34.341176 | 148 | 0.656732 |
b75eb4207857101d04d38eb0f52b4294fd616690 | 1,413 | py | Python | wavefront_reader/reading/readobjfile.py | SimLeek/wavefront_reader | 4504f5b6185a03fcdd1722dbea660f7af35b8b8c | [
"MIT"
] | null | null | null | wavefront_reader/reading/readobjfile.py | SimLeek/wavefront_reader | 4504f5b6185a03fcdd1722dbea660f7af35b8b8c | [
"MIT"
] | null | null | null | wavefront_reader/reading/readobjfile.py | SimLeek/wavefront_reader | 4504f5b6185a03fcdd1722dbea660f7af35b8b8c | [
"MIT"
] | null | null | null | from wavefront_reader.wavefront_classes.objfile import ObjFile
from .readface import read_face
def read_objfile(fname):
"""Takes .obj filename and return an ObjFile class."""
obj_file = ObjFile()
with open(fname) as f:
lines = f.read().splitlines()
if 'OBJ' not in lines[0]:
raise ValueError("File not .obj-formatted.")
# todo: assumes one object per .obj file, which is wrong
# todo: doesn't properly ignore comments
for line in lines:
if line:
prefix, value = line.split(' ', 1)
if prefix == 'o':
obj_file.add_prop(value)
if obj_file.has_prop():
if prefix == 'v':
obj_file.last_obj_prop.vertices.append([float(val) for val in value.split(' ')])
elif prefix == 'vn':
obj_file.last_obj_prop.vertex_normals.append([float(val) for val in value.split(' ')])
elif prefix == 'vt':
obj_file.last_obj_prop.vertex_textures.append([float(val) for val in value.split(' ')])
elif prefix == 'usemtl':
obj_file.last_obj_prop.material_name = value
elif prefix == 'f':
obj_file.last_obj_prop.faces.append(read_face(value, obj_file.last_obj_prop))
else:
obj_file.misc[prefix] = value
return obj_file
| 39.25 | 107 | 0.573248 |
b760116d8d8fe2d046e6af340b2d6bd9cb6fc8e2 | 157 | py | Python | constants.py | Guedelho/snake-ai | 176db202aaec76ff5c7cac6cc9d7a7bc46ff2b16 | [
"MIT"
] | null | null | null | constants.py | Guedelho/snake-ai | 176db202aaec76ff5c7cac6cc9d7a7bc46ff2b16 | [
"MIT"
] | null | null | null | constants.py | Guedelho/snake-ai | 176db202aaec76ff5c7cac6cc9d7a7bc46ff2b16 | [
"MIT"
] | null | null | null | # Directions
UP = 'UP'
DOWN = 'DOWN'
LEFT = 'LEFT'
RIGHT = 'RIGHT'
# Colors
RED = (255, 0, 0)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
WHITE = (255, 255, 255)
| 13.083333 | 23 | 0.547771 |
b76026927b6eb058284eefad5002a87c72c21db0 | 520 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/utils.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/utils.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/utils.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Utils for video_pipeline app.
"""
from django.conf import settings
from edx_rest_api_client.client import OAuthAPIClient
def create_video_pipeline_api_client(api_client_id, api_client_secret):
"""
Returns an API client which can be used to make Video Pipeline API requests.
Arguments:
api_client_id(unicode): Video pipeline client id.
api_client_secret(unicode): Video pipeline client secret.
"""
return OAuthAPIClient(settings.LMS_ROOT_URL, api_client_id, api_client_secret)
| 28.888889 | 82 | 0.765385 |
b76161b7b67049e769a1af4d2aa06f728082679c | 2,695 | py | Python | run.py | Galaxy-SynBioCAD/extractTaxonomy | da3a1da443909dbefe143a3b7de66905c43eaf82 | [
"MIT"
] | null | null | null | run.py | Galaxy-SynBioCAD/extractTaxonomy | da3a1da443909dbefe143a3b7de66905c43eaf82 | [
"MIT"
] | null | null | null | run.py | Galaxy-SynBioCAD/extractTaxonomy | da3a1da443909dbefe143a3b7de66905c43eaf82 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Created on March 18 2020
@author: Melchior du Lac
@description: Extract the taxonomy ID from an SBML file
"""
import argparse
import tempfile
import os
import logging
import shutil
import docker
def main(inputfile, output):
"""Call the extractTaxonomy docker to return the JSON file
:param inputfile: The path to the SBML file
:param output: The path to the output json file
:type inputfile: str
:type output: str
:rtype: None
:return: None
"""
docker_client = docker.from_env()
image_str = 'brsynth/extracttaxonomy-standalone'
try:
image = docker_client.images.get(image_str)
except docker.errors.ImageNotFound:
logging.warning('Could not find the image, trying to pull it')
try:
docker_client.images.pull(image_str)
image = docker_client.images.get(image_str)
except docker.errors.ImageNotFound:
logging.error('Cannot pull image: '+str(image_str))
exit(1)
with tempfile.TemporaryDirectory() as tmpOutputFolder:
if os.path.exists(inputfile):
shutil.copy(inputfile, tmpOutputFolder+'/input.dat')
command = ['/home/tool_extractTaxonomy.py',
'-input',
'/home/tmp_output/input.dat',
'-output',
'/home/tmp_output/output.dat']
container = docker_client.containers.run(image_str,
command,
detach=True,
stderr=True,
volumes={tmpOutputFolder+'/': {'bind': '/home/tmp_output', 'mode': 'rw'}})
container.wait()
err = container.logs(stdout=False, stderr=True)
err_str = err.decode('utf-8')
if 'ERROR' in err_str:
print(err_str)
elif 'WARNING' in err_str:
print(err_str)
if not os.path.exists(tmpOutputFolder+'/output.dat'):
print('ERROR: Cannot find the output file: '+str(tmpOutputFolder+'/output.dat'))
else:
shutil.copy(tmpOutputFolder+'/output.dat', output)
container.remove()
else:
logging.error('Cannot find the input file: '+str(inputfile))
exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser('Extract the t')
parser.add_argument('-input', type=str)
parser.add_argument('-output', type=str)
params = parser.parse_args()
main(params.input, params.output)
| 35 | 127 | 0.565121 |
b761fb951040af2347c2dd2aa478c82dca9ff08e | 10,460 | py | Python | src/ebay_rest/api/buy_browse/models/refinement.py | matecsaj/ebay_rest | dd23236f39e05636eff222f99df1e3699ce47d4a | [
"MIT"
] | 3 | 2021-12-12T04:28:03.000Z | 2022-03-10T03:29:18.000Z | src/ebay_rest/api/buy_browse/models/refinement.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 33 | 2021-06-16T20:44:36.000Z | 2022-03-30T14:55:06.000Z | src/ebay_rest/api/buy_browse/models/refinement.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 7 | 2021-06-03T09:30:23.000Z | 2022-03-08T19:51:33.000Z | # coding: utf-8
"""
Browse API
<p>The Browse API has the following resources:</p> <ul> <li><b> item_summary: </b> Lets shoppers search for specific items by keyword, GTIN, category, charity, product, or item aspects and refine the results by using filters, such as aspects, compatibility, and fields values.</li> <li><b> search_by_image: </b><a href=\"https://developer.ebay.com/api-docs/static/versioning.html#experimental\" target=\"_blank\"><img src=\"/cms/img/docs/experimental-icon.svg\" class=\"legend-icon experimental-icon\" alt=\"Experimental Release\" title=\"Experimental Release\" /> (Experimental)</a> Lets shoppers search for specific items by image. You can refine the results by using URI parameters and filters.</li> <li><b> item: </b> <ul><li>Lets you retrieve the details of a specific item or all the items in an item group, which is an item with variations such as color and size and check if a product is compatible with the specified item, such as if a specific car is compatible with a specific part.</li> <li>Provides a bridge between the eBay legacy APIs, such as <b> Finding</b>, and the RESTful APIs, which use different formats for the item IDs.</li> </ul> </li> <li> <b> shopping_cart: </b> <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#experimental\" target=\"_blank\"><img src=\"/cms/img/docs/experimental-icon.svg\" class=\"legend-icon experimental-icon\" alt=\"Experimental Release\" title=\"Experimental Release\" /> (Experimental)</a> <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> Provides the ability for eBay members to see the contents of their eBay cart, and add, remove, and change the quantity of items in their eBay cart. 
<b> Note: </b> This resource is not available in the eBay API Explorer.</li></ul> <p>The <b> item_summary</b>, <b> search_by_image</b>, and <b> item</b> resource calls require an <a href=\"/api-docs/static/oauth-client-credentials-grant.html\">Application access token</a>. The <b> shopping_cart</b> resource calls require a <a href=\"/api-docs/static/oauth-authorization-code-grant.html\">User access token</a>.</p> # noqa: E501
OpenAPI spec version: v1.11.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Refinement):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 46.488889 | 2,332 | 0.67782 |
b76571e31217da708c1ce0ba259ecc1d18b070d9 | 1,878 | py | Python | tests/dataio_tests/test_import_data_filter_empty_directories.py | cdeitrick/Lolipop | 5b87b00a2c7ccbeeb3876bddb32e54aedf6bdf6d | [
"MIT"
] | 6 | 2020-04-18T15:43:19.000Z | 2022-02-19T18:43:23.000Z | tests/dataio_tests/test_import_data_filter_empty_directories.py | cdeitrick/Lolipop | 5b87b00a2c7ccbeeb3876bddb32e54aedf6bdf6d | [
"MIT"
] | 5 | 2020-05-04T16:09:03.000Z | 2020-10-13T03:52:56.000Z | tests/dataio_tests/test_import_data_filter_empty_directories.py | cdeitrick/muller_diagrams | 5b87b00a2c7ccbeeb3876bddb32e54aedf6bdf6d | [
"MIT"
] | 3 | 2020-03-23T17:12:56.000Z | 2020-07-24T22:22:12.000Z | from pathlib import Path
import pandas
from muller.dataio import import_tables
from loguru import logger
DATA_FOLDER = Path(__file__).parent.parent / "data" | 33.535714 | 102 | 0.746006 |