hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1a0c55e81de2d1138689ce8a17aa9a9f32891ab7 | 2,703 | py | Python | src/genie/libs/parser/junos/tests/ShowOspfStatistics/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/junos/tests/ShowOspfStatistics/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/junos/tests/ShowOspfStatistics/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | expected_output = {
"ospf-statistics-information": {
"ospf-statistics": {
"dbds-retransmit": "203656",
"dbds-retransmit-5seconds": "0",
"flood-queue-depth": "0",
"lsas-acknowledged": "225554974",
"lsas-acknowledged-5seconds": "0",
"lsas-flooded": "66582263",
"lsas-flooded-5seconds": "0",
"lsas-high-prio-flooded": "375568998",
"lsas-high-prio-flooded-5seconds": "0",
"lsas-nbr-transmit": "3423982",
"lsas-nbr-transmit-5seconds": "0",
"lsas-requested": "3517",
"lsas-requested-5seconds": "0",
"lsas-retransmit": "8064643",
"lsas-retransmit-5seconds": "0",
"ospf-errors": {
"subnet-mismatch-error": "12"
},
"packet-statistics": [
{
"ospf-packet-type": "Hello",
"packets-received": "5703920",
"packets-received-5seconds": "3",
"packets-sent": "6202169",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "DbD",
"packets-received": "185459",
"packets-received-5seconds": "0",
"packets-sent": "212983",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSReq",
"packets-received": "208",
"packets-received-5seconds": "0",
"packets-sent": "214",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSUpdate",
"packets-received": "16742100",
"packets-received-5seconds": "0",
"packets-sent": "15671465",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSAck",
"packets-received": "2964236",
"packets-received-5seconds": "0",
"packets-sent": "5229203",
"packets-sent-5seconds": "0"
}
],
"total-database-summaries": "0",
"total-linkstate-request": "0",
"total-retransmits": "0"
}
}
}
| 41.584615 | 57 | 0.371439 |
1a0cfe1974d3fead0e36d406bfbe33d55d632379 | 6,981 | py | Python | marc_5gempower/run_5gempower.py | arled-papa/marc | cb94636d786e215195e914b37131277f835bcf52 | [
"Apache-2.0"
] | 1 | 2021-11-30T00:07:28.000Z | 2021-11-30T00:07:28.000Z | marc_5gempower/run_5gempower.py | arled-papa/marc | cb94636d786e215195e914b37131277f835bcf52 | [
"Apache-2.0"
] | null | null | null | marc_5gempower/run_5gempower.py | arled-papa/marc | cb94636d786e215195e914b37131277f835bcf52 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2021 Arled Papa
# Author: Arled Papa <arled.papa@tum.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marc_5gempower.agent_func import empower_agent
import marc_5gempower.measurement_helper_func.measurement_func as util_stats_report
import time
import asyncio
import aiomultiprocess as mp
controller_ip = "Your Controller's PC IP" # Place your own controller ip
controller_port = 2210 # Typical 5G-EmPOWER controller port
measurement_time = 600 # Place your own measurement time (currently 10 min)
"""
This dictionary stores all the agent IDs that are recognized by the 5G-EmPOWER controller.
Since the controller only accepts agents that are registered in its database, this
registration has to be done beforehand. Currently, 100 agent IDs are registered as follows.
"""
# Agent IDs pre-registered with the 5G-EmPOWER controller database.
# Key i maps to a 6-byte MAC-style ID whose bytes are the base-9 digits
# of i, most-significant byte first, reproducing the original literal
# table: 0 -> 00:00:00:00:00:00, 8 -> 00:00:00:00:00:08,
# 9 -> 00:00:00:00:01:00, ..., 99 -> 00:00:00:01:02:00.
agntMAC = {
    i: bytes((i // 9 ** (5 - pos)) % 9 for pos in range(6))
    for i in range(100)
}
# Function that terminates all processes once the measurement time has been finalized
"""
Function that initiates the run of 5G-EmPOWER.
Args:
agents: The number of 5G-EmPOWER agents to initiate
users: The number of users per each initiated FlexRAN agent
delay: Binary that indicated if agent related delay measurement are taking place
"""
if __name__ == "__main__":
    # Run the async entry point to completion on the default event loop,
    # then close the loop once the measurement run has finished.
    # NOTE(review): run_5gempower() is not defined in this view of the
    # file -- presumably defined elsewhere in the module; confirm.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run_5gempower())
    loop.close()
| 56.756098 | 116 | 0.627847 |
1a0dcd546c9fb9cfb2c22a03b6cf3ce13d629047 | 3,531 | py | Python | jina/peapods/peas/gateway/grpc/__init__.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | 1 | 2020-12-23T08:58:49.000Z | 2020-12-23T08:58:49.000Z | jina/peapods/peas/gateway/grpc/__init__.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | null | null | null | jina/peapods/peas/gateway/grpc/__init__.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | null | null | null | import asyncio
import argparse
import os
from multiprocessing.synchronize import Event
from typing import Union, Dict
import grpc
import zmq.asyncio
from .async_call import AsyncPrefetchCall
from ... import BasePea
from ....zmq import send_message_async, recv_message_async, _init_socket
from .....enums import SocketType
from .....proto import jina_pb2
from .....proto import jina_pb2_grpc
__all__ = ['GatewayPea']
| 38.802198 | 112 | 0.652223 |
1a0ddf6aed80f212b94b5faabe9879bd5b5f6957 | 895 | py | Python | Spell Compendium/scr/Spell1059 - Improvisation.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | [
"MIT"
] | 1 | 2021-04-26T08:03:56.000Z | 2021-04-26T08:03:56.000Z | Spell Compendium/scr/Spell1059 - Improvisation.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | [
"MIT"
] | 2 | 2021-06-11T05:55:01.000Z | 2021-08-03T23:41:02.000Z | Spell Compendium/scr/Spell1059 - Improvisation.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | [
"MIT"
] | 1 | 2021-05-17T15:37:58.000Z | 2021-05-17T15:37:58.000Z | from toee import * | 35.8 | 121 | 0.755307 |
1a0ee9a3148043007875afdc8ae0b227516a59d4 | 131,586 | py | Python | pybind/slxos/v17r_2_00/mpls_state/lsp/secondary_path/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/mpls_state/lsp/secondary_path/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/mpls_state/lsp/secondary_path/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import admin_group_lists
import auto_bandwidth
| 75.973441 | 1,982 | 0.766221 |
1a11560f409eb43a0ed24b3b54e89719dbd21b76 | 171 | py | Python | Theseus/Tests/__init__.py | amias-iohk/theseus | 88d9294721e3bbbb756b983f55df6d669e632da4 | [
"MIT"
] | 4 | 2018-08-08T07:11:29.000Z | 2018-11-08T02:43:11.000Z | Theseus/Tests/__init__.py | amias-iohk/theseus | 88d9294721e3bbbb756b983f55df6d669e632da4 | [
"MIT"
] | null | null | null | Theseus/Tests/__init__.py | amias-iohk/theseus | 88d9294721e3bbbb756b983f55df6d669e632da4 | [
"MIT"
] | 3 | 2018-10-18T13:42:24.000Z | 2021-01-20T15:21:25.000Z | __author__ = 'Amias Channer <amias.channer@iohk.io> for IOHK'
__doc__ = 'Daedalus Testing functions'
from .Cardano import *
from .Daedalus import *
from .Common import *
| 24.428571 | 61 | 0.754386 |
1a118f7d8b03da075a37997cfb06c80ceb08fc58 | 907 | py | Python | mobula/operators/Multiply.py | wkcn/mobula | 4eec938d6477776f5f2d68bcf41de83fb8da5195 | [
"MIT"
] | 47 | 2017-07-15T02:13:18.000Z | 2022-01-01T09:37:59.000Z | mobula/operators/Multiply.py | wkcn/mobula | 4eec938d6477776f5f2d68bcf41de83fb8da5195 | [
"MIT"
] | 3 | 2018-06-22T13:55:12.000Z | 2020-01-29T01:41:13.000Z | mobula/operators/Multiply.py | wkcn/mobula | 4eec938d6477776f5f2d68bcf41de83fb8da5195 | [
"MIT"
] | 8 | 2017-09-03T12:42:54.000Z | 2020-09-27T03:38:59.000Z | from .Layer import *
Multiply.OP_L = MultiplyConstant
Multiply.OP_R = MultiplyConstant
| 32.392857 | 84 | 0.624035 |
1a1196d66c0c37b1c2d9a2fa6bdb80334a47691b | 581 | py | Python | project/settings_deploy.py | djstein/vue-django-webpack | d072e116d45800064b3972decbc6ec493801ea5b | [
"MIT"
] | 43 | 2017-02-24T17:59:27.000Z | 2020-02-04T16:49:38.000Z | project/settings_deploy.py | djstein/vue-django-webpack | d072e116d45800064b3972decbc6ec493801ea5b | [
"MIT"
] | 6 | 2017-01-10T01:49:03.000Z | 2017-10-03T06:12:59.000Z | project/settings_deploy.py | djstein/vue-django-webpack | d072e116d45800064b3972decbc6ec493801ea5b | [
"MIT"
] | 13 | 2017-05-18T20:00:24.000Z | 2021-01-22T06:32:45.000Z | from project.settings import INSTALLED_APPS, ALLOWED_HOSTS, BASE_DIR
import os
# Deployment overrides layered on top of the base settings imported above:
# register django-webpack-loader and the Vue app, and relax host checking.
INSTALLED_APPS.append( 'webpack_loader',)
INSTALLED_APPS.append( 'app',)
ALLOWED_HOSTS.append('*',)  # NOTE(review): '*' disables Host header validation -- confirm this is intended for production
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Serve the bundled Vue assets straight from the webpack build output.
STATICFILES_DIRS = [
    # os.path.join(BASE_DIR, 'static',)
    os.path.join(BASE_DIR, 'app', 'vueapp','dist', 'static')
]
# django-webpack-loader config: where bundles live relative to STATIC_URL
# and where webpack writes its build manifest (webpack-stats.json).
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'static/vueapp/',
        'STATS_FILE': os.path.join(BASE_DIR, 'app', 'vueapp', 'webpack-stats.json')
    }
}
# Addresses treated as internal (e.g. by debug tooling).
INTERNAL_IPS = (
    '0.0.0.0',
    '127.0.0.1',
)
1a124f44649021a9482a062fee582fc8ecf4209e | 268 | py | Python | dockerfiles/igv/igv.py | leipzig/gatk-sv | 96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a | [
"BSD-3-Clause"
] | 76 | 2020-06-18T21:31:43.000Z | 2022-03-02T18:42:58.000Z | dockerfiles/igv/igv.py | iamh2o/gatk-sv | bf3704bd1d705339577530e267cd4d1b2f77a17f | [
"BSD-3-Clause"
] | 195 | 2020-06-22T15:12:28.000Z | 2022-03-28T18:06:46.000Z | dockerfiles/igv/igv.py | iamh2o/gatk-sv | bf3704bd1d705339577530e267cd4d1b2f77a17f | [
"BSD-3-Clause"
] | 39 | 2020-07-03T06:47:18.000Z | 2022-03-03T03:47:25.000Z | import sys
# Parse a tab-separated variant file supplied as the single CLI argument.
# Each row: chromosome, start, end, then a comma-separated fourth field.
[_, varfile] = sys.argv
# Output locations for downstream IGV batch plotting.
plotdir = "plots"
igvfile = "igv.txt"
igvsh = "igv.sh"
with open(varfile, 'r') as f:
    for line in f:
        dat = line.split('\t')
        chr = dat[0]  # NOTE: shadows the builtin chr()
        start = dat[1]
        end = dat[2]
        data = dat[3].split(',')
        # NOTE(review): the parsed fields are not used in this view of
        # the file -- the loop body appears truncated; confirm upstream.
1a12b43b837e725bb85bfe8e14b6c166c2be8e99 | 691 | py | Python | model/sample/adg.py | sdy99/PowerAI | ef40bacddbad72322e3e423417ae13d478d56a6d | [
"MIT"
] | 7 | 2020-04-11T03:28:50.000Z | 2021-03-29T14:53:36.000Z | model/sample/adg.py | sdy99/PowerAI | ef40bacddbad72322e3e423417ae13d478d56a6d | [
"MIT"
] | null | null | null | model/sample/adg.py | sdy99/PowerAI | ef40bacddbad72322e3e423417ae13d478d56a6d | [
"MIT"
] | 5 | 2020-04-11T03:28:52.000Z | 2021-11-27T05:23:12.000Z | # coding: gbk
"""
@author: sdy
@email: sdy@epri.sgcc.com.cn
Abstract distribution and generation class
"""
| 20.323529 | 49 | 0.662808 |
1a135f93a11c5cc15bf96c7f89491d4b0c295264 | 903 | py | Python | src/validatesigner.py | harryttd/remote-signer | a1af4e58b1d6628b09166368362d05d6e876e466 | [
"MIT"
] | null | null | null | src/validatesigner.py | harryttd/remote-signer | a1af4e58b1d6628b09166368362d05d6e876e466 | [
"MIT"
] | null | null | null | src/validatesigner.py | harryttd/remote-signer | a1af4e58b1d6628b09166368362d05d6e876e466 | [
"MIT"
] | null | null | null | #
# The ValidateSigner applies a ChainRatchet to the signature request
# and then passes it down to a signer. In order to do this, it must
# parse the request and to obtain the level and round to pass to the
# ratchet code.
import logging
from src.sigreq import SignatureReq
| 30.1 | 68 | 0.663344 |
1a146284d92debe9f0fcbd843d3eb7e8ae94afbb | 25,766 | py | Python | CODE/web_server/server/audio_processing/DataLoader.py | andrewbartels1/Marine-Mammal-Acoustics | 9f833d97ac26ecd51b4c4e276a815ab9d2b67bb6 | [
"MIT"
] | 1 | 2022-03-29T12:24:11.000Z | 2022-03-29T12:24:11.000Z | CODE/web_server/server/audio_processing/DataLoader.py | andrewbartels1/Marine-Mammal-Acoustics | 9f833d97ac26ecd51b4c4e276a815ab9d2b67bb6 | [
"MIT"
] | null | null | null | CODE/web_server/server/audio_processing/DataLoader.py | andrewbartels1/Marine-Mammal-Acoustics | 9f833d97ac26ecd51b4c4e276a815ab9d2b67bb6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 1 17:48:01 2021
@author: bartelsaa
"""
# boiler plate stuff
import re, os, io
from glob import glob
import pandas as pd
import sys
import pytz
from models import Sensor
from datetime import datetime, timezone
import pathlib
from data_schema import data_schema
from maad.rois import (select_rois, create_mask)
from maad.features import (centroid_features)
from maad.sound import (load, resample, spectrogram, remove_background, median_equalizer,
remove_background_morpho, remove_background_along_axis,
sharpness, spectral_snr, trim, write, smooth)
from maad.util import (power2dB, plot2d, dB2power,format_features, overlay_rois,
overlay_centroid, crop_image)
import base64
# setup django settings and configs
import django
import argparse
# Setup django env and add to models
sys.path.append("/app")
os.environ['DJANGO_SETTINGS_MODULE'] = 'pams.settings'
django.setup()
from django.utils.timezone import make_aware
from django import db
print(db.connections.databases)
print("DB NAME ")
from pams.models.audio_clip import AudioClip
if __name__ == "__main__":
main()
| 46.175627 | 168 | 0.557595 |
1a150533d8cad7a2aba7a53cd1cb833f76eb2499 | 3,078 | py | Python | tests/lib/test_otping.py | reputage/py-didery | 2d54a9e39fb01a81d4d6f7814ca7a611a7418a47 | [
"Apache-2.0"
] | null | null | null | tests/lib/test_otping.py | reputage/py-didery | 2d54a9e39fb01a81d4d6f7814ca7a611a7418a47 | [
"Apache-2.0"
] | 15 | 2018-05-24T23:30:21.000Z | 2018-05-25T17:39:51.000Z | tests/lib/test_otping.py | reputage/py-didery | 2d54a9e39fb01a81d4d6f7814ca7a611a7418a47 | [
"Apache-2.0"
] | null | null | null | import pytest
try:
import simplejson as json
except ImportError:
import json
from ioflo.aio.http import Valet
# import didery.routing
from diderypy.lib import generating as gen
from diderypy.lib import otping as otp
vk, sk, did = gen.keyGen()
otpData = {
"id": did,
"blob": "AeYbsHot0pmdWAcgTo5sD8iAuSQAfnH5U6wiIGpVNJQQoYKBYrPPxAoIc1i5SHCIDS8KFFgf8i0tDq8XGizaCgo9yjuKHHNJZFi0QD9K"
"6Vpt6fP0XgXlj8z_4D-7s3CcYmuoWAh6NVtYaf_GWw_2sCrHBAA2mAEsml3thLmu50Dw"
}
url1, url2 = "http://localhost:8080/blob", "http://localhost:8000/blob"
urls = ["http://localhost:8080", "http://localhost:8000"]
| 24.624 | 118 | 0.692982 |
1a16d3dc97a2e27b58e28cd919840f007fc7b43f | 3,722 | py | Python | src/DCGMM/measuring/Logging.py | anon-scientist/dcgmm | 1d2d96d1d9811c387ee11d462ff0a3819a66e137 | [
"Apache-2.0",
"MIT"
] | null | null | null | src/DCGMM/measuring/Logging.py | anon-scientist/dcgmm | 1d2d96d1d9811c387ee11d462ff0a3819a66e137 | [
"Apache-2.0",
"MIT"
] | null | null | null | src/DCGMM/measuring/Logging.py | anon-scientist/dcgmm | 1d2d96d1d9811c387ee11d462ff0a3819a66e137 | [
"Apache-2.0",
"MIT"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import json
from collections import defaultdict
from DCGMM.utils import log
from DCGMM.parsers import Kwarg_Parser
import numpy as np
| 39.595745 | 166 | 0.610962 |
1a16e50ba3f5373ee622624d05fe6164d2927423 | 1,863 | py | Python | demos/demo_pyqtgraph_threadsafe_static.py | Dennis-van-Gils/python-dvg-pyqtgraph-threadsafe | c766cef85c60195ecfdeacc6b62f16fd1b90dcf0 | [
"MIT"
] | null | null | null | demos/demo_pyqtgraph_threadsafe_static.py | Dennis-van-Gils/python-dvg-pyqtgraph-threadsafe | c766cef85c60195ecfdeacc6b62f16fd1b90dcf0 | [
"MIT"
] | 1 | 2020-10-24T05:18:48.000Z | 2020-10-24T11:37:09.000Z | demos/demo_pyqtgraph_threadsafe_static.py | Dennis-van-Gils/python-dvg-pyqtgraph-threadsafe | c766cef85c60195ecfdeacc6b62f16fd1b90dcf0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import numpy as np
from PyQt5 import QtWidgets as QtWid
import pyqtgraph as pg
from dvg_pyqtgraph_threadsafe import PlotCurve
USE_OPENGL = True
if USE_OPENGL:
print("OpenGL acceleration: Enabled")
pg.setConfigOptions(useOpenGL=True)
pg.setConfigOptions(antialias=True)
pg.setConfigOptions(enableExperimental=True)
# ------------------------------------------------------------------------------
# MainWindow
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
app = QtWid.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| 27.397059 | 80 | 0.500805 |
1a17b1be2074f64913108dde8915d54ffd44bd53 | 1,889 | py | Python | froide/team/services.py | manonthemat/froide | 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb | [
"MIT"
] | null | null | null | froide/team/services.py | manonthemat/froide | 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb | [
"MIT"
] | null | null | null | froide/team/services.py | manonthemat/froide | 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb | [
"MIT"
] | null | null | null | import hashlib
import hmac
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.crypto import constant_time_compare
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from froide.helper.email_sending import send_mail
from .models import Team
| 29.061538 | 65 | 0.599788 |
1a183c6499418e4965e990ea2623cf57e6ec50c1 | 340 | py | Python | turtle/snowflake.py | yunzhang599/Python3_Package_Examples | 3e479925f3f6818bf35e46123f720839acf075eb | [
"MIT"
] | 1 | 2019-11-16T05:06:01.000Z | 2019-11-16T05:06:01.000Z | turtle/snowflake.py | yunzhang599/Python3_Package_Examples | 3e479925f3f6818bf35e46123f720839acf075eb | [
"MIT"
] | null | null | null | turtle/snowflake.py | yunzhang599/Python3_Package_Examples | 3e479925f3f6818bf35e46123f720839acf075eb | [
"MIT"
] | null | null | null |
# Draw a six-armed snowflake with turtle graphics.
from turtle import forward, left, right, width, color, clearscreen
clearscreen()
color("lightblue")
width(3)
# Each pass draws one branched arm; the turns leave the turtle at a net
# 60-degree offset per pass, so six passes give six-fold symmetry.
for i in range(6):
    forward(50)
    left(60)
    forward(25)
    left(180)
    forward(25)
    left(60)
    forward(25)
    left(180)
    forward(25)
    right(120)
    forward(25)
    left(180)
    forward(75)
    left(120)
1a1935f678ac36846905eb87be171d453fa2af35 | 1,185 | py | Python | pyechonest/config.py | gleitz/automaticdj | 3880c175bc09c17ed9f71ba9902e348a00bb64ef | [
"MIT"
] | 14 | 2015-06-19T22:00:41.000Z | 2021-03-14T07:41:38.000Z | pyechonest/config.py | gleitz/automaticdj | 3880c175bc09c17ed9f71ba9902e348a00bb64ef | [
"MIT"
] | null | null | null | pyechonest/config.py | gleitz/automaticdj | 3880c175bc09c17ed9f71ba9902e348a00bb64ef | [
"MIT"
] | 2 | 2015-07-19T10:51:23.000Z | 2019-04-10T14:46:23.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-04-25.
Global configuration variables for accessing the Echo Nest web API.
"""
__version__ = "4.2.8"
import os

# Pull the API key from the environment; None when the variable is unset.
ECHO_NEST_API_KEY = os.environ.get('ECHO_NEST_API_KEY')
API_HOST = 'developer.echonest.com'
API_SELECTOR = 'api'
"Locations for the Analyze API calls."
API_VERSION = 'v4'
"Version of api to use... only 4 for now"
HTTP_USER_AGENT = 'PyEchonest'
"""
You may change this to be a user agent string of your
own choosing.
"""
MP3_BITRATE = 128
"""
Default bitrate for MP3 output. Conventionally an
integer divisible by 32kbits/sec.
"""
CACHE = True
"""
You may change this to False to prevent local caching
of API results.
"""
TRACE_API_CALLS = True
"""
If true, API calls will be traced to the console
"""
CALL_TIMEOUT = 10
"""
The API call timeout in seconds.
"""
CODEGEN_BINARY_OVERRIDE = None
"""
Location of your codegen binary. If not given, we will guess codegen.platform-architecture on your system path, e.g. codegen.Darwin, codegen.Linux-i386
"""
| 19.42623 | 151 | 0.729958 |
1a19acc97f7f0626e13396cbd8314c6fbb0fd66e | 18,836 | py | Python | norbert/__init__.py | AppleHolic/norbert | cceaa24bce625bcba3146198271a20e4c265f2c8 | [
"MIT"
] | 142 | 2019-03-19T18:36:28.000Z | 2022-03-22T21:28:25.000Z | norbert/__init__.py | AppleHolic/norbert | cceaa24bce625bcba3146198271a20e4c265f2c8 | [
"MIT"
] | 15 | 2019-03-07T15:54:31.000Z | 2022-03-04T15:13:21.000Z | norbert/__init__.py | AppleHolic/norbert | cceaa24bce625bcba3146198271a20e4c265f2c8 | [
"MIT"
] | 27 | 2018-10-28T14:13:34.000Z | 2021-09-13T12:12:41.000Z | import numpy as np
import itertools
from .contrib import compress_filter, smooth, residual_model
from .contrib import reduce_interferences
def expectation_maximization(y, x, iterations=2, verbose=0, eps=None):
    r"""Expectation maximization algorithm, for refining source separation
    estimates.
    This algorithm allows to make source separation results better by
    enforcing multichannel consistency for the estimates. This usually means
    a better perceptual quality in terms of spatial artifacts.
    The implementation follows the details presented in [1]_, taking
    inspiration from the original EM algorithm proposed in [2]_ and its
    weighted refinement proposed in [3]_, [4]_.
    It works by iteratively:
    * Re-estimate source parameters (power spectral densities and spatial
      covariance matrices) through :func:`get_local_gaussian_model`.
    * Separate again the mixture with the new parameters by first computing
      the new modelled mixture covariance matrices with :func:`get_mix_model`,
      prepare the Wiener filters through :func:`wiener_gain` and apply them
      with :func:`apply_filter``.
    References
    ----------
    .. [1] S. Uhlich and M. Porcu and F. Giron and M. Enenkl and T. Kemp and
        N. Takahashi and Y. Mitsufuji, "Improving music source separation based
        on deep neural networks through data augmentation and network
        blending." 2017 IEEE International Conference on Acoustics, Speech
        and Signal Processing (ICASSP). IEEE, 2017.
    .. [2] N.Q. Duong and E. Vincent and R.Gribonval. "Under-determined
        reverberant audio source separation using a full-rank spatial
        covariance model." IEEE Transactions on Audio, Speech, and Language
        Processing 18.7 (2010): 1830-1840.
    .. [3] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel audio source
        separation with deep neural networks." IEEE/ACM Transactions on Audio,
        Speech, and Language Processing 24.9 (2016): 1652-1664.
    .. [4] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel music
        separation with deep neural networks." 2016 24th European Signal
        Processing Conference (EUSIPCO). IEEE, 2016.
    .. [5] A. Liutkus and R. Badeau and G. Richard "Kernel additive models for
        source separation." IEEE Transactions on Signal Processing
        62.16 (2014): 4298-4310.
    Parameters
    ----------
    y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
        initial estimates for the sources
    x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
        complex STFT of the mixture signal
    iterations: int [scalar]
        number of iterations for the EM algorithm.
    verbose: boolean
        display some information if True
    eps: float or None [scalar]
        The epsilon value to use for regularization and filters.
        If None, the default will use the epsilon of np.real(x) dtype.
    Returns
    -------
    y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
        estimated sources after iterations
    v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
        estimated power spectral densities
    R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
        estimated spatial covariance matrices
    Note
    -----
    * You need an initial estimate for the sources to apply this
      algorithm. This is precisely what the :func:`wiener` function does.
    * This algorithm *is not* an implementation of the "exact" EM
      proposed in [1]_. In particular, it does compute the posterior
      covariance matrices the same (exact) way. Instead, it uses the
      simplified approximate scheme initially proposed in [5]_ and further
      refined in [3]_, [4]_, that boils down to just take the empirical
      covariance of the recent source estimates, followed by a weighted
      average for the update of the spatial covariance matrix. It has been
      empirically demonstrated that this simplified algorithm is more
      robust for music separation.
    Warning
    -------
    It is *very* important to make sure `x.dtype` is `np.complex`
    if you want double precision, because this function will **not**
    do such conversion for you from `np.complex64`, in case you want the
    smaller RAM usage on purpose.
    It is usually always better in terms of quality to have double
    precision, by e.g. calling :func:`expectation_maximization`
    with ``x.astype(np.complex)``.
    This is notably needed if you let common deep learning frameworks like
    PyTorch or TensorFlow do the STFT, because this usually happens in
    single precision.
    """
    # to avoid dividing by zero
    if eps is None:
        eps = np.finfo(np.real(x[0]).dtype).eps
    # dimensions
    (nb_frames, nb_bins, nb_channels) = x.shape
    nb_sources = y.shape[-1]
    # allocate the spatial covariance matrices and PSD
    R = np.zeros((nb_bins, nb_channels, nb_channels, nb_sources), x.dtype)
    v = np.zeros((nb_frames, nb_bins, nb_sources))
    if verbose:
        print('Number of iterations: ', iterations)
    # eps-scaled identity added to each mixture covariance before
    # inversion, so _invert operates on a diagonally-loaded matrix
    regularization = np.sqrt(eps) * (
        np.tile(np.eye(nb_channels, dtype=np.complex64),
                (1, nb_bins, 1, 1)))
    for it in range(iterations):
        # constructing the mixture covariance matrix. Doing it with a loop
        # to avoid storing anytime in RAM the whole 6D tensor
        if verbose:
            print('EM, iteration %d' % (it+1))
        for j in range(nb_sources):
            # update the spectrogram model for source j
            v[..., j], R[..., j] = get_local_gaussian_model(
                y[..., j],
                eps)
        # frame-by-frame: rebuild the mixture model, invert it, and
        # re-filter each source with its updated Wiener gain
        for t in range(nb_frames):
            Cxx = get_mix_model(v[None, t, ...], R)
            Cxx += regularization
            inv_Cxx = _invert(Cxx, eps)
            # separate the sources
            for j in range(nb_sources):
                W_j = wiener_gain(v[None, t, ..., j], R[..., j], inv_Cxx)
                y[t, ..., j] = apply_filter(x[None, t, ...], W_j)[0]
    return y, v, R
def wiener(v, x, iterations=1, use_softmask=True, eps=None):
    """Wiener-based separation for multichannel audio.

    The method uses the (possibly multichannel) spectrograms `v` of the
    sources to separate the (complex) Short Term Fourier Transform `x` of the
    mix. Separation is done in a sequential way by:

    * Getting an initial estimate. This can be done in two ways: either by
      directly using the spectrograms with the mixture phase, or
      by using :func:`softmask`.
    * Refining these initial estimates through a call to
      :func:`expectation_maximization`.

    This implementation also allows to specify the epsilon value used for
    regularization. It is based on [1]_, [2]_, [3]_, [4]_.

    References
    ----------
    .. [1] S. Uhlich and M. Porcu and F. Giron and M. Enenkl and T. Kemp and
        N. Takahashi and Y. Mitsufuji, "Improving music source separation based
        on deep neural networks through data augmentation and network
        blending." 2017 IEEE International Conference on Acoustics, Speech
        and Signal Processing (ICASSP). IEEE, 2017.
    .. [2] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel audio source
        separation with deep neural networks." IEEE/ACM Transactions on Audio,
        Speech, and Language Processing 24.9 (2016): 1652-1664.
    .. [3] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel music
        separation with deep neural networks." 2016 24th European Signal
        Processing Conference (EUSIPCO). IEEE, 2016.
    .. [4] A. Liutkus and R. Badeau and G. Richard "Kernel additive models for
        source separation." IEEE Transactions on Signal Processing
        62.16 (2014): 4298-4310.

    Parameters
    ----------
    v: np.ndarray [shape=(nb_frames, nb_bins, {1,nb_channels}, nb_sources)]
        spectrograms of the sources. This is a nonnegative tensor that is
        usually the output of the actual separation method of the user. The
        spectrograms may be mono, but they need to be 4-dimensional in all
        cases.
    x: np.ndarray [complex, shape=(nb_frames, nb_bins, nb_channels)]
        STFT of the mixture signal.
    iterations: int [scalar]
        number of iterations for the EM algorithm. If 0, the initial
        estimate is returned unrefined.
    use_softmask: boolean
        * if `False`, then the mixture phase will directly be used with the
          spectrogram as initial estimates.
        * if `True`, a softmasking strategy will be used as described in
          :func:`softmask`.
    eps: {None, float}
        Epsilon value to use for computing the separations. This is used
        whenever division with a model energy is performed, i.e. when
        softmasking and when iterating the EM.
        It can be understood as the energy of the additional white noise
        that is taken out when separating.
        If `None`, the default value is taken as `np.finfo(np.real(x[0])).eps`.

    Returns
    -------
    y: np.ndarray
            [complex, shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
        STFT of estimated sources

    Note
    ----
    * Be careful that you need *magnitude spectrogram estimates* for the
      case `softmask==False`.
    * We recommend to use `softmask=False` only if your spectrogram model is
      pretty good, e.g. when the output of a deep neural net. In the case
      it is not so great, opt for an initial softmasking strategy.
    * The epsilon value will have a huge impact on performance. If it's large,
      only the parts of the signal with a significant energy will be kept in
      the sources. This epsilon then directly controls the energy of the
      reconstruction error.

    Warning
    -------
    As in :func:`expectation_maximization`, we recommend converting the
    mixture `x` to double precision (`np.complex128`) *before* calling
    :func:`wiener`.
    """
    if use_softmask:
        y = softmask(v, x, eps=eps)
    else:
        # initial estimate: source magnitudes combined with the mixture phase
        y = v * np.exp(1j*np.angle(x[..., None]))
    if not iterations:
        return y

    # We need to refine the estimates. Scale down the estimates for
    # numerical stability of the EM iterations.
    # BUGFIX: use an out-of-place division here. The previous `x /= max_abs`
    # silently rescaled the caller's input array in place.
    max_abs = max(1, np.abs(x).max()/10.)
    x = x / max_abs
    y = expectation_maximization(y/max_abs, x, iterations, eps=eps)[0]
    return y*max_abs
def softmask(v, x, logit=None, eps=None):
    """Separates a mixture with a ratio mask, using the provided sources
    spectrograms estimates. Additionally allows compressing the mask with
    a logit function for soft binarization.
    The filter does *not* take multichannel correlations into account.

    The masking strategy can be traced back to the work of N. Wiener in the
    case of *power* spectrograms [1]_. In the case of *fractional* spectrograms
    like magnitude, this filter is often referred to a "ratio mask", and
    has been shown to be the optimal separation procedure under alpha-stable
    assumptions [2]_.

    References
    ----------
    .. [1] N. Wiener,"Extrapolation, Interpolation, and Smoothing of Stationary
        Time Series." 1949.
    .. [2] A. Liutkus and R. Badeau. "Generalized Wiener filtering with
        fractional power spectrograms." 2015 IEEE International Conference on
        Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2015.

    Parameters
    ----------
    v: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
        spectrograms of the sources
    x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
        mixture signal
    logit: {None, float between 0 and 1}
        enable a compression of the filter. If not None, it is the threshold
        value for the logit function: a softmask above this threshold is
        brought closer to 1, and a softmask below is brought closer to 0.
    eps: {None, float}
        regularization constant added to the total source energy to avoid
        division by zero. If `None`, the machine epsilon of `x`'s real
        dtype is used.

    Returns
    -------
    ndarray, shape=(nb_frames, nb_bins, nb_channels, nb_sources)
        estimated sources
    """
    # to avoid dividing by zero
    if eps is None:
        eps = np.finfo(np.real(x[0]).dtype).eps
    total_energy = np.sum(v, axis=-1, keepdims=True)
    # ratio mask per source; renamed from `filter` to avoid shadowing the
    # builtin of the same name
    mask = v / (eps + total_energy.astype(x.dtype))
    if logit is not None:
        mask = compress_filter(mask, eps, thresh=logit, multichannel=False)
    return mask * x[..., None]
def _invert(M, eps):
"""
Invert matrices, with special fast handling of the 1x1 and 2x2 cases.
Will generate errors if the matrices are singular: user must handle this
through his own regularization schemes.
Parameters
----------
M: np.ndarray [shape=(..., nb_channels, nb_channels)]
matrices to invert: must be square along the last two dimensions
eps: [scalar]
regularization parameter to use _only in the case of matrices
bigger than 2x2
Returns
-------
invM: np.ndarray, [shape=M.shape]
inverses of M
"""
nb_channels = M.shape[-1]
if nb_channels == 1:
# scalar case
invM = 1.0/(M+eps)
elif nb_channels == 2:
# two channels case: analytical expression
det = (
M[..., 0, 0]*M[..., 1, 1] -
M[..., 0, 1]*M[..., 1, 0])
invDet = 1.0/(det)
invM = np.empty_like(M)
invM[..., 0, 0] = invDet*M[..., 1, 1]
invM[..., 1, 0] = -invDet*M[..., 1, 0]
invM[..., 0, 1] = -invDet*M[..., 0, 1]
invM[..., 1, 1] = invDet*M[..., 0, 0]
else:
# general case : no use of analytical expression (slow!)
invM = np.linalg.pinv(M, eps)
return invM
def wiener_gain(v_j, R_j, inv_Cxx):
    """
    Compute the Wiener gain for separating one source, given all parameters.
    It is the matrix applied to the mix to get the posterior mean of the
    source as in [1]_.

    References
    ----------
    .. [1] N.Q. Duong and E. Vincent and R.Gribonval. "Under-determined
        reverberant audio source separation using a full-rank spatial
        covariance model." IEEE Transactions on Audio, Speech, and Language
        Processing 18.7 (2010): 1830-1840.

    Parameters
    ----------
    v_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
        power spectral density of the target source.
    R_j: np.ndarray [shape=(nb_bins, nb_channels, nb_channels)]
        spatial covariance matrix of the target source
    inv_Cxx: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
        inverse of the mixture covariance matrices

    Returns
    -------
    G: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
        wiener filtering matrices, to apply to the mix, e.g. through
        :func:`apply_filter` to get the target source estimate.
    """
    nb_channels = R_j.shape[1]
    # accumulate the channel-wise matrix product R_j @ inv_Cxx, then scale
    # by the source PSD: G = v_j * R_j * inv_Cxx
    gain = np.zeros_like(inv_Cxx)
    for i1 in range(nb_channels):
        for i2 in range(nb_channels):
            for i3 in range(nb_channels):
                gain[..., i1, i2] += (R_j[None, :, i1, i3]
                                      * inv_Cxx[..., i3, i2])
    gain *= v_j[..., None, None]
    return gain
def apply_filter(x, W):
    """
    Apply a filter on the mixture. Just corresponds to a matrix
    multiplication per time-frequency bin.

    Parameters
    ----------
    x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
        STFT of the signal on which to apply the filter.
    W: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
        filtering matrices, as returned, e.g. by :func:`wiener_gain`

    Returns
    -------
    y_hat: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
        filtered signal
    """
    n_chan = W.shape[-1]
    # y_hat[..., c] = sum_i W[..., c, i] * x[..., i], accumulated channel by
    # channel to avoid materializing the full product tensor
    filtered = 0 + 0j
    for chan in range(n_chan):
        filtered = filtered + W[..., chan] * x[..., chan, None]
    return filtered
def get_mix_model(v, R):
    """
    Compute the model covariance of a mixture based on local Gaussian models:
    simply adds up all the v[..., j] * R[..., j].

    Parameters
    ----------
    v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
        Power spectral densities for the sources
    R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
        Spatial covariance matrices of each sources

    Returns
    -------
    Cxx: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
        Covariance matrix for the mixture
    """
    n_chan = R.shape[1]
    n_frames, n_bins, n_sources = v.shape
    mix_cov = np.zeros((n_frames, n_bins, n_chan, n_chan), R.dtype)
    # sum of the per-source local Gaussian models
    for src in range(n_sources):
        mix_cov += v[..., src, None, None] * R[None, ..., src]
    return mix_cov
def _covariance(y_j):
"""
Compute the empirical covariance for a source.
Parameters
----------
y_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)].
complex stft of the source.
Returns
-------
Cj: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
just y_j * conj(y_j.T): empirical covariance for each TF bin.
"""
(nb_frames, nb_bins, nb_channels) = y_j.shape
Cj = np.zeros((nb_frames, nb_bins, nb_channels, nb_channels),
y_j.dtype)
for (i1, i2) in itertools.product(*(range(nb_channels),)*2):
Cj[..., i1, i2] += y_j[..., i1] * np.conj(y_j[..., i2])
return Cj
def get_local_gaussian_model(y_j, eps=1.):
    r"""
    Compute the local Gaussian model [1]_ for a source given the complex STFT.
    First get the power spectral densities, and then the spatial covariance
    matrix, as done in [1]_, [2]_.

    References
    ----------
    .. [1] N.Q. Duong and E. Vincent and R.Gribonval. "Under-determined
        reverberant audio source separation using a full-rank spatial
        covariance model." IEEE Transactions on Audio, Speech, and Language
        Processing 18.7 (2010): 1830-1840.
    .. [2] A. Liutkus and R. Badeau and G. Richard. "Low bitrate informed
        source separation of realistic mixtures." 2013 IEEE International
        Conference on Acoustics, Speech and Signal Processing. IEEE, 2013.

    Parameters
    ----------
    y_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
        complex stft of the source.
    eps: float [scalar]
        regularization term

    Returns
    -------
    v_j: np.ndarray [shape=(nb_frames, nb_bins)]
        power spectral density of the source
    R_J: np.ndarray [shape=(nb_bins, nb_channels, nb_channels)]
        Spatial covariance matrix of the source
    """
    # power spectral density: squared magnitude averaged over channels
    v_j = np.mean(np.abs(y_j) ** 2, axis=2)

    # spatial covariance matrix: weighted average of the per-frame empirical
    # covariances, with eps acting as the initial (regularizing) weight
    n_frames = y_j.shape[0]
    cov_sum = 0
    weight_sum = eps
    for frame in range(n_frames):
        cov_sum += _covariance(y_j[None, frame, ...])
        weight_sum += v_j[None, frame, ...]
    cov_sum /= weight_sum[..., None, None]
    return v_j, cov_sum
| 36.223077 | 79 | 0.646369 |
1a1ab30134ffb46a2768f3d6e4b82bd7fcbd06d6 | 9,170 | py | Python | postr/twitter_postr.py | dbgrigsby/Postr | c374648134123f857babb65aff161a4c3c470502 | [
"MIT"
] | 3 | 2018-10-09T17:02:05.000Z | 2022-03-21T08:58:49.000Z | postr/twitter_postr.py | dbgrigsby/Postr | c374648134123f857babb65aff161a4c3c470502 | [
"MIT"
] | 11 | 2018-09-26T05:33:30.000Z | 2019-04-06T04:06:51.000Z | postr/twitter_postr.py | dbgrigsby/Postr | c374648134123f857babb65aff161a4c3c470502 | [
"MIT"
] | 3 | 2018-12-20T18:35:25.000Z | 2022-03-21T08:58:54.000Z | import csv
import datetime
import json
import re
import os
import time
from typing import List
import matplotlib
import matplotlib.pyplot as plt
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.api import API
from tweepy.streaming import StreamListener
from tweepy.cursor import Cursor
from textblob import TextBlob
from .api_interface import ApiInterface
from .twitter.twitter_key import TwitterKey
from .twitter.twitter_info import TwitterInfo
from .twitter.twitter_bio import TwitterBio
matplotlib.use('TkAgg')
# Precision to truncate on a datetime object, down to the minute
DATETIME_MILLISECOND_PRECISION = 23
# Precision to truncate scores when plotting twitter stream scores
SCORE_PRECISION = 5
def examples() -> None:
    """Walk through the major use cases of the Twitter wrapper."""
    client = Twitter()

    # Post plain text (uncomment the photo line with a valid local path to
    # try picture posting).
    client.post_text('sample API text')
    # client.post_photo('enter path here', 'sample API text')

    # Read and update the authenticated user's profile.
    print(client.bio.username())
    print(client.bio.bio())
    client.update_bio('sample API bio')
    client.bio.update_name('Postr Project')

    # Inspect the most recent tweet. This is a Status object; its full
    # method list is at
    # https://gist.github.com/dev-techmoe/ef676cdd03ac47ac503e856282077bf2
    latest = client.info.last_tweet()
    print(latest.text)
    print(latest.retweet_count)
    print(latest.favorite_count)

    # Stream some hashtags and graph them in real time.
    client.stream_and_graph(['Politics', 'News', 'School'])


if __name__ == '__main__':
    examples()
| 33.589744 | 108 | 0.619738 |
1a1ae330b5d97b072b7de9905431059440f2b93a | 2,149 | py | Python | Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py | Acidburn0zzz/Sub-Zero.bundle | eb3a0d52fde281773ba5109fad9801ede9c938ba | [
"MIT"
] | 1 | 2018-02-01T18:00:59.000Z | 2018-02-01T18:00:59.000Z | Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py | Acidburn0zzz/Sub-Zero.bundle | eb3a0d52fde281773ba5109fad9801ede9c938ba | [
"MIT"
] | null | null | null | Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py | Acidburn0zzz/Sub-Zero.bundle | eb3a0d52fde281773ba5109fad9801ede9c938ba | [
"MIT"
] | null | null | null | # coding=utf-8
import logging
from subliminal.providers.legendastv import LegendasTVSubtitle as _LegendasTVSubtitle, \
LegendasTVProvider as _LegendasTVProvider, Episode, Movie, guess_matches, guessit, sanitize
logger = logging.getLogger(__name__)
| 33.578125 | 109 | 0.635179 |
1a1b2daed4ebe5ea602e637a406c3b3e1a5fa4ac | 339 | py | Python | takeyourmeds/groups/groups_billing/plans.py | takeyourmeds/takeyourmeds-web | edf24188f26948902cfb69793b4d5aa3cf8b6dea | [
"MIT"
] | 11 | 2015-06-01T16:31:42.000Z | 2022-03-01T01:20:58.000Z | takeyourmeds/groups/groups_billing/plans.py | takeyourmeds/takeyourmeds-web | edf24188f26948902cfb69793b4d5aa3cf8b6dea | [
"MIT"
] | 111 | 2015-07-20T13:23:16.000Z | 2017-09-08T08:17:10.000Z | takeyourmeds/groups/groups_billing/plans.py | takeyourmeds/takeyourmeds-web | edf24188f26948902cfb69793b4d5aa3cf8b6dea | [
"MIT"
] | 6 | 2015-07-15T08:08:12.000Z | 2018-06-23T00:13:13.000Z | """
This file must be kept up-to-date with Stripe, especially the slugs:
https://manage.stripe.com/plans
"""
# Registry of billing plans, populated at import time.
# NOTE(review): `Plan` is defined elsewhere in this module (not visible
# here); presumably constructing a Plan registers it into PLANS — confirm
# against the full source.
PLANS = {}
FREE = Plan(1, 'free', "Free plan")
| 18.833333 | 68 | 0.613569 |
1a1bc945602cc44132190cba60d0afbf196c8e4e | 34,864 | py | Python | footprint/socialnetwork/views.py | hairleng/Footprint | 3c5ab2743584bcdf19161972f4a7e7581ba9d1ee | [
"MIT"
] | null | null | null | footprint/socialnetwork/views.py | hairleng/Footprint | 3c5ab2743584bcdf19161972f4a7e7581ba9d1ee | [
"MIT"
] | null | null | null | footprint/socialnetwork/views.py | hairleng/Footprint | 3c5ab2743584bcdf19161972f4a7e7581ba9d1ee | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, get_object_or_404
import json
from django.http import HttpResponse, Http404
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.utils import timezone
# Create your views here.
from socialnetwork.forms import *
from socialnetwork.models import *
from socialnetwork.forms import ProfileForm, UpdateProfileForm
from socialnetwork.models import Profile
from allauth.account.views import SignupView, LoginView
from .models import User
import requests
from notifications.signals import notify
from notifications.models import Notification
import datetime
def serialize_log(logs, request):
    """Serialize an iterable of Log objects into a JSON string for the
    requesting user, including per-log follow/bookmark/like state and all
    comments attached to each log."""
    viewer_profile = get_object_or_404(Profile, user=request.user)
    viewer_following = viewer_profile.following.all()
    viewer_bookmarks = viewer_profile.bookmarked_logs.all()

    serialized = []
    for log in logs:
        creator = log.user
        creator_profile, _ = Profile.objects.get_or_create(user=log.user)

        # relationship of the viewer to this log and its creator
        is_self = creator_profile == viewer_profile
        follow_status = creator_profile in viewer_following
        bookmarked = log in viewer_bookmarks
        liked = viewer_profile in log.liked_users.all()
        num_likes = len(log.liked_users.all())

        # collect the comments belonging to this log
        comments = []
        for comment_item in Comment.objects.all():
            if comment_item.of_log.id != log.id:
                continue
            commentor_profile = get_object_or_404(
                Profile, user=comment_item.created_by)
            comments.append({
                'comment_id': comment_item.id,
                'text': comment_item.comment_content,
                'date': comment_item.created_at.isoformat(),
                'comment_profile_pic': str(commentor_profile.picture),
                'username': comment_item.created_by.username,
                'user_fn': comment_item.created_by.first_name,
                'user_ln': comment_item.created_by.last_name,
            })

        serialized.append({
            'user_id': creator.id,
            'already_followed': follow_status,
            'log_id': log.id,
            'username': creator.username,
            'profile_pic': str(creator_profile.picture),
            'log_title': log.log_title,
            'log_text': log.log_text,
            'log_location': log.location.location_name,
            'date': log.creation_time.isoformat(),
            'log_pic': str(log.picture),
            'bookmark_status': bookmarked,
            'num_likes': num_likes,
            'already_liked': liked,
            'comments': comments,
            'is_self': is_self,
            'visibility': log.visibility
        })
    return json.dumps(serialized)
def getLocationNameFromLatLng(latLng):
    """Reverse-geocode a ``{'lat': ..., 'lng': ...}`` dict into a
    human-readable address using the Google Maps Geocoding API.

    Returns the formatted address of the first (i.e. nearest) match.
    Raises KeyError/IndexError if the API returns no results — the caller
    currently gets no friendlier error; confirm whether that is acceptable.

    Example response documentation:
    https://maps.googleapis.com/maps/api/geocode/json?latlng=40.714224,-73.961452&key=...
    """
    URL = "https://maps.googleapis.com/maps/api/geocode/json"
    lat = latLng['lat']
    lng = latLng['lng']
    latLng_ = "{},{}".format(lat, lng)
    # parameters sent to the API
    # SECURITY NOTE(review): the API key is hard-coded in source; it should
    # be moved to settings or an environment variable.
    PARAMS = {'latlng': latLng_,
              'key': 'AIzaSyBAzuMuqCtP0j8Yd7hJ6CG5jdei-Y4Pdlw'
              }
    # send the GET request and parse the JSON payload
    r = requests.get(url=URL, params=PARAMS)
    data = r.json()
    # Return the formatted address of the nearest matching location.
    # (The previously-extracted `latitude`/`longitude` locals were unused
    # and have been removed.)
    return data['results'][0]['formatted_address']
# Add this log to User's bookmarked collection
# Remove this log from User's bookmarked collection
# Like this log, add liked users to this log
# Unlike this log, remove request user from liked_users of this Log
# Like this log, add liked users to this log
| 40.776608 | 184 | 0.645336 |
1a1cb891a28d6f1130bc984167bf2fda46be3fe3 | 24,095 | py | Python | Application/datasources/datapod_backup/utils.py | GraphicalDot/datapod-backend-layer | ab38a5b0e969cd0d762e9d7720ab89174c333c37 | [
"Apache-2.0"
] | null | null | null | Application/datasources/datapod_backup/utils.py | GraphicalDot/datapod-backend-layer | ab38a5b0e969cd0d762e9d7720ab89174c333c37 | [
"Apache-2.0"
] | null | null | null | Application/datasources/datapod_backup/utils.py | GraphicalDot/datapod-backend-layer | ab38a5b0e969cd0d762e9d7720ab89174c333c37 | [
"Apache-2.0"
] | null | null | null | from abc import ABC,abstractmethod
import os
import binascii
import subprocess
import time
import datetime
import platform
import tempfile
import requests
import json
import aiohttp
from sanic import response
from asyncinit import asyncinit
from errors_module.errors import MnemonicRequiredError
from errors_module.errors import APIBadRequest, PathDoesntExists
from loguru import logger
from .variables import DATASOURCE_NAME
from .db_calls import get_credentials, update_percentage
import subprocess
import shutil
import humanize
import aiomisc
##imported from another major module
from ..datapod_users.variables import DATASOURCE_NAME as USER_DATASOURCE_NAME
import boto3
from Crypto.Cipher import AES # pycryptodome
from Crypto import Random
import struct
class BotoBackup(S3Backup):
    def decrypt_file(self, key, in_filename, iv, original_size, out_filename, chunksize=16*1024):
        """Decrypt `in_filename` (AES-CBC with the given key/iv) into
        `out_filename`, streaming `chunksize` bytes at a time, then trim the
        output to `original_size` bytes to drop the encryption padding."""
        with open(in_filename, 'rb') as src:
            cipher = AES.new(key, AES.MODE_CBC, iv)
            with open(out_filename, 'wb') as dst:
                # read-until-EOF loop via the iter(callable, sentinel) idiom
                for block in iter(lambda: src.read(chunksize), b''):
                    dst.write(cipher.decrypt(block))
                # discard padding appended during encryption
                dst.truncate(original_size)
class AWSCliBackup(S3Backup):
# if __name__ == "__main__":
# s3 = boto3.client('s3')
# location_info = s3.get_bucket_location(Bucket="datapod-backups-beta")
# bucket_region = location_info['LocationConstraint']
# # kms = boto3.client('kms')
# # encrypt_ctx = {"kms_cmk_id":kms_arn}
# # key_data = kms.generate_data_key(KeyId=kms_arn, EncryptionContext=encrypt_ctx, KeySpec="AES_256")
# new_iv = Random.new().read(AES.block_size)
# size_infile = os.stat(infile).st_size # unencrypted length
# outfile = infile + '.enc'
# encrypt_file(key_data['Plaintext'], infile, new_iv, size_infile, outfile, chunksize=16*1024)
# put_file(key_data['CiphertextBlob'], new_iv, encrypt_ctx, outfile, size_infile, bucket_name, key_name) | 40.838983 | 210 | 0.638431 |
1a1fd264d38d2e67d8ce555d1064ae3d9aad16df | 141 | py | Python | abc/abc145/abc145b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc145/abc145b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | abc/abc145/abc145b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | N = int(input())
S = input()
if N % 2 == 1:
print('No')
exit()
if S[:N // 2] == S[N // 2:]:
print('Yes')
else:
print('No')
| 11.75 | 28 | 0.411348 |
1a20018810aca71e231531dd6b4c27f07d98ddd0 | 289 | py | Python | gadget/reboot.py | vaginessa/RaspberryPiZero_HID_MultiTool | c6227c7263cb1321a5655f938462392eb014a352 | [
"Apache-2.0"
] | 54 | 2017-01-06T21:43:40.000Z | 2022-02-14T02:57:57.000Z | gadget/reboot.py | vaginessa/RaspberryPiZero_HID_MultiTool | c6227c7263cb1321a5655f938462392eb014a352 | [
"Apache-2.0"
] | null | null | null | gadget/reboot.py | vaginessa/RaspberryPiZero_HID_MultiTool | c6227c7263cb1321a5655f938462392eb014a352 | [
"Apache-2.0"
] | 13 | 2017-01-31T23:35:21.000Z | 2021-12-22T12:48:59.000Z | #!/usr/bin/python
import RPi.GPIO as GPIO
import os
# BCM pin wired to the shutdown button (active-low, internal pull-up enabled)
gpio_pin_number = 21
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpio_pin_number, GPIO.IN, pull_up_down=GPIO.PUD_UP)
try:
    # Block until the button pulls the pin low, then halt the system.
    GPIO.wait_for_edge(gpio_pin_number, GPIO.FALLING)
    os.system("sudo shutdown -h now")
except KeyboardInterrupt:
    # Ctrl-C exits quietly; unlike the previous bare `except: pass`, any
    # other error now propagates instead of being silently swallowed.
    pass
finally:
    # Release the GPIO pin in every exit path.
    GPIO.cleanup()
| 18.0625 | 62 | 0.750865 |
1a209ab2fb009b89d259657281d619b4962c46e2 | 64 | py | Python | code/sample_1-2-16.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | 1 | 2022-03-29T13:50:12.000Z | 2022-03-29T13:50:12.000Z | code/sample_1-2-16.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | null | null | null | code/sample_1-2-16.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | null | null | null | rows = int(input())
x = [input() for i in range(rows)]
print(x)
| 16 | 34 | 0.609375 |
1a20d5d763008a4d9582e33481f4795a17bbec47 | 1,056 | py | Python | barcap/main.py | Barmaley13/CaptureBarcode | e19556dd515a1b86cf32b5bdca4dca398d1f0ef1 | [
"MIT"
] | 1 | 2021-04-17T18:04:19.000Z | 2021-04-17T18:04:19.000Z | barcap/main.py | Barmaley13/CaptureBarcode | e19556dd515a1b86cf32b5bdca4dca398d1f0ef1 | [
"MIT"
] | 1 | 2021-07-08T09:48:07.000Z | 2021-07-08T17:36:22.000Z | barcap/main.py | Barmaley13/CaptureBarcode | e19556dd515a1b86cf32b5bdca4dca398d1f0ef1 | [
"MIT"
] | 1 | 2019-09-27T12:37:25.000Z | 2019-09-27T12:37:25.000Z | """
Run capture as a separate process
"""
import time
from barcap.barcode import BarcodeCapture
| 22.956522 | 95 | 0.602273 |
1a215082fa2f89d1a45dff26f70391daf14feaea | 6,162 | py | Python | gabriel_lego/lego_engine/config.py | molguin92/gabriel-lego-py3 | 2f8828326ca025997687a19d1af80bc1590a9290 | [
"Apache-2.0"
] | null | null | null | gabriel_lego/lego_engine/config.py | molguin92/gabriel-lego-py3 | 2f8828326ca025997687a19d1af80bc1590a9290 | [
"Apache-2.0"
] | 1 | 2019-09-10T23:41:41.000Z | 2019-09-11T20:21:11.000Z | gabriel_lego/lego_engine/config.py | molguin92/gabriel-lego-py3 | 2f8828326ca025997687a19d1af80bc1590a9290 | [
"Apache-2.0"
] | 1 | 2022-02-22T15:29:27.000Z | 2022-02-22T15:29:27.000Z | #!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
# - Task Assistance
#
# Author: Zhuo Chen <zhuoc@cs.cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# If True, configurations are set to process video stream in real-time (use
# with lego_server.py)
# If False, configurations are set to process one independent image (use with
# img.py)
IS_STREAMING = True
RECOGNIZE_ONLY = False
# Port for communication between proxy and task server
TASK_SERVER_PORT = 6090
BEST_ENGINE = "LEGO_FAST"
CHECK_ALGORITHM = "table"
CHECK_LAST_TH = 1
# Port for communication between master and worker proxies
MASTER_SERVER_PORT = 6091
# Whether or not to save the displayed image in a temporary directory
SAVE_IMAGE = False
# Convert all incoming frames to a fixed size to ease processing
IMAGE_HEIGHT = 360
IMAGE_WIDTH = 640
BLUR_KERNEL_SIZE = int(IMAGE_WIDTH // 16 + 1)
# Display: debug-window names that may be shown, grouped per run mode
DISPLAY_MAX_PIXEL = 640
DISPLAY_SCALE = 5
DISPLAY_LIST_ALL = ['test', 'input', 'DoB', 'mask_black', 'mask_black_dots',
                    'board', 'board_border_line', 'board_edge', 'board_grey',
                    'board_mask_black', 'board_mask_black_dots', 'board_DoB',
                    'edge_inv',
                    'edge',
                    'board_n0', 'board_n1', 'board_n2', 'board_n3', 'board_n4',
                    'board_n5', 'board_n6',
                    'lego_u_edge_S', 'lego_u_edge_norm_L', 'lego_u_dots_L',
                    'lego_full', 'lego', 'lego_only_color',
                    'lego_correct', 'lego_rect', 'lego_cropped', 'lego_color',
                    'plot_line', 'lego_syn',
                    'guidance']
DISPLAY_LIST_TEST = ['input', 'board', 'lego_u_edge_S', 'lego_u_edge_norm_L',
                     'lego_u_dots_L', 'lego_syn']
DISPLAY_LIST_STREAM = ['input', 'lego_syn']
# DISPLAY_LIST_TASK = ['input', 'board', 'lego_syn', 'guidance']
DISPLAY_LIST_TASK = []
# Select the active display list from the mode flags above
if not IS_STREAMING:
    DISPLAY_LIST = DISPLAY_LIST_TEST
else:
    if RECOGNIZE_ONLY:
        DISPLAY_LIST = DISPLAY_LIST_STREAM
    else:
        DISPLAY_LIST = DISPLAY_LIST_TASK
DISPLAY_WAIT_TIME = 1 if IS_STREAMING else 500
## Black dots
BD_COUNT_N_ROW = 9
BD_COUNT_N_COL = 16
BD_BLOCK_HEIGHT = IMAGE_HEIGHT // BD_COUNT_N_ROW
BD_BLOCK_WIDTH = IMAGE_WIDTH // BD_COUNT_N_COL
BD_BLOCK_SPAN = max(BD_BLOCK_HEIGHT, BD_BLOCK_WIDTH)
BD_BLOCK_AREA = BD_BLOCK_HEIGHT * BD_BLOCK_WIDTH
BD_COUNT_THRESH = 25
# NOTE(review): this adds IMAGE_HEIGHT twice; the analogous
# BOARD_BD_MAX_PERI below uses HEIGHT + WIDTH — confirm whether
# IMAGE_HEIGHT + IMAGE_WIDTH was intended here.
BD_MAX_PERI = (IMAGE_HEIGHT + IMAGE_HEIGHT) // 40
BD_MAX_SPAN = int(BD_MAX_PERI / 4.0 + 0.5)
# Two ways to check black dot size:
# 'simple': check contour length and area
# 'complete": check x & y max span also
CHECK_BD_SIZE = 'simple'
## Color detection
# H: hue, S: saturation, V: value (which means brightness)
# L: lower_bound, U: upper_bound, TH: threshold
# TODO:
BLUE = {'H': 110, 'S_L': 100, 'B_TH': 110}  # H: 108
YELLOW = {'H': 30, 'S_L': 100, 'B_TH': 170}  # H: 25 B_TH: 180
GREEN = {'H': 70, 'S_L': 100, 'B_TH': 60}  # H: 80 B_TH: 75
RED = {'H': 0, 'S_L': 100, 'B_TH': 130}
BLACK = {'S_U': 70, 'V_U': 60}
# WHITE = {'S_U' : 60, 'B_L' : 101, 'B_TH' : 160} # this includes side white,
# too
WHITE = {'S_U': 60, 'V_L': 150}
BD_DOB_MIN_V = 30
# If using labels to represent color, this is the right order: 0 means
# nothing (background) and 7 means unsure
COLOR_ORDER = ['nothing', 'white', 'green', 'yellow', 'red', 'blue', 'black',
               'unsure']
## Board
BOARD_MIN_AREA = BD_BLOCK_AREA * 7
BOARD_MIN_LINE_LENGTH = BD_BLOCK_SPAN
BOARD_MIN_VOTE = BD_BLOCK_SPAN // 2
# Once board is detected, convert it to a perspective-corrected standard size
# for further processing
BOARD_RECONSTRUCT_HEIGHT = 155 * 1
BOARD_RECONSTRUCT_WIDTH = 270 * 1
BOARD_BD_MAX_PERI = (BOARD_RECONSTRUCT_HEIGHT + BOARD_RECONSTRUCT_WIDTH) // 30
BOARD_BD_MAX_SPAN = int(BOARD_BD_MAX_PERI / 4.0 + 1.5)
BOARD_RECONSTRUCT_AREA = BOARD_RECONSTRUCT_HEIGHT * BOARD_RECONSTRUCT_WIDTH
BOARD_RECONSTRUCT_PERI = (
        BOARD_RECONSTRUCT_HEIGHT +
        BOARD_RECONSTRUCT_WIDTH) * 2
BOARD_RECONSTRUCT_CENTER = (
    BOARD_RECONSTRUCT_HEIGHT // 2, BOARD_RECONSTRUCT_WIDTH // 2)
## Bricks
BRICK_HEIGHT = BOARD_RECONSTRUCT_HEIGHT / 12.25  # magic number
BRICK_WIDTH = BOARD_RECONSTRUCT_WIDTH / 26.2  # magic number
BRICK_HEIGHT_THICKNESS_RATIO = 15 / 12.25  # magic number
BLOCK_DETECTION_OFFSET = 2
BRICK_MIN_BM_RATIO = .85
## Optimizations
# If True, performs a second step fine-grained board detection algorithm.
# Depending on the other algorithms, this is usually not needed.
OPT_FINE_BOARD = False
# Treat background pixels differently
OPT_NOTHING = False
BM_WINDOW_MIN_TIME = 0.1
BM_WINDOW_MIN_COUNT = 1
# The percentage of right pixels in each block must be higher than this
# threshold
WORST_RATIO_BLOCK_THRESH = 0.6
# If True, do perspective correction first, then color normalization
# If False, do perspective correction after color has been normalized
# Not used anymore...
PERS_NORM = True
## Consts
ACTION_ADD = 0
ACTION_REMOVE = 1
ACTION_TARGET = 2
ACTION_MOVE = 3
DIRECTION_NONE = 0
DIRECTION_UP = 1
DIRECTION_DOWN = 2
GOOD_WORDS = ["Excellent. ", "Great. ", "Good job. ", "Wonderful. "]
| 33.857143 | 79 | 0.697988 |
1a22da72753f4f1b92c14d244c71af9de316f1cd | 5,670 | py | Python | civis_jupyter_notebooks/platform_persistence.py | menglewis/civis-jupyter-notebook | 71f9b3ae50d62280750a593e0125372f41ba90ab | [
"BSD-3-Clause"
] | null | null | null | civis_jupyter_notebooks/platform_persistence.py | menglewis/civis-jupyter-notebook | 71f9b3ae50d62280750a593e0125372f41ba90ab | [
"BSD-3-Clause"
] | null | null | null | civis_jupyter_notebooks/platform_persistence.py | menglewis/civis-jupyter-notebook | 71f9b3ae50d62280750a593e0125372f41ba90ab | [
"BSD-3-Clause"
] | null | null | null | """
This file contains utilities that bind the Jupyter notebook to our platform.
It performs two functions:
1. On startup, pull the contents of the notebook from platform to the local disk
2. As a Jupyter post-save hook, push the contents of the notebook and a HTML preview of the same back to platform.
3. Custom Error class for when a Notebook does not correctly initialize
"""
import civis
import nbformat
import os
import sys
import subprocess
import requests
from io import open
from subprocess import check_call
from subprocess import CalledProcessError
from civis_jupyter_notebooks import log_utils
def initialize_notebook_from_platform(notebook_path):
    """Pull the notebook for PLATFORM_OBJECT_ID from Civis Platform/S3.

    Writes the notebook to *notebook_path* unless a local (git) copy exists
    and the S3 copy is still the brand-new empty template, then loads any
    requirements file attached to the notebook model.

    Raises NotebookManagementError if the S3 download fails.
    """
    logger.info('Retrieving notebook information from Platform')
    client = get_client()
    notebook_model = client.notebooks.get(os.environ['PLATFORM_OBJECT_ID'])
    logger.info('Pulling contents of notebook file from S3')
    r = requests.get(notebook_model.notebook_url)
    if r.status_code != 200:
        raise NotebookManagementError('Failed to pull down notebook file from S3')
    notebook = nbformat.reads(r.content, nbformat.NO_CONVERT)
    # Platform marks a freshly created (empty template) notebook with
    # metadata.civis.new_notebook; strip the marker before saving.
    s3_notebook_new = notebook.get('metadata', {}).get('civis', {}).get('new_notebook', False)
    if s3_notebook_new:
        notebook.metadata.pop('civis')
    # Only overwrite the git version of the notebook with the S3 version if
    # the S3 version is not the brand new empty template
    git_notebook_exists = os.path.isfile(notebook_path)
    if not git_notebook_exists or not s3_notebook_new:
        logger.info('Restoring notebook file from S3')
        directory = os.path.dirname(notebook_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(notebook_path, mode='w', encoding='utf-8') as nb_file:
            nbformat.write(notebook, nb_file)
    logger.info('Notebook file ready')
    # __pull_and_load_requirements is defined elsewhere in this module
    # (not visible here); presumably installs the requirements file.
    if hasattr(notebook_model, 'requirements_url') and notebook_model.requirements_url:
        __pull_and_load_requirements(notebook_model.requirements_url, notebook_path)
def post_save(model, os_path, contents_manager):
    """Jupyter post-save hook: push the saved notebook and its preview to S3.

    Only fires for notebook models; saves of other file types are ignored.
    """
    is_notebook = model['type'] == 'notebook'
    if not is_notebook:
        return
    logger.info('Getting URLs to update notebook')
    notebook_url, preview_url = get_update_urls()
    save_notebook(notebook_url, os_path)
    generate_and_save_preview(preview_url, os_path)
    logger.info('Notebook save complete')
def get_update_urls():
    """Return (update_url, update_preview_url) for the current notebook.

    The signed URLs expire after a few minutes, so they must be fetched
    fresh for every save -- do not cache the result.
    """
    client = get_client()
    urls = client.notebooks.list_update_links(os.environ['PLATFORM_OBJECT_ID'])
    return (urls.update_url, urls.update_preview_url)
def save_notebook(url, os_path):
    """Upload the raw notebook file at *os_path* to the signed S3 *url*."""
    with open(os_path, 'rb') as nb_file:
        contents = nb_file.read()
        logger.info('Pushing latest notebook file to S3')
        requests.put(url, data=contents)
    logger.info('Notebook file updated')
def generate_and_save_preview(url, os_path):
    """Render the notebook to HTML via nbconvert and upload it to S3.

    The .html file is written next to the notebook (same basename) and the
    resulting bytes are PUT to the signed *url*.

    Raises NotebookManagementError if nbconvert exits non-zero.
    """
    d, fname = os.path.split(os_path)
    logger.info('Rendering notebook to HTML')
    try:
        # Run in the notebook's own directory so relative resources resolve.
        check_call(['jupyter', 'nbconvert', '--to', 'html', fname], cwd=d)
    except CalledProcessError as e:
        raise NotebookManagementError('nbconvert failed to convert notebook file to html: {}'.format(repr(e)))
    preview_path = os.path.splitext(os_path)[0] + '.html'
    with open(preview_path, 'rb') as preview_file:
        logger.info('Pushing latest notebook preview to S3')
        requests.put(url, data=preview_file.read())
    logger.info('Notebook preview updated')
def get_client():
    """Return a Civis API client that knows about the notebook endpoints."""
    # TODO: Simplify this once the notebooks endpoints are in the client
    return civis.APIClient(resources='all')
logger = log_utils.setup_stream_logging()
| 36.580645 | 116 | 0.711817 |
1a241fc805a6084215d593c48583935b44833885 | 2,118 | py | Python | dbt_cloud/command/job/run.py | jeremyyeo/dbt-cloud-cli | f1253bcc343c08232e18ea01ef4a74c2e62a9999 | [
"Apache-2.0"
] | 33 | 2021-12-09T11:17:58.000Z | 2022-03-23T21:51:43.000Z | dbt_cloud/command/job/run.py | jeremyyeo/dbt-cloud-cli | f1253bcc343c08232e18ea01ef4a74c2e62a9999 | [
"Apache-2.0"
] | 20 | 2021-11-26T15:46:43.000Z | 2022-03-25T15:49:20.000Z | dbt_cloud/command/job/run.py | jeremyyeo/dbt-cloud-cli | f1253bcc343c08232e18ea01ef4a74c2e62a9999 | [
"Apache-2.0"
] | 4 | 2022-01-17T19:18:34.000Z | 2022-03-12T09:55:31.000Z | import os
import requests
from typing import Optional, List
from pydantic import Field, validator
from dbt_cloud.command.command import DbtCloudAccountCommand
from dbt_cloud.field import JOB_ID_FIELD
| 35.3 | 91 | 0.686497 |
1a257d44848898125832b45b07d58d12e6e91f60 | 1,266 | py | Python | modelzoo/migrations/0024_auto_20201014_1425.py | SuperElastix/ElastixModelZooWebsite | 00d7b4aec8eb04c285d3771d53310079a3443fab | [
"Apache-2.0"
] | 1 | 2021-11-15T07:30:24.000Z | 2021-11-15T07:30:24.000Z | modelzoo/migrations/0024_auto_20201014_1425.py | SuperElastix/ElastixModelZooWebsite | 00d7b4aec8eb04c285d3771d53310079a3443fab | [
"Apache-2.0"
] | null | null | null | modelzoo/migrations/0024_auto_20201014_1425.py | SuperElastix/ElastixModelZooWebsite | 00d7b4aec8eb04c285d3771d53310079a3443fab | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.3 on 2020-10-14 12:25
from django.db import migrations, models
| 26.93617 | 180 | 0.518167 |
1a270b137592d14be9f26784b9d3fa7001be71f2 | 5,982 | py | Python | src/misc/MBExp.py | akshatha-k/Calibrated_MOPO | 3b2e675003e9f6d31a0763be2ec784ceeae5099e | [
"MIT"
] | null | null | null | src/misc/MBExp.py | akshatha-k/Calibrated_MOPO | 3b2e675003e9f6d31a0763be2ec784ceeae5099e | [
"MIT"
] | null | null | null | src/misc/MBExp.py | akshatha-k/Calibrated_MOPO | 3b2e675003e9f6d31a0763be2ec784ceeae5099e | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
from time import time, localtime, strftime
import numpy as np
from scipy.io import savemat
from dotmap import DotMap
from src.modeling.trainers import BNN_trainer
from src.misc.DotmapUtils import get_required_argument
from src.misc.Agent import Agent
from src.modeling.trainers.registry import get_config
from src.controllers.MPC import MPC
SAVE_EVERY = 25
| 39.615894 | 99 | 0.531595 |
1a286a917af5eacc1b12d3158f1106f90974b451 | 252 | py | Python | lightnn/base/__init__.py | tongluocq/lightnn | 602b0742d1141efc73a7146c930c5ea9eb994d37 | [
"Apache-2.0"
] | 131 | 2017-04-05T06:03:25.000Z | 2021-05-20T03:05:36.000Z | ch4/lightnn/lightnn/base/__init__.py | helloqorld/book-of-qna-code | 54950478fb28d15cd73dae4dc39f3cd783721e08 | [
"Apache-2.0"
] | 27 | 2018-11-26T07:39:25.000Z | 2022-02-09T23:44:53.000Z | ch4/lightnn/lightnn/base/__init__.py | helloqorld/book-of-qna-code | 54950478fb28d15cd73dae4dc39f3cd783721e08 | [
"Apache-2.0"
] | 62 | 2018-11-26T07:44:02.000Z | 2022-01-13T08:31:00.000Z | #!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .activations import *
from .losses import *
from .initializers import *
from .optimizers import *
| 19.384615 | 38 | 0.797619 |
1a288c60c996fe5119fab7f4db833d65180f5556 | 3,273 | py | Python | tests/vvadd/Q.py | sa2257/llvm-runtime-pass | 6f2c92141465e7df56f9720ab7753826663d799f | [
"MIT"
] | null | null | null | tests/vvadd/Q.py | sa2257/llvm-runtime-pass | 6f2c92141465e7df56f9720ab7753826663d799f | [
"MIT"
] | null | null | null | tests/vvadd/Q.py | sa2257/llvm-runtime-pass | 6f2c92141465e7df56f9720ab7753826663d799f | [
"MIT"
] | null | null | null | import time
| 32.405941 | 84 | 0.513902 |
1a2b8e3fe78c16eea9e7fc19485f21ac4d60d622 | 22 | py | Python | middleman/api/application/__init__.py | scooterman/middleman | c765c9157cce02574e7191608dacd573156e333b | [
"Xnet",
"X11"
] | 5 | 2020-03-19T07:19:49.000Z | 2021-09-29T06:33:47.000Z | trends/__init__.py | victorkifer/SocialMediaTopTrends | 32098f1621059700d9ca6437a988956ebe1d319a | [
"MIT"
] | 22 | 2015-09-20T14:00:16.000Z | 2021-06-10T20:08:25.000Z | trends/__init__.py | victorkifer/SocialMediaTopTrends | 32098f1621059700d9ca6437a988956ebe1d319a | [
"MIT"
] | 6 | 2015-12-14T21:05:01.000Z | 2019-11-02T19:35:24.000Z | __author__ = 'victor'
| 11 | 21 | 0.727273 |
1a2c8ecde415f77d6438cc4d119dd253cc4b947d | 799 | py | Python | 2009/plotting_data_monitor/_distrib.py | mikiec84/code-for-blog | 79b2264f9a808eb14f624cb3c5ae7624038c043a | [
"Unlicense"
] | 1,199 | 2015-01-06T14:09:37.000Z | 2022-03-29T19:39:51.000Z | 2009/plotting_data_monitor/_distrib.py | mikiec84/code-for-blog | 79b2264f9a808eb14f624cb3c5ae7624038c043a | [
"Unlicense"
] | 25 | 2016-07-29T15:44:01.000Z | 2021-11-19T16:21:01.000Z | 2009/plotting_data_monitor/_distrib.py | mikiec84/code-for-blog | 79b2264f9a808eb14f624cb3c5ae7624038c043a | [
"Unlicense"
] | 912 | 2015-01-04T00:39:50.000Z | 2022-03-29T06:50:22.000Z | from eblib import libcollect
# Create a LibCollect object
# Bundle plotting_data_monitor.pyw and its library dependencies into a
# self-contained "distrib" directory using eblib's LibCollect.
lc = libcollect.LibCollect()
# Prepare arguments for do_collect
#
# Path to the script (can be absolute or relative)
scriptname = 'plotting_data_monitor.pyw'
# Ask the resulting distribution to be placed in
# directory distrib
targetdir = 'distrib'
# Specify which libraries to exclude from the
# distribution (because you know they're installed
# on the target machine)
excludes = ["PyQt4",
            "numpy",
            "serial",
            "pywin",
            "win32api",
            "win32com"]
# This does the actual work
# See the documentation of LibCollect for more options
#
lc.do_collect( scriptname,
               targetdir,
               excludes,
               verbose=True)
| 24.212121 | 55 | 0.624531 |
1a2d28dea2bb837c0dd72c1aefaedfb353d8cc72 | 3,751 | py | Python | tests/conftest.py | cread/aws-parallelcluster-node | 1f3bcd32f216d246d89e0e175be8027c923ae8ec | [
"Apache-2.0"
] | 33 | 2018-11-14T14:54:47.000Z | 2022-03-22T23:47:51.000Z | tests/conftest.py | cread/aws-parallelcluster-node | 1f3bcd32f216d246d89e0e175be8027c923ae8ec | [
"Apache-2.0"
] | 180 | 2019-02-21T09:33:10.000Z | 2022-03-31T08:01:28.000Z | tests/conftest.py | cread/aws-parallelcluster-node | 1f3bcd32f216d246d89e0e175be8027c923ae8ec | [
"Apache-2.0"
] | 35 | 2019-02-06T13:36:18.000Z | 2022-03-01T12:54:05.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import pytest
from botocore.stub import Stubber
| 41.21978 | 119 | 0.702479 |
1a2d480359b08490e3beec01917db1a8d876c6dd | 7,263 | py | Python | scripts/fastrfaa.py | Facenapalm/NapalmBot | ce775a270f374e626bcabc313676e4e2f9dbb843 | [
"MIT"
] | 4 | 2016-05-14T17:42:03.000Z | 2018-09-24T18:43:03.000Z | scripts/fastrfaa.py | Facenapalm/NapalmBot | ce775a270f374e626bcabc313676e4e2f9dbb843 | [
"MIT"
] | null | null | null | scripts/fastrfaa.py | Facenapalm/NapalmBot | ce775a270f374e626bcabc313676e4e2f9dbb843 | [
"MIT"
] | 1 | 2021-05-08T15:45:30.000Z | 2021-05-08T15:45:30.000Z | """
Maintainer script for ruwiki's administrator attention requests table
([[:ru::]]).
Log file is used for saving "" field in deleted requests.
Usage:
python fastrfaa.py [logfile]
"""
import re
import sys
from datetime import datetime
import pywikibot
REGEXP = re.compile(r"""
(?P<indent>\n*)
==[ ]*(?P<header>.*?)[ ]*==\s+
(?P<section>
<onlyinclude>\s*
(?P<template>
(?:[^<]|<(?!/?onlyinclude))*?
)
\s*</onlyinclude>
)
""", re.I | re.VERBOSE)
CONFIGURATION = {
    # nickname or "*" for any: [done delay, undone delay, period of moving to rfaa]
    # All three values are delays in hours; per-user overrides are loaded
    # from the on-wiki configuration page by load_configuration().
    "*": [24, 3 * 24, 7 * 24]
}
# Timestamp format used in section headers and in the template fields.
TIME_FORMAT = "%Y%m%d%H%M%S"
UTCNOW = datetime.utcnow()
UTCNOWSTR = UTCNOW.strftime(TIME_FORMAT)
# Accumulators filled while rewriting the page; consumed by form_comment()
# and main().
MOVED_TEXT = ""
CORRECTED_COUNT = 0
DELETED_DONE_COUNT = 0
DELETED_UNDONE_COUNT = 0
MOVED_COUNT = 0
# Optional log file (first CLI argument) recording the closing admin of each
# archived request.  NOTE: the handle stays open for the whole run and is
# never explicitly closed.
if len(sys.argv) > 1:
    LOGFILE = open(sys.argv[1], "a", encoding="utf-8")
else:
    LOGFILE = None
def load_configuration(config_text):
    """Parse the on-wiki configuration page into CONFIGURATION.

    Each data line has the form ``user / done / undone / move`` where the
    three numeric fields are delays in hours.  Comment lines (starting with
    ``#``), the surrounding ``<pre>`` markup, and any malformed line are
    skipped silently.
    """
    for line in config_text.split("\n"):
        # Skip comments and the <pre>...</pre> wrapper of the config page.
        if re.match(r"^(#|</?pre>)", line):
            continue
        parsed = [value.strip() for value in line.split("/")]
        if len(parsed) != 4:
            continue
        try:
            # Only the int() conversions can fail on a well-formed split.
            # The original bare ``except`` also swallowed SystemExit and
            # KeyboardInterrupt; catching ValueError keeps the intended
            # "skip bad lines" behavior without hiding real errors.
            CONFIGURATION[parsed[0]] = [int(value) for value in parsed[1:]]
        except ValueError:
            continue
def get_delays(user="*"):
    """Return the delay triple for *user*, or the "*" default entry."""
    fallback = CONFIGURATION["*"]
    return CONFIGURATION.get(user, fallback)
def minor_fixes(text):
    """Fix small formatting errors before the page is processed."""
    # Collapse empty sections: a header immediately followed by another header.
    empty_section = re.compile(r"^==.*?==\n+(==.*?==)$", re.M)
    return empty_section.sub(r"\1", text)
def correct_request(match):
    """Repair a single request section matched by REGEXP.

    Two fixes are applied:

    * a missing ``/YYYYMMDDHHMMSS`` timestamp on the question field is
      filled in with the current UTC time;
    * the ``== header ==`` line is regenerated as ``<question>/<timestamp>``
      when it no longer matches the section content.

    Returns the (possibly rewritten) section text and increments the global
    CORRECTED_COUNT when anything changed.

    NOTE(review): the template field names inside the regexes (between
    ``|`` and ``=``) appear to have been stripped during extraction
    (originally non-ASCII/Russian parameter names) -- verify the patterns
    against the live template before relying on them.
    """
    # initialization
    corrected = False
    indent = match.group("indent")
    header = match.group("header")
    section = match.group("section")
    # missing timestamp fix: append the current UTC stamp to a question
    # field that has text but no "/<timestamp>" suffix yet
    (section, flag) = re.subn(
        r"(\|\s*\s*=[^/\n]*[^/\s][^/\n]*)\n",
        "\\1/" + UTCNOWSTR + "\n",
        section)
    if flag > 0:
        corrected = True
    # wrong header fix: canonical header is "<question>/<timestamp>"
    question = re.search(r"\|\s*\s*=(.*)", section)
    timestamp = re.search(r"\|\s*\s*=[^/\n]+/\s*(\d{14})", section)
    if question is None or timestamp is None:
        # request is completely broken -- leave it untouched
        return match.group(0)
    correct_header = question.group(1).strip() + "/" + timestamp.group(1)
    if header != correct_header:
        corrected = True
        header = correct_header
    # finalization: rebuild the section only if something actually changed
    if corrected:
        global CORRECTED_COUNT
        CORRECTED_COUNT += 1
        return "{}== {} ==\n{}".format(indent, header, section)
    else:
        return match.group(0)
def move_old_request(template):
    """Append the wikitext for moving *template* to the (non-fast) RfAA page.

    Accumulates into the global MOVED_TEXT buffer (flushed by main()) and
    increments MOVED_COUNT.  The section header is taken from the question
    field: ``text/timestamp`` uses the timestamp part, otherwise the text.

    NOTE(review): several string literals below appear to have had their
    Cyrillic text stripped during extraction -- the generated wikitext
    should be compared against the original repository before reuse.
    """
    global MOVED_TEXT
    global MOVED_COUNT
    # Question field content, split on "/" to separate text from timestamp.
    parts = re.search(r"\|\s*\s*=(.*)", template).group(1).strip().split("/")
    if len(parts) == 2:
        header = parts[1]
    else:
        header = parts[0]
    MOVED_TEXT += "== {} ( ) ==\n".format(header)
    # Turn the transclusion into a substitution so the moved copy is static.
    MOVED_TEXT += re.sub(r"(: )", "subst:\\1", template)
    MOVED_TEXT += "\n* {{block-small| ," \
                  + " 7 . ~~~~}}"
    MOVED_TEXT += "\n\n"
    MOVED_COUNT += 1
def delete_old_request(match):
    """Process one request section and delete it if it is necessary.

    Used as a REGEXP.sub callback.  Returns "" to delete the section or the
    original text to keep it.  Three outcomes:

    * open request (no admin field) older than the "move" delay -> queued
      for the main RfAA page via move_old_request() and deleted here;
    * closed request (admin field present) older than the done/undone
      delay -> archived (counters bumped, admin logged) and deleted;
    * anything else -> kept unchanged.

    NOTE(review): the ``author`` and ``admin`` regexes are byte-identical
    here because the distinguishing (non-ASCII) field names were stripped
    during extraction -- confirm against the original source.
    """
    template = match.group("template")
    # Status "+" = done, "-" = undone; absent status defaults to done.
    status = re.search(r"\|\s*\s*=\s*([+-])", template)
    author = re.search(r"\|\s*\s*=([^/\n]+)/\s*(\d{14})", template)
    admin = re.search(r"\|\s*\s*=([^/\n]+)/\s*(\d{14})", template)
    extract_name = lambda m: m.group(1).strip()
    extract_date = lambda m: datetime.strptime(m.group(2), TIME_FORMAT)
    # A negative delay disables the action entirely.
    check_delay = lambda date, delay: delay >= 0 and (UTCNOW - date).total_seconds() >= delay * 60 * 60
    # Delays are per-author: [done, undone, move-to-rfaa], all in hours.
    if author is None:
        delays = get_delays()
    else:
        delays = get_delays(extract_name(author))
    if admin is None:
        # request is still open
        if author is not None:
            if check_delay(extract_date(author), delays[2]):
                # very old request that should be moved to rfaa
                move_old_request(template)
                return ""
    else:
        # request is closed
        if status is None:
            done = True
        else:
            done = status.group(1) == "+"
        if done:
            delay = delays[0]
        else:
            delay = delays[1]
        if check_delay(extract_date(admin), delay):
            # archiving: count it and record the closing admin in the log
            if done:
                global DELETED_DONE_COUNT
                DELETED_DONE_COUNT += 1
            else:
                global DELETED_UNDONE_COUNT
                DELETED_UNDONE_COUNT += 1
            if LOGFILE:
                LOGFILE.write("{}/{}\n".format(extract_name(admin), admin.group(2)))
            return ""
    return match.group(0)
def form_comment():
    """Build the edit summary from the global counters.

    Combines CORRECTED_COUNT with the archived/moved counts
    (DELETED_DONE_COUNT, DELETED_UNDONE_COUNT, MOVED_COUNT) into a single
    summary string; returns "" when nothing changed so the caller can skip
    the save entirely.

    NOTE(review): the word arguments to plural_phrase() and the format
    strings below are empty -- the original Russian text appears to have
    been stripped during extraction.  As written, ``plural`` returns its
    word unchanged (both conditional branches append "").
    """
    # Russian-style pluralization: 1/21/31... take the singular suffix,
    # but 11 does not (num % 100 != 11 guard).
    plural = lambda num, word: word + ("" if num % 10 == 1 and num % 100 != 11 else "")
    plural_phrase = lambda num, word: str(num) + " " + plural(num, word)
    deleted_parts = []
    if DELETED_DONE_COUNT > 0:
        deleted_parts.append(plural_phrase(DELETED_DONE_COUNT, ""))
    if DELETED_UNDONE_COUNT > 0:
        deleted_parts.append(plural_phrase(DELETED_UNDONE_COUNT, ""))
    if MOVED_COUNT > 0:
        deleted_parts.append(plural_phrase(MOVED_COUNT, ""))
    deleted = ", ".join(deleted_parts)
    if CORRECTED_COUNT:
        corrected = str(CORRECTED_COUNT)
    else:
        corrected = ""
    # Join the two halves of the summary, omitting whichever is empty.
    if corrected and deleted:
        return " ({}), ({}).".format(corrected, deleted)
    elif corrected:
        return " ({}).".format(corrected)
    elif deleted:
        return " ({}).".format(deleted)
    else:
        return ""
def main():
    """Run one maintenance pass over the fast-RfAA table.

    Steps: load the on-wiki configuration, fix malformed requests, archive
    or move stale ones, flush moved requests to the main RfAA page, and
    save the table with a summary built by form_comment().

    NOTE(review): the pywikibot.Page titles below are empty -- the original
    (Russian) page names were stripped during extraction and must be
    restored before this can run.
    """
    site = pywikibot.Site()
    # Optional per-user delay overrides live on a separate config page.
    config = pywikibot.Page(site, ": //")
    if config.exists():
        load_configuration(config.text)
    fast = pywikibot.Page(site, ": /")
    ftext = fast.text
    ftext = minor_fixes(ftext)
    # Two passes: first repair sections, then archive/move stale ones.
    ftext = REGEXP.sub(correct_request, ftext)
    ftext = REGEXP.sub(delete_old_request, ftext)
    if MOVED_TEXT != "":
        # Insert moved requests before the first section of the RfAA page
        # (or at the end if it has no sections yet).
        rfaa = pywikibot.Page(site, ": ")
        rtext = rfaa.text
        insert = rtext.find("==")
        if insert == -1:
            insert = len(rtext)
        rtext = rtext[:insert] + MOVED_TEXT + rtext[insert:]
        rfaa.text = rtext
        rfaa.save(" .", minor=False)
    comment = form_comment()
    # An empty comment means nothing changed -- skip the save.
    if comment:
        fast.text = ftext
        fast.save(comment)
if __name__ == "__main__":
main()
| 31.171674 | 103 | 0.592317 |
a7de746c56c67620e56b1437e51a6c5e5965554a | 1,102 | py | Python | rssfly/tests/common.py | lidavidm/rssfly | 1cfb893a249e4095412b966a1bf50fc3de7744e7 | [
"Apache-2.0"
] | 1 | 2021-02-14T03:44:35.000Z | 2021-02-14T03:44:35.000Z | rssfly/tests/common.py | lidavidm/rssfly | 1cfb893a249e4095412b966a1bf50fc3de7744e7 | [
"Apache-2.0"
] | 6 | 2021-07-15T13:03:19.000Z | 2022-03-26T14:14:14.000Z | rssfly/tests/common.py | lidavidm/rssfly | 1cfb893a249e4095412b966a1bf50fc3de7744e7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 David Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from typing import Dict
| 29.783784 | 74 | 0.696915 |
a7dfe24c47f27180a9478cedac00f9ebde2a0811 | 16,502 | py | Python | pybind/slxos/v16r_1_00b/protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import mep
| 85.061856 | 1,055 | 0.690038 |
a7e04b7806a0a0c1a8e2d03be13546c94ed6e271 | 3,253 | py | Python | scripts/generate.py | maruina/diagrams | 8a9012fa24e2987b49672bae0abf16585fed440a | [
"MIT"
] | null | null | null | scripts/generate.py | maruina/diagrams | 8a9012fa24e2987b49672bae0abf16585fed440a | [
"MIT"
] | null | null | null | scripts/generate.py | maruina/diagrams | 8a9012fa24e2987b49672bae0abf16585fed440a | [
"MIT"
] | null | null | null | import os
import sys
from typing import Iterable
from jinja2 import Environment, FileSystemLoader, Template
import config as cfg
from . import app_root_dir, doc_root_dir, resource_dir, template_dir
_usage = "Usage: generate.py <onprem|aws|gcp|azure|k8s|alibabacloud|oci|programming|saas>"
def gen_classes(pvd: str, typ: str, paths: Iterable[str]) -> str:
    """Render the node-class module source for one provider/type pair.

    *paths* are the resource image filenames; each becomes one class via
    _gen_class_meta (defined elsewhere in this module).  Returns the
    rendered Python source as a string.
    """
    tmpl = load_tmpl(cfg.TMPL_MODULE)
    # TODO: extract the gen class metas for sharing
    # TODO: independent function for generating all pvd/typ/paths pairs
    metas = map(_gen_class_meta, paths)
    # Optional per-type alias table from the config; empty when undefined.
    aliases = cfg.ALIASES[pvd][typ] if typ in cfg.ALIASES[pvd] else {}
    return tmpl.render(pvd=pvd, typ=typ, metas=metas, aliases=aliases)
def make_module(pvd: str, typ: str, classes: str) -> None:
    """Write the generated node classes to ``<app_root(pvd)>/<typ>.py``."""
    target = os.path.join(app_root_dir(pvd), f"{typ}.py")
    with open(target, "w+") as handle:
        handle.write(classes)
def make_apidoc(pvd: str, content: str) -> None:
    """Write the provider's API documentation to ``<doc_root>/<pvd>.md``."""
    mod_path = os.path.join(doc_root_dir(), f"{pvd}.md")
    with open(mod_path, "w+") as f:
        f.write(content)
def generate(pvd: str) -> None:
    """Generate all node-class modules and the API doc for one provider.

    Walks the provider's resource tree: each subdirectory is a "type",
    each non-rounded PNG inside it becomes one node class.
    """
    typ_paths = {}
    for root, _, files in os.walk(resource_dir(pvd)):
        # Extract the names and paths from resources.
        files.sort()
        pngs = list(filter(lambda f: f.endswith(".png"), files))
        # "rounded" variants are alternate renderings, not separate nodes.
        paths = list(filter(lambda f: "rounded" not in f, pngs))
        # Skip the top-root directory.
        typ = os.path.basename(root)
        if typ == pvd:
            continue
        classes = gen_classes(pvd, typ, paths)
        make_module(pvd, typ, classes)
        typ_paths[typ] = paths
    # Build API documentation
    # gen_apidoc is defined elsewhere in this module (not visible here).
    apidoc = gen_apidoc(pvd, typ_paths)
    make_apidoc(pvd, apidoc)
if __name__ == "__main__":
    # Validate CLI usage: exactly one known provider name is required.
    # The original exited silently with status 0 on an unknown provider and
    # crashed with IndexError when no argument was given; the module-level
    # _usage string existed but was never shown.  Print it and exit non-zero.
    if len(sys.argv) < 2 or sys.argv[1] not in cfg.PROVIDERS:
        sys.exit(_usage)
    pvd = sys.argv[1]
    generate(pvd)
| 31.582524 | 90 | 0.640639 |
a7e08ada30043433441727fc2f8b4036acae9399 | 5,023 | py | Python | src/GUI/menuTypes.py | Vidhu007/Cloud-Encryption | ec9ccd76a71e98740d937b34a7734f821448fae0 | [
"MIT"
] | 7 | 2021-05-10T13:30:51.000Z | 2022-03-20T17:49:59.000Z | src/GUI/menuTypes.py | Vidhu007/Cloud-Encryption | ec9ccd76a71e98740d937b34a7734f821448fae0 | [
"MIT"
] | null | null | null | src/GUI/menuTypes.py | Vidhu007/Cloud-Encryption | ec9ccd76a71e98740d937b34a7734f821448fae0 | [
"MIT"
] | 8 | 2019-04-05T10:40:49.000Z | 2022-03-20T06:00:43.000Z | import users
import sys
import encryption
import googleDriveAPI
u= users
e= encryption
g= googleDriveAPI
#Function to generate menu for privileged (admin) user
#Function which generates menu for standard non-privileged users | 38.937984 | 129 | 0.428827 |
a7e0cc5a6d14c321badeeffabba58fb153ebc18b | 657 | py | Python | eap_backend/eap_api/migrations/0005_alter_eapuser_is_active.py | alan-turing-institute/AssurancePlatform | 1aa34b544990f981a289f6d21a832657ad19742e | [
"MIT"
] | 5 | 2021-09-28T15:02:21.000Z | 2022-03-23T14:37:51.000Z | eap_backend/eap_api/migrations/0005_alter_eapuser_is_active.py | alan-turing-institute/AssurancePlatform | 1aa34b544990f981a289f6d21a832657ad19742e | [
"MIT"
] | 69 | 2021-09-28T14:21:24.000Z | 2022-03-31T17:12:19.000Z | eap_backend/eap_api/migrations/0005_alter_eapuser_is_active.py | alan-turing-institute/AssurancePlatform | 1aa34b544990f981a289f6d21a832657ad19742e | [
"MIT"
] | 1 | 2021-09-28T15:11:00.000Z | 2021-09-28T15:11:00.000Z | # Generated by Django 3.2.8 on 2022-05-31 10:13
from django.db import migrations, models
| 25.269231 | 81 | 0.531202 |
a7e26aa446e86411030f396561a3b8cb6f32b961 | 465 | py | Python | lucid_torch/transforms/monochrome/TFMSMonochromeTo.py | HealthML/lucid-torch | 627700a83b5b2690cd8f95010b5ed439204102f4 | [
"MIT"
] | 1 | 2021-08-20T07:38:09.000Z | 2021-08-20T07:38:09.000Z | lucid_torch/transforms/monochrome/TFMSMonochromeTo.py | HealthML/lucid-torch | 627700a83b5b2690cd8f95010b5ed439204102f4 | [
"MIT"
] | 5 | 2021-03-19T15:50:42.000Z | 2022-03-12T00:53:17.000Z | lucid_torch/transforms/monochrome/TFMSMonochromeTo.py | HealthML/lucid-torch | 627700a83b5b2690cd8f95010b5ed439204102f4 | [
"MIT"
] | null | null | null | import torch
| 31 | 79 | 0.658065 |
a7e2b57529b0723b4ab18b73801cd2816d8025dd | 1,027 | py | Python | python/paddle/v2/framework/tests/test_modified_huber_loss_op.py | AI-books/Paddle | 5b5f4f514047975ac09ec42b31e46dabf235e7dd | [
"Apache-2.0"
] | null | null | null | python/paddle/v2/framework/tests/test_modified_huber_loss_op.py | AI-books/Paddle | 5b5f4f514047975ac09ec42b31e46dabf235e7dd | [
"Apache-2.0"
] | null | null | null | python/paddle/v2/framework/tests/test_modified_huber_loss_op.py | AI-books/Paddle | 5b5f4f514047975ac09ec42b31e46dabf235e7dd | [
"Apache-2.0"
] | 1 | 2020-06-04T04:27:15.000Z | 2020-06-04T04:27:15.000Z | import unittest
import numpy as np
from op_test import OpTest
if __name__ == '__main__':
unittest.main()
| 25.675 | 80 | 0.590068 |
a7e54d153065f4c3487a12be2e95a69fef30b9f1 | 5,626 | py | Python | bots.py | FatherUsarox/generadorqr | a56cc91c1b9320c03f3579c5a4d7d21f71b42f17 | [
"MIT"
] | null | null | null | bots.py | FatherUsarox/generadorqr | a56cc91c1b9320c03f3579c5a4d7d21f71b42f17 | [
"MIT"
] | null | null | null | bots.py | FatherUsarox/generadorqr | a56cc91c1b9320c03f3579c5a4d7d21f71b42f17 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# pylint: disable=C0116,W0613
# This program is dedicated to the public domain under the CC0 license.
"""
First, a few callback functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using ConversationHandler.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import logging
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
CallbackContext,
)
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
GENDER, PHOTO, LOCATION, BIO = range(4)
def start(update: Update, context: CallbackContext) -> int:
    """Handle /start: greet the user and ask for their gender.

    Returns the GENDER state so the ConversationHandler routes the next
    message to gender().
    """
    reply_keyboard = [['Boy', 'Girl', 'Other']]
    update.message.reply_text(
        'Hi! My name is Professor Bot. I will hold a conversation with you. '
        'Send /cancel to stop talking to me.\n\n'
        'Are you a boy or a girl?',
        reply_markup=ReplyKeyboardMarkup(
            reply_keyboard, one_time_keyboard=True, input_field_placeholder='Boy or Girl?'
        ),
    )
    return GENDER
def gender(update: Update, context: CallbackContext) -> int:
    """Log the selected gender and ask for a photo.

    Removes the one-off reply keyboard and advances to the PHOTO state.
    (The answer is only logged, not persisted.)
    """
    user = update.message.from_user
    logger.info("Gender of %s: %s", user.first_name, update.message.text)
    update.message.reply_text(
        'I see! Please send me a photo of yourself, '
        'so I know what you look like, or send /skip if you don\'t want to.',
        reply_markup=ReplyKeyboardRemove(),
    )
    return PHOTO
def photo(update: Update, context: CallbackContext) -> int:
    """Download the user's photo to user_photo.jpg and ask for a location.

    NOTE(review): the fixed filename is shared by all users, so concurrent
    conversations overwrite each other's photo.
    """
    user = update.message.from_user
    # photo[-1] is the last size variant -- presumably the largest; confirm
    # against the Telegram API's size ordering.
    photo_file = update.message.photo[-1].get_file()
    photo_file.download('user_photo.jpg')
    logger.info("Photo of %s: %s", user.first_name, 'user_photo.jpg')
    update.message.reply_text(
        'Gorgeous! Now, send me your location please, or send /skip if you don\'t want to.'
    )
    return LOCATION
def skip_photo(update: Update, context: CallbackContext) -> int:
    """Handle /skip in the PHOTO state: move on to asking for a location."""
    user = update.message.from_user
    logger.info("User %s did not send a photo.", user.first_name)
    update.message.reply_text(
        'I bet you look great! Now, send me your location please, or send /skip.'
    )
    return LOCATION
def location(update: Update, context: CallbackContext) -> int:
    """Log the user's coordinates and ask for a short bio.

    Advances to the BIO state.  (The location is only logged, not stored.)
    """
    user = update.message.from_user
    user_location = update.message.location
    logger.info(
        "Location of %s: %f / %f", user.first_name, user_location.latitude, user_location.longitude
    )
    update.message.reply_text(
        'Maybe I can visit you sometime! At last, tell me something about yourself.'
    )
    return BIO
def skip_location(update: Update, context: CallbackContext) -> int:
    """Handle /skip in the LOCATION state: move on to asking for a bio."""
    user = update.message.from_user
    logger.info("User %s did not send a location.", user.first_name)
    update.message.reply_text(
        'You seem a bit paranoid! At last, tell me something about yourself.'
    )
    return BIO
def bio(update: Update, context: CallbackContext) -> int:
    """Log the user's bio and end the conversation.

    Returns ConversationHandler.END to terminate the state machine.
    """
    user = update.message.from_user
    logger.info("Bio of %s: %s", user.first_name, update.message.text)
    update.message.reply_text('Thank you! I hope we can talk again some day.')
    return ConversationHandler.END
def cancel(update: Update, context: CallbackContext) -> int:
    """Handle /cancel from any state: say goodbye and end the conversation."""
    user = update.message.from_user
    logger.info("User %s canceled the conversation.", user.first_name)
    update.message.reply_text(
        'Bye! I hope we can talk again some day.', reply_markup=ReplyKeyboardRemove()
    )
    return ConversationHandler.END
def main() -> None:
    """Run the bot."""
    # Create the Updater and pass it your bot's token.
    # NOTE: "TOKEN" is a placeholder -- replace it with a real bot token.
    updater = Updater("TOKEN")
    # Get the dispatcher to register handlers
    dispatcher = updater.dispatcher
    # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO.
    # Each state maps to the handler(s) allowed to fire while in that state;
    # every state handler returns the next state (or ConversationHandler.END).
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            GENDER: [MessageHandler(Filters.regex('^(Boy|Girl|Other)$'), gender)],
            PHOTO: [MessageHandler(Filters.photo, photo), CommandHandler('skip', skip_photo)],
            LOCATION: [
                MessageHandler(Filters.location, location),
                CommandHandler('skip', skip_location),
            ],
            BIO: [MessageHandler(Filters.text & ~Filters.command, bio)],
        },
        fallbacks=[CommandHandler('cancel', cancel)],
    )
    dispatcher.add_handler(conv_handler)
    # Start the Bot
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
if __name__ == '__main__':
main() | 32.900585 | 99 | 0.682545 |
a7e702d2867f4402c54e0d45a5281d763c846bf9 | 746 | py | Python | src/tests/TestQuadratureRule.py | WaveBlocks/WaveBlocks | 2af3730dcf27e54006ec602e696b4d4df25459d8 | [
"BSD-3-Clause"
] | null | null | null | src/tests/TestQuadratureRule.py | WaveBlocks/WaveBlocks | 2af3730dcf27e54006ec602e696b4d4df25459d8 | [
"BSD-3-Clause"
] | null | null | null | src/tests/TestQuadratureRule.py | WaveBlocks/WaveBlocks | 2af3730dcf27e54006ec602e696b4d4df25459d8 | [
"BSD-3-Clause"
] | null | null | null | """The WaveBlocks Project
Plot some quadrature rules.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
from numpy import squeeze
from matplotlib.pyplot import *
from WaveBlocks import GaussHermiteQR
# Orders of the Gauss-Hermite quadrature rules to plot.
tests = (2, 3, 4, 7, 32, 64, 128)
for I in tests:
    Q = GaussHermiteQR(I)
    print(Q)
    # Nodes and weights come back with an extra axis; flatten for stem().
    N = Q.get_nodes()
    N = squeeze(N)
    W = Q.get_weights()
    W = squeeze(W)
    # Stem plot of weight omega_i at node gamma_i, one figure per rule.
    fig = figure()
    ax = fig.gca()
    ax.stem(N, W)
    ax.set_xlabel(r"$\gamma_i$")
    ax.set_ylabel(r"$\omega_i$")
    ax.set_title(r"Gauss-Hermite quadrature with $"+str(Q.get_number_nodes())+r"$ nodes")
    fig.savefig("qr_order_"+str(Q.get_order())+".png")
a7e7986c81b7eb2a012589680dc9149ce7e709a3 | 7,141 | py | Python | neural_network.py | lee-winchester/deep-neural-network | 8f7c012e864a6bf9a3257d8cd08e3b3488243b19 | [
"MIT"
] | null | null | null | neural_network.py | lee-winchester/deep-neural-network | 8f7c012e864a6bf9a3257d8cd08e3b3488243b19 | [
"MIT"
] | null | null | null | neural_network.py | lee-winchester/deep-neural-network | 8f7c012e864a6bf9a3257d8cd08e3b3488243b19 | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import scipy
ROWS = 64
COLS = 64
CHANNELS = 3
TRAIN_DIR = 'Train_data/'
TEST_DIR = 'Test_data/'
train_images = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)]
test_images = [TEST_DIR+i for i in os.listdir(TEST_DIR)]
train_set_x, train_set_y = prepare_data(train_images)
test_set_x, test_set_y = prepare_data(test_images)
train_set_x_flatten = train_set_x.reshape(train_set_x.shape[0], ROWS*COLS*CHANNELS).T
test_set_x_flatten = test_set_x.reshape(test_set_x.shape[0], -1).T
train_set_x = train_set_x_flatten/255
test_set_x = test_set_x_flatten/255
#train_set_x_flatten shape: (12288, 6002)
#train_set_y shape: (1, 6002)
#nn_model(train_set_x, train_set_y, test_set_x, test_set_y, n_h = 10, num_iterations = 3000, learning_rate = 0.05, print_cost = True)
hidden_layer = [10, 50, 100, 200, 400]
models = {}
for i in hidden_layer:
print ("hidden layer is: ",i)
models[i] = nn_model(train_set_x, train_set_y, test_set_x, test_set_y, n_h = i, num_iterations = 10000, learning_rate = 0.05, print_cost = True)
print ("-------------------------------------------------------")
for i in hidden_layer:
plt.plot(np.squeeze(models[i]["costs"]), label= str(models[i]["n_h"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
| 30.780172 | 148 | 0.618261 |
a7e7c360d245af4066c15b6cc4582b7c3939eb5b | 798 | py | Python | main.py | UstymHanyk/NearbyMovies | b54995463a30a130f9023c63d6549e734c45251c | [
"MIT"
] | 1 | 2021-02-15T20:20:06.000Z | 2021-02-15T20:20:06.000Z | main.py | UstymHanyk/NearbyMovies | b54995463a30a130f9023c63d6549e734c45251c | [
"MIT"
] | null | null | null | main.py | UstymHanyk/NearbyMovies | b54995463a30a130f9023c63d6549e734c45251c | [
"MIT"
] | null | null | null | """
A module for generating a map with 10 nearest movies
"""
from data_reader import read_data, select_year
from locations_finder import coord_finder, find_nearest_movies
from map_generator import generate_map
if __name__=="__main__":
start() | 42 | 114 | 0.766917 |
a7e7f170ca97fac3a1e811a7610e759c8f771e42 | 9,003 | py | Python | wbtools/lib/nlp/entity_extraction/ntt_extractor.py | WormBase/wbtools | 70d07109182706b2a6cc333ef7a17dcd293cc3f3 | [
"MIT"
] | 1 | 2021-02-17T06:54:13.000Z | 2021-02-17T06:54:13.000Z | wbtools/lib/nlp/entity_extraction/ntt_extractor.py | WormBase/wbtools | 70d07109182706b2a6cc333ef7a17dcd293cc3f3 | [
"MIT"
] | 1 | 2021-04-28T20:58:57.000Z | 2021-04-28T20:58:57.000Z | wbtools/lib/nlp/entity_extraction/ntt_extractor.py | WormBase/wbtools | 70d07109182706b2a6cc333ef7a17dcd293cc3f3 | [
"MIT"
] | 1 | 2021-03-31T17:23:37.000Z | 2021-03-31T17:23:37.000Z | import math
import re
from typing import List, Dict
from wbtools.db.generic import WBGenericDBManager
from wbtools.lib.nlp.common import EntityType
from wbtools.lib.nlp.literature_index.abstract_index import AbstractLiteratureIndex
ALL_VAR_REGEX = r'({designations}|m|p|It)(_)?([A-z]+)?([0-9]+)([a-zA-Z]{{1,4}}[0-9]*)?(\[[0-9]+\])?([a-zA-Z]{{1,4}}' \
r'[0-9]*)?(\[.+\])?'
NEW_VAR_REGEX = r'[\(\s]({designations}|m|p)([0-9]+)((?:{designations}|m|p|ts|gf|lf|d|sd|am|cs)[0-9]+)?[\)\s\[]'
STRAIN_REGEX = r'[\(\s,\.:;\'\"]({designations})([0-9]+)[\)\s\,\.:;\'\"]'
OPENING_REGEX_STR = "[\\.\\n\\t\\'\\/\\(\\)\\[\\]\\{\\}:;\\,\\!\\?> ]"
CLOSING_REGEX_STR = "[\\.\\n\\t\\'\\/\\(\\)\\[\\]\\{\\}:;\\,\\!\\?> ]"
OPENING_CLOSING_REGEXES = {
EntityType.VARIATION: [r'[\(\s](', r')[\)\s\[]'],
EntityType.STRAIN: [r'[\(\s,\.:;\'\"](', r')[\)\s,\.:;\'\"]']
}
def extract_all_entities_by_type(self, text: str, entity_type: EntityType, include_new: bool = True,
match_curated: bool = False, exclude_curated: bool = False,
match_entities: List[str] = None, exclude_entities: List[str] = None,
exclude_id_used_as_name: bool = True):
"""
extract entities mentioned in text
Args:
text (str): the input text
entity_type (EntityType): the type of entities to extract
include_new (bool): whether to include possibly new entities not yet in the curation database
match_curated (bool): whether to extract curated entities obtained from the provided DB manager
exclude_curated (bool): whether to remove curated entities obtained from the provided DB manager from the
extracted ones
match_entities (List[str]): match the provided entities
exclude_entities (List[str]): exclude the provided entities from the results
exclude_id_used_as_name (bool): do not extract entity ids when used as names in the DB
Returns:
list: the list of entities extracted from text
"""
entities = set()
if include_new:
entities.update(NttExtractor.match_entities_regex(text, self.entity_type_regex_map[entity_type]))
if match_curated:
entities.update(NttExtractor.match_entities_regex(
text, OPENING_CLOSING_REGEXES[entity_type][0] + '|'.join(self.db_manager.get_curated_entities(
entity_type=entity_type, exclude_id_used_as_name=exclude_id_used_as_name)) +
OPENING_CLOSING_REGEXES[entity_type][1]))
if exclude_curated:
entities -= set(self.get_curated_entities(entity_type=entity_type, exclude_id_used_as_name=exclude_id_used_as_name))
if match_entities:
entities.update(NttExtractor.match_entities_regex(
text, OPENING_CLOSING_REGEXES[entity_type][0] + '|'.join(match_entities) +
OPENING_CLOSING_REGEXES[entity_type][1]))
if exclude_entities:
entities -= set(exclude_entities)
return sorted(list(entities))
| 56.26875 | 128 | 0.59258 |
a7e832d5a07f4dde801acb1a916d9e7763b10c42 | 3,228 | py | Python | tests/feature_extractors/test_bitteli.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | [
"MIT"
] | 21 | 2016-08-22T22:00:49.000Z | 2020-03-29T04:15:19.000Z | tests/feature_extractors/test_bitteli.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | [
"MIT"
] | 22 | 2016-08-28T01:07:08.000Z | 2018-02-07T14:38:26.000Z | tests/feature_extractors/test_bitteli.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | [
"MIT"
] | 3 | 2017-01-12T10:04:27.000Z | 2022-01-06T13:25:48.000Z | """Test motif.features.bitteli
"""
import unittest
import numpy as np
from motif.feature_extractors import bitteli
| 30.742857 | 66 | 0.565675 |
a7e94b54f09da5dbd4544a51eda95b42dbf4bd2e | 3,436 | py | Python | qf_lib/backtesting/events/time_event/regular_date_time_rule.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 198 | 2019-08-16T15:09:23.000Z | 2022-03-30T12:44:00.000Z | qf_lib/backtesting/events/time_event/regular_date_time_rule.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 13 | 2021-01-07T10:15:19.000Z | 2022-03-29T13:01:47.000Z | qf_lib/backtesting/events/time_event/regular_date_time_rule.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 29 | 2019-08-16T15:21:28.000Z | 2022-02-23T09:53:49.000Z | # Copyright 2016-present CERN European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta
| 45.813333 | 116 | 0.680151 |
a7ea14ccf7f41c0614b8f95c605b3bd30018a21b | 2,643 | py | Python | example_project/blog/migrations/0001_initial.py | allran/djangorestframework-appapi | 5e843b70910ccd55d787096ee08eb85315c80000 | [
"BSD-2-Clause"
] | 4 | 2019-10-15T06:47:29.000Z | 2019-11-11T13:16:15.000Z | example_project/blog/migrations/0001_initial.py | allran/djangorestframework-appapi | 5e843b70910ccd55d787096ee08eb85315c80000 | [
"BSD-2-Clause"
] | null | null | null | example_project/blog/migrations/0001_initial.py | allran/djangorestframework-appapi | 5e843b70910ccd55d787096ee08eb85315c80000 | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-16 02:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 43.327869 | 193 | 0.573212 |
a7eb2348cfa9a172a906c37500f1917164aff9ba | 515 | py | Python | hamming/hamming.py | olepunchy/exercism-python-solutions | 7710e49ec0188510d50a22928cdb951063ad1a44 | [
"BSD-3-Clause"
] | 1 | 2021-12-20T11:29:35.000Z | 2021-12-20T11:29:35.000Z | hamming/hamming.py | olepunchy/exercism-python-solutions | 7710e49ec0188510d50a22928cdb951063ad1a44 | [
"BSD-3-Clause"
] | null | null | null | hamming/hamming.py | olepunchy/exercism-python-solutions | 7710e49ec0188510d50a22928cdb951063ad1a44 | [
"BSD-3-Clause"
] | null | null | null | """Hamming Distance from Exercism"""
def distance(strand_a, strand_b):
"""Determine the hamming distance between two RNA strings
param: str strand_a
param: str strand_b
return: int calculation of the hamming distance between strand_a and strand_b
"""
if len(strand_a) != len(strand_b):
raise ValueError("Strands must be of equal length.")
distance = 0
for i, _ in enumerate(strand_a):
if strand_a[i] != strand_b[i]:
distance += 1
return distance
| 25.75 | 81 | 0.660194 |
a7ec26521d5754d63393dc5921008ed61eb700b3 | 1,384 | py | Python | python/scopePractice.py | 5x5x5x5/Back2Basics | 4cd4117c6fdcb064b6cd62fde63be92347950526 | [
"Unlicense"
] | null | null | null | python/scopePractice.py | 5x5x5x5/Back2Basics | 4cd4117c6fdcb064b6cd62fde63be92347950526 | [
"Unlicense"
] | 1 | 2016-02-14T00:09:48.000Z | 2016-02-14T00:10:05.000Z | python/scopePractice.py | 5x5x5x5/Back2Basics | 4cd4117c6fdcb064b6cd62fde63be92347950526 | [
"Unlicense"
] | null | null | null | #def spam():
# eggs = 31337
#spam()
#print(eggs)
"""
def spam():
eggs = 98
bacon()
print(eggs)
def bacon():
ham = 101
eggs = 0
spam()
"""
"""
# Global variables can be read from local scope.
def spam():
print(eggs)
eggs = 42
spam()
print(eggs)
"""
"""
# Local and global variables with the same name.
def spam():
eggs = 'spam local'
print(eggs) # prints 'spam local'
def bacon():
eggs = 'bacon local'
print(eggs) # prints 'bacon local'
spam()
print(eggs) # prints 'bacon local'
eggs = 'global'
bacon()
print(eggs) # prints 'global'
"""
"""
# the global statement
def spam():
global eggs
eggs = 'spam'
eggs = 'it don\'t matter'
spam()
print(eggs)
"""
"""
def spam():
global eggs
eggs = 'spam' # this is the global
def bacon():
eggs = 'bacon' # this is a local
def ham():
print(eggs) # this is the global
eggs = 42 # this is global
spam()
print(eggs)
"""
# Python will not fall back to using the global eggs variable
eggs = 'global'
spam()
# This error happens because Python sees that there is an assignment statement for eggs in the spam() function and therefore considers eggs to be local. Because print(eggs) is executed before eggs is assigned anything, the local variable eggs doesn't exist.
| 16.674699 | 257 | 0.621387 |
a7ee3074b0e06212ba87fbc858e47dc0897b2f73 | 3,114 | py | Python | utils/csv_generator.py | stegmaierj/CellSynthesis | de2c90ed668b7f57b960896473df3d56636eca82 | [
"Apache-2.0"
] | 1 | 2021-07-21T21:40:32.000Z | 2021-07-21T21:40:32.000Z | utils/csv_generator.py | stegmaierj/CellSynthesis | de2c90ed668b7f57b960896473df3d56636eca82 | [
"Apache-2.0"
] | null | null | null | utils/csv_generator.py | stegmaierj/CellSynthesis | de2c90ed668b7f57b960896473df3d56636eca82 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
# 3D Image Data Synthesis.
# Copyright (C) 2021 D. Eschweiler, M. Rethwisch, M. Jarchow, S. Koppers, J. Stegmaier
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the Liceense at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please refer to the documentation for more information about the software
# as well as for installation instructions.
#
"""
import os
import glob
import csv
import numpy as np
| 33.847826 | 113 | 0.633269 |
a7f02ff728dc30360284b4e08bfb0d211597ed3b | 1,003 | py | Python | test/test_VersionUpdaterWindow.py | jmarrec/IDFVersionUpdater2 | 0420732141e41bdc06c85f1372d82f0843f8cebf | [
"BSD-3-Clause"
] | null | null | null | test/test_VersionUpdaterWindow.py | jmarrec/IDFVersionUpdater2 | 0420732141e41bdc06c85f1372d82f0843f8cebf | [
"BSD-3-Clause"
] | null | null | null | test/test_VersionUpdaterWindow.py | jmarrec/IDFVersionUpdater2 | 0420732141e41bdc06c85f1372d82f0843f8cebf | [
"BSD-3-Clause"
] | 2 | 2020-09-25T08:02:39.000Z | 2021-08-18T08:30:31.000Z | import os
import sys
import tempfile
import unittest
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'IDFVersionUpdater'))
from VersionUpdaterWindow import VersionUpdaterWindow
| 31.34375 | 101 | 0.682951 |
a7f04cab3ce9aa87269ec6d3083f5676dec9b76a | 421 | py | Python | Algorithm/Mathematical/453. Minimum Moves to Equal Array Elements.py | smsubham/Data-Structure-Algorithms-Questions | 45da68231907068ef4e4a0444ffdac69b337fa7c | [
"Apache-2.0"
] | null | null | null | Algorithm/Mathematical/453. Minimum Moves to Equal Array Elements.py | smsubham/Data-Structure-Algorithms-Questions | 45da68231907068ef4e4a0444ffdac69b337fa7c | [
"Apache-2.0"
] | null | null | null | Algorithm/Mathematical/453. Minimum Moves to Equal Array Elements.py | smsubham/Data-Structure-Algorithms-Questions | 45da68231907068ef4e4a0444ffdac69b337fa7c | [
"Apache-2.0"
] | null | null | null | # https://leetcode.com/problems/minimum-moves-to-equal-array-elements/
# Explanation: https://leetcode.com/problems/minimum-moves-to-equal-array-elements/discuss/93817/It-is-a-math-question
# Source: https://leetcode.com/problems/minimum-moves-to-equal-array-elements/discuss/272994/Python-Greedy-Sum-Min*Len | 60.142857 | 118 | 0.752969 |
a7f28bd0d14c90ec88699bf98d0a9fe7b8320366 | 583 | py | Python | tests/test_decisions.py/test_binary_decision.py | evanofslack/pyminion | 0d0bfc6d8e84e9f33e617c7d01b6edb649166290 | [
"MIT"
] | 5 | 2021-12-17T20:34:55.000Z | 2022-01-24T15:18:05.000Z | tests/test_decisions.py/test_binary_decision.py | evanofslack/pyminion | 0d0bfc6d8e84e9f33e617c7d01b6edb649166290 | [
"MIT"
] | 31 | 2021-10-29T21:05:00.000Z | 2022-03-22T03:27:14.000Z | tests/test_decisions.py/test_binary_decision.py | evanofslack/pyminion | 0d0bfc6d8e84e9f33e617c7d01b6edb649166290 | [
"MIT"
] | 1 | 2021-12-23T18:32:47.000Z | 2021-12-23T18:32:47.000Z | from pyminion.decisions import binary_decision
| 27.761905 | 56 | 0.749571 |
a7f2b60426d4a9b0bbe027e12dcdd2ac3143d158 | 1,658 | py | Python | commands/limit.py | nstra111/autovc | e73e1fea7b566721c3dce3ca6f587472e7ee9d1b | [
"MIT"
] | 177 | 2020-02-02T18:03:46.000Z | 2022-03-17T06:18:43.000Z | commands/limit.py | zigsphere/Auto-Voice-Channels | 6ae901728580bef4246737a6f1b9f10763badd3e | [
"MIT"
] | 82 | 2020-02-02T17:43:18.000Z | 2022-03-24T20:34:55.000Z | commands/limit.py | zigsphere/Auto-Voice-Channels | 6ae901728580bef4246737a6f1b9f10763badd3e | [
"MIT"
] | 165 | 2019-02-17T20:15:20.000Z | 2022-03-27T23:59:23.000Z | import utils
import functions as func
from commands.base import Cmd
help_text = [
[
("Usage:", "<PREFIX><COMMAND>\n"
"<PREFIX><COMMAND> `N`"),
("Description:",
"Use when already in a channel - Limit the number of users allowed in your channel to either the current "
"number of users, or the specified number.\n\n"
"Use *<PREFIX>un<COMMAND>* to remove the limit."),
("Example:", "<PREFIX><COMMAND> 4"),
]
]
command = Cmd(
execute=execute,
help_text=help_text,
params_required=0,
admin_required=False,
voice_required=True,
creator_only=True,
)
| 28.586207 | 115 | 0.571773 |
a7f2fd039004fefa20925e8a466b301e8532a1f0 | 11,548 | py | Python | salt/tests/unit/modules/test_metalk8s_solutions.py | zarumaru/metalk8s | 8c79a28c2bd28ca5b84e58ace5605cbe6183fc75 | [
"Apache-2.0"
] | null | null | null | salt/tests/unit/modules/test_metalk8s_solutions.py | zarumaru/metalk8s | 8c79a28c2bd28ca5b84e58ace5605cbe6183fc75 | [
"Apache-2.0"
] | null | null | null | salt/tests/unit/modules/test_metalk8s_solutions.py | zarumaru/metalk8s | 8c79a28c2bd28ca5b84e58ace5605cbe6183fc75 | [
"Apache-2.0"
] | null | null | null | import errno
import os.path
import yaml
from parameterized import param, parameterized
from salt.exceptions import CommandExecutionError
from salttesting.mixins import LoaderModuleMockMixin
from salttesting.unit import TestCase
from salttesting.mock import MagicMock, mock_open, patch
import metalk8s_solutions
from tests.unit import utils
YAML_TESTS_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"files", "test_metalk8s_solutions.yaml"
)
with open(YAML_TESTS_FILE) as fd:
YAML_TESTS_CASES = yaml.safe_load(fd)
| 36.660317 | 79 | 0.559144 |
a7f5cbeb6c6ac6730e6541d991681e7c83554dd8 | 523 | py | Python | fun.py | Krishna-Aaseri/Python_Logical_Questions | c0f025a56dbbf85426142adb423b25fa7b034adb | [
"MIT"
] | null | null | null | fun.py | Krishna-Aaseri/Python_Logical_Questions | c0f025a56dbbf85426142adb423b25fa7b034adb | [
"MIT"
] | null | null | null | fun.py | Krishna-Aaseri/Python_Logical_Questions | c0f025a56dbbf85426142adb423b25fa7b034adb | [
"MIT"
] | null | null | null | #def add(num,num1):
# add1=num+num1
# print add1
#add(6,7)
#def welcome():
# print "python kaisa lagta h aapko"
# print "but please reply na kare aap"
#welcome()
user = int(raw_input("enter a number"))
i = 0
new = []
while i < (user):
user1 = int(raw_input("enter a number"))
new.append(user1)
i = i + 1
print new
print "**********************************************"
i = 0
new_list = []
while i < len(new):
if new[i]%2 == 0:
new_list.append(new)
else:
new_list.append(new)
i = i + 1
print new_list
| 13.763158 | 54 | 0.565966 |
a7f7aa50e11186fe4bb67eb3b4c81147ea13ad7a | 29 | py | Python | app.py | 00MB/lottocoin | ebf27f5a02169d948e8633b1dc5d5ad37ee1bb4a | [
"MIT"
] | 2 | 2021-02-10T01:40:36.000Z | 2021-02-10T01:41:22.000Z | app.py | 00MB/lottocoin | ebf27f5a02169d948e8633b1dc5d5ad37ee1bb4a | [
"MIT"
] | null | null | null | app.py | 00MB/lottocoin | ebf27f5a02169d948e8633b1dc5d5ad37ee1bb4a | [
"MIT"
] | null | null | null | from lottocoin import app
| 5.8 | 25 | 0.758621 |
a7f8a1ad9e3a4405d58d74a78a4a7eac31d085da | 30,883 | py | Python | api/views_v2.py | GeRDI-Project/HarvesterControlCenter | ce161a31a6510ae28ffa68b8e0fd43c42060cb07 | [
"Apache-2.0"
] | null | null | null | api/views_v2.py | GeRDI-Project/HarvesterControlCenter | ce161a31a6510ae28ffa68b8e0fd43c42060cb07 | [
"Apache-2.0"
] | 9 | 2020-01-07T12:40:26.000Z | 2021-09-22T18:00:38.000Z | api/views_v2.py | GeRDI-Project/HarvesterControlCenter | ce161a31a6510ae28ffa68b8e0fd43c42060cb07 | [
"Apache-2.0"
] | null | null | null | """
This is the views module which encapsulates the backend logic
which will be riggered via the corresponding path (url).
"""
import collections
import json
import logging
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views.generic import RedirectView
from django.views.generic.base import View
from django.views.generic.edit import FormMixin
from rest_framework import generics, permissions, status
from rest_framework.authentication import (BasicAuthentication,
TokenAuthentication)
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from api.constants import HCCJSONConstants as HCCJC
from api.forms import (HarvesterForm, SchedulerForm, UploadFileForm,
ValidateFileForm, create_config_fields,
create_config_form)
from api.harvester_api import InitHarvester
from api.mixins import AjaxableResponseMixin
from api.models import Harvester
from api.permissions import IsOwner
from api.serializers import HarvesterSerializer, UserSerializer
__author__ = "Jan Frmberg, Laura Hhle"
__copyright__ = "Copyright 2018, GeRDI Project"
__credits__ = ["Jan Frmberg"]
__license__ = "Apache 2.0"
__maintainer__ = "Jan Frmberg"
__email__ = "jan.froemberg@tu-dresden.de"
# Get an instance of a logger
LOGGER = logging.getLogger(__name__)
def index(request):
"""
Index to show something meaningful instead of an empty page.
:param request:
:return: a HttpResponse
"""
return HttpResponseRedirect(reverse('hcc_gui'))
def create_form(response, harvester_name):
"""
This method generates a scheduler form for a harvester
based on a harvester specific JSON response.
If there is no response a default empty form will be created
for that harvester.
:param response: the response
:return: SchedulerForm(prefix=harvester.name)
"""
if response:
response_dict = response.data[harvester_name]
if HCCJC.CRONTAB in response_dict:
# if a GET (or any other method) we'll create form
# initialized with a schedule for this harvester
jsonstr = {
HCCJC.POSTCRONTAB:
response_dict[HCCJC.CRONTAB]
}
placehldr = response_dict[HCCJC.CRONTAB]
form = SchedulerForm(prefix=harvester_name)
if isinstance(placehldr, list):
if len(placehldr) > 0:
placehldr = response_dict[HCCJC.CRONTAB][0]
form.fields[HCCJC.POSTCRONTAB].widget.attrs.update(
{'placeholder': placehldr})
return form
else:
jsonstr = {HCCJC.POSTCRONTAB: '0 0 * * *'}
form = SchedulerForm(initial=jsonstr,
prefix=harvester_name)
return form
else:
jsonstr = {HCCJC.POSTCRONTAB: '0 0 * * *'}
form = SchedulerForm(initial=jsonstr,
prefix=harvester_name)
return form
def home(request):
"""
Home entry point of Web-Application GUI.
"""
feedback = {}
# init session variables:
# theme (dark/light) with default light
theme = request.session.get('theme', 'light')
# viewtype (card/list/table) with default card
viewtype = request.session.get('viewtype', 'card')
# collapse status (visible/collapsed)
collapse_status = {}
collapse_status['toolbox'] = request.session.get('toolbox', 'collapsed')
collapse_status['chart'] = request.session.get('chart', 'collapsed')
collapse_status['disabled_harvs'] = request.session.get(
'disabled_harvs', 'collapsed')
collapse_status['enabled_harvs'] = request.session.get(
'enabled_harvs', 'visible')
# if user is logged in
if request.user.is_authenticated:
forms = {}
response = None
harvesters = Harvester.objects.all()
num_harvesters = len(harvesters)
num_enabled_harvesters = 0
num_disabled_harvesters = 0
# get status of each enabled harvester
for harvester in harvesters:
# TODO do that call at client side!!
if harvester.enabled:
num_enabled_harvesters += 1
api = InitHarvester(harvester).get_harvester_api()
response = api.harvester_status()
forms[harvester.name] = create_form(response, harvester.name)
if response:
feedback[harvester.name] = response.data[harvester.name]
else:
feedback[harvester.name] = {}
feedback[harvester.name][HCCJC.GUI_STATUS] = HCCJC.WARNING
err = 'Error : no response object'
feedback[harvester.name][HCCJC.HEALTH] = err
else:
num_disabled_harvesters += 1
# get total amount of docs
sum_harvested = 0
sum_max_docs = 0
for harvester in feedback.values():
if isinstance(harvester, dict):
for (_k, _v) in harvester.items():
if _k == HCCJC.CACHED_DOCS:
sum_harvested += int(_v)
if _k == HCCJC.MAX_DOCUMENTS:
if _v != 'N/A':
sum_max_docs += int(_v)
feedback['sum_harvested'] = sum_harvested
feedback['sum_maxdocs'] = sum_max_docs
feedback['num_disabled_harvesters'] = num_disabled_harvesters
feedback['num_enabled_harvesters'] = num_enabled_harvesters
feedback['num_harvesters'] = num_harvesters
msg = '{} enabled Harvesters with total amount \
of harvested Items so far: {}'.format(num_enabled_harvesters,
sum_harvested)
messages.add_message(request, messages.INFO, msg)
# init form
if request.method == 'POST':
form = SchedulerForm(request.POST)
if form.is_valid():
return HttpResponseRedirect(reverse('hcc_gui'))
return render(
request, 'hcc/index.html', {
'harvesters': harvesters,
'status': feedback,
'forms': forms,
'theme': theme,
'viewtype': viewtype,
'collapse_status': collapse_status
})
return render(request, 'hcc/index.html', {
'status': feedback
})
class HarvesterCreateView(generics.ListCreateAPIView):
"""
This class handles the GET and POST requests
to create/register a new harvester
to our Harvester Control Center REST-API.
"""
authentication_classes = (BasicAuthentication, TokenAuthentication)
queryset = Harvester.objects.all()
serializer_class = HarvesterSerializer
permission_classes = (permissions.IsAuthenticated, IsOwner)
def perform_create(self, serializer):
"""Save the post data when creating a new harvester."""
serializer.save(owner=self.request.user)
class HarvesterDetailsView(generics.RetrieveUpdateDestroyAPIView):
"""
This class handles GET, PUT, PATCH and DELETE requests.
To show, modify and delete an registered harvester.
"""
authentication_classes = (BasicAuthentication, )
lookup_field = 'name'
queryset = Harvester.objects.all()
serializer_class = HarvesterSerializer
permission_classes = (permissions.IsAuthenticated, IsOwner)
class UserView(generics.ListAPIView):
"""
View to list the control center registered users.
"""
authentication_classes = (BasicAuthentication, )
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetailsView(generics.RetrieveAPIView):
"""
View to retrieve a user instance.
"""
authentication_classes = (BasicAuthentication, )
queryset = User.objects.all()
serializer_class = UserSerializer
class EditHarvesterView(LoginRequiredMixin, View,
AjaxableResponseMixin, FormMixin):
"""
This class handles AJAx, GET, DELETE and POST requests
to control the edit of the harvesters.
"""
class ConfigHarvesterView(LoginRequiredMixin, View,
AjaxableResponseMixin, FormMixin):
"""
This class handles GET, DELETE and POST requests
to control the config of the harvesters.
"""
class ScheduleHarvesterView(
SuccessMessageMixin, RedirectView, AjaxableResponseMixin, FormMixin):
"""
This class handles GET, DELETE and POST requests
to control the scheduling of harvesters.
"""
success_message = "Schedule for %(name) was created successfully"
| 36.036173 | 96 | 0.628307 |
a7f8c76db9c1ab40ada45ae9f5ec62c61c102d7a | 8,151 | py | Python | rin/modules/setu/lolicon.py | oralvi/rinyuuki | 2b55a5a9f0ebbecbdba815e242450b248c8e727a | [
"MIT"
] | null | null | null | rin/modules/setu/lolicon.py | oralvi/rinyuuki | 2b55a5a9f0ebbecbdba815e242450b248c8e727a | [
"MIT"
] | null | null | null | rin/modules/setu/lolicon.py | oralvi/rinyuuki | 2b55a5a9f0ebbecbdba815e242450b248c8e727a | [
"MIT"
] | null | null | null | import datetime
import io
import json
import os
import random
import traceback
import aiohttp
from PIL import Image
import rin
from rin import R
from .config import get_api_num, get_config, key_vaildable_query, set_key_invaild
quota_limit_time = datetime.datetime.now()
native_info = {}
native_r18_info = {}
#
def save_image(image):
path = f'setu_mix/lolicon/{image["id"]}'
if image['r18']:
path = f'setu_mix/lolicon_r18/{image["id"]}'
res = R.img(path + '.jpg')
with open(res.path, 'wb') as f:
f.write(image['data'])
res = R.img(path + '.json')
info = {
'title': image['title'],
'author': image['author'],
'url': image['url'],
'tags': image['tags'],
}
with open(res.path, 'w', encoding='utf8') as f:
json.dump(info, f, ensure_ascii=False, indent=2)
# r18: 0 1 r18 2
# r18: 0 1 r18 2
def lolicon_init():
global native_info
global native_r18_info
if get_config('lolicon', 'mode') == 3:
native_info = load_native_info('lolicon')
native_r18_info = load_native_info('lolicon_r18')
| 25.392523 | 100 | 0.655502 |
a7fa41d77e47cb4e42dcb175ead24d162418cceb | 363 | py | Python | Python Backend/diarization/build/lib/s4d/__init__.py | AdityaK1211/Final_Year_Project_SCET | 1a6092e1345dad473375ada787fb5cb00ee7515f | [
"MIT"
] | 1 | 2022-02-15T02:49:22.000Z | 2022-02-15T02:49:22.000Z | Python Backend/diarization/build/lib/s4d/__init__.py | AdityaK1211/Final_Year_Project_SCET | 1a6092e1345dad473375ada787fb5cb00ee7515f | [
"MIT"
] | null | null | null | Python Backend/diarization/build/lib/s4d/__init__.py | AdityaK1211/Final_Year_Project_SCET | 1a6092e1345dad473375ada787fb5cb00ee7515f | [
"MIT"
] | 2 | 2021-07-11T12:42:43.000Z | 2022-02-15T02:49:24.000Z | __author__ = 'meignier'
import s4d.clustering.hac_bic
import s4d.clustering.hac_clr
import s4d.clustering.hac_iv
import s4d.clustering.hac_utils
import s4d.model_iv
from s4d.clustering.cc_iv import ConnectedComponent
from s4d.diar import Diar
from s4d.segmentation import sanity_check, bic_linear, div_gauss
from s4d.viterbi import Viterbi
__version__ = "0.0.1" | 27.923077 | 64 | 0.837466 |
a7faceab673a31a756534245b8aaabc503d661d6 | 1,217 | py | Python | docs/demos/theme_explorer/list_group.py | sthagen/facultyai-dash-bootstrap-components | 2dd5eaf1c1494b2077bcee82eb7968ec2e23af46 | [
"Apache-2.0"
] | 50 | 2018-09-23T08:57:28.000Z | 2019-02-02T19:59:35.000Z | docs/demos/theme_explorer/list_group.py | sthagen/dash-bootstrap-components | d79ad7f8fdf4c26165038e6989e24f2ac17663b1 | [
"Apache-2.0"
] | 99 | 2018-09-21T11:06:29.000Z | 2019-02-04T09:04:07.000Z | docs/demos/theme_explorer/list_group.py | sthagen/dash-bootstrap-components | d79ad7f8fdf4c26165038e6989e24f2ac17663b1 | [
"Apache-2.0"
] | 3 | 2018-09-25T02:16:24.000Z | 2018-12-22T20:56:31.000Z | import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
list_group = html.Div(
[
make_subheading("ListGroup", "list_group"),
dbc.ListGroup(
[
dbc.ListGroupItem("No color applied"),
dbc.ListGroupItem("The primary item", color="primary"),
dbc.ListGroupItem("A secondary item", color="secondary"),
dbc.ListGroupItem("A successful item", color="success"),
dbc.ListGroupItem("A warning item", color="warning"),
dbc.ListGroupItem("A dangerous item", color="danger"),
dbc.ListGroupItem("An informative item", color="info"),
dbc.ListGroupItem("A light item", color="light"),
dbc.ListGroupItem("A dark item", color="dark"),
dbc.ListGroupItem("An action item", action=True),
dbc.ListGroupItem("An active item", active=True),
dbc.ListGroupItem(
[
html.H5("Item 4 heading"),
html.P("Item 4 text"),
]
),
]
),
],
className="mb-4",
)
| 36.878788 | 73 | 0.520953 |
a7fd2734a072f2bcad84ee84ad66f361e1da1371 | 865 | py | Python | products/migrations/0004_auto_20210715_2006.py | keeks-mtl/go-tennis | af3f325a9cfb2faba4d935824492f4aea6d10309 | [
"W3C",
"PostgreSQL"
] | null | null | null | products/migrations/0004_auto_20210715_2006.py | keeks-mtl/go-tennis | af3f325a9cfb2faba4d935824492f4aea6d10309 | [
"W3C",
"PostgreSQL"
] | null | null | null | products/migrations/0004_auto_20210715_2006.py | keeks-mtl/go-tennis | af3f325a9cfb2faba4d935824492f4aea6d10309 | [
"W3C",
"PostgreSQL"
] | null | null | null | # Generated by Django 3.2.3 on 2021-07-15 20:06
from django.db import migrations, models
| 25.441176 | 62 | 0.557225 |
a7fd5742db5fc95146713081d7d7a20b702afa5b | 5,967 | py | Python | task_landscape.py | aspnetcs/RecurJac-and-CROWN | 5b3fcaaa7a275483e26164506f66a618538ee881 | [
"BSD-2-Clause"
] | 54 | 2020-09-09T12:43:43.000Z | 2022-03-17T17:31:19.000Z | task_landscape.py | huanzhang12/RecurJac-Jacobian-Bounds | 163c84e7a8d345d18c718cf6b0bc61baa8a1a78a | [
"BSD-2-Clause"
] | 8 | 2020-09-23T05:11:31.000Z | 2022-03-12T00:47:29.000Z | task_landscape.py | huanzhang12/RecurJac | 163c84e7a8d345d18c718cf6b0bc61baa8a1a78a | [
"BSD-2-Clause"
] | 5 | 2020-09-10T07:19:43.000Z | 2021-07-24T06:28:04.000Z | ## task_landscape.py
##
## Run RecurJac/FastLip bounds for exploring local optimization landscape
##
## Copyright (C) 2018, Huan Zhang <huan@huan-zhang.com> and contributors
##
## This program is licenced under the BSD 2-Clause License,
## contained in the LICENCE file in this directory.
## See CREDITS for a list of contributors.
##
import time
import numpy as np
from collections import defaultdict
from utils import binary_search
from bound_base import compute_bounds
from bound_spectral import spectral_bound
| 54.743119 | 392 | 0.657282 |
a7fe6e62a19f61aac68612f736fdb8db8ad2bc69 | 5,956 | py | Python | tests/test_prepareDeploymentContainerDefinitionsStep.py | AdventielFr/ecs-crd-cli | f1421055be0b2b25e5334aef277d27bc30f161e5 | [
"MIT"
] | 1 | 2020-07-22T15:18:51.000Z | 2020-07-22T15:18:51.000Z | tests/test_prepareDeploymentContainerDefinitionsStep.py | AdventielFr/ecs-crd-cli | f1421055be0b2b25e5334aef277d27bc30f161e5 | [
"MIT"
] | 4 | 2020-03-24T17:30:40.000Z | 2021-06-02T00:23:48.000Z | tests/test_prepareDeploymentContainerDefinitionsStep.py | AdventielFr/ecs-crd-cli | f1421055be0b2b25e5334aef277d27bc30f161e5 | [
"MIT"
] | 2 | 2019-09-24T15:21:56.000Z | 2021-07-05T07:25:20.000Z | import pytest
from unittest.mock import MagicMock
import logging
from ecs_crd.canaryReleaseInfos import CanaryReleaseInfos
from ecs_crd.prepareDeploymentContainerDefinitionsStep import PrepareDeploymentContainerDefinitionsStep
from ecs_crd.canaryReleaseInfos import ScaleInfos
logger = logging.Logger('mock')
infos = CanaryReleaseInfos(action='test')
step = PrepareDeploymentContainerDefinitionsStep(infos, logger)
| 30.54359 | 103 | 0.684184 |
a7fef4c124b33416bb39eb6677220ad02a959e38 | 4,871 | py | Python | Sim/A_star/A_star.py | Chains99/Battlefield-Simulator | 9dc209c34aac5160232e47d6799bbe1b1bfcebad | [
"MIT"
] | null | null | null | Sim/A_star/A_star.py | Chains99/Battlefield-Simulator | 9dc209c34aac5160232e47d6799bbe1b1bfcebad | [
"MIT"
] | null | null | null | Sim/A_star/A_star.py | Chains99/Battlefield-Simulator | 9dc209c34aac5160232e47d6799bbe1b1bfcebad | [
"MIT"
] | null | null | null | from Sim.A_star.heap import node_heap,heapify_down,heapify_up,extract_min,append_node
from math import inf
from math import pow
# Recibe x, y componentes de la matriz y devuelve la distancia euclideana entre ellos
# Construimos un heap con los nodos del mapa con un valor inicial inf
# Actualizamos el peso/costo de cada nodo con el valor de la heuristica
# Genera los nodos adyacentes al nodo en la posicion pos en la matriz map
# Creamos un camino directo desde s hasta d, creado con cada componente del camino
# Devuelve None si no existe un camino
def adyacent_free_square(path, d, map, cols):
    """Return a neighbour of ``d`` that the search actually reached.

    Scans the cells adjacent to ``d`` (as produced by ``adyacent_nodes``) and
    returns the first one whose entry in ``path`` is not ``inf``.  When no
    neighbour was reached, ``d`` itself is returned.

    ``path`` is a flattened row-major table, so cell (r, c) lives at index
    ``r * cols + c``.
    """
    for candidate in adyacent_nodes(map, d):
        if path[candidate[0] * cols + candidate[1]] != inf:
            return candidate
    return d
"""
Hs funcion lambda : lambda x -> y
Donde y es el valor de la heuristica asociado a x
"""
"""
a_star(matriz, hs, s, d)
parametros:
matriz: matriz con los valores de consumo de movimiento de cada casilla (costo),
si una casilla no puede ser transitada toma valor inf
hs funcion lambda : lambda x -> y donde y es el valor de la heuristica asociado a x
s: nodo inicial (i,j) fila i columna j
d: nodo destino (i,j)
terrain_map: mapa del terreno
soldiers_positions_matrix: matriz booleana con las posiciones del los soldados
Algoritmo de A-star para encontrar encontrar el camino mas corto de un punto de una matriz a otro
Devuelve una lista tupla (i,j) que representa el camino optimo de s a d
"""
def a_star(map, hs, s, d, terrain_map, soldiers_positions_matrix):
    """Find the cheapest path from ``s`` to ``d`` with an A*-style search.

    ``map`` holds per-cell movement costs (``inf`` = impassable), ``hs`` is
    the heuristic (``hs((row, col)) -> float``), ``terrain_map`` and
    ``soldiers_positions_matrix`` mark unavailable/occupied squares.
    Returns the result of ``make_path`` (the optimal (row, col) sequence,
    or whatever it yields when no path exists).
    """
    cols = len(map[0])
    # Per-cell visited flags, same shape as the cost matrix.
    visited = [[False] * cols for _ in range(len(map))]
    # One heap node per cell; init_nodes seeds every cost with inf.
    nodes = init_nodes(map)
    # Seed the search: the start cell costs 0 and goes on the heap.
    start_index = s[0] * cols + s[1]
    nodes[start_index].value = 0
    heap = []
    append_node(heap, nodes[start_index])
    # Flattened row-major predecessor table; inf marks "never reached".
    predecessors = dijkstra(
        map, nodes, heap, visited, [inf] * len(nodes), d, hs,
        terrain_map, soldiers_positions_matrix)
    return make_path(predecessors, s, d, cols, map)
def dijkstra(w_graph, nodes, heap, visited, path, destiny, hs, terrain_map, soldiers_positions_matrix):
    """Heap-driven relaxation loop backing the A* search.

    Repeatedly pops the cheapest node until the heap is empty or ``destiny``
    is popped, relaxing every neighbour that is passable, not occupied by a
    soldier, available on the terrain map and not yet visited.  Returns the
    flattened predecessor table ``path``.
    """
    cols = len(w_graph[0])
    while heap:
        current = extract_min(heap)
        visited[current.id[0]][current.id[1]] = True
        if current.id == destiny:
            return path
        for neighbour_pos in adyacent_nodes(w_graph, current.id):
            row, col = neighbour_pos[0], neighbour_pos[1]
            # Skip impassable terrain and squares occupied by soldiers.
            if w_graph[row][col] == inf or soldiers_positions_matrix[row][col]:
                continue
            # Skip unavailable cells and cells already finalised.
            if not terrain_map[row][col].available or visited[row][col]:
                continue
            neighbour_node = nodes[row * cols + col]
            # First time this cell is reached: put its node on the heap.
            if neighbour_node.value == inf:
                append_node(heap, neighbour_node)
            relax(current.id, neighbour_pos, w_graph, nodes, heap, path, hs)
    return path
def relax(u, v, graph, nodes, heap, path, hs):
    """Edge relaxation with the heuristic folded into the cost.

    When reaching ``v`` through ``u`` is cheaper than ``v``'s current cost,
    update the cost and predecessor and restore the heap invariant for the
    touched node.  Assumes ``hs`` is a pure (deterministic) heuristic.
    """
    cols = len(graph[0])
    target = nodes[v[0] * cols + v[1]]
    # Candidate cost: u's cost + v's terrain cost + heuristic at v.
    tentative = nodes[u[0] * cols + u[1]].value + graph[v[0]][v[1]] + hs((v[0], v[1]))
    if target.value > tentative:
        target.value = tentative
        path[v[0] * cols + v[1]] = u
        heapify_up(heap, target)
a7ff7c5a80c329a083ab577506e02644e8986047 | 919 | py | Python | setup.py | danielcliu/youtube-transcript-channel-api | 3102c23379ad86231374b4763716310890133553 | [
"MIT"
] | 7 | 2020-10-21T08:55:38.000Z | 2021-03-22T02:53:20.000Z | setup.py | danielcliu/youtube-transcript-channel-api | 3102c23379ad86231374b4763716310890133553 | [
"MIT"
] | 3 | 2021-06-02T12:30:15.000Z | 2022-02-11T12:46:13.000Z | setup.py | danielcliu/youtube-transcript-channel-api | 3102c23379ad86231374b4763716310890133553 | [
"MIT"
] | 1 | 2021-03-25T20:03:36.000Z | 2021-03-25T20:03:36.000Z | import setuptools
# Long description for the PyPI project page, taken from the README.
# The explicit encoding prevents a UnicodeDecodeError on platforms whose
# default locale encoding is not UTF-8 (e.g. Windows cp1252).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="youtube-channel-transcript-api",
    version="0.0.1",
    author="Daniel Liu",
    author_email="dcliu@ucdavis.edu",
    description="A python package the utilizes the Youtube Data V3 API to get all transcripts from a given channel/playlist.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/danielcliu/youtube-channel-transcript-api",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    # Runtime dependencies: the Google API client enumerates channel and
    # playlist videos; youtube-transcript-api fetches the transcripts.
    install_requires=[
        'requests',
        'google-api-python-client',
        'youtube-transcript-api',
    ],
)
| 32.821429 | 126 | 0.669206 |
c50026f8c76e0a37813e1e12579a0e280470bcd9 | 1,252 | py | Python | 51-100/67.py | yshshadow/Leetcode | 5097f69bb0050d963c784d6bc0e88a7e871568ed | [
"MIT"
] | null | null | null | 51-100/67.py | yshshadow/Leetcode | 5097f69bb0050d963c784d6bc0e88a7e871568ed | [
"MIT"
] | null | null | null | 51-100/67.py | yshshadow/Leetcode | 5097f69bb0050d963c784d6bc0e88a7e871568ed | [
"MIT"
] | null | null | null | # Given two binary strings, return their sum (also a binary string).
#
# For example,
# a = "11"
# b = "1"
# Return "100".
s = Solution()
print(s.addBinary("0", "0"))
# print(s.addBinary("11", "1"))
| 23.185185 | 68 | 0.384185 |
c5013b78c012ef81d3fc817c0e0956eb0e420741 | 2,761 | py | Python | src/mds/query.py | phs-rcg/metadata-service | 227ab79f1d8eadc3265cdb0c0bfcfc54db1da3b8 | [
"Apache-2.0"
] | 10 | 2020-04-28T10:20:02.000Z | 2021-11-01T22:20:10.000Z | src/mds/query.py | phs-rcg/metadata-service | 227ab79f1d8eadc3265cdb0c0bfcfc54db1da3b8 | [
"Apache-2.0"
] | 23 | 2020-02-04T22:36:02.000Z | 2022-03-24T18:26:49.000Z | src/mds/query.py | phs-rcg/metadata-service | 227ab79f1d8eadc3265cdb0c0bfcfc54db1da3b8 | [
"Apache-2.0"
] | 6 | 2020-01-14T20:44:50.000Z | 2022-02-15T22:17:14.000Z | from fastapi import HTTPException, Query, APIRouter
from starlette.requests import Request
from starlette.status import HTTP_404_NOT_FOUND
from .models import db, Metadata
mod = APIRouter()
def init_app(app):
    """Attach this module's router to the FastAPI *app* under the "Query" tag."""
    app.include_router(mod, tags=["Query"])
| 27.61 | 88 | 0.574067 |
c5013dd528c7dae240161ed939f71c7b9ea2e1ef | 695 | py | Python | src/RTmission/storeinfo/forms.py | shehabkotb/RTmission_backend | 90afb06e7d290e934e3e5f77e789b9c5227805f7 | [
"BSD-3-Clause"
] | null | null | null | src/RTmission/storeinfo/forms.py | shehabkotb/RTmission_backend | 90afb06e7d290e934e3e5f77e789b9c5227805f7 | [
"BSD-3-Clause"
] | null | null | null | src/RTmission/storeinfo/forms.py | shehabkotb/RTmission_backend | 90afb06e7d290e934e3e5f77e789b9c5227805f7 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
from django.core import validators
from .models import UserInfo
| 22.419355 | 89 | 0.539568 |
c50321a74b64b29dc6c4a647f031c7b97620662c | 2,668 | py | Python | python/54.spiral-matrix.py | kadaliao/leetcode | 32170b1c2ba24b3765d22f9379534651080bab26 | [
"MIT"
] | null | null | null | python/54.spiral-matrix.py | kadaliao/leetcode | 32170b1c2ba24b3765d22f9379534651080bab26 | [
"MIT"
] | null | null | null | python/54.spiral-matrix.py | kadaliao/leetcode | 32170b1c2ba24b3765d22f9379534651080bab26 | [
"MIT"
] | null | null | null | # @lc app=leetcode.cn id=54 lang=python3
#
# [54]
#
# https://leetcode-cn.com/problems/spiral-matrix/description/
#
# algorithms
# Medium (43.22%)
# Total Accepted: 129.4K
# Total Submissions: 284.1K
# Testcase Example: '[[1,2,3],[4,5,6],[7,8,9]]'
#
# m n matrix
#
#
# 1
#
#
# matrix = [[1,2,3],[4,5,6],[7,8,9]]
# [1,2,3,6,9,8,7,4,5]
#
#
# 2
#
#
# matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
# [1,2,3,4,8,12,11,10,9,5,6,7]
#
#
#
#
#
#
#
# m == matrix.length
# n == matrix[i].length
# 1
# -100
#
#
#
from typing import List
# m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# ret = Solution().spiralOrder(m)
# print(ret)
# ret = Solution().spiralOrder0(m)
# print(ret)
| 24.477064 | 87 | 0.467391 |
c503e7668b1bca9c8fa3e9b2fad69b66aea6dd54 | 660 | py | Python | tests/test_path.py | Infinidat/infi.gevent-utils | 7aef923fb19c2ea7abfe9f8341d2dfcb7b7eebdd | [
"BSD-3-Clause"
] | null | null | null | tests/test_path.py | Infinidat/infi.gevent-utils | 7aef923fb19c2ea7abfe9f8341d2dfcb7b7eebdd | [
"BSD-3-Clause"
] | null | null | null | tests/test_path.py | Infinidat/infi.gevent-utils | 7aef923fb19c2ea7abfe9f8341d2dfcb7b7eebdd | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from infi.gevent_utils.os import path
import sys
import os
sys.path.append(os.path.dirname(__file__))
from utils import GreenletCalledValidatorTestCase
| 33 | 100 | 0.771212 |
c5041849eb6e20166cf188e490e80a877301469d | 2,951 | py | Python | download-from-web/govori.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | download-from-web/govori.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | download-from-web/govori.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Download .mp3 podcast files of Radio Belgrade show Govori da bih te video (Speak so that I can see you)
# grab all mp3s and save them with parsed name and date to the output folder
import requests
import os
import time
import xml.dom.minidom
from urllib.parse import urlparse
# Podcast feed URL; the response is an RSS/XML document parsed below.
url = "https://www.rts.rs/page/radio/sr/podcast/5433/govori-da-bih-te-video/audio.html"

# Everything from this run goes into a timestamped output directory so that
# repeated runs never overwrite each other's downloads.
timestamp = time.strftime("%Y%m%d-%H%M%S")
out_dir = os.path.join("govori_" + timestamp)
doc_path = "govori_" + timestamp + ".xml"
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

try:
    req = requests.get(url)
    req.raise_for_status()
    doc = xml.dom.minidom.parseString(req.text)  # TODO check if it is valid XML
    items = doc.getElementsByTagName("item")
    print("found ", len(items), " items")
    for item in items:
        links = item.getElementsByTagName("link")
        if len(links) > 0:
            print(links[0].firstChild.data)  # read element data value
            # Episode name = the .html page's basename without its extension.
            a = urlparse(links[0].firstChild.data)
            out_fname_pname = os.path.basename(a.path).replace('.html', '')
        else:
            out_fname_pname = "NA"
        enclosures = item.getElementsByTagName("enclosure")
        if len(enclosures) > 0:
            url_value = enclosures[0].attributes["url"].value  # read attribute value
            print(url_value)
            if url_value.endswith('.mp3'):
                # The broadcast date is encoded in the URL path as
                # .../yyyy/mm/dd/...; join those three path components.
                url_elements = urlparse(url_value).path.split('/')
                if len(url_elements) >= 5:
                    out_fname_date = ''.join(url_elements[-5:-2])
                else:
                    out_fname_date = "NA"
                out_file = out_fname_date + "_" + out_fname_pname + ".mp3"
                # Only report success after the file has actually been
                # written (the old code printed "saved to" before the
                # download even started).
                print("saving... ", end='')
                try:
                    req = requests.get(url_value)
                    req.raise_for_status()
                    # 'with' guarantees the handle is closed even on error.
                    with open(os.path.join(out_dir, out_file), 'wb') as mp3_file:
                        mp3_file.write(req.content)
                    print("saved to " + os.path.join(out_dir, out_file))
                except requests.exceptions.HTTPError as err:
                    print(err)
            print("")
    # Keep a copy of the RSS document alongside the downloaded episodes.
    with open(os.path.join(out_dir, doc_path), "w", encoding="utf-8") as f:
        f.write(doc.toprettyxml())
    print(os.path.join(out_dir, doc_path))
except requests.exceptions.HTTPError as err:
    print(err)
| 36.432099 | 105 | 0.597763 |
c506aceeb7ea06c9672cd06b35d80f96cd51d00c | 830 | py | Python | setup.py | uhlerlab/conditional_independence | aa4b5117b6f24bf39433d427d490312864e9bd69 | [
"BSD-3-Clause"
] | 4 | 2021-01-29T20:27:31.000Z | 2022-02-01T11:55:33.000Z | setup.py | uhlerlab/conditional_independence | aa4b5117b6f24bf39433d427d490312864e9bd69 | [
"BSD-3-Clause"
] | null | null | null | setup.py | uhlerlab/conditional_independence | aa4b5117b6f24bf39433d427d490312864e9bd69 | [
"BSD-3-Clause"
] | 1 | 2021-09-12T13:41:21.000Z | 2021-09-12T13:41:21.000Z | import setuptools
# Packaging metadata for the conditional_independence library
# (parametric and non-parametric conditional independence tests).
setuptools.setup(
    name='conditional_independence',
    version='0.1a.4',
    description='Parametric and non-parametric conditional independence tests.',
    long_description='',
    author='Chandler Squires',
    author_email='chandlersquires18@gmail.com',
    packages=setuptools.find_packages(exclude=['tests']),
    python_requires='>3.5.0',
    zip_safe=False,
    classifiers=[
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
    # NOTE(review): optional accelerators (scikit_sparse, numba) are kept
    # commented out rather than required.
    install_requires=[
        'scipy',
        'dataclasses',
        'numpy',
        # 'scikit_sparse',
        'numexpr',
        'scikit_learn',
        'typing',
        'pygam',
        'tqdm',
        # 'numba',
        'ipdb',
    ]
)
| 25.151515 | 80 | 0.6 |
c509490417a8c93598f13380d18986bf96b33fd7 | 200 | py | Python | feedbacks/urls.py | mpyatishev/djfeedback | fc1ebf0646d4449371ed80560db7cbb3f7996156 | [
"MIT"
] | null | null | null | feedbacks/urls.py | mpyatishev/djfeedback | fc1ebf0646d4449371ed80560db7cbb3f7996156 | [
"MIT"
] | null | null | null | feedbacks/urls.py | mpyatishev/djfeedback | fc1ebf0646d4449371ed80560db7cbb3f7996156 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from . import views
# Legacy URLconf style: ``patterns()`` existed in Django < 1.8 and was
# removed in Django 1.10; modern Django uses a plain list of url()/path().
urlpatterns = patterns(
    '',
    # POST endpoint for submitting feedback, handled by FeedbackView.
    url(r'feedback$', views.FeedbackView.as_view(), name='feedback-post')
)
| 15.384615 | 73 | 0.655 |
c5095d645afe3699b5e0ecd4c38a1042890d0c0e | 6,466 | py | Python | pygeoapi/provider/mongo.py | paul121/pygeoapi | 21c4d36a408f510ac83ff6c1d56932338ddb6d6e | [
"MIT"
] | null | null | null | pygeoapi/provider/mongo.py | paul121/pygeoapi | 21c4d36a408f510ac83ff6c1d56932338ddb6d6e | [
"MIT"
] | null | null | null | pygeoapi/provider/mongo.py | paul121/pygeoapi | 21c4d36a408f510ac83ff6c1d56932338ddb6d6e | [
"MIT"
] | 1 | 2021-10-02T14:04:20.000Z | 2021-10-02T14:04:20.000Z | # =================================================================
#
# Authors: Timo Tuunanen <timo.tuunanen@rdvelho.com>
#
# Copyright (c) 2019 Timo Tuunanen
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from datetime import datetime
import logging
from bson import Code
from pymongo import MongoClient
from pymongo import GEOSPHERE
from pymongo import ASCENDING, DESCENDING
from pymongo.collection import ObjectId
from pygeoapi.provider.base import BaseProvider, ProviderItemNotFoundError
LOGGER = logging.getLogger(__name__)
| 35.333333 | 79 | 0.604547 |
c509a2151c61ed3015af0423248b9cd0ce672927 | 1,975 | py | Python | examples/ecs/server_interface.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 43 | 2018-12-19T08:39:15.000Z | 2021-07-21T02:45:43.000Z | examples/ecs/server_interface.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 11 | 2019-03-17T13:28:56.000Z | 2020-09-23T23:57:50.000Z | examples/ecs/server_interface.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 47 | 2018-12-19T05:14:25.000Z | 2022-03-19T15:28:30.000Z | # -*-coding:utf-8 -*-
from openstack import connection
# create connection
username = "xxxxxx"
password = "xxxxxx"
projectId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # tenant ID
userDomainId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # user account ID
auth_url = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # endpoint url
conn = connection.Connection(auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password)
# create server interface
# delete interface
# show interface detail
# get list of interface
if __name__ == "__main__":
server_id = "8700184b-79ff-414b-ab8e-11ed01bd3d3d"
net_id = "e2103034-dcf3-4ac3-b551-6d5dd8fadb6e"
server = create_server_interface(server_id, net_id)
get_server_interface(server.id, server_id)
server_interfaces(server_id)
delete_server_interface(server.id, server_id)
| 32.377049 | 75 | 0.671392 |
c50a6cdccc88ffe721b0e07a35e407563cda966e | 9,060 | py | Python | sdk/python/pulumi_google_native/dlp/v2/stored_info_type.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/dlp/v2/stored_info_type.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/dlp/v2/stored_info_type.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StoredInfoTypeArgs', 'StoredInfoType']
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   config: Optional[pulumi.Input[pulumi.InputType['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']]] = None,
                   location: Optional[pulumi.Input[str]] = None,
                   project: Optional[pulumi.Input[str]] = None,
                   stored_info_type_id: Optional[pulumi.Input[str]] = None,
                   __props__=None):
        # Generated by the Pulumi SDK Generator: shared constructor body for
        # the StoredInfoType resource.
        # Normalise resource options: default them, validate the type, and
        # pin the provider plugin version when the caller did not set one.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # __props__ may only be supplied together with opts.id (the
        # "get an existing resource" path).
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = StoredInfoTypeArgs.__new__(StoredInfoTypeArgs)
        # 'config' is the only required input for a new resource
        # (opts.urn is set when rehydrating an existing one).
        if config is None and not opts.urn:
            raise TypeError("Missing required property 'config'")
        # Inputs supplied by the caller.
        __props__.__dict__["config"] = config
        __props__.__dict__["location"] = location
        __props__.__dict__["project"] = project
        __props__.__dict__["stored_info_type_id"] = stored_info_type_id
        # Output-only properties start as None and are resolved by the engine.
        __props__.__dict__["current_version"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["pending_versions"] = None
        # Register with the Pulumi engine under the fully-qualified type token.
        super(StoredInfoType, __self__).__init__(
            'google-native:dlp/v2:StoredInfoType',
            resource_name,
            __props__,
            opts)
| 45.757576 | 294 | 0.663135 |
c50a6f36d8c6b2d26bcac12eab8fe5a236ca18f3 | 7,795 | py | Python | python/GafferSceneUI/SceneHistoryUI.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneUI/SceneHistoryUI.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneUI/SceneHistoryUI.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import IECoreScene
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
##########################################################################
# Internal implementation
##########################################################################
__editSourceKeyPress = GafferUI.KeyEvent( "E", GafferUI.KeyEvent.Modifiers.Alt )
__editTweaksKeyPress = GafferUI.KeyEvent(
"E",
GafferUI.KeyEvent.Modifiers(
GafferUI.KeyEvent.Modifiers.Alt | GafferUI.KeyEvent.Modifiers.Shift
)
)
| 31.946721 | 109 | 0.710969 |
c50ac3b029d23e93f95a2998c1cb8c9b33f3b8ee | 294 | py | Python | core/middleware/scheduler.py | jiangxuewen16/hq-crawler | f03ec1e454513307e335943f224f4d927eaf2bbf | [
"MIT"
] | 1 | 2021-02-25T08:33:40.000Z | 2021-02-25T08:33:40.000Z | core/middleware/scheduler.py | jiangxuewen16/hq-crawler | f03ec1e454513307e335943f224f4d927eaf2bbf | [
"MIT"
] | null | null | null | core/middleware/scheduler.py | jiangxuewen16/hq-crawler | f03ec1e454513307e335943f224f4d927eaf2bbf | [
"MIT"
] | 2 | 2021-03-08T07:25:16.000Z | 2021-12-07T15:28:02.000Z | from django.utils.deprecation import MiddlewareMixin
from django.utils.autoreload import logger
| 21 | 52 | 0.731293 |
c50d8c67882d7ef410bf79b36de881a95ed1d06e | 631 | py | Python | python/cw/letterfreq2.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | 7 | 2016-01-03T19:42:07.000Z | 2018-10-23T14:03:12.000Z | python/cw/letterfreq2.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | null | null | null | python/cw/letterfreq2.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | 1 | 2018-03-09T08:52:01.000Z | 2018-03-09T08:52:01.000Z | #!/usr/bin/env python
"""Print per-letter frequency percentages for a text file.

Usage: letterfreq2.py <path>

Counts ASCII letters (case-folded to lower case) and spaces; every other
character is ignored.  Output is one "'<char>' <pct>%" line per counted
character, sorted by descending frequency and then alphabetically.

Ported from Python 2 (``print`` statement, ``dict.iteritems`` and tuple
parameters in lambdas are all syntax errors on Python 3).
"""
import sys
from collections import Counter
from string import ascii_lowercase


def letter_percentages(lines):
    """Return {char: percentage} for the letters/spaces in *lines*."""
    allowed = set(ascii_lowercase + ' ')
    counts = Counter(
        ch.lower() for line in lines for ch in line if ch.lower() in allowed
    )
    total = sum(counts.values())
    if total == 0:
        # Nothing countable in the input: avoid a ZeroDivisionError.
        return {}
    # Same two-decimal rounding as the original: format as a percent string,
    # strip the trailing '%', and convert back to float.
    return {ch: float('{:.2%}'.format(n / total)[:-1]) for ch, n in counts.items()}


def main():
    with open(sys.argv[1]) as f:
        lines = f.read().splitlines()
    freqs = letter_percentages(lines)
    for ch, pct in sorted(freqs.items(), key=lambda kv: (-kv[1], kv[0])):
        print("'{}' {}%".format(ch, pct))


if __name__ == "__main__":
    main()
| 21.033333 | 55 | 0.557845 |
c50e54be42b3e46e041c3408bc115beca68acf17 | 946 | py | Python | function/python/brightics/function/extraction/test/label_encoder_test.py | GSByeon/studio | 782cf484541c6d68e1451ff6a0d3b5dc80172664 | [
"Apache-2.0"
] | null | null | null | function/python/brightics/function/extraction/test/label_encoder_test.py | GSByeon/studio | 782cf484541c6d68e1451ff6a0d3b5dc80172664 | [
"Apache-2.0"
] | null | null | null | function/python/brightics/function/extraction/test/label_encoder_test.py | GSByeon/studio | 782cf484541c6d68e1451ff6a0d3b5dc80172664 | [
"Apache-2.0"
] | 1 | 2020-11-19T06:44:15.000Z | 2020-11-19T06:44:15.000Z | import unittest
from brightics.function.extraction.encoder import label_encoder, \
label_encoder_model
from brightics.common.datasets import load_iris
import random
| 30.516129 | 101 | 0.684989 |
c510311a203699e5c3e0a6d1d76232cf2598509a | 4,629 | py | Python | data/query-with-params/parameter_supported_query_results.py | samelamin/setup | 73f7807ad1bd37bfc7e7021c8c71f9ef34c8b9b4 | [
"BSD-2-Clause"
] | 7 | 2021-08-20T22:48:39.000Z | 2022-01-29T04:07:43.000Z | redash_parameter_supported_query_results_query_runner/parameter_supported_query_results.py | ariarijp/redash-parameter-supported-query-results-query-runner | 09e688c2be91354a8be76051b9a8e27c4cde5e4c | [
"BSD-2-Clause"
] | null | null | null | redash_parameter_supported_query_results_query_runner/parameter_supported_query_results.py | ariarijp/redash-parameter-supported-query-results-query-runner | 09e688c2be91354a8be76051b9a8e27c4cde5e4c | [
"BSD-2-Clause"
] | null | null | null | import hashlib
import json
import logging
import re
import sqlite3
from typing import List, Optional, Tuple
import pystache
from redash.models import Query, User
from redash.query_runner import TYPE_STRING, guess_type, register
from redash.query_runner.query_results import Results, _load_query, create_table
from redash.utils import json_dumps
logger = logging.getLogger(__name__)
def _extract_child_queries(query: str) -> List[ChildQuery]:
    """Build a ChildQuery descriptor for every ``query_<id>`` reference.

    Each referenced child query gets a deterministic temp-table name derived
    from its exact token, so identical references map to the same table.
    """
    children = []
    for token, query_id, raw_params in _collect_tokens(query):
        # Inline parameters arrive as a JSON object literal; absent -> {}.
        params = json.loads(raw_params) if raw_params else {}
        table = _tmp_table_name(query_id, token)
        children.append(ChildQuery(query_id, params, table, token))
    return children
def _collect_tokens(query: str) -> list:
    """Find child-query references in *query*.

    A reference is whitespace followed by ``query_<id>``, optionally with an
    inline JSON parameter object: ``query_<id>('{...}')``.  Returns a list of
    ``(token, query_id, raw_params)`` tuples where ``token`` is the exact
    matched text, ``query_id`` is an int and ``raw_params`` is the JSON text
    (empty string when no parameters were given).
    """
    reference = r"\s(query_(\d+)(?:\(\s*'({.+})'\s*\))?)"
    tokens = []
    for token, query_id, raw_params in re.findall(reference, query, re.IGNORECASE):
        tokens.append((token, int(query_id), raw_params))
    return tokens
def _tmp_table_name(query_id: int, child_query_token: str):
    """Deterministic temp-table name for one child-query reference.

    Hashing the full token (id plus any inline parameters) keeps distinct
    parameterisations of the same query in distinct tables.
    """
    token_digest = hashlib.sha256(child_query_token.encode('utf-8')).hexdigest()
    return 'tmp_query{}_{}'.format(query_id, token_digest)
def _create_tables_from_child_queries(
    user: User,
    connection: sqlite3.Connection,
    query: str,
    child_queries: List[ChildQuery],
) -> str:
    """Materialise every child query as a SQLite table and rewrite *query*.

    For each reference: load the child query (permission-checked for *user*),
    render its text with the reference's parameters (falling back to the
    child query's own declared defaults), execute it against its data source,
    load the results into the reference's temp table on *connection*, and
    substitute the reference token in *query* with that table name.

    Raises ChildQueryExecutionError when any child query fails.
    Returns the rewritten query text.
    """
    for index, child in enumerate(child_queries):
        loaded = _load_query(user, child.query_id)
        params = child.params if child.params else get_default_params(loaded)
        rendered = pystache.render(loaded.query_text, params)
        logger.debug(
            f"ResultsWithParams child_queries[{index}], query_id={child.query_id} : {rendered}"
        )
        results, error = loaded.data_source.query_runner.run_query(rendered, user)
        if error:
            raise ChildQueryExecutionError(
                f"Failed loading results for query id {loaded.id}."
            )
        create_table(connection, child.table, json.loads(results))
        # Replace only the first occurrence: each descriptor corresponds to
        # one parsed reference, in order of appearance.
        query = query.replace(child.token, child.table, 1)
    return query
def get_default_params(query: Query) -> dict:
    """Map each of *query*'s declared parameters to its default value."""
    defaults = {}
    for parameter in query.options.get("parameters", {}):
        defaults[parameter["name"]] = parameter["value"]
    return defaults
register(ParameterSupportedResults)
| 29.864516 | 110 | 0.62022 |
c5116b08a2ee4021d6233bcbecfc48a6ba698572 | 1,111 | py | Python | notes/migrations/0005_auto_20160130_0015.py | nicbou/markdown-notes | a5d398b032b7a837909b684bb3121c7b68f49e7b | [
"CC0-1.0"
] | 121 | 2015-04-11T20:59:48.000Z | 2021-05-12T02:15:36.000Z | notes/migrations/0005_auto_20160130_0015.py | nicbou/markdown-notes | a5d398b032b7a837909b684bb3121c7b68f49e7b | [
"CC0-1.0"
] | 56 | 2015-08-10T08:16:35.000Z | 2022-03-11T23:12:33.000Z | notes/migrations/0005_auto_20160130_0015.py | nicbou/markdown-notes | a5d398b032b7a837909b684bb3121c7b68f49e7b | [
"CC0-1.0"
] | 32 | 2015-08-11T02:50:44.000Z | 2021-09-02T10:15:00.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
| 30.861111 | 126 | 0.591359 |
c5118009a2cf132e4b87f2f696c2abdd36248815 | 5,479 | py | Python | Coursework_02/Q3/airport/scenarios.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | Coursework_02/Q3/airport/scenarios.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | Coursework_02/Q3/airport/scenarios.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | '''
Created on 25 Jan 2022
@author: ucacsjj
'''
from .airport_map import MapCellType
from .airport_map import AirportMap
# This file contains a set of functions which build different maps. Only
# two of these are needed for the coursework. Others are ones which were
# used for developing and testing the algorithms and might be of use.
# Helper function which fills sets the type of all cells in a rectangular
# region to have the same type.
# This scenario can be used to test the different traversability costs
| 32.613095 | 94 | 0.686804 |
c511d2974df6ea839e2f08eec91ae6a38dd211bf | 332 | py | Python | setup.py | abkfenris/adm_locations | 266915ab7e7559bd4c66d4090bcd69a2a93ab563 | [
"MIT"
] | null | null | null | setup.py | abkfenris/adm_locations | 266915ab7e7559bd4c66d4090bcd69a2a93ab563 | [
"MIT"
] | null | null | null | setup.py | abkfenris/adm_locations | 266915ab7e7559bd4c66d4090bcd69a2a93ab563 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='csv_locate',
version='0.1',
py_modules=['csv_to_json'],
install_requires=[
'click',
'colorama',
'geocoder',
'geojson',
'jinja2',
],
entry_points='''
[console_scripts]
csv_locate=csv_to_json:convert
''',
)
| 17.473684 | 38 | 0.539157 |
c511e84604115905acf9bda9b841b2099fbb06a7 | 1,600 | py | Python | app.py | LuizGGoncalves/Corona-Graphics-Python | e2374abcff6e67f226a3e8f3d5a3dec5b4f2a5a8 | [
"MIT"
] | null | null | null | app.py | LuizGGoncalves/Corona-Graphics-Python | e2374abcff6e67f226a3e8f3d5a3dec5b4f2a5a8 | [
"MIT"
] | null | null | null | app.py | LuizGGoncalves/Corona-Graphics-Python | e2374abcff6e67f226a3e8f3d5a3dec5b4f2a5a8 | [
"MIT"
] | null | null | null | from flask import Flask
from Config import app_config, app_active
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
import Forms
import LDB
import gGraficos
config = app_config[app_active]
| 34.782609 | 134 | 0.59 |
c5137e8834c331e474c1e9e3483b2dcf01683c8b | 4,852 | py | Python | kitti_meters/frustum.py | HaochengWan/PVT | 95818d303ee63084f044a057344b2049d1fa4492 | [
"MIT"
] | 27 | 2021-12-14T02:10:37.000Z | 2022-03-31T09:54:09.000Z | kitti_meters/frustum.py | HaochengWan/PVT | 95818d303ee63084f044a057344b2049d1fa4492 | [
"MIT"
] | 3 | 2022-02-20T09:42:01.000Z | 2022-03-21T07:32:46.000Z | kitti_meters/frustum.py | HaochengWan/PVT | 95818d303ee63084f044a057344b2049d1fa4492 | [
"MIT"
] | 2 | 2021-12-30T05:43:41.000Z | 2022-02-15T13:47:21.000Z | import numpy as np
import torch
from modules.frustum import get_box_corners_3d
from kitti_meters.util import get_box_iou_3d
__all__ = ['MeterFrustumKitti']
| 54.516854 | 117 | 0.650041 |
c5160250b4498c1f1e7cd89943e80a080c1c9214 | 689 | py | Python | Base/__init__.py | jasrub/panorama-worker | 35083d4e46b7c15e33ef352562bd7889634dcebc | [
"MIT"
] | 2 | 2017-05-30T13:38:44.000Z | 2020-06-08T08:27:32.000Z | Base/__init__.py | jasrub/panorama-worker | 35083d4e46b7c15e33ef352562bd7889634dcebc | [
"MIT"
] | null | null | null | Base/__init__.py | jasrub/panorama-worker | 35083d4e46b7c15e33ef352562bd7889634dcebc | [
"MIT"
] | null | null | null | import os
import json
import ConfigParser
import logging.config
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# load the shared settings file
settings_file_path = os.path.join(base_dir, 'config', 'settings.config')
settings = ConfigParser.ConfigParser()
settings.read(settings_file_path)
# set up logging
with open(os.path.join(base_dir, 'config', 'logging.json'), 'r') as f:
logging_config = json.load(f)
logging.config.dictConfig(logging_config)
log = logging.getLogger(__name__)
log.info("---------------------------------------------------------------------------")
requests_logger = logging.getLogger('requests')
requests_logger.setLevel(logging.INFO) | 32.809524 | 87 | 0.69521 |
c5160d08bb96e67a6e7e528e268fa0a4a2d6dfb2 | 2,462 | py | Python | src/main.py | Grant-Steinfeld/python-ubi-openshift | 82fb2d4a4093a5b67c68a3443da23532f59a230c | [
"Apache-2.0"
] | 7 | 2020-04-21T21:23:39.000Z | 2022-02-16T11:09:58.000Z | src/main.py | Grant-Steinfeld/python-ubi-openshift | 82fb2d4a4093a5b67c68a3443da23532f59a230c | [
"Apache-2.0"
] | 3 | 2020-02-18T21:57:04.000Z | 2020-03-26T20:37:22.000Z | src/main.py | Grant-Steinfeld/python-ubi-openshift | 82fb2d4a4093a5b67c68a3443da23532f59a230c | [
"Apache-2.0"
] | 13 | 2020-04-27T19:56:43.000Z | 2022-03-31T03:53:22.000Z | from flask import Flask
from flask_restplus import Api, Resource, fields
from services.serviceHandler import convertCurrency, getCurrencyExchangeRates
from services.countryCurrencyCodeHandler import (
getCountryAndCurrencyCode,
getCurrencyNameAndCode,
)
app = Flask(__name__)
api = Api(
app,
version="1.0.0",
title="Bee Travels Currency Data Service",
description="This is a microservice that handles currency exchange rate data for Bee Travels",
)
currencyNS = api.namespace(
"Currency",
description="Operations associated with currency exchange rate conversions",
)
currencyNameOrCurrencyCode = api.model(
"currencyNameOrCurrencyCode",
{
"currencyCode": fields.String(
required=False, description="3 letter currency code"
),
"country": fields.String(required=False, description="country name"),
},
)
# /currency/{currencyFromAmount}/{currencyFromCode}/{currencyToCode}
# /currency/10/EUR/USD
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True, port=7878)
| 31.974026 | 98 | 0.718522 |
c517a4c10d04e5d45c2f649fb106b2a711638d2d | 6,344 | py | Python | orchestration/run/BrokerActions.py | pjk25/RabbitTestTool | c0b9e820f079d14d516185f2790371380e190d6c | [
"MIT"
] | null | null | null | orchestration/run/BrokerActions.py | pjk25/RabbitTestTool | c0b9e820f079d14d516185f2790371380e190d6c | [
"MIT"
] | null | null | null | orchestration/run/BrokerActions.py | pjk25/RabbitTestTool | c0b9e820f079d14d516185f2790371380e190d6c | [
"MIT"
] | null | null | null | import sys
import io
import subprocess
import threading
import time
import uuid
import os.path
import requests
import json
from random import randint
from UniqueConfiguration import UniqueConfiguration
from CommonConfiguration import CommonConfiguration
from printer import console_out | 42.577181 | 144 | 0.589061 |
c5198d8481c8a0970f981fde506e8ae0b90aab1f | 1,763 | py | Python | bin/wls_users.py | rstyczynski/wls-tools | 292a39a3f7af7b9d7d4c4849618d6789daae9b58 | [
"Apache-2.0"
] | null | null | null | bin/wls_users.py | rstyczynski/wls-tools | 292a39a3f7af7b9d7d4c4849618d6789daae9b58 | [
"Apache-2.0"
] | null | null | null | bin/wls_users.py | rstyczynski/wls-tools | 292a39a3f7af7b9d7d4c4849618d6789daae9b58 | [
"Apache-2.0"
] | null | null | null | #!$BEA_HOME/oracle_common/common/bin/wlst.sh
# default values
admin_name = 'AdminServer'
admin_address = 'localhost'
admin_port = 7001
admin_protocol = 't3'
admin_url = admin_protocol + "://" + admin_address + ":" + str(admin_port)
try:
opts, args = getopt.getopt( sys.argv[1:], 's:p:u::d:h', ['server=','port=','url=','delimiter='] )
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ('--help'):
usage()
sys.exit(2)
elif opt in ('-s', '--server'):
admin_name = arg
elif opt in ('-p', '--port'):
admin_port = arg
admin_url = admin_protocol + "://" + admin_address + ":" + str(admin_port)
elif opt in ('-u', '--url'):
admin_url = arg
elif opt in ('-d', '--delimiter'):
delimiter = arg
else:
usage()
sys.exit(2)
connect(url=admin_url, adminServerName=admin_name)
# do work
from weblogic.management.security.authentication import UserReaderMBean
from weblogic.management.security.authentication import GroupReaderMBean
realmName=cmo.getSecurityConfiguration().getDefaultRealm()
authProvider = realmName.getAuthenticationProviders()
print 'admin_url,group,user'
for i in authProvider:
if isinstance(i,GroupReaderMBean):
groupReader = i
cursor = i.listGroups("*",0)
while groupReader.haveCurrent(cursor):
group = groupReader.getCurrentName(cursor)
usergroup = i.listAllUsersInGroup(group,"*",0)
for user in usergroup:
print '%s,%s,%s' % (admin_url,group,user)
groupReader.advance(cursor)
groupReader.close(cursor)
#
disconnect()
exit() | 27.546875 | 101 | 0.642087 |
c51b416cfe1486d20ea86dd385bdcfa1be5f1bbe | 1,082 | py | Python | scripts/drop_low_coverage.py | godzilla-but-nicer/SporeLoss | 8159a628e5f17191254583c053891070ba3d6e7f | [
"MIT"
] | null | null | null | scripts/drop_low_coverage.py | godzilla-but-nicer/SporeLoss | 8159a628e5f17191254583c053891070ba3d6e7f | [
"MIT"
] | null | null | null | scripts/drop_low_coverage.py | godzilla-but-nicer/SporeLoss | 8159a628e5f17191254583c053891070ba3d6e7f | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from Bio import AlignIO, Seq
# parameter to determine the maximum missing proportion that we keep
missing_thresh = 0.4
# load the alignments and turn them into a numpy array
alignments = AlignIO.read(snakemake.input[0], 'fasta')
align_arr = np.array([list(rec) for rec in alignments])
# get a list of missing values per base
missing_bases = []
# iterate over the whole alignment counting missing bases
for base in range(align_arr.shape[1]):
missing = 0
for seq in range(align_arr.shape[0]):
if alignments[seq, base] not in ['A', 'T', 'G', 'C']:
missing += 1
missing_bases.append(missing)
# calculate the proportion of missing bases for each column
missing_prop = np.array([m / align_arr.shape[0] for m in missing_bases])
align_arr = align_arr[:, missing_prop < missing_thresh]
for r, rec in enumerate(alignments):
joined_seq = ''.join(align_arr[r])
print(joined_seq[:10])
rec.seq = Seq.Seq(joined_seq)
with open(snakemake.output[0], 'w') as fout:
AlignIO.write(alignments, fout, 'fasta')
| 31.823529 | 72 | 0.711645 |
c51b5610a93a01c7edaae445a44f41f8aa36b738 | 626 | py | Python | get_proc_users.py | dangtrinhnt/gem | bc53cf19d3541542e4c14c24b5fb186432e91c45 | [
"Apache-2.0"
] | null | null | null | get_proc_users.py | dangtrinhnt/gem | bc53cf19d3541542e4c14c24b5fb186432e91c45 | [
"Apache-2.0"
] | 44 | 2019-11-18T20:15:35.000Z | 2021-07-27T20:26:38.000Z | get_proc_users.py | dangtrinhnt/gem | bc53cf19d3541542e4c14c24b5fb186432e91c45 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import sys
from commons import *
if __name__ == "__main__":
csv_path = sys.argv[1]
if sys.argv[2] == 'all':
condition_number = [-1]
else:
condition_number = map(int, sys.argv[2].split(','))
print_proc_users(csv_path, condition_number)
| 24.076923 | 64 | 0.699681 |