hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
469bf8d9da5f7e85ca99597046844afc1b477cd9 | 213 | py | Python | code/django18/django18/newsletter/forms.py | dvl/celerytalk | 312de04ea24bb073357684a3a35cfd782b2b7aae | [
"MIT"
] | null | null | null | code/django18/django18/newsletter/forms.py | dvl/celerytalk | 312de04ea24bb073357684a3a35cfd782b2b7aae | [
"MIT"
] | null | null | null | code/django18/django18/newsletter/forms.py | dvl/celerytalk | 312de04ea24bb073357684a3a35cfd782b2b7aae | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
| 19.363636 | 53 | 0.737089 |
469d18528989ab40a67eb477eeda37c2533ddfd8 | 5,448 | py | Python | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import *
# Documentation of the MVA
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MultivariateElectronIdentificationRun2
# https://rembserj.web.cern.ch/rembserj/notes/Electron_MVA_ID_2017_documentation
#
# In this file we define the locations of the MVA weights, cuts on the MVA values
# for specific working points, and configure those cuts in VID
#
# The tag is an extra string attached to the names of the products
# such as ValueMaps that needs to distinguish cases when the same MVA estimator
# class is used with different tuning/weights
mvaTag = "Fall17NoIsoV1"
# There are 6 categories in this MVA. They have to be configured in this strict order
# (cuts and weight files order):
# 0 EB1 (eta<0.8) pt 5-10 GeV | pt < ptSplit && |eta| < ebSplit
# 1 EB2 (eta>=0.8) pt 5-10 GeV | pt < ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 2 EE pt 5-10 GeV | pt < ptSplit && |eta| >= ebeeSplit
# 3 EB1 (eta<0.8) pt 10-inf GeV | pt >= ptSplit && |eta| < ebSplit
# 4 EB2 (eta>=0.8) pt 10-inf GeV | pt >= ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 5 EE pt 10-inf GeV | pt >= ptSplit && |eta| >= ebeeSplit
mvaFall17WeightFiles_V1 = cms.vstring(
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_10_2017_puinfo_BDT.weights.xml.gz"
)
## The working point for this MVA that is expected to have about 90% signal
# WP tuned to give about 90 and 80% signal efficiecny for electrons from Drell-Yan with pT > 25 GeV
# The working point for the low pt categories is just taken over from the high pt
idName90 = "mvaEleID-Fall17-noIso-V1-wp90"
MVA_WP90 = EleMVA_WP(
idName = idName90, mvaTag = mvaTag,
cutCategory0 = "0.9165112826974601 - exp(-pt / 2.7381703555094217) * 1.03549199648109", # EB1 low pt
cutCategory1 = "0.8655738322220173 - exp(-pt / 2.4027944652597073) * 0.7975615613282494", # EB2 low pt
cutCategory2 = "-3016.035055227131 - exp(-pt / -52140.61856333602) * -3016.3029387236506", # EE low pt
cutCategory3 = "0.9616542816132922 - exp(-pt / 8.757943837889817) * 3.1390200321591206", # EB1
cutCategory4 = "0.9319258011430132 - exp(-pt / 8.846057432565809) * 3.5985063793347787", # EB2
cutCategory5 = "0.8899260780999244 - exp(-pt / 10.124234115859881) * 4.352791250718547", # EE
)
idName80 = "mvaEleID-Fall17-noIso-V1-wp80"
MVA_WP80 = EleMVA_WP(
idName = idName80, mvaTag = mvaTag,
cutCategory0 = "0.9530240956555949 - exp(-pt / 2.7591425841003647) * 0.4669644718545271", # EB1 low pt
cutCategory1 = "0.9336564763961019 - exp(-pt / 2.709276284272272) * 0.33512286599215946", # EB2 low pt
cutCategory2 = "0.9313133688365339 - exp(-pt / 1.5821934800715558) * 3.8889462619659265", # EE low pt
cutCategory3 = "0.9825268564943458 - exp(-pt / 8.702601455860762) * 1.1974861596609097", # EB1
cutCategory4 = "0.9727509457929913 - exp(-pt / 8.179525631018565) * 1.7111755094657688", # EB2
cutCategory5 = "0.9562619539540145 - exp(-pt / 8.109845366281608) * 3.013927699126942", # EE
)
### WP tuned for HZZ analysis with very high efficiency (about 98%)
# The working points were found by requiring the same signal efficiencies in
# each category as for the Spring 16 HZZ ID
# (see RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_HZZ_V1_cff.py)
idNamewpLoose = "mvaEleID-Fall17-noIso-V1-wpLoose"
MVA_WPLoose = EleMVA_WP(
idName = idNamewpLoose, mvaTag = mvaTag,
cutCategory0 = "-0.13285867293779202", # EB1 low pt
cutCategory1 = "-0.31765300958836074", # EB2 low pt
cutCategory2 = "-0.0799205914718861" , # EE low pt
cutCategory3 = "-0.856871961305474" , # EB1
cutCategory4 = "-0.8107642141584835" , # EB2
cutCategory5 = "-0.7179265933023059" # EE
)
#
# Finally, set up VID configuration for all cuts
#
# Create the PSet that will be fed to the MVA value map producer
mvaEleID_Fall17_noIso_V1_producer_config = cms.PSet(
mvaName = cms.string(mvaClassName),
mvaTag = cms.string(mvaTag),
# Category parameters
nCategories = cms.int32(6),
categoryCuts = cms.vstring(*EleMVA_6CategoriesCuts),
# Weight files and variable definitions
weightFileNames = mvaFall17WeightFiles_V1,
variableDefinition = cms.string("RecoEgamma/ElectronIdentification/data/ElectronMVAEstimatorRun2Fall17V1Variables.txt")
)
# Create the VPset's for VID cuts
mvaEleID_Fall17_V1_wpLoose = configureVIDMVAEleID( MVA_WPLoose )
mvaEleID_Fall17_V1_wp90 = configureVIDMVAEleID( MVA_WP90 )
mvaEleID_Fall17_V1_wp80 = configureVIDMVAEleID( MVA_WP80 )
mvaEleID_Fall17_V1_wpLoose.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp90.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp80.isPOGApproved = cms.untracked.bool(True)
| 54.48 | 124 | 0.727423 |
469d299beef21a4f12403e1476091e6f816d16ea | 1,676 | py | Python | dqn_plus/notebooks/code/train_ram.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | dqn_plus/notebooks/code/train_ram.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | dqn_plus/notebooks/code/train_ram.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | import numpy as np
import gym
from utils import *
from agent import *
from config import *
if __name__ == '__main__':
env = gym.make(RAM_ENV_NAME)
agent = Agent(env.observation_space.shape[0], env.action_space.n, BATCH_SIZE, LEARNING_RATE, TAU, GAMMA, DEVICE, False, DUEL, DOUBLE, PRIORITIZED)
rewards_log, _ = train(env, agent, RAM_NUM_EPISODE, EPS_INIT, EPS_DECAY, EPS_MIN, MAX_T)
np.save('{}_rewards.npy'.format(RAM_ENV_NAME), rewards_log)
agent.Q_local.to('cpu')
torch.save(agent.Q_local.state_dict(), '{}_weights.pth'.format(RAM_ENV_NAME)) | 33.52 | 150 | 0.613365 |
469d44404e5e5089163e7fb2cbe8fd08587f00ec | 4,274 | py | Python | tools/parallel_launcher/parallel_launcher.py | Gitman1989/chromium | 2b1cceae1075ef012fb225deec8b4c8bbe4bc897 | [
"BSD-3-Clause"
] | 2 | 2017-09-02T19:08:28.000Z | 2021-11-15T15:15:14.000Z | tools/parallel_launcher/parallel_launcher.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | null | null | null | tools/parallel_launcher/parallel_launcher.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | 1 | 2020-11-04T07:22:28.000Z | 2020-11-04T07:22:28.000Z | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool launches several shards of a gtest-based binary
in parallel on a local machine.
Example usage:
parallel_launcher.py path/to/base_unittests
"""
import optparse
import os
import subprocess
import sys
import threading
import time
def StreamCopyWindows(stream_from, stream_to):
"""Copies stream_from to stream_to."""
while True:
buf = stream_from.read(1024)
if not buf:
break
stream_to.write(buf)
stream_to.flush()
def StreamCopyPosix(stream_from, stream_to, child_exited):
"""
Copies stream_from to stream_to, and exits if child_exited
is signaled.
"""
import fcntl
# Put the source stream in a non-blocking mode, so we can check
# child_exited when there is no data.
fd = stream_from.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
buf = os.read(fd, 1024)
except OSError, e:
if e.errno == 11:
if child_exited.isSet():
break
time.sleep(0.1)
continue
raise
if not buf:
break
stream_to.write(buf)
stream_to.flush()
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 27.574194 | 78 | 0.656996 |
469e17c0eac34c546af54d506129856bf3802b70 | 83 | py | Python | 05_Practice1/Step06/yj.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | 05_Practice1/Step06/yj.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | 3 | 2020-11-04T05:38:53.000Z | 2021-03-02T02:15:19.000Z | 05_Practice1/Step06/yj.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | a = int(input())
for i in range(a):
print('* '*(a-a//2))
print(' *'*(a//2)) | 20.75 | 24 | 0.433735 |
469f233747542475f293cc21f8824a73074353c6 | 7,176 | py | Python | ginga/util/dp.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 76 | 2015-01-05T14:46:14.000Z | 2022-03-23T04:10:54.000Z | ginga/util/dp.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 858 | 2015-01-17T01:55:12.000Z | 2022-03-08T20:20:31.000Z | ginga/util/dp.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 60 | 2015-01-14T21:59:07.000Z | 2022-02-13T03:38:49.000Z | #
# dp.py -- Data pipeline and reduction routines
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
from collections import OrderedDict
from ginga import AstroImage, colors
from ginga.RGBImage import RGBImage
from ginga.util import wcs
# counter used to name anonymous images
prefixes = dict(dp=0)
# https://gist.github.com/stscieisenhamer/25bf6287c2c724cb9cc7
def masktorgb(mask, color='lightgreen', alpha=1.0):
"""Convert boolean mask to RGB image object for canvas overlay.
Parameters
----------
mask : ndarray
Boolean mask to overlay. 2D image only.
color : str
Color name accepted by Ginga.
alpha : float
Opacity. Unmasked data are always transparent.
Returns
-------
rgbobj : RGBImage
RGB image for canvas Image object.
Raises
------
ValueError
Invalid mask dimension.
"""
mask = np.asarray(mask)
if mask.ndim != 2:
raise ValueError('ndim={0} is not supported'.format(mask.ndim))
ht, wd = mask.shape
r, g, b = colors.lookup_color(color)
rgbobj = RGBImage(data_np=np.zeros((ht, wd, 4), dtype=np.uint8))
rc = rgbobj.get_slice('R')
gc = rgbobj.get_slice('G')
bc = rgbobj.get_slice('B')
ac = rgbobj.get_slice('A')
ac[:] = 0 # Transparent background
rc[mask] = int(r * 255)
gc[mask] = int(g * 255)
bc[mask] = int(b * 255)
ac[mask] = int(alpha * 255)
# For debugging
#rgbobj.save_as_file('ztmp_rgbobj.png')
return rgbobj
# END
| 27.181818 | 72 | 0.593924 |
469f5e5ed924f088a814fc16a98e14d55994cdf9 | 3,294 | py | Python | jigsaw/datasets/datasets.py | alexvishnevskiy/jigsaw | 7fc2c4cd3700a54e9c5cbc02870bf4057b0a9fe3 | [
"MIT"
] | null | null | null | jigsaw/datasets/datasets.py | alexvishnevskiy/jigsaw | 7fc2c4cd3700a54e9c5cbc02870bf4057b0a9fe3 | [
"MIT"
] | null | null | null | jigsaw/datasets/datasets.py | alexvishnevskiy/jigsaw | 7fc2c4cd3700a54e9c5cbc02870bf4057b0a9fe3 | [
"MIT"
] | null | null | null | from torch.utils.data import Dataset
from ..utils.optimal_lenght import find_optimal_lenght
| 31.673077 | 70 | 0.532483 |
46a1c447600050372f1c46ddc6ed6f7e8c87b183 | 117 | py | Python | app/api/v2/views/blacklist.py | MaggieChege/STORE-MANAGER-API-V2 | d8b2c7312304df627369721e8e1821cf724431d7 | [
"MIT"
] | null | null | null | app/api/v2/views/blacklist.py | MaggieChege/STORE-MANAGER-API-V2 | d8b2c7312304df627369721e8e1821cf724431d7 | [
"MIT"
] | null | null | null | app/api/v2/views/blacklist.py | MaggieChege/STORE-MANAGER-API-V2 | d8b2c7312304df627369721e8e1821cf724431d7 | [
"MIT"
] | null | null | null | blacklist=set()
| 14.625 | 29 | 0.735043 |
46a2b9041cbdb5a67a2d23664c83d5328cdf8c09 | 260 | py | Python | apps/tasks/api/views.py | dayvidemerson/django-rest-example | 85eabb1e154cfd8ebc0019080b37cd3f1302c206 | [
"MIT"
] | null | null | null | apps/tasks/api/views.py | dayvidemerson/django-rest-example | 85eabb1e154cfd8ebc0019080b37cd3f1302c206 | [
"MIT"
] | null | null | null | apps/tasks/api/views.py | dayvidemerson/django-rest-example | 85eabb1e154cfd8ebc0019080b37cd3f1302c206 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from rest_framework import generics
from ..models import Task
from .serializers import TaskSerializer
| 21.666667 | 41 | 0.796154 |
46a39565f7364f7f1fb4b269da3328cdcc2b0021 | 97 | py | Python | jarvis/__init__.py | jduncan8142/JARVIS | 387003bc00cea2ca74d7094a92e55eab593a968a | [
"MIT"
] | null | null | null | jarvis/__init__.py | jduncan8142/JARVIS | 387003bc00cea2ca74d7094a92e55eab593a968a | [
"MIT"
] | null | null | null | jarvis/__init__.py | jduncan8142/JARVIS | 387003bc00cea2ca74d7094a92e55eab593a968a | [
"MIT"
] | null | null | null | __version__ = "0.0.3"
__author__ = "Jason Duncan"
__support__ = "jason.matthew.duncan@gmail.com"
| 24.25 | 46 | 0.742268 |
46a4d57f3c07a504c88eba6e3644cc933118a8c3 | 1,798 | py | Python | src/figcli/test/cli/action.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | 36 | 2020-07-21T21:22:02.000Z | 2021-10-20T06:55:47.000Z | src/figcli/test/cli/action.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | 2 | 2020-10-29T12:49:15.000Z | 2021-04-29T01:12:05.000Z | src/figcli/test/cli/action.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | null | null | null | from typing import Union, List
import pexpect
from figcli.utils.utils import Utils
import sys
| 34.576923 | 119 | 0.60178 |
46a4f53ed5b4a611b18a262f155eca68d71783fb | 7,831 | py | Python | test/python/test_elementwise_ops.py | avijit-chakroborty/ngraph-bridge | b691d57412a40582ea93c6e564d80c750b7f2e8e | [
"Apache-2.0"
] | 142 | 2019-02-21T00:53:06.000Z | 2022-03-11T07:46:28.000Z | test/python/test_elementwise_ops.py | tensorflow/ngraph | ea6422491ec75504e78a63db029e7f74ec3479a5 | [
"Apache-2.0"
] | 252 | 2019-03-11T19:27:59.000Z | 2021-03-19T10:58:17.000Z | test/python/test_elementwise_ops.py | tensorflow/ngraph | ea6422491ec75504e78a63db029e7f74ec3479a5 | [
"Apache-2.0"
] | 65 | 2019-03-13T15:27:29.000Z | 2021-07-16T07:09:16.000Z | # ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge elementwise operations test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from common import NgraphTest
| 47.75 | 80 | 0.477461 |
46a58d19b627254c0cc57fea12f1310b8d2e7c37 | 7,453 | py | Python | qc/slips.py | mfkiwl/UREGA-qc | 989e6b59d4fa5259ce48daa6165bdab4e020ba49 | [
"MIT"
] | null | null | null | qc/slips.py | mfkiwl/UREGA-qc | 989e6b59d4fa5259ce48daa6165bdab4e020ba49 | [
"MIT"
] | null | null | null | qc/slips.py | mfkiwl/UREGA-qc | 989e6b59d4fa5259ce48daa6165bdab4e020ba49 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Lars Stenseng.
@mail: lars@stenseng.net
"""
# from qc.__version__ import __version__
import georinex as gr
import numpy as np
from matplotlib.pyplot import figure, show
import matplotlib.pyplot as plt
obs = gr.load(
'tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx',
# tlim=['2021-11-03T12:00', '2021-11-03T12:30'])
tlim=['2021-11-03T05:30', '2021-11-03T07:30'])
# tlim=['2021-11-03T15:00', '2021-11-03T18:00'])
# hdr = gr.rinexheader(
# 'tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx')
# rnx_version = 3
# %% Starting test
# Copying helper functions from Multipath class - later on, it could be turned
# into a separate class with helper functions
# Pick GPS satellites
svG = []
for i in range(0, len(obs.sv)):
if str(obs.sv[i].values)[0] == 'G':
svG.append(str(obs.sv[i].values))
else:
continue
# %%
# 5:30 to 7:30, G08 and G21 give 2 cycle slips # [290:300]
# 'G01','G06','G08','G10','G12','G14','G17','G19','G21','G22','G24','G30','G32'
sat = 'G21'
sattest = obs.sel(sv=sat).dropna(dim='time', how='all')
# G02 data vars with no-nan: C1C, D1C, L1C, S1C, C1W, C2W, D2W, L2W, S1W, S2W
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
L1 = sattest['L1C'] # GPS
L2 = sattest['L2W'] # GPS
# L1 = sattest['L1C'] # Galileo
# L2 = sattest['L8Q'] # Galileo
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for i in range(1, len(L4)):
L4_diff.append(np.abs(L4[i] - L4[i-1]))
if (np.abs(L4[i] - L4[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L4_diff, label=sat)
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L4')
plt.title('Single-frequency Melbourne-Wuebbena')
show()
print('Slips:', slips_nr, ', Slip criterion:', criterion.values)
# %%
# Plot all loaded sats, L1 and L2
ax = figure(figsize=(10, 6)).gca()
for i in range(0, len(svG)):
test = obs.sel(sv=svG[i]).dropna(dim='time', how='all')
L1test = test['L1C']
L2test = test['L2W']
ax.plot(L1test.time, L1test, label=svG[i], linewidth=2.0)
#ax.plot(L2test.time, L2test, label='L2', linewidth=0.5)
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('Carrier phases')
show()
# %%
# Plot separate sats, L1 and L2
ax = figure(figsize=(10, 6)).gca()
test = obs.sel(sv='E21').dropna(dim='time', how='all')
L1test = test['L1C']
L2test = test['L2W']
ax.plot(L1test.time, L1test, label='L1', linewidth=2.0)
ax.plot(L2test.time, L2test, label='L2', linewidth=1.0)
ax.grid()
# ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('Carrier phases')
show()
# %% Dual-frequency Melbourne-Wuebbena testing
# 'G01','G06','G08','G10','G12','G14','G17','G19','G21','G22','G24','G30','G32'
sat = 'G21'
sattest = obs.sel(sv=sat).dropna(dim='time', how='all')
# G02 data vars with no-nan: C1C, D1C, L1C, S1C, C1W, C2W, D2W, L2W, S1W, S2W
freq = [1575.42, 1227.60, 1176.45] # L1, L2, L5 for GPS
f1 = freq[0]*1e6
f2 = freq[1]*1e6
P1 = sattest['C1C']
P2 = sattest['C2W']
L1 = sattest['L1C'] # GPS
L2 = sattest['L2W'] # GPS
# L1 = sattest['L1C'] # Galileo
# L2 = sattest['L8Q'] # Galileo
L6 = (1/(f1-f2))*(f1*L1 - f2*L2) - (1/(f1+f2))*(f1*P1 + f2*P2)
sigma_L6 = np.std(L6)
k = 4 # criterion factor
criterion = k*sigma_L6
slips_nr = 0
L6_diff = []
for i in range(1, len(L6)):
L6_diff.append(np.abs(L6[i] - L6[i-1]))
if (np.abs(L6[i] - L6[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L6_diff, label=sat)
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L6')
plt.title('Dual-frequency Melbourne-Wuebbena')
show()
print('Slips:', slips_nr, ', Slip criterion:', criterion.values)
# %% Work in Progress
# %% Testing first algorithm
sliptest = Slips().slips_MW_single_freq(obs)
# %% Testing plot function
sliptest = Slips().plot_slips(obs, 'G08')
| 25.611684 | 79 | 0.570643 |
46a68217514e2d9ae5f9e06cbba236282798ed2c | 8,610 | py | Python | tests/test_utils.py | jga/goldfinchsong | 638e166948944a7f027d03bcf8f7c14dc2f4b6f2 | [
"MIT"
] | null | null | null | tests/test_utils.py | jga/goldfinchsong | 638e166948944a7f027d03bcf8f7c14dc2f4b6f2 | [
"MIT"
] | null | null | null | tests/test_utils.py | jga/goldfinchsong | 638e166948944a7f027d03bcf8f7c14dc2f4b6f2 | [
"MIT"
] | null | null | null | from collections import OrderedDict
from datetime import datetime, timezone
import unittest
from os.path import join
from tinydb import TinyDB, storages
from goldfinchsong import utils
IMAGE_NAMES = ['goldfinch1.jpg', 'goldfinch2.jpg', 'goldfinch3.jpg',
'goldfinch4.jpg', 'goldfinch5.jpg']
TEST_TEXT1 = 'This is a test of the goldfinchsong project. This test checks ' \
'abbreviations, vowel elision, length checking, and other logic. ' \
'Tests are important!'
TEST_TEXT2 = 'This is a test of the goldfinchsong project. Tests ' \
'abbreviations, vowel elision, length checking, and other logic. ' \
'Tests are important!'
| 40.046512 | 121 | 0.616609 |
46a6977e7b919a1d64a9944a2e191bffb62c293c | 2,868 | py | Python | lingvo/tasks/image/input_generator.py | allenwang28/lingvo | 26d3d6672d3f46d8f281c2aa9f57166ef6296738 | [
"Apache-2.0"
] | 2,611 | 2018-10-16T20:14:10.000Z | 2022-03-31T14:48:41.000Z | lingvo/tasks/image/input_generator.py | allenwang28/lingvo | 26d3d6672d3f46d8f281c2aa9f57166ef6296738 | [
"Apache-2.0"
] | 249 | 2018-10-27T06:02:29.000Z | 2022-03-30T18:00:39.000Z | lingvo/tasks/image/input_generator.py | allenwang28/lingvo | 26d3d6672d3f46d8f281c2aa9f57166ef6296738 | [
"Apache-2.0"
] | 436 | 2018-10-25T05:31:45.000Z | 2022-03-31T07:26:03.000Z | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input generator for image data."""
import os
import lingvo.compat as tf
from lingvo.core import base_input_generator
from tensorflow.python.ops import io_ops
def _GetRandomImages(batch_size):
images = tf.random.uniform((batch_size, 28, 28, 1), 0, 255, tf.int32)
return tf.cast(images, tf.uint8)
def _GetRandomLabels(batch_size):
labels = tf.random.categorical(0.1 * tf.ones((1, 10)), batch_size)
return tf.cast(labels, tf.uint8)
def FakeMnistData(tmpdir, train_size=60000, test_size=10000):
"""Fake Mnist data for unit tests."""
data_path = os.path.join(tmpdir, 'ckpt')
with tf.Graph().as_default():
tf.random.set_seed(91)
with tf.Session() as sess:
sess.run(
io_ops.save_v2(
data_path,
tensor_names=['x_train', 'y_train', 'x_test', 'y_test'],
shape_and_slices=['', '', '', ''],
tensors=[
_GetRandomImages(train_size),
_GetRandomLabels(train_size),
_GetRandomImages(test_size),
_GetRandomLabels(test_size)
]))
return data_path
| 27.84466 | 80 | 0.639121 |
46a69a58e97c1c80acfb7499d4de7c5a7c1ed4bb | 875 | py | Python | src/solutions/part1/q389_find_diff.py | hychrisli/PyAlgorithms | 71e537180f3b371d0d2cc47b11cb68ec13a8ac68 | [
"Apache-2.0"
] | null | null | null | src/solutions/part1/q389_find_diff.py | hychrisli/PyAlgorithms | 71e537180f3b371d0d2cc47b11cb68ec13a8ac68 | [
"Apache-2.0"
] | null | null | null | src/solutions/part1/q389_find_diff.py | hychrisli/PyAlgorithms | 71e537180f3b371d0d2cc47b11cb68ec13a8ac68 | [
"Apache-2.0"
] | null | null | null | from src.base.solution import Solution
from src.tests.part1.q389_test_find_diff import FindDiffTestCases
if __name__ == '__main__':
solution = FindDiff()
solution.run_tests() | 24.305556 | 65 | 0.580571 |
46aa55bc676b909ffd23d501d1007af51f171f16 | 293 | py | Python | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
] | null | null | null | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
] | null | null | null | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
] | null | null | null | #unit
#mydict.py
| 19.533333 | 67 | 0.68942 |
46ab99e6eb8d1fdb04330410131bcc8d1d609369 | 19,623 | py | Python | copy_annotations/conflict.py | abhinav-kumar-thakur/TabularCellTypeClassification | 844029ee59867c41acfa75c8ce12db6713c9960c | [
"MIT"
] | 19 | 2019-06-06T18:23:29.000Z | 2022-01-06T15:30:20.000Z | copy_annotations/conflict.py | abhinav-kumar-thakur/TabularCellTypeClassification | 844029ee59867c41acfa75c8ce12db6713c9960c | [
"MIT"
] | 524 | 2019-07-01T19:18:39.000Z | 2022-02-13T19:33:02.000Z | copy_annotations/conflict.py | abhinav-kumar-thakur/TabularCellTypeClassification | 844029ee59867c41acfa75c8ce12db6713c9960c | [
"MIT"
] | 18 | 2019-06-06T18:23:07.000Z | 2021-07-15T06:01:17.000Z | import contextlib
import os
import tempfile
import warnings
from enum import Enum
import mip
| 42.109442 | 169 | 0.587983 |
46abd7d33dffc8675b1cbcb1f61d7140668df589 | 249 | py | Python | output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_pattern_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_pattern_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_pattern_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_pattern_1_xsd.nistschema_sv_iv_atomic_integer_pattern_1 import NistschemaSvIvAtomicIntegerPattern1
__all__ = [
"NistschemaSvIvAtomicIntegerPattern1",
]
| 41.5 | 190 | 0.891566 |
46ad0929668e5c287bd02c9734950a08fc91328e | 3,919 | py | Python | Dietscheduler/lib/menu_converter.py | floromaer/DietScheduler | 48403f13dbcaa7981f252361819f06435a75333b | [
"MIT"
] | null | null | null | Dietscheduler/lib/menu_converter.py | floromaer/DietScheduler | 48403f13dbcaa7981f252361819f06435a75333b | [
"MIT"
] | null | null | null | Dietscheduler/lib/menu_converter.py | floromaer/DietScheduler | 48403f13dbcaa7981f252361819f06435a75333b | [
"MIT"
] | null | null | null | import re
import xlsxwriter
| 48.382716 | 446 | 0.63307 |
46ad5a9032022b2051fa16df11181281b5f9eae8 | 1,256 | py | Python | example_problems/tutorial/graph_connectivity/services/esempi/check_one_sol_server.py | romeorizzi/TAlight | 2217f8790820d8ec7ab076c836b2d182877d8ee8 | [
"MIT"
] | 3 | 2021-01-08T09:56:46.000Z | 2021-03-02T20:47:29.000Z | example_problems/tutorial/graph_connectivity/services/esempi/check_one_sol_server.py | romeorizzi/TAlight | 2217f8790820d8ec7ab076c836b2d182877d8ee8 | [
"MIT"
] | 1 | 2021-01-23T06:50:31.000Z | 2021-03-17T15:35:18.000Z | example_problems/tutorial/graph_connectivity/services/esempi/check_one_sol_server.py | romeorizzi/TAlight | 2217f8790820d8ec7ab076c836b2d182877d8ee8 | [
"MIT"
] | 4 | 2021-01-06T12:10:23.000Z | 2021-03-16T22:16:07.000Z | #!/usr/bin/env python3
from sys import stderr, exit
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
from parentheses_lib import recognize
# METADATA OF THIS TAL_SERVICE:
problem="parentheses"
service="check_one_sol_server"
args_list = [
('input_formula',str),
('n',str),
('silent',bool),
('lang',str),
('ISATTY',bool),
]
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
n=ENV['n']
len_input = len(ENV["input_formula"])//2
if not ENV["silent"]:
TAc.print(LANG.opening_msg, "green")
if n=='free':
answer()
else:
if len_input==int(n):
answer()
elif recognize(ENV["input_formula"], TAc, LANG) and not ENV['silent']:
TAc.print(LANG.render_feedback("different_lengths", f"No! Your string represents a valid formula of parentheses but not of {n} pairs."), "red", ["bold"])
exit(0)
| 29.904762 | 162 | 0.65207 |
46ad743d904b177a6882bac14c8a5ed867753ee6 | 2,436 | py | Python | app/validation/translator.py | codingedward/book-a-meal-api | 36756abc225bf7e8306330f2c3e223dc32af7869 | [
"MIT"
] | null | null | null | app/validation/translator.py | codingedward/book-a-meal-api | 36756abc225bf7e8306330f2c3e223dc32af7869 | [
"MIT"
] | null | null | null | app/validation/translator.py | codingedward/book-a-meal-api | 36756abc225bf7e8306330f2c3e223dc32af7869 | [
"MIT"
] | 2 | 2018-10-01T17:45:19.000Z | 2020-12-07T13:48:25.000Z | """Translates validation error messages for the response"""
messages = {
'accepted': 'The :field: must be accepted.',
'after': 'The :field: must be a date after :other:.',
'alpha': 'The :field: may contain only letters.',
'alpha_dash': 'The :field: may only contain letters, numbers, and dashes.',
'alpha_num': 'The :field: may contain only letters and numbers.',
'array': 'The :field: must be an array.',
'before': 'The :field: must be a date before :other:.',
'between': 'The :field: must be between :least: and :most:.',
'between_string': 'The :field: must be between :least: and :most: characters.',
'between_numeric': 'The :field: must be between :least: and :most:.',
'boolean': 'The :field: must be either true or false.',
'confirmed': 'The :field: confirmation does not match.',
'date': 'The :field: is not a valid date.',
'different': 'The :field: and :other: must be different.',
'digits': 'The :field: must be :length: digits.',
'email': 'The :field: must be a valid email address.',
'exists': 'The selected :field: is invalid.',
'found_in': 'The selected :field: is invalid.',
'integer': 'The :field: must be an integer.',
'json': 'The :field: must be valid json format.',
'most_string': 'The :field: must not be greater than :most: characters.',
'most_numeric': 'The :field: must not be greater than :most:.',
'least_string': 'The :field: must be at least :least: characters.',
'least_numeric': 'The :field: must be at least :least:.',
'not_in': 'The selected :field: is invalid.',
'numeric': 'The :field: must be a number.',
'positive': 'The :field: must be a positive number.',
'regex': 'The :field: format is invalid.',
'required': 'The :field: field is required.',
'required_with': 'The :field: field is required when :other: is present.',
'required_without': 'The :field: field is required when :other: si not present.',
'same': 'The :field: and :other: must match.',
'size_string': 'The :field: must be :size: characters.',
'size_numeric': 'The :field: must be :size:.',
'string': 'The :field: must be a string.',
'unique': 'The :field: is already taken.',
'url': 'The :field: format is invalid.',
}
| 47.764706 | 85 | 0.630542 |
46af16319f2d029f582d103a8745545d6de7422c | 333 | py | Python | chat/main/consumers.py | mlambir/channels_talk_pyconar2016 | 82e54eb914fb005fcdebad1ed07cede898957733 | [
"MIT"
] | 12 | 2016-11-30T15:22:22.000Z | 2018-02-27T23:03:12.000Z | chat/main/consumers.py | mlambir/channels_talk_pyconar2016 | 82e54eb914fb005fcdebad1ed07cede898957733 | [
"MIT"
] | null | null | null | chat/main/consumers.py | mlambir/channels_talk_pyconar2016 | 82e54eb914fb005fcdebad1ed07cede898957733 | [
"MIT"
] | null | null | null | from channels import Group
# websocket.connect
# websocket.receive
# websocket.disconnect | 22.2 | 48 | 0.708709 |
46af2004cde5fbac1f953e967f4311dafcb8c8e2 | 8,154 | py | Python | env/enviroment.py | Dorebom/robot_pybullet | 21e95864da28eb5553266513b1a1a735901395b6 | [
"MIT"
] | null | null | null | env/enviroment.py | Dorebom/robot_pybullet | 21e95864da28eb5553266513b1a1a735901395b6 | [
"MIT"
] | null | null | null | env/enviroment.py | Dorebom/robot_pybullet | 21e95864da28eb5553266513b1a1a735901395b6 | [
"MIT"
] | null | null | null | from copy import deepcopy
import numpy as np
import pybullet as p
import gym
from gym import spaces
from env.robot import Manipulator
from env.work import Work | 38.828571 | 101 | 0.589159 |
46b07861f72e984eb2546daa1dab51801dd00b0a | 1,820 | py | Python | Thread/Threading.py | zxg110/PythonGrammer | 7d07648c62e3d49123688c33d09fe4bb369cf852 | [
"Apache-2.0"
] | null | null | null | Thread/Threading.py | zxg110/PythonGrammer | 7d07648c62e3d49123688c33d09fe4bb369cf852 | [
"Apache-2.0"
] | null | null | null | Thread/Threading.py | zxg110/PythonGrammer | 7d07648c62e3d49123688c33d09fe4bb369cf852 | [
"Apache-2.0"
] | null | null | null | import _thread
import time
import threading
#
# def print_time(threadName,delay):
# count = 0;
# while count < 5:
# time.sleep(delay)
# count += 1;
# print("%s: %s" % (threadName, time.ctime(time.time())))
#
# try:
# _thread.start_new(print_time,("Thread-1",2,))
# _thread.start_new(print_time("Thread-2",4))
# except:
# print("error")
#
# while 1:
# pass
# Python3 _thread threading
# _thread threading
# threading _thread
# threading.currentThread():
# threading.enumerate(): list
# threading.activeCount(): len(threading.enumerate())
# ThreadThread:
# run():
# start():
# join([time]): BAB.join()A
# Btime
#
# isAlive():
# getName():
# setName():
exitFlag = 0
#
thread1 = MyThread(1, "Thread-1", 5)
thread2 = MyThread(2, "Thread-2", 5)
#
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print ("")
| 26 | 73 | 0.666484 |
46b1624f4a6a70026386fb13d4c9f4cd8b816721 | 492 | py | Python | server/petsAPI/views.py | StoyanDimStoyanov/ReactDJango | 8c30730fbd3af0064f97444a91e65a9029a1dc0f | [
"MIT"
] | null | null | null | server/petsAPI/views.py | StoyanDimStoyanov/ReactDJango | 8c30730fbd3af0064f97444a91e65a9029a1dc0f | [
"MIT"
] | null | null | null | server/petsAPI/views.py | StoyanDimStoyanov/ReactDJango | 8c30730fbd3af0064f97444a91e65a9029a1dc0f | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import generics
# Create your views here.
from petsAPI.models import Pets
from petsAPI.serializers import PetSerializer
| 24.6 | 63 | 0.792683 |
46b234a2d05a51f6fb5809df7fa1df618dfd4547 | 2,153 | py | Python | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | [
"MIT"
] | null | null | null | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | [
"MIT"
] | null | null | null | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | [
"MIT"
] | null | null | null | import face_recognition
import sys,os
import re,cv2
input_dir_path=sys.argv[1]
output_dir_path=sys.argv[2]
if not os.path.exists(output_dir_path):
os.mkdir(output_dir_path)
if not os.path.exists(output_dir_path+'/'+str(1)):
os.mkdir(output_dir_path+'/'+str(1))
input_images=sorted_alphanumeric(os.listdir(input_dir_path))
cv2.imwrite(output_dir_path+'/'+str(1)+'/'+input_images[0],cv2.imread(input_dir_path+'/'+input_images[0]))
if not os.path.exists(output_dir_path+'/back_imgs'):
os.mkdir(output_dir_path+'/back_imgs')
if not os.path.exists(output_dir_path+'/error'):
os.mkdir(output_dir_path+'/error')
for img_path in input_images[1:]:
try:
prev_similarity=0
img=face_recognition.load_image_file(input_dir_path+'/'+img_path)
img_encoding=face_recognition.face_encodings(img)
if img_encoding==[]:
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
cv2.imwrite(output_dir_path+'/back_imgs/'+img_path,img)
continue
img_encoding=face_recognition.face_encodings(img)[0]
imgs_dirs=sorted_alphanumeric(os.listdir(output_dir_path))
imgs_dirs=list(set(imgs_dirs)-set(['error','back_imgs']))
for img_dir in imgs_dirs:
check_img=face_recognition.load_image_file(output_dir_path+'/'+img_dir+'/'+sorted_alphanumeric(os.listdir(output_dir_path+'/'+img_dir))[0])
check_img_encoding=face_recognition.face_encodings(check_img)[0]
similarity=1-face_recognition.compare_faces([img_encoding], check_img_encoding)
if similarity>prev_similarity:
prev_similarity=similarity
result_dir=img_dir
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
if prev_similarity<0.6:
new_dir=str(len(os.listdir(output_dir_path))+1)
os.mkdir(output_dir_path+'/'+new_dir)
cv2.imwrite(output_dir_path+'/'+new_dir+'/'+img_path,img)
else:
cv2.imwrite(output_dir_path+'/'+result_dir+'/'+img_path,img)
except:
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
cv2.imwrite(output_dir_path+'/error/'+img_path,img)
| 39.87037 | 142 | 0.75987 |
46b37f4db1428d7e3e970e352faebde87a24d82f | 7,329 | py | Python | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | import requests
from bbdata.config import output_api_url
from bbdata.util import handle_response
| 33.619266 | 78 | 0.592714 |
46b3fea476ee5e207c6461dc2f22693adf1376cd | 94 | py | Python | python/tako/client/__init__.py | vyomkeshj/tako | d0906df5cdc0023ee955ad34d9eb4696b5ecec5e | [
"MIT"
] | null | null | null | python/tako/client/__init__.py | vyomkeshj/tako | d0906df5cdc0023ee955ad34d9eb4696b5ecec5e | [
"MIT"
] | null | null | null | python/tako/client/__init__.py | vyomkeshj/tako | d0906df5cdc0023ee955ad34d9eb4696b5ecec5e | [
"MIT"
] | null | null | null | from .exception import TakoException, TaskFailed # noqa
from .session import connect # noqa
| 31.333333 | 56 | 0.787234 |
46b430ceffc244986e1b0a3ab9f0c59e0b7629b0 | 5,533 | py | Python | helpers/parser.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | [
"MIT"
] | null | null | null | helpers/parser.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | [
"MIT"
] | null | null | null | helpers/parser.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | [
"MIT"
] | null | null | null | import argparse
| 39.521429 | 74 | 0.325863 |
46b4aae481a7dcad8401c1fdb98aae95f3b590c6 | 2,207 | py | Python | api/patients/urls.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | [
"MIT"
] | 10 | 2018-03-14T06:17:06.000Z | 2022-03-10T05:33:34.000Z | api/patients/urls.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | [
"MIT"
] | 512 | 2018-09-10T07:37:34.000Z | 2022-03-30T02:23:43.000Z | api/patients/urls.py | D00dleman/l2 | 0870144537ee340cd8db053a608d731e186f02fb | [
"MIT"
] | 24 | 2018-07-31T05:52:12.000Z | 2022-02-08T00:39:41.000Z | from django.urls import path
from . import views
urlpatterns = [
path('search-card', views.patients_search_card),
path('search-individual', views.patients_search_individual),
path('search-l2-card', views.patients_search_l2_card),
path('create-l2-individual-from-card', views.create_l2_individual_from_card),
path('card/<int:card_id>', views.patients_get_card_data),
path('card/save', views.patients_card_save),
path('card/archive', views.patients_card_archive),
path('card/unarchive', views.patients_card_unarchive),
path('individuals/search', views.individual_search),
path('individuals/sex', views.get_sex_by_param),
path('individuals/edit-doc', views.edit_doc),
path('individuals/edit-agent', views.edit_agent),
path('individuals/update-cdu', views.update_cdu),
path('individuals/update-wia', views.update_wia),
path('individuals/sync-rmis', views.sync_rmis),
path('individuals/sync-tfoms', views.sync_tfoms),
path('individuals/load-anamnesis', views.load_anamnesis),
path('individuals/load-dreg', views.load_dreg),
path('individuals/load-screening', views.load_screening),
path('individuals/load-vaccine', views.load_vaccine),
path('individuals/load-ambulatory-data', views.load_ambulatory_data),
path('individuals/load-benefit', views.load_benefit),
path('individuals/load-dreg-detail', views.load_dreg_detail),
path('individuals/load-vaccine-detail', views.load_vaccine_detail),
path('individuals/load-ambulatorydata-detail', views.load_ambulatory_data_detail),
path('individuals/load-ambulatory-history', views.load_ambulatory_history),
path('individuals/load-benefit-detail', views.load_benefit_detail),
path('individuals/save-dreg', views.save_dreg),
path('individuals/save-plan-dreg', views.update_dispensary_reg_plans),
path('individuals/save-vaccine', views.save_vaccine),
path('individuals/save-ambulatory-data', views.save_ambulatory_data),
path('individuals/save-benefit', views.save_benefit),
path('individuals/save-anamnesis', views.save_anamnesis),
path('is-card', views.is_l2_card),
path('save-screening-plan', views.update_screening_reg_plan),
]
| 53.829268 | 86 | 0.752152 |
46b5e33d3c7311128739c73f9d648a67b6c52c18 | 1,139 | py | Python | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | [
"Apache-2.0"
] | 1 | 2021-09-03T08:50:54.000Z | 2021-09-03T08:50:54.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-15 07:06
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
| 35.59375 | 302 | 0.604039 |
46b6263389154f262f0911cbbda3dfc8ad613ae7 | 3,014 | py | Python | setup.py | conan-hdk/xlwings | 44395c4d18b46f76249279b7d0965e640291499c | [
"BSD-3-Clause"
] | null | null | null | setup.py | conan-hdk/xlwings | 44395c4d18b46f76249279b7d0965e640291499c | [
"BSD-3-Clause"
] | null | null | null | setup.py | conan-hdk/xlwings | 44395c4d18b46f76249279b7d0965e640291499c | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import re
import glob
from setuptools import setup, find_packages
# long_description: Take from README file
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
    readme = f.read()

# Version Number: extracted from the ``__version__ = '...'`` assignment in
# xlwings/__init__.py so it is defined in exactly one place.
with open(os.path.join(os.path.dirname(__file__), 'xlwings', '__init__.py')) as f:
    version = re.compile(r".*__version__ = '(.*?)'", re.S).match(f.read()).group(1)

# Dependencies and data files are platform specific: Windows needs pywin32
# (version-pinned on Python >= 3.7), macOS needs psutil/appscript plus an
# applescript installed into Excel's Application Scripts folder.
if sys.platform.startswith('win'):
    if sys.version_info[:2] >= (3, 7):
        pywin32 = 'pywin32 >= 224'
    else:
        pywin32 = 'pywin32'
    install_requires = [pywin32]
    # This places dlls next to python.exe for standard setup and in the parent folder for virtualenv
    data_files = [('', glob.glob('xlwings*.dll'))]
elif sys.platform.startswith('darwin'):
    install_requires = ['psutil >= 2.0.0', 'appscript >= 1.0.1']
    data_files = [(os.path.expanduser("~") + '/Library/Application Scripts/com.microsoft.Excel', [f'xlwings/xlwings-{version}.applescript'])]
else:
    # Linux has no Excel; allow install only for docs builds or when the
    # user explicitly opts in via INSTALL_ON_LINUX=1.
    if os.environ.get('READTHEDOCS', None) == 'True' or os.environ.get('INSTALL_ON_LINUX') == '1':
        data_files = []
        install_requires = []
    else:
        raise OSError("xlwings requires an installation of Excel and therefore only works on Windows and macOS. To enable the installation on Linux nevertheless, do: export INSTALL_ON_LINUX=1; pip install xlwings")

# Optional feature sets installable as ``xlwings[pro]`` / ``xlwings[all]``.
extras_require = {
    'pro': ['cryptography', 'Jinja2', 'pdfrw'],
    'all': ['cryptography', 'Jinja2', 'pandas', 'matplotlib', 'plotly', 'flask', 'requests', 'pdfrw']
}

setup(
    name='xlwings',
    version=version,
    url='https://www.xlwings.org',
    license='BSD 3-clause',
    author='Zoomer Analytics LLC',
    author_email='felix.zumstein@zoomeranalytics.com',
    description='Make Excel fly: Interact with Excel from Python and vice versa.',
    long_description=readme,
    data_files=data_files,
    packages=find_packages(exclude=('tests', 'tests.*',)),
    package_data={'xlwings': ['xlwings.bas', 'Dictionary.cls', '*.xlsm', '*.xlam', '*.applescript', 'addin/xlwings.xlam', 'addin/xlwings_unprotected.xlam']},
    keywords=['xls', 'excel', 'spreadsheet', 'workbook', 'vba', 'macro'],
    install_requires=install_requires,
    extras_require=extras_require,
    entry_points={'console_scripts': ['xlwings=xlwings.cli:main'],},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Office/Business :: Financial :: Spreadsheet',
        'License :: OSI Approved :: BSD License'],
    platforms=['Windows', 'Mac OS X'],
    python_requires='>=3.6',
)
| 42.450704 | 214 | 0.651626 |
46b6eaa6075021f6bee39458eda6a940c6b7c8b2 | 2,044 | py | Python | secedgar/tests/test_cli.py | abbadata/sec-edgar | f801d2137a988c928449bf64b44a85c01e80fd3a | [
"Apache-2.0"
] | null | null | null | secedgar/tests/test_cli.py | abbadata/sec-edgar | f801d2137a988c928449bf64b44a85c01e80fd3a | [
"Apache-2.0"
] | null | null | null | secedgar/tests/test_cli.py | abbadata/sec-edgar | f801d2137a988c928449bf64b44a85c01e80fd3a | [
"Apache-2.0"
] | null | null | null | import pytest
from click.testing import CliRunner
from secedgar.cli import daily, filing
from secedgar.utils.exceptions import FilingTypeError
| 34.066667 | 90 | 0.672211 |
46b74e2cb30b2d76500271ee27ada8ec4c26cdc1 | 2,013 | py | Python | hydro.py | garethcmurphy/hydrosolve | ef150a6adcab1e835b4b907c5fed2dd58cd4ba08 | [
"MIT"
] | null | null | null | hydro.py | garethcmurphy/hydrosolve | ef150a6adcab1e835b4b907c5fed2dd58cd4ba08 | [
"MIT"
] | null | null | null | hydro.py | garethcmurphy/hydrosolve | ef150a6adcab1e835b4b907c5fed2dd58cd4ba08 | [
"MIT"
] | null | null | null | import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
nstep=200
nx=400
nv=3
u=np.zeros((nx,nv))
prim=np.zeros((nx,nv))
gam=5./3.
dx=1./nx
dt=1e-3
time=0
x=np.linspace(0,1,num=nx)
prim[:,0]=1.
prim[:,1]=0.
prim[:,2]=1.
for i in range(int(nx/2),nx):
prim[i,0]=0.1
prim[i,1]=0.
prim[i,2]=0.125
print (prim[:,2])
u=ptou(prim)
uold=u
pold=prim
fig = plt.figure()
gs = gridspec.GridSpec(nv,1)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[2,0])
ax1.plot(x,prim[:,0],'pres')
ax2.plot(x,prim[:,1],'pres')
ax3.plot(x,prim[:,2],'pres')
fig.show()
for nstep in range(0,nstep):
print (time)
um=np.roll(u, 1,axis=0)
up=np.roll(u,-1,axis=0)
um[0,:] =um[1,:]
up[nx-1,:]=up[nx-2,:]
fm=getflux(um)
fp=getflux(up)
cfl=0.49
dtdx=1./getmaxv(p)
dt=dtdx*dx
time=time+dt
un=0.5*(um+up) - cfl*dtdx* (fp-fm)
u=un
p=utop(u)
plt.close(fig)
fig = plt.figure()
gs = gridspec.GridSpec(nv,1)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[2,0])
ax1.plot(p[:,0])
ax2.plot(p[:,1])
ax3.plot(p[:,2])
fig.show()
| 18.638889 | 42 | 0.516642 |
d3b125d2c7eabb30628cc33826a64ef3ed9c92f4 | 22,179 | py | Python | tests/test_pluralize.py | weixu365/pluralizer-py | 793b2a8ff1320f701e70810038e0902c610aa5b0 | [
"MIT"
] | 4 | 2020-05-10T12:02:57.000Z | 2022-02-02T11:20:02.000Z | tests/test_pluralize.py | weixu365/pluralizer-py | 793b2a8ff1320f701e70810038e0902c610aa5b0 | [
"MIT"
] | 30 | 2020-05-10T10:07:00.000Z | 2022-03-26T18:22:43.000Z | tests/test_pluralize.py | weixu365/pluralizer-py | 793b2a8ff1320f701e70810038e0902c610aa5b0 | [
"MIT"
] | null | null | null |
import unittest
from pluralizer import Pluralizer
import re
# Standard singular/plural matches.
#
# @type {Array}
BASIC_TESTS = [
# Uncountables.
['firmware', 'firmware'],
['fish', 'fish'],
['media', 'media'],
['moose', 'moose'],
['police', 'police'],
['sheep', 'sheep'],
['series', 'series'],
['agenda', 'agenda'],
['news', 'news'],
['reindeer', 'reindeer'],
['starfish', 'starfish'],
['smallpox', 'smallpox'],
['tennis', 'tennis'],
['chickenpox', 'chickenpox'],
['shambles', 'shambles'],
['garbage', 'garbage'],
['you', 'you'],
['wildlife', 'wildlife'],
['Staff', 'Staff'],
['STAFF', 'STAFF'],
['turquois', 'turquois'],
['carnivorous', 'carnivorous'],
['only', 'only'],
['aircraft', 'aircraft'],
# Latin.
['veniam', 'veniam'],
# Pluralization.
['this', 'these'],
['that', 'those'],
['is', 'are'],
['man', 'men'],
['superman', 'supermen'],
['ox', 'oxen'],
['bus', 'buses'],
['airbus', 'airbuses'],
['railbus', 'railbuses'],
['wife', 'wives'],
['guest', 'guests'],
['thing', 'things'],
['mess', 'messes'],
['guess', 'guesses'],
['person', 'people'],
['meteor', 'meteors'],
['chateau', 'chateaus'],
['lap', 'laps'],
['cough', 'coughs'],
['death', 'deaths'],
['coach', 'coaches'],
['boy', 'boys'],
['toy', 'toys'],
['guy', 'guys'],
['girl', 'girls'],
['chair', 'chairs'],
['toe', 'toes'],
['tiptoe', 'tiptoes'],
['tomato', 'tomatoes'],
['potato', 'potatoes'],
['tornado', 'tornadoes'],
['torpedo', 'torpedoes'],
['hero', 'heroes'],
['superhero', 'superheroes'],
['volcano', 'volcanoes'],
['canto', 'cantos'],
['hetero', 'heteros'],
['photo', 'photos'],
['portico', 'porticos'],
['quarto', 'quartos'],
['kimono', 'kimonos'],
['albino', 'albinos'],
['cherry', 'cherries'],
['piano', 'pianos'],
['pro', 'pros'],
['combo', 'combos'],
['turbo', 'turbos'],
['bar', 'bars'],
['crowbar', 'crowbars'],
['van', 'vans'],
['tobacco', 'tobaccos'],
['afficionado', 'afficionados'],
['monkey', 'monkeys'],
['neutrino', 'neutrinos'],
['rhino', 'rhinos'],
['steno', 'stenos'],
['latino', 'latinos'],
['casino', 'casinos'],
['avocado', 'avocados'],
['commando', 'commandos'],
['tuxedo', 'tuxedos'],
['speedo', 'speedos'],
['dingo', 'dingoes'],
['echo', 'echoes'],
['nacho', 'nachos'],
['motto', 'mottos'],
['psycho', 'psychos'],
['poncho', 'ponchos'],
['pass', 'passes'],
['ghetto', 'ghettos'],
['mango', 'mangos'],
['lady', 'ladies'],
['bath', 'baths'],
['professional', 'professionals'],
['dwarf', 'dwarves'], # Proper spelling is "dwarfs".
['encyclopedia', 'encyclopedias'],
['louse', 'lice'],
['roof', 'roofs'],
['woman', 'women'],
['formula', 'formulas'],
['polyhedron', 'polyhedra'],
['index', 'indices'], # Maybe "indexes".
['matrix', 'matrices'],
['vertex', 'vertices'],
['axe', 'axes'], # Could also be plural of "ax".
['pickaxe', 'pickaxes'],
['crisis', 'crises'],
['criterion', 'criteria'],
['phenomenon', 'phenomena'],
['addendum', 'addenda'],
['datum', 'data'],
['forum', 'forums'],
['millennium', 'millennia'],
['alumnus', 'alumni'],
['medium', 'mediums'],
['census', 'censuses'],
['genus', 'genera'],
['dogma', 'dogmata'],
['life', 'lives'],
['hive', 'hives'],
['kiss', 'kisses'],
['dish', 'dishes'],
['human', 'humans'],
['knife', 'knives'],
['phase', 'phases'],
['judge', 'judges'],
['class', 'classes'],
['witch', 'witches'],
['church', 'churches'],
['massage', 'massages'],
['prospectus', 'prospectuses'],
['syllabus', 'syllabi'],
['viscus', 'viscera'],
['cactus', 'cacti'],
['hippopotamus', 'hippopotamuses'],
['octopus', 'octopuses'],
['platypus', 'platypuses'],
['kangaroo', 'kangaroos'],
['atlas', 'atlases'],
['stigma', 'stigmata'],
['schema', 'schemata'],
['phenomenon', 'phenomena'],
['diagnosis', 'diagnoses'],
['mongoose', 'mongooses'],
['mouse', 'mice'],
['liturgist', 'liturgists'],
['box', 'boxes'],
['gas', 'gases'],
['self', 'selves'],
['chief', 'chiefs'],
['quiz', 'quizzes'],
['child', 'children'],
['shelf', 'shelves'],
['fizz', 'fizzes'],
['tooth', 'teeth'],
['thief', 'thieves'],
['day', 'days'],
['loaf', 'loaves'],
['fix', 'fixes'],
['spy', 'spies'],
['vertebra', 'vertebrae'],
['clock', 'clocks'],
['lap', 'laps'],
['cuff', 'cuffs'],
['leaf', 'leaves'],
['calf', 'calves'],
['moth', 'moths'],
['mouth', 'mouths'],
['house', 'houses'],
['proof', 'proofs'],
['hoof', 'hooves'],
['elf', 'elves'],
['turf', 'turfs'],
['craft', 'crafts'],
['die', 'dice'],
['penny', 'pennies'],
['campus', 'campuses'],
['virus', 'viri'],
['iris', 'irises'],
['bureau', 'bureaus'],
['kiwi', 'kiwis'],
['wiki', 'wikis'],
['igloo', 'igloos'],
['ninja', 'ninjas'],
['pizza', 'pizzas'],
['kayak', 'kayaks'],
['canoe', 'canoes'],
['tiding', 'tidings'],
['pea', 'peas'],
['drive', 'drives'],
['nose', 'noses'],
['movie', 'movies'],
['status', 'statuses'],
['alias', 'aliases'],
['memorandum', 'memorandums'],
['language', 'languages'],
['plural', 'plurals'],
['word', 'words'],
['multiple', 'multiples'],
['reward', 'rewards'],
['sandwich', 'sandwiches'],
['subway', 'subways'],
['direction', 'directions'],
['land', 'lands'],
['row', 'rows'],
['grow', 'grows'],
['flow', 'flows'],
['rose', 'roses'],
['raise', 'raises'],
['friend', 'friends'],
['follower', 'followers'],
['male', 'males'],
['nail', 'nails'],
['sex', 'sexes'],
['tape', 'tapes'],
['ruler', 'rulers'],
['king', 'kings'],
['queen', 'queens'],
['zero', 'zeros'],
['quest', 'quests'],
['goose', 'geese'],
['foot', 'feet'],
['ex', 'exes'],
['reflex', 'reflexes'],
['heat', 'heats'],
['train', 'trains'],
['test', 'tests'],
['pie', 'pies'],
['fly', 'flies'],
['eye', 'eyes'],
['lie', 'lies'],
['node', 'nodes'],
['trade', 'trades'],
['chinese', 'chinese'],
['please', 'pleases'],
['japanese', 'japanese'],
['regex', 'regexes'],
['license', 'licenses'],
['zebra', 'zebras'],
['general', 'generals'],
['corps', 'corps'],
['pliers', 'pliers'],
['flyer', 'flyers'],
['scissors', 'scissors'],
['fireman', 'firemen'],
['chirp', 'chirps'],
['harp', 'harps'],
['corpse', 'corpses'],
['dye', 'dyes'],
['move', 'moves'],
['zombie', 'zombies'],
['variety', 'varieties'],
['talkie', 'talkies'],
['walkie-talkie', 'walkie-talkies'],
['groupie', 'groupies'],
['goonie', 'goonies'],
['lassie', 'lassies'],
['genie', 'genies'],
['foodie', 'foodies'],
['faerie', 'faeries'],
['collie', 'collies'],
['obloquy', 'obloquies'],
['looey', 'looies'],
['osprey', 'ospreys'],
['cover', 'covers'],
['tie', 'ties'],
['groove', 'grooves'],
['bee', 'bees'],
['ave', 'aves'],
['wave', 'waves'],
['wolf', 'wolves'],
['airwave', 'airwaves'],
['archive', 'archives'],
['arch', 'arches'],
['dive', 'dives'],
['aftershave', 'aftershaves'],
['cave', 'caves'],
['grave', 'graves'],
['gift', 'gifts'],
['nerve', 'nerves'],
['nerd', 'nerds'],
['carve', 'carves'],
['rave', 'raves'],
['scarf', 'scarves'],
['sale', 'sales'],
['sail', 'sails'],
['swerve', 'swerves'],
['love', 'loves'],
['dove', 'doves'],
['glove', 'gloves'],
['wharf', 'wharves'],
['valve', 'valves'],
['werewolf', 'werewolves'],
['view', 'views'],
['emu', 'emus'],
['menu', 'menus'],
['wax', 'waxes'],
['fax', 'faxes'],
['nut', 'nuts'],
['crust', 'crusts'],
['lemma', 'lemmata'],
['anathema', 'anathemata'],
['analysis', 'analyses'],
['locus', 'loci'],
['uterus', 'uteri'],
['curriculum', 'curricula'],
['quorum', 'quora'],
['genius', 'geniuses'],
['flower', 'flowers'],
['crash', 'crashes'],
['soul', 'souls'],
['career', 'careers'],
['planet', 'planets'],
['son', 'sons'],
['sun', 'suns'],
['drink', 'drinks'],
['diploma', 'diplomas'],
['dilemma', 'dilemmas'],
['grandma', 'grandmas'],
['no', 'nos'],
['yes', 'yeses'],
['employ', 'employs'],
['employee', 'employees'],
['history', 'histories'],
['story', 'stories'],
['purchase', 'purchases'],
['order', 'orders'],
['key', 'keys'],
['bomb', 'bombs'],
['city', 'cities'],
['sanity', 'sanities'],
['ability', 'abilities'],
['activity', 'activities'],
['cutie', 'cuties'],
['validation', 'validations'],
['floaty', 'floaties'],
['nicety', 'niceties'],
['goalie', 'goalies'],
['crawly', 'crawlies'],
['duty', 'duties'],
['scrutiny', 'scrutinies'],
['deputy', 'deputies'],
['beauty', 'beauties'],
['bank', 'banks'],
['family', 'families'],
['tally', 'tallies'],
['ally', 'allies'],
['alley', 'alleys'],
['valley', 'valleys'],
['medley', 'medleys'],
['melody', 'melodies'],
['trolly', 'trollies'],
['thunk', 'thunks'],
['koala', 'koalas'],
['special', 'specials'],
['book', 'books'],
['knob', 'knobs'],
['crab', 'crabs'],
['plough', 'ploughs'],
['high', 'highs'],
['low', 'lows'],
['hiccup', 'hiccups'],
['bonus', 'bonuses'],
['circus', 'circuses'],
['abacus', 'abacuses'],
['phobia', 'phobias'],
['case', 'cases'],
['lace', 'laces'],
['trace', 'traces'],
['mage', 'mages'],
['lotus', 'lotuses'],
['motorbus', 'motorbuses'],
['cutlas', 'cutlases'],
['tequila', 'tequilas'],
['liar', 'liars'],
['delta', 'deltas'],
['visa', 'visas'],
['flea', 'fleas'],
['favela', 'favelas'],
['cobra', 'cobras'],
['finish', 'finishes'],
['gorilla', 'gorillas'],
['mass', 'masses'],
['face', 'faces'],
['rabbit', 'rabbits'],
['adventure', 'adventures'],
['breeze', 'breezes'],
['brew', 'brews'],
['canopy', 'canopies'],
['copy', 'copies'],
['spy', 'spies'],
['cave', 'caves'],
['charge', 'charges'],
['cinema', 'cinemas'],
['coffee', 'coffees'],
['favourite', 'favourites'],
['themself', 'themselves'],
['country', 'countries'],
['issue', 'issues'],
['authority', 'authorities'],
['force', 'forces'],
['objective', 'objectives'],
['present', 'presents'],
['industry', 'industries'],
['believe', 'believes'],
['century', 'centuries'],
['category', 'categories'],
['eve', 'eves'],
['fee', 'fees'],
['gene', 'genes'],
['try', 'tries'],
['currency', 'currencies'],
['pose', 'poses'],
['cheese', 'cheeses'],
['clue', 'clues'],
['cheer', 'cheers'],
['litre', 'litres'],
['money', 'monies'],
['attorney', 'attorneys'],
['balcony', 'balconies'],
['cockney', 'cockneys'],
['donkey', 'donkeys'],
['honey', 'honeys'],
['smiley', 'smilies'],
['survey', 'surveys'],
['whiskey', 'whiskeys'],
['whisky', 'whiskies'],
['volley', 'volleys'],
['tongue', 'tongues'],
['suit', 'suits'],
['suite', 'suites'],
['cruise', 'cruises'],
['eave', 'eaves'],
['consultancy', 'consultancies'],
['pouch', 'pouches'],
['wallaby', 'wallabies'],
['abyss', 'abysses'],
['weekly', 'weeklies'],
['whistle', 'whistles'],
['utilise', 'utilises'],
['utilize', 'utilizes'],
['mercy', 'mercies'],
['mercenary', 'mercenaries'],
['take', 'takes'],
['flush', 'flushes'],
['gate', 'gates'],
['evolve', 'evolves'],
['slave', 'slaves'],
['native', 'natives'],
['revolve', 'revolves'],
['twelve', 'twelves'],
['sleeve', 'sleeves'],
['subjective', 'subjectives'],
['stream', 'streams'],
['beam', 'beams'],
['foam', 'foams'],
['callus', 'calluses'],
['use', 'uses'],
['beau', 'beaus'],
['gateau', 'gateaus'],
['fetus', 'fetuses'],
['luau', 'luaus'],
['pilau', 'pilaus'],
['shoe', 'shoes'],
['sandshoe', 'sandshoes'],
['zeus', 'zeuses'],
['nucleus', 'nuclei'],
['sky', 'skies'],
['beach', 'beaches'],
['brush', 'brushes'],
['hoax', 'hoaxes'],
['scratch', 'scratches'],
['nanny', 'nannies'],
['negro', 'negroes'],
['taco', 'tacos'],
['cafe', 'cafes'],
['cave', 'caves'],
['giraffe', 'giraffes'],
['goodwife', 'goodwives'],
['housewife', 'housewives'],
['safe', 'safes'],
['save', 'saves'],
['pocketknife', 'pocketknives'],
['tartufe', 'tartufes'],
['tartuffe', 'tartuffes'],
['truffle', 'truffles'],
['jefe', 'jefes'],
['agrafe', 'agrafes'],
['agraffe', 'agraffes'],
['bouffe', 'bouffes'],
['carafe', 'carafes'],
['chafe', 'chafes'],
['pouffe', 'pouffes'],
['pouf', 'poufs'],
['piaffe', 'piaffes'],
['gaffe', 'gaffes'],
['executive', 'executives'],
['cove', 'coves'],
['dove', 'doves'],
['fave', 'faves'],
['positive', 'positives'],
['solve', 'solves'],
['trove', 'troves'],
['treasure', 'treasures'],
['suave', 'suaves'],
['bluff', 'bluffs'],
['half', 'halves'],
['knockoff', 'knockoffs'],
['handkerchief', 'handkerchiefs'],
['reed', 'reeds'],
['reef', 'reefs'],
['yourself', 'yourselves'],
['sunroof', 'sunroofs'],
['plateau', 'plateaus'],
['radius', 'radii'],
['stratum', 'strata'],
['stratus', 'strati'],
['focus', 'foci'],
['fungus', 'fungi'],
['appendix', 'appendices'],
['seraph', 'seraphim'],
['cherub', 'cherubim'],
['memo', 'memos'],
['cello', 'cellos'],
['automaton', 'automata'],
['button', 'buttons'],
['crayon', 'crayons'],
['captive', 'captives'],
['abrasive', 'abrasives'],
['archive', 'archives'],
['additive', 'additives'],
['hive', 'hives'],
['beehive', 'beehives'],
['olive', 'olives'],
['black olive', 'black olives'],
['chive', 'chives'],
['adjective', 'adjectives'],
['cattle drive', 'cattle drives'],
['explosive', 'explosives'],
['executive', 'executives'],
['negative', 'negatives'],
['fugitive', 'fugitives'],
['progressive', 'progressives'],
['laxative', 'laxatives'],
['incentive', 'incentives'],
['genesis', 'geneses'],
['surprise', 'surprises'],
['enterprise', 'enterprises'],
['relative', 'relatives'],
['positive', 'positives'],
['perspective', 'perspectives'],
['superlative', 'superlatives'],
['afterlife', 'afterlives'],
['native', 'natives'],
['detective', 'detectives'],
['collective', 'collectives'],
['lowlife', 'lowlives'],
['low-life', 'low-lives'],
['strife', 'strifes'],
['pony', 'ponies'],
['phony', 'phonies'],
['felony', 'felonies'],
['colony', 'colonies'],
['symphony', 'symphonies'],
['semicolony', 'semicolonies'],
['radiotelephony', 'radiotelephonies'],
['company', 'companies'],
['ceremony', 'ceremonies'],
['carnivore', 'carnivores'],
['emphasis', 'emphases'],
['abuse', 'abuses'],
['ass', 'asses'],
['mile', 'miles'],
['consensus', 'consensuses'],
['coatdress', 'coatdresses'],
['courthouse', 'courthouses'],
['playhouse', 'playhouses'],
['crispness', 'crispnesses'],
['racehorse', 'racehorses'],
['greatness', 'greatnesses'],
['demon', 'demons'],
['lemon', 'lemons'],
['pokemon', 'pokemon'],
['pokmon', 'pokmon'],
['christmas', 'christmases'],
['zymase', 'zymases'],
['accomplice', 'accomplices'],
['amice', 'amices'],
['titmouse', 'titmice'],
['slice', 'slices'],
['base', 'bases'],
['database', 'databases'],
['rise', 'rises'],
['uprise', 'uprises'],
['size', 'sizes'],
['prize', 'prizes'],
['booby', 'boobies'],
['hobby', 'hobbies'],
['baby', 'babies'],
['cookie', 'cookies'],
['budgie', 'budgies'],
['calorie', 'calories'],
['brownie', 'brownies'],
['lolly', 'lollies'],
['hippie', 'hippies'],
['smoothie', 'smoothies'],
['techie', 'techies'],
['specie', 'species'],
['quickie', 'quickies'],
['pixie', 'pixies'],
['rotisserie', 'rotisseries'],
['porkpie', 'porkpies'],
['newbie', 'newbies'],
['veggie', 'veggies'],
['bourgeoisie', 'bourgeoisies'],
['party', 'parties'],
['apology', 'apologies'],
['ancestry', 'ancestries'],
['anomaly', 'anomalies'],
['anniversary', 'anniversaries'],
['battery', 'batteries'],
['nappy', 'nappies'],
['hanky', 'hankies'],
['junkie', 'junkies'],
['hogtie', 'hogties'],
['footsie', 'footsies'],
['curry', 'curries'],
['fantasy', 'fantasies'],
['housefly', 'houseflies'],
['falsy', 'falsies'],
['doggy', 'doggies'],
['carny', 'carnies'],
['cabby', 'cabbies'],
['charlie', 'charlies'],
['bookie', 'bookies'],
['auntie', 'aunties'],
# Prototype inheritance.
['constructor', 'constructors'],
# Non-standard case.
['randomWord', 'randomWords'],
['camelCase', 'camelCases'],
['PascalCase', 'PascalCases'],
['Alumnus', 'Alumni'],
['CHICKEN', 'CHICKENS'],
['', ''],
['', ''],
['', ''],
[' ', ' '],
[' chicken', ' chickens'],
['Order2', 'Order2s'],
['Work Order2', 'Work Order2s'],
['SoundFX2', 'SoundFX2s'],
['oDonald', 'oDonalds']
]
# Odd plural-to-singular test fixtures.
# Each entry is a [singular, plural] pair.
SINGULAR_TESTS = [
    ['dingo', 'dingos'],
    ['mango', 'mangoes'],
    ['echo', 'echos'],
    ['ghetto', 'ghettoes'],
    ['nucleus', 'nucleuses'],
    ['bureau', 'bureaux'],
    ['seraph', 'seraphs']
]

# Odd singular-to-plural test fixtures.
# Each entry is a [singular, plural] pair.
PLURAL_TESTS = [
    ['plateaux', 'plateaux'],
    ['axis', 'axes'],
    ['basis', 'bases'],
    ['automatum', 'automata'],
    ['thou', 'you'],
    ['axiS', 'axes'],
    ['passerby', 'passersby']
]

# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 28.039191 | 83 | 0.515803 |
d3b146cefcbdfbb497115b74257a2891722524b5 | 1,988 | py | Python | promgen/util.py | sundy-li/promgen | e532bde46b542dd66f46e3dd654bc1ad31deeec7 | [
"MIT"
] | null | null | null | promgen/util.py | sundy-li/promgen | e532bde46b542dd66f46e3dd654bc1ad31deeec7 | [
"MIT"
] | 8 | 2021-04-08T21:59:34.000Z | 2022-02-10T10:42:43.000Z | promgen/util.py | Andreich2010/promgen | dae2b720f30b0c002aa50a74c4c4fc8dfbcbb2b7 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import requests.sessions
from django.db.models import F
from promgen.version import __version__
from django.conf import settings
# Wrappers around request api to ensure we always attach our user agent
# https://github.com/requests/requests/blob/master/requests/api.py
def setting(key, default=None, domain=None):
    """Read a value out of ``settings.PROMGEN`` by colon-separated path.

    Modeled on saltstack-style queries: the key ``"path:to:key"`` walks
    the nested mapping

        path:
          to:
            key: value

    Args:
        key: colon-separated lookup path.
        default: value returned when any path component is missing.
        domain: optional top-level section to scope the lookup to; a
            missing domain raises ``KeyError``.

    Returns:
        The resolved value, or ``default`` when the path does not exist.
    """
    node = settings.PROMGEN
    if domain:
        node = node[domain]
    for part in key.split(":"):
        try:
            node = node[part]
        except KeyError:
            return default
    return node
| 28.811594 | 87 | 0.667505 |
d3b1ab341873d6a614ea74d34b804a1a2793bea2 | 5,478 | py | Python | integration_tests/test_suites/k8s-integration-test-suite/test_utils.py | ericct/dagster | dd2c9f05751e1bae212a30dbc54381167a14f6c5 | [
"Apache-2.0"
] | null | null | null | integration_tests/test_suites/k8s-integration-test-suite/test_utils.py | ericct/dagster | dd2c9f05751e1bae212a30dbc54381167a14f6c5 | [
"Apache-2.0"
] | null | null | null | integration_tests/test_suites/k8s-integration-test-suite/test_utils.py | ericct/dagster | dd2c9f05751e1bae212a30dbc54381167a14f6c5 | [
"Apache-2.0"
] | null | null | null | import time
import kubernetes
import pytest
from dagster_k8s.client import DagsterK8sError, WaitForPodState
from dagster_k8s.utils import retrieve_pod_logs, wait_for_job_success, wait_for_pod
from dagster_k8s_test_infra.helm import get_helm_test_namespace
| 41.18797 | 100 | 0.61245 |
d3b2063cf7dc483f806ac22531b14a9333116ffb | 1,092 | py | Python | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | null | null | null | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | 8 | 2021-05-17T10:54:28.000Z | 2021-06-08T12:02:37.000Z | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-15 08:52
from django.db import migrations, models
import django.db.models.deletion
| 32.117647 | 115 | 0.571429 |
d3b26049eb155f0068830e3349db9d53a2b93029 | 2,088 | py | Python | uitester/ui/case_manager/tag_names_line_edit.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | [
"Apache-2.0"
] | 4 | 2016-07-12T09:01:52.000Z | 2016-12-07T03:11:02.000Z | uitester/ui/case_manager/tag_names_line_edit.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | [
"Apache-2.0"
] | null | null | null | uitester/ui/case_manager/tag_names_line_edit.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | [
"Apache-2.0"
] | 3 | 2016-11-29T02:13:17.000Z | 2019-10-16T06:25:20.000Z | from PyQt5.QtCore import Qt, QStringListModel
from PyQt5.QtWidgets import QLineEdit, QCompleter
| 34.229508 | 84 | 0.631226 |
d3b26adf9f1c111614b51c252d8d80c26d192abc | 337 | py | Python | utils/__init__.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | [
"MIT"
] | null | null | null | utils/__init__.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | [
"MIT"
] | null | null | null | utils/__init__.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | [
"MIT"
] | null | null | null | # # importing all the modules at once
# from .config import *
# from .normalization import *
# from .others import *
# from .img_reg import *
# from .transformation import *
# from .visualization import *
# importing the modules in a selective way
import utils.config
import utils.normalization
import utils.misc
import utils.lr_finder
| 24.071429 | 42 | 0.762611 |
d3b2a1b997cbe83aa232f13b17539c3d7b815053 | 434 | py | Python | tasks.py | epu-ntua/QualiChain-mediator | 1d0f848d60861665d95ad0359914add361551763 | [
"MIT"
] | 2 | 2020-03-09T11:10:15.000Z | 2020-03-11T06:11:58.000Z | tasks.py | epu-ntua/QualiChain-mediator | 1d0f848d60861665d95ad0359914add361551763 | [
"MIT"
] | 2 | 2021-03-31T19:43:58.000Z | 2021-12-13T20:34:57.000Z | tasks.py | epu-ntua/QualiChain-mediator | 1d0f848d60861665d95ad0359914add361551763 | [
"MIT"
] | 2 | 2020-03-12T11:14:20.000Z | 2020-07-07T06:17:45.000Z | from celery import Celery
from clients.dobie_client import send_data_to_dobie
app = Celery('qualichain_mediator')
app.config_from_object('settings', namespace='CELERY_')
| 25.529412 | 75 | 0.767281 |
d3b3426ac37ef57bd78d3b9aa39a2ef7e95619d6 | 1,174 | py | Python | ingest/ambit_geo.py | brianhouse/okavango | 4006940ddead3f31eea701efb9b9dcdc7b19402e | [
"MIT"
] | 2 | 2015-01-25T06:20:03.000Z | 2015-02-15T23:54:41.000Z | ingest/ambit_geo.py | brianhouse/okavango_15 | 4006940ddead3f31eea701efb9b9dcdc7b19402e | [
"MIT"
] | null | null | null | ingest/ambit_geo.py | brianhouse/okavango_15 | 4006940ddead3f31eea701efb9b9dcdc7b19402e | [
"MIT"
] | 3 | 2017-11-14T21:18:23.000Z | 2021-06-20T21:08:31.000Z | import json, math
from ingest import ingest_json_body
from housepy import config, log, strings, util | 32.611111 | 87 | 0.537479 |
d3b45e5164e572fbde2110d62cb448013353f1cd | 1,593 | py | Python | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | [
"BSD-2-Clause"
] | 1 | 2020-12-17T09:41:42.000Z | 2020-12-17T09:41:42.000Z | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | [
"BSD-2-Clause"
] | null | null | null | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
'''update gandi DNS domain entry, with LiveDNS v5
Cf. https://doc.livedns.gandi.net/#work-with-domains
'''
import argparse
import ipaddress
import json
import os
from subprocess import check_output
import requests
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('domain')
parser.add_argument('name')
parser.add_argument('--ip', help="defaults to ifconfig.me's return")
parser.add_argument('--api_key', help="defaults to GANDI_API_KEY env var, or the return of 'pass api/gandi'")
args = parser.parse_args()
if args.ip is None:
args.ip = requests.get('http://ifconfig.me', headers={'User-Agent': 'curl/7.61.1'}).content.decode().strip()
ip = ipaddress.ip_address(args.ip)
if args.api_key is None:
args.api_key = os.environ.get('GANDI_API_KEY', check_output(['pass', 'api/gandi'], text=True).strip())
key = {'X-Api-Key': args.api_key}
r = requests.get(f'https://dns.api.gandi.net/api/v5/domains/{args.domain}/records/{args.name}', headers=key)
r.raise_for_status()
if r.json()[0]['rrset_values'][0] == args.ip:
if args.verbose:
print('ok')
else:
type_ = 'AAAA' if isinstance(ip, ipaddress.IPv6Address) else 'A'
url = f'https://dns.api.gandi.net/api/v5/domains/{args.domain}/records/{args.name}/{type_}'
data = {'rrset_values': [args.ip]}
headers = {'Content-Type': 'application/json', **key}
r = requests.put(url, data=json.dumps(data), headers=headers)
if args.verbose:
print(r.json())
else:
r.raise_for_status()
| 32.510204 | 112 | 0.696171 |
d3b4e36f678d36e360884bedfd448ece0a34ced3 | 1,895 | py | Python | leetcode.com/python/314_Binary_Tree_Vertical_Order_Traversal.py | mamane19/coding-interview-gym | 20ae1a048eddbc9a32c819cf61258e2b57572f05 | [
"MIT"
] | 713 | 2019-11-19T16:11:25.000Z | 2022-03-31T02:27:52.000Z | leetcode.com/python/314_Binary_Tree_Vertical_Order_Traversal.py | arunsank/coding-interview-gym | 8131e3a82795707e144fe55d765b6c15bdb97306 | [
"MIT"
] | 7 | 2020-01-16T17:07:18.000Z | 2021-11-15T18:24:39.000Z | leetcode.com/python/314_Binary_Tree_Vertical_Order_Traversal.py | arunsank/coding-interview-gym | 8131e3a82795707e144fe55d765b6c15bdb97306 | [
"MIT"
] | 393 | 2019-11-18T17:55:45.000Z | 2022-03-28T20:26:32.000Z | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
from collections import defaultdict
# My solution during mock, getting TLE, don't know why
from collections import defaultdict
from collections import deque | 31.583333 | 109 | 0.582058 |
d3b4eac02574fc5ff2fd374b340d31cb4dba25c1 | 3,750 | py | Python | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | [
"BSD-3-Clause"
] | null | null | null | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | [
"BSD-3-Clause"
] | null | null | null | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from sentry.db.models import (
ArrayField, BoundedPositiveIntegerField, Model, FlexibleForeignKey, sane_repr
)
from django.db import models
from jsonfield import JSONField
from django.utils import timezone
from sentry.constants import ObjectStatus
from django.utils.translation import ugettext_lazy as _
| 32.327586 | 82 | 0.686133 |
d3b76c1c0fc989bb41ad8f58fabce2395587d211 | 1,615 | py | Python | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | [
"MIT"
] | 1 | 2021-05-07T16:37:03.000Z | 2021-05-07T16:37:03.000Z | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | [
"MIT"
] | 11 | 2021-05-17T06:45:48.000Z | 2021-10-03T15:16:23.000Z | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | [
"MIT"
] | null | null | null | from .BaseDriver import BaseDriver
from ..OAuthUser import OAuthUser
| 28.333333 | 90 | 0.479257 |
d3b8efd54656a0b32ac0c5b886fd7d4ce09f8a83 | 1,266 | py | Python | python/convert_to_readwise.py | t27/highlights-convert | a6c6696ece4fabbbb56e420cb23c0466710e1345 | [
"MIT"
] | null | null | null | python/convert_to_readwise.py | t27/highlights-convert | a6c6696ece4fabbbb56e420cb23c0466710e1345 | [
"MIT"
] | null | null | null | python/convert_to_readwise.py | t27/highlights-convert | a6c6696ece4fabbbb56e420cb23c0466710e1345 | [
"MIT"
] | 1 | 2021-06-29T20:40:06.000Z | 2021-06-29T20:40:06.000Z | import pandas as pd
import json
import glob
# Column layout the Readwise CSV importer expects.
columns = ["Highlight", "Title", "Author", "URL", "Note", "Location"]

# for sample of the input json look at any json in the root of the `results` folder
def convert_to_readwise_df(json_files):
    """Convert the internal highlights json format to a readwise compatible dataframe.

    Args:
        json_files (List[str]): paths to highlight-export json files.  Each
            file holds a "volume" (title/authors) and a list of "highlights".

    Returns:
        pd.DataFrame: one row per highlight, using the ``columns`` layout above.
    """
    df_data = []
    for file in json_files:
        with open(file) as f:
            data = json.load(f)
        title = data["volume"]["title"]
        author = ", ".join(data["volume"]["authors"])
        for entry in data["highlights"]:
            highlight = entry["content"]
            location = entry["location"]
            # Bug fix: the original looped over the empty accumulator string
            # (`for note in notes`) instead of entry["notes"], so notes were
            # silently dropped.  Join any notes with newlines instead.
            notes = "\n".join(entry.get("notes", []))
            df_data.append([highlight, title, author, "", notes, location])
    return pd.DataFrame(df_data, columns=columns)
if __name__ == "__main__":
    # Gather every exported-highlights JSON and emit one Readwise CSV.
    source_files = glob.glob("../results/*.json")
    frame = convert_to_readwise_df(source_files)
    frame.to_csv("tarang_readwise.csv", index=False)
| 32.461538 | 83 | 0.590837 |
d3ba25fae7aacb5e43b639c41eadbd3d14fb7a48 | 303 | py | Python | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 18 | 2017-09-01T12:26:12.000Z | 2022-02-23T02:31:29.000Z | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 19 | 2017-03-12T20:40:36.000Z | 2022-03-31T22:50:47.000Z | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 14 | 2016-05-06T02:25:30.000Z | 2022-03-31T14:40:06.000Z | """A collection of methods for determining whether a given spectrum is
of high quality (likely to produce a high quality interpretation)
"""
from .heuristic import xrea
from .isolation import CoIsolation, PrecursorPurityEstimator
__all__ = [
"xrea",
"CoIsolation", "PrecursorPurityEstimator"
]
| 27.545455 | 70 | 0.772277 |
d3bbc84b4a938b83b84adeff2d313509849c11f6 | 3,855 | py | Python | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
] | null | null | null | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
] | null | null | null | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
] | null | null | null | from .item import Item
| 27.147887 | 107 | 0.575357 |
d3bc12f8ef0d8afa0eabbca33671def2b9e2dfc8 | 4,293 | py | Python | styrobot/cogs/help.py | ThatRedKite/styrobot | c6c449aec99cb59c4695f739d59efe2def0e0064 | [
"MIT"
] | 1 | 2021-08-02T23:19:31.000Z | 2021-08-02T23:19:31.000Z | styrobot/cogs/help.py | ThatRedKite/styrobot | c6c449aec99cb59c4695f739d59efe2def0e0064 | [
"MIT"
] | null | null | null | styrobot/cogs/help.py | ThatRedKite/styrobot | c6c449aec99cb59c4695f739d59efe2def0e0064 | [
"MIT"
] | 1 | 2021-07-28T02:26:54.000Z | 2021-07-28T02:26:54.000Z | import discord
from discord.ext import commands
from styrobot.util.contrib import info
import random
def setup(bot):
bot.add_cog(HelpCog(bot))
| 44.257732 | 109 | 0.610063 |
d3bcd85e84067fe1c97d2fef2c0994e569b7ca18 | 1,527 | py | Python | misc/Queue_hello.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
] | 3 | 2017-05-22T03:14:21.000Z | 2019-05-24T11:44:15.000Z | misc/Queue_hello.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
] | null | null | null | misc/Queue_hello.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
] | null | null | null | # Testing with threading and queue modules for Thread-based parallelism
import threading, queue, time
# The worker thread gets jobs off the queue. When the queue is empty, it
# assumes there will be no more work and exits.
# (Realistically workers will run until terminated.)
# Work function that processes the arguments
q = queue.Queue()
# Begin adding work to the queue
for i in range(20):
q.put(i)
threadPool = []
# Start a pool of 5 workers
for i in range(5):
t = threading.Thread(target=worker, name='worker %i' % (i + 1))
t.start()
threadPool.append(t)
# time.sleep(5) # testing if workers die before work is queued - yes they do die
# q.join()
for i in range(20):
q.put(i+20)
for t in threadPool:
t.join()
# Give threads time to run
# print('Main thread sleeping')
# time.sleep(5)
print('Main thread finished') | 26.327586 | 90 | 0.629339 |
d3bea2de7d4525c6881fb3abdb31815d971e7131 | 506 | py | Python | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
] | null | null | null | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
] | null | null | null | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
] | null | null | null | import pytest
from pymongo import MongoClient
import app as flask_app
test_database_name = 'spartatest'
client = MongoClient('localhost', 27017)
db = client.get_database(test_database_name)
| 20.24 | 55 | 0.727273 |
d3bfaf1d9fa752290f67cc0958281b146d4daff0 | 98 | py | Python | threader/__init__.py | mwoolweaver/threader | fdb4fe9ab71d3c85146969f716d10b78f970323e | [
"MIT"
] | 34 | 2017-07-24T20:54:06.000Z | 2022-03-18T13:10:11.000Z | threader/__init__.py | mwoolweaver/threader | fdb4fe9ab71d3c85146969f716d10b78f970323e | [
"MIT"
] | 2 | 2019-05-28T07:21:15.000Z | 2019-07-23T21:45:43.000Z | threader/__init__.py | mwoolweaver/threader | fdb4fe9ab71d3c85146969f716d10b78f970323e | [
"MIT"
] | 8 | 2019-05-28T06:49:02.000Z | 2022-02-04T22:59:09.000Z | """Tools to quickly create twitter threads."""
from .thread import Threader
__version__ = "0.1.1" | 24.5 | 46 | 0.734694 |
d3bfd6a64622fae1b5dc880f345c000e85f77a5b | 1,080 | py | Python | src/utility/count_pages.py | WikiCommunityHealth/wikimedia-revert | b584044d8b6a61a79d98656db356bf1f74d23ee0 | [
"MIT"
] | null | null | null | src/utility/count_pages.py | WikiCommunityHealth/wikimedia-revert | b584044d8b6a61a79d98656db356bf1f74d23ee0 | [
"MIT"
] | null | null | null | src/utility/count_pages.py | WikiCommunityHealth/wikimedia-revert | b584044d8b6a61a79d98656db356bf1f74d23ee0 | [
"MIT"
] | null | null | null | # count numbers of pages from the Mediawiki history dumps
import bz2
import subprocess
import os
from datetime import datetime
inizio = datetime.now()
dataset_folder = '/home/gandelli/dev/data/it/'
totali = set()
revisioni = set()
revert = set()
ns0 = set()
for year in range(2001, 2021):
dump_in = bz2.open(dataset_folder+'/it' + str(year) + '.tsv.bz2', 'r')
line = dump_in.readline()
print(year)
while line != '':
line = dump_in.readline().rstrip().decode('utf-8')[:-1]
values = line.split('\t')
if len(values) < 2:
continue
if values[23] != '':
page = int(values[23])
totali.add(page)
if values[28] == '0':
ns0.add(page)
if values[1] == 'revision':
revisioni.add(page)
if values[64] == 'true' and values[67] == 'true':
revert.add(page)
print('total page ',len(totali))
print('total pages ns0', len(ns0))
print('total revisions ns0', len(revisioni))
print('total revert ns0', len(revert) ) | 23.478261 | 74 | 0.566667 |
d3c078904bb9cd81a5346502975e431e6b94a34e | 6,395 | py | Python | livescore/LivescoreCommon.py | TechplexEngineer/frc-livescore | dedf68218a1a8e2f8a463ded835ea2a7d4b51b78 | [
"MIT"
] | null | null | null | livescore/LivescoreCommon.py | TechplexEngineer/frc-livescore | dedf68218a1a8e2f8a463ded835ea2a7d4b51b78 | [
"MIT"
] | null | null | null | livescore/LivescoreCommon.py | TechplexEngineer/frc-livescore | dedf68218a1a8e2f8a463ded835ea2a7d4b51b78 | [
"MIT"
] | null | null | null | import colorsys
import cv2
from PIL import Image
import pkg_resources
from .LivescoreBase import LivescoreBase
from .details import Alliance, OngoingMatchDetails
| 39.475309 | 124 | 0.603597 |
d3c0c248eab748f6973cc1f7d32930648b9e6320 | 1,825 | py | Python | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
] | 2 | 2021-07-09T18:53:15.000Z | 2021-08-06T06:21:14.000Z | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
] | 8 | 2021-07-09T13:08:07.000Z | 2021-09-12T20:25:08.000Z | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
] | 4 | 2021-07-09T12:32:20.000Z | 2021-07-29T15:19:25.000Z | from postDB import Model, Column, types
from datetime import datetime
import utils
| 40.555556 | 101 | 0.633973 |
d3c2f371f8e9bd53dfa26410d72fcf0c4b952e00 | 1,004 | py | Python | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
] | 6 | 2019-05-20T21:23:41.000Z | 2021-06-23T15:00:30.000Z | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
] | null | null | null | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
] | 5 | 2018-12-27T16:46:45.000Z | 2020-09-14T13:44:00.000Z | """
Commom settings to all applications
"""
A = 40.3
TECU = 1.0e16
C = 299792458
F1 = 1.57542e9
F2 = 1.22760e9
factor_1 = (F1 - F2) / (F1 + F2) / C
factor_2 = (F1 * F2) / (F2 - F1) / C
DIFF_TEC_MAX = 0.05
LIMIT_STD = 7.5
plot_it = True
REQUIRED_VERSION = 3.01
CONSTELLATIONS = ['G', 'R']
COLUMNS_IN_RINEX = {'3.03': {'G': {'L1': 'L1C', 'L2': 'L2W', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1C', 'L2': 'L2C', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
},
'3.02': {'G': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
},
'3.01': {'G': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
}
}
| 33.466667 | 100 | 0.351594 |
d3c36036476de94ac751c017398b3c5474c873f2 | 51 | py | Python | io_almacen/channel/__init__.py | xyla-io/io_almacen | 76725391b496fe3f778d013fc680ae80637eb74b | [
"MIT"
] | null | null | null | io_almacen/channel/__init__.py | xyla-io/io_almacen | 76725391b496fe3f778d013fc680ae80637eb74b | [
"MIT"
] | null | null | null | io_almacen/channel/__init__.py | xyla-io/io_almacen | 76725391b496fe3f778d013fc680ae80637eb74b | [
"MIT"
] | null | null | null | from .channel_io import Channel, channel_entity_url | 51 | 51 | 0.882353 |
d3c3d276986b71cc9d8aae788f2dcd9c3f2eb96a | 1,009 | py | Python | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
] | 726 | 2017-04-12T22:55:39.000Z | 2020-09-02T20:47:13.000Z | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
] | 248 | 2017-04-12T21:45:10.000Z | 2020-09-03T08:48:37.000Z | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
] | 140 | 2017-04-12T20:02:57.000Z | 2020-09-02T08:54:23.000Z | """Test API utilities."""
import json
from pytradfri.api.libcoap_api import APIFactory
from pytradfri.gateway import Gateway
def test_constructor_timeout_passed_to_subprocess(monkeypatch):
"""Test that original timeout is passed to subprocess."""
capture = {}
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", timeout=20, psk="abc")
api.request(Gateway().get_devices())
assert capture["timeout"] == 20
def test_custom_timeout_passed_to_subprocess(monkeypatch):
"""Test that custom timeout is passed to subprocess."""
capture = {}
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", psk="abc")
api.request(Gateway().get_devices(), timeout=1)
assert capture["timeout"] == 1
| 28.027778 | 64 | 0.698712 |
d3c44721938c2e001d9a0ea64b9e887be6780370 | 1,293 | py | Python | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
] | null | null | null | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
] | null | null | null | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
] | 1 | 2021-02-16T03:06:38.000Z | 2021-02-16T03:06:38.000Z | # scrapes Townes van Zandt lyrics
# sample code so I don't have to remember all of this stuff
# the next time I want to source some verses
from bs4 import BeautifulSoup as soup
import requests
import string
punctuation_trans_table = str.maketrans("", "", string.punctuation)
base_url = "http://ippc2.orst.edu/coopl/lyrics/"
index = requests.get(base_url + "albums.html")
parsed_index = soup(index.text)
all_links = parsed_index.find_all("a") # get all <a> tags
links = [l for l in all_links if l.text] # filter out image links
def to_filename(s, path="texts/townes_van_zandt/"):
'''Quick and dirty snake-casing'''
s = s.replace("&", "and") # special case, "Poncho & Lefty"
s = strip_punctuation(s)
s = s.lower()
s = s.replace(" ", "_")
s = path + s + ".txt"
return s
| 28.108696 | 67 | 0.657386 |
d3c48e47d2fa33e8114041e17aa2a33b9c9c1809 | 895 | py | Python | chapter04/ifelse.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
] | 3 | 2020-08-05T01:15:41.000Z | 2020-08-05T09:28:36.000Z | chapter04/ifelse.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
] | null | null | null | chapter04/ifelse.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
if elif else
Version: 0.1
Author: huijz
Date: 2020-08-24
"""
# 1:if
flag = False
name = 'huijz'
if name == 'python': # python
flag = True #
print 'welcome boss' #
else:
print name #
# 2:elif
num = 5
if num == 3: # num
print 'boss'
elif num == 2:
print 'user'
elif num == 1:
print 'worker'
elif num < 0: #
print 'error'
else:
print 'road' #
# 3if
num = 9
if 0 <= num <= 10: # 0~10
print 'hello'
# : hello
num = 10
if num < 0 or num > 10: # 010
print 'hello'
else:
print 'unDefine'
# : unDefine
num = 8
# 0~510~15
if (0 <= num <= 5) or (10 <= num <= 15):
print 'hello'
else:
print 'unDefine'
# : unDefine
# 4var = 100
var = 100
if var == 100: print " var 100"
print "Good bye!"
| 16.886792 | 41 | 0.606704 |
d3c5d75262328f54482b5a9f8b47cfdc49c36760 | 445 | py | Python | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
] | 39 | 2018-09-25T21:40:38.000Z | 2022-01-19T23:26:51.000Z | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
] | 22 | 2018-09-25T21:36:46.000Z | 2021-09-07T16:03:41.000Z | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
] | 9 | 2018-09-26T00:38:35.000Z | 2020-02-27T05:59:03.000Z | from setuptools import setup
from setuptools import find_packages
setup(
name="Jann",
version="4.0.0",
description="Jann is a Nearest Neighbour retrieval-based chatbot.",
author="Kory Mathewson",
author_email="korymath@gmail.com",
license="MIT",
url="https://github.com/korymath/jann",
packages=find_packages(),
setup_requires=[
"pytest-runner"
],
tests_require=[
"pytest"
],
)
| 20.227273 | 71 | 0.647191 |
d3c6c4df7fb938e9c1ce540f827eb9b023f7dd26 | 7,895 | py | Python | tests/scanner/scanners/ke_version_scanner_test.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
] | 1 | 2018-03-26T08:15:21.000Z | 2018-03-26T08:15:21.000Z | tests/scanner/scanners/ke_version_scanner_test.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
] | null | null | null | tests/scanner/scanners/ke_version_scanner_test.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KE Version Rule Scanner Tests."""
import unittest
import mock
from tests import unittest_utils
from google.cloud.security.common.gcp_type import (
ke_cluster as ke_cluster_type)
from google.cloud.security.common.gcp_type import (
organization as organization_type)
from google.cloud.security.common.gcp_type import project as project_type
from google.cloud.security.scanner.scanners import ke_version_scanner
# pylint: disable=bad-indentation
if __name__ == '__main__':
unittest.main()
| 41.119792 | 76 | 0.500823 |
d3c71f0ccce66077dfdcd88c05a9aa625f2426c0 | 1,147 | py | Python | metrics/utils.py | edwardyehuang/iSeg | 256b0f7fdb6e854fe026fa8df41d9a4a55db34d5 | [
"MIT"
] | 4 | 2021-12-13T09:49:26.000Z | 2022-02-19T11:16:50.000Z | metrics/utils.py | edwardyehuang/iSeg | 256b0f7fdb6e854fe026fa8df41d9a4a55db34d5 | [
"MIT"
] | 1 | 2021-07-28T10:40:56.000Z | 2021-08-09T07:14:06.000Z | metrics/utils.py | edwardyehuang/iSeg | 256b0f7fdb6e854fe026fa8df41d9a4a55db34d5 | [
"MIT"
] | null | null | null | # ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import tensorflow as tf
from iseg.metrics.seg_metric_wrapper import SegMetricWrapper
from iseg.metrics.mean_iou import MeanIOU
| 26.068182 | 118 | 0.569311 |
d3c7eb72e9d8627f04182ce89238416d18909674 | 1,436 | py | Python | src/core/stats.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
] | null | null | null | src/core/stats.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
] | null | null | null | src/core/stats.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
] | null | null | null | import math
def lognormal_mean(m, stddev):
    """Mean of ln(x) for a lognormal variable x with mean m and std. stddev.

    Args:
        m: mean of x (must be > 0)
        stddev: standard deviation of x

    Returns: mean of log x
    """
    variance_ratio = (stddev * stddev) / (m * m)
    return math.log(m) - 0.5 * math.log(1.0 + variance_ratio)
def lognormal_stddev(m, stddev):
    """Standard deviation of ln(x) for lognormal x with mean m and std. stddev.

    Args:
        m: mean of x (must be > 0)
        stddev: standard deviation of x

    Returns: std. of log x
    """
    variance_ratio = (stddev * stddev) / (m * m)
    return math.sqrt(math.log(variance_ratio + 1))
def lognormal_underlying_mean(m, stddev):
    """Mean of x given the mean and std. of the underlying normal ln(x).

    Inverse of :func:`lognormal_mean`/:func:`lognormal_stddev`.

    Args:
        m: mean of log x
        stddev: std of log x

    Returns: mean of x, i.e. exp(m + stddev**2 / 2)
    """
    # Dead commented-out guard for m == 0 / stddev == 0 removed: those inputs
    # are perfectly valid here (exp handles them), unlike in lognormal_mean.
    return math.exp(m + 0.5 * stddev * stddev)
def lognormal_underlying_stddev(m, stddev):
    """Std. of x given the mean and std. of the underlying normal ln(x).

    Args:
        m: mean of log x
        stddev: std of log x

    Returns: std of x, i.e. sqrt((exp(stddev**2) - 1) * exp(2*m + stddev**2))
    """
    # Dead commented-out debug guard and the alternative (equivalent)
    # formulation were removed; the closed-form expression below is standard.
    return math.sqrt((math.exp(stddev**2.0) - 1.0) *
                     math.exp(2.0*m + stddev**2.0))
| 23.16129 | 77 | 0.521588 |
d3c82c2d822564092119880c7f993bd3fd1d721b | 5,720 | py | Python | vim.d/vimfiles/bundle/taghighlight/plugin/TagHighlight/module/languages.py | lougxing/gbox | f28402d97cacd22b5e564003af72c4022908cb4d | [
"MIT"
] | null | null | null | vim.d/vimfiles/bundle/taghighlight/plugin/TagHighlight/module/languages.py | lougxing/gbox | f28402d97cacd22b5e564003af72c4022908cb4d | [
"MIT"
] | 13 | 2020-01-28T22:30:33.000Z | 2022-03-02T14:57:16.000Z | vim.d/vimfiles/bundle/taghighlight/plugin/TagHighlight/module/languages.py | lougxing/gbox | f28402d97cacd22b5e564003af72c4022908cb4d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Tag Highlighter:
# Author: A. S. Budden <abudden _at_ gmail _dot_ com>
# Copyright: Copyright (C) 2009-2013 A. S. Budden
# Permission is hereby granted to use and distribute this code,
# with or without modifications, provided that this copyright
# notice is copied with it. Like anything else that's free,
# the TagHighlight plugin is provided *as is* and comes with no
# warranty of any kind, either expressed or implied. By using
# this plugin, you agree that in no event will the copyright
# holder be liable for any damages resulting from the use
# of this software.
# ---------------------------------------------------------------------
import os
import glob
from .config import config
from .loaddata import LoadDataFile, LoadFile, GlobData
from .debug import Debug
| 38.648649 | 110 | 0.544755 |
d3c84323f9dc3dcd909b2c9adb14b8efc078f1c5 | 6,099 | py | Python | archive/bayes_sensor.py | robmarkcole/HASS-data-science | 7edd07a1519682683b42d140d6268a87d91522ec | [
"MIT"
] | 11 | 2018-01-21T02:37:02.000Z | 2022-01-20T03:32:40.000Z | archive/bayes_sensor.py | robmarkcole/HASS-data-science | 7edd07a1519682683b42d140d6268a87d91522ec | [
"MIT"
] | null | null | null | archive/bayes_sensor.py | robmarkcole/HASS-data-science | 7edd07a1519682683b42d140d6268a87d91522ec | [
"MIT"
] | 8 | 2017-12-19T14:05:33.000Z | 2021-12-08T09:54:06.000Z | """
Bayes sensor code split out from
https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/binary_sensor/bayesian.py
This module is used to explore the sensor.
"""
from collections import OrderedDict
from const import *
def update_probability(prior, prob_true, prob_false):
    """Return the posterior probability via Bayes' rule.

    P(H|E) = P(E|H) * P(H) / (P(E|H) * P(H) + P(E|~H) * P(~H))
    """
    evidence = prob_true * prior + prob_false * (1 - prior)
    return (prob_true * prior) / evidence
def setup_platform(config):
    """Set up the Bayesian Binary sensor (synchronous port of
    async_setup_platform)."""
    sensor_name = config[CONF_NAME]
    sensor_observations = config[CONF_OBSERVATIONS]
    sensor_prior = config[CONF_PRIOR]
    threshold = config[CONF_PROBABILITY_THRESHOLD]
    device_class = config[CONF_DEVICE_CLASS]
    return BayesianBinarySensor(sensor_name, sensor_prior, sensor_observations,
                                threshold, device_class)
| 33.510989 | 108 | 0.643384 |
d3c8f408394af973ef52e2eab96bcf7c6c3f5ac5 | 27,212 | py | Python | CoarseNet/MinutiaeNet_utils.py | khaihp98/minutiae | afb7feff33ef86a673f899006aded486964f18dc | [
"MIT"
] | null | null | null | CoarseNet/MinutiaeNet_utils.py | khaihp98/minutiae | afb7feff33ef86a673f899006aded486964f18dc | [
"MIT"
] | null | null | null | CoarseNet/MinutiaeNet_utils.py | khaihp98/minutiae | afb7feff33ef86a673f899006aded486964f18dc | [
"MIT"
] | null | null | null | import os
import glob
import shutil
import logging
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage, misc, signal, spatial
from skimage.filters import gaussian, gabor_kernel
import cv2
import math
def gaussian2d(shape=(5, 5), sigma=0.5):
    """
    2D gaussian mask - should give the same result as MATLAB's
    fspecial('gaussian',[shape],[sigma])
    """
    half_rows, half_cols = ((dim - 1.) / 2. for dim in shape)
    rows = np.arange(-half_rows, half_rows + 1).reshape(-1, 1)
    cols = np.arange(-half_cols, half_cols + 1).reshape(1, -1)
    kernel = np.exp(-(cols * cols + rows * rows) / (2. * sigma * sigma))
    # Zero out entries that are negligibly small relative to the peak,
    # mirroring MATLAB's eps-based truncation.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    total = kernel.sum()
    if total != 0:
        kernel /= total  # normalize so the mask sums to 1
    return kernel
| 32.785542 | 118 | 0.561921 |
d3c9a9f08cb2ab991b3fa5be8156332e24b37380 | 52 | py | Python | config/paths.py | fusic-com/flask-todo | 909ce22132ed081feca02e2fb255afa08b59611d | [
"MIT"
] | 34 | 2015-01-08T07:11:54.000Z | 2021-08-28T23:55:25.000Z | config/paths.py | spacecode-live/flask-todo | 909ce22132ed081feca02e2fb255afa08b59611d | [
"MIT"
] | null | null | null | config/paths.py | spacecode-live/flask-todo | 909ce22132ed081feca02e2fb255afa08b59611d | [
"MIT"
] | 13 | 2015-02-10T09:48:53.000Z | 2021-03-02T15:23:21.000Z | from settings import VAR_DIR
CACHE=VAR_DIR/'cache'
| 13 | 28 | 0.807692 |
d3c9f4c940421bb8e75ec41e434f5dfd39d574c9 | 1,687 | py | Python | Android.py | ChakradharG/Sudoku-Core | 5963db235cecec4cc6682380c30b7af10a3c4d11 | [
"MIT"
] | null | null | null | Android.py | ChakradharG/Sudoku-Core | 5963db235cecec4cc6682380c30b7af10a3c4d11 | [
"MIT"
] | 1 | 2022-02-10T07:19:40.000Z | 2022-02-10T07:19:40.000Z | Android.py | ChakradharG/Sudoku-Solver | 5963db235cecec4cc6682380c30b7af10a3c4d11 | [
"MIT"
] | null | null | null | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #To suppress warnings thrown by tensorflow
from time import sleep
import numpy as np
from cv2 import cv2
import pyautogui as pg
import Sudoku_Core as SC
import OCR
# Geometry constants for the Sudoku board screenshot.
s = 513//9 #Size of board//9 -- edge length of one cell in pixels
fs = 25 #Size of the final image (per-cell crop; presumably what OCR consumes -- confirm in OCR module)
if __name__ == '__main__':
    main()
| 24.449275 | 109 | 0.653231 |
d3cb3a07ae3bcc910cc22e9e664b83887e73f8fe | 3,570 | py | Python | app/model.py | kurapikaaaa/CITS3403Project | 8958219845d5251830f2abd7c58dfd87d97b8c4a | [
"MIT"
] | 1 | 2021-08-04T12:50:57.000Z | 2021-08-04T12:50:57.000Z | app/model.py | kurapikaaaa/CITS3403Project | 8958219845d5251830f2abd7c58dfd87d97b8c4a | [
"MIT"
] | null | null | null | app/model.py | kurapikaaaa/CITS3403Project | 8958219845d5251830f2abd7c58dfd87d97b8c4a | [
"MIT"
] | 1 | 2021-08-12T10:40:28.000Z | 2021-08-12T10:40:28.000Z | from app import db, login
from flask_login import UserMixin
from datetime import datetime
from flask import url_for, redirect
from werkzeug.security import generate_password_hash, check_password_hash
| 30.254237 | 81 | 0.654622 |
d3cb4fc2b23e4f4fb2c765f3d7673f2b43240708 | 19,911 | py | Python | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
] | 1 | 2021-07-11T14:07:59.000Z | 2021-07-11T14:07:59.000Z | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
] | null | null | null | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/12_top.ipynb (unless otherwise specified).
__all__ = ['empty_tensor_handling_loss', 'nan_loss_handling', 'create_dummy_if_empty', 'BaseTop', 'SequenceLabel',
'Classification', 'PreTrain', 'Seq2Seq', 'MultiLabelClassification', 'MaskLM']
# Cell
import logging
from functools import partial
from typing import Dict, Tuple, Union
import tensorflow as tf
import tensorflow_addons as tfa
import transformers
from transformers.modeling_tf_utils import TFSharedEmbeddings
from tensorflow_addons.layers.crf import CRF
from tensorflow_addons.text.crf import crf_log_likelihood
from .params import BaseParams
from .utils import gather_indexes
class BaseTop(tf.keras.Model):
# Cell
# Cell
# Cell
# Cell
# Cell
# Cell
| 42.095137 | 138 | 0.638491 |
d3cc0f069903b9e861ac782e53bdcec6efa743dd | 3,332 | py | Python | challenge/utils/cancellation_code.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | challenge/utils/cancellation_code.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | challenge/utils/cancellation_code.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | import re
def evaluate_cancellation_code(cancellation_code: str, booking_time_before: int, stay_duration: int) -> float:
    """Estimate the expected cancellation fine as a fraction of the stay price.

    The code is parsed by ``process_cancellation_code`` into 3-element
    policy entries; the entry with the smallest day threshold strictly above
    min(7, booking_time_before) determines the fine (see fine_after_x_days
    for the same entry layout).
    """
    policies = process_cancellation_code(cancellation_code)
    horizon = min(7, booking_time_before)
    applicable = [policy for policy in policies if policy[0] > horizon]
    if not applicable:
        return 0
    best = min(applicable, key=lambda policy: policy[0])
    if best[1] is None:
        return best[2]
    return best[1] / stay_duration
def no_show(cancellation_code: str) -> int:
    """Whether the cancellation policy contains a no-show fee.

    Truthy when any parsed policy entry has a day threshold of 0.
    """
    for policy in process_cancellation_code(cancellation_code):
        if policy[0] == 0:
            return True
    return False
def fine_after_x_days(cancellation_code: str, booking_time_before: int, stay_duration: int, days: int):
    """Expected fine (fraction of the stay price) when cancelling `days`
    days after the booking was made.
    """
    remaining = booking_time_before - days
    if remaining < 0:
        # Cancellation would fall after the reservation date itself.
        return 0
    # Convert each policy entry to (day_threshold, fractional_fine) and keep
    # only the thresholds still ahead of the cancellation moment.
    applicable = []
    for policy in process_cancellation_code(cancellation_code):
        fine = policy[2] if policy[1] is None else policy[1] / stay_duration
        if policy[0] > remaining:
            applicable.append((policy[0], fine))
    if not applicable:
        return 0
    # The nearest upcoming threshold wins.
    return min(applicable, key=lambda entry: entry[0])[1]
| 40.634146 | 117 | 0.680972 |
d3cd8f9eafbfda626f2013905a1df1f02a7ae23e | 1,163 | py | Python | acronym/scoring.py | sigma67/acronym | b197d12aa843fbf0e74efb67361f74b8157cc3e1 | [
"MIT"
] | 340 | 2018-03-30T21:00:54.000Z | 2022-03-25T20:05:45.000Z | acronym/scoring.py | sigma67/acronym | b197d12aa843fbf0e74efb67361f74b8157cc3e1 | [
"MIT"
] | 12 | 2018-03-30T15:48:05.000Z | 2020-07-16T08:27:02.000Z | acronym/scoring.py | sigma67/acronym | b197d12aa843fbf0e74efb67361f74b8157cc3e1 | [
"MIT"
] | 29 | 2018-03-30T16:55:34.000Z | 2022-02-25T03:20:26.000Z | import re
# Matches any character that is not an ASCII letter.  NOTE(review): unused in
# this chunk -- presumably consumed by score_word elsewhere in the module.
regex = re.compile('[^a-zA-Z]')
def score_acronym(capitalized_acronym, corpus=None):
    """Score an acronym by summing ``score_word`` over its space-separated
    words, then subtracting the flat 10 points.

    Per-letter scoring (implemented in score_word):
    * 10 points if first letter in a word (with exception of first letter)
    * 3 point if second or last letter in a word
    * 1 point otherwise
    * N bonus points if begins an N-length valid sub-word
      (ex: multiVariable -> 8 bonus points)
    * 2 bonus points if immediately following a capitalizd letter
    """
    total = 0
    for word in capitalized_acronym.split(' '):
        total += score_word(word, corpus=corpus)
    return total - 10
| 31.432432 | 97 | 0.575236 |
d3ce35364812f96b726436b7cd0cab140d019f97 | 956 | py | Python | e2e_test.py | bartossh/hebbian_mirror | 2d080ae7a707845e0922894e5cee2ad7b0119e8f | [
"MIT"
] | 2 | 2019-11-15T09:10:19.000Z | 2019-12-26T15:05:16.000Z | e2e_test.py | bartOssh/hebbian_mirror | 2d080ae7a707845e0922894e5cee2ad7b0119e8f | [
"MIT"
] | 1 | 2019-11-07T11:06:09.000Z | 2019-11-07T11:06:09.000Z | e2e_test.py | bartOssh/hebbian_mirror | 2d080ae7a707845e0922894e5cee2ad7b0119e8f | [
"MIT"
] | null | null | null | import requests
num_of_iter = 2
# Read the request payload once up front; the context manager guarantees the
# file handle is closed (the original `open(...).read()` leaked it).
with open('./assets/test.jpg', 'rb') as image_file:
    data = image_file.read()
for i in range(0, num_of_iter):
    # Smoke-test the GET endpoint listing the recognized box names.
    res = requests.get(
        url='http://0.0.0.0:8000/recognition/object/boxes_names'
    )
    print("\n RESPONSE GET boxes names for test number {}: \n {}"
          .format(i, res.__dict__))
    # POST the raw JPEG bytes and expect bounding boxes back.
    res = requests.post(url='http://0.0.0.0:8000/recognition/object/boxes',
                        data=data,
                        headers={'Content-Type': 'application/octet-stream'})
    print("\n RESPONSE POST to boxes, test num {} \n Sending buffer length: {},\n Received {}"
          .format(i, len(data), res.__dict__))
    # POST the same bytes to the endpoint that returns an annotated image.
    res = requests.post(url='http://0.0.0.0:8000/recognition/object/image',
                        data=data,
                        headers={'Content-Type': 'application/octet-stream'})
    print("\n RESPONSE POST to image, test num {} \n Sending buffer length: {},\n Received {}"
          .format(i, len(data), res))
d3ce5432cde433f90fde37eb3f5e56f8a23b111c | 7,698 | py | Python | appendix/auc_accuracy/train_nn_metric.py | rit-git/tagging | b075ce1553492be7088026b67f525a529bf03770 | [
"Apache-2.0"
] | 7 | 2020-11-21T03:45:34.000Z | 2022-03-25T00:40:20.000Z | appendix/auc_accuracy/train_nn_metric.py | rit-git/tagging | b075ce1553492be7088026b67f525a529bf03770 | [
"Apache-2.0"
] | null | null | null | appendix/auc_accuracy/train_nn_metric.py | rit-git/tagging | b075ce1553492be7088026b67f525a529bf03770 | [
"Apache-2.0"
] | 5 | 2020-09-21T15:07:21.000Z | 2021-06-02T20:25:36.000Z | import argparse
import os
import torch
import torch.nn as nn
from torchtext.data import TabularDataset, BucketIterator
from torchtext.data import Field
from torchtext.vocab import Vectors, GloVe
from tqdm import tqdm, trange
import sys
import os
sys.path.insert(0, "../../pyfunctor")
sys.path.insert(0, "../../model")
from cnn import CNNModel
from lstm import LSTMModel
from bilstm import BILSTMModel
from sklearn import metrics
import csv_handler as csv_handler
import transform as transform
import time
#from util.weight import WeightClassCSV
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == "__main__":
    # Wall-clock reference for the train/test timing columns logged below.
    start_time = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset",
                        default=None,
                        type=str,
                        required=True,
                        help="Dataset folder")
    parser.add_argument("--model",
                        default=None,
                        type=str,
                        required=True,
                        help="Model type: CNN, LSTM or BILSTM")
    parser.add_argument("--glove",
                        default="840B",
                        type=str,
                        help="Golve version (6B, 42B, 840B)")
    parser.add_argument("--emb_size",
                        default=300,
                        type=int,
                        help="Golve embedding size (100, 200, 300)")
    parser.add_argument("--max_seq_length",
                        default=256,
                        type=int,
                        help="Maximum sequence length")
    parser.add_argument("--num_epoch",
                        default=9,
                        type=int,
                        help="Number of training epoch")
    parser.add_argument("--batch_size",
                        default=32,
                        type=int,
                        help="Batch size")
    parser.add_argument("--lr",
                        default=1e-4,
                        type=float,
                        help="Learning rate")
    # NOTE(review): argparse `type=bool` is a known pitfall -- any non-empty
    # string (including "False") parses as True.  Consider
    # action="store_true" instead; confirm how callers pass this flag.
    parser.add_argument("--fix_emb",
                        default=False,
                        type=bool,
                        help="Fix embedding layer")
    # NOTE(review): default=False with type=str is inconsistent, but the
    # argument is required=True so the default is never used.
    parser.add_argument("--log_file",
                        default=False,
                        type=str,
                        required=True,
                        help="log file path")
    args = parser.parse_args()
    # Load data
    print("Loading data ...")
    train_iter, test_iter, vocab_size, vocab_weights = load_data(args.dataset,
        args.batch_size, args.max_seq_length, glove=args.glove, emb_size=args.emb_size)
    # Initialize model
    assert args.model in ["CNN", "LSTM", "BILSTM"], "Only support CNN, LSTM or BILSTM."
    if args.model == "CNN":
        model = CNNModel(vocab_size, args.emb_size, args.max_seq_length,
                         weights=vocab_weights, fix_emb_weight=args.fix_emb)
    elif args.model == "LSTM":
        model = LSTMModel(vocab_size, args.emb_size, args.max_seq_length,
                          weights=vocab_weights, fix_emb_weight=args.fix_emb)
    else:
        model = BILSTMModel(vocab_size, args.emb_size, args.max_seq_length,
                            weights=vocab_weights, fix_emb_weight=args.fix_emb)
    model = model.to(device)
    # Train
    print("Training %s ..." % args.model)
    # NOTE(review): `params` is computed but never used -- the optimizer is
    # built from model.parameters() directly, so frozen-embedding weights
    # are still handed to Adam.  Confirm whether `params` was intended here.
    params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    loss_func = nn.CrossEntropyLoss()
    #label_weight = WeightClassCSV(args.dataset + "/train.csv").get_weights(['0', '1'])
    #loss_func = nn.CrossEntropyLoss(weight = torch.tensor(label_weight).to(device))
    model.train()
    # Standard minibatch SGD loop: forward, loss, backward, step.
    for epoch in trange(args.num_epoch, desc="Epoch"):
        total_loss = 0
        for idx, batch in enumerate(tqdm(train_iter, desc="Iteration")):
            inputs, labels = batch.sent, batch.label
            inputs = inputs.to(device)
            labels = labels.to(device)
            logits = model(inputs)
            loss = loss_func(logits, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.data.item()
        print("\tEpoch %d, total loss: %f" % (epoch, total_loss))
    train_finish_time = time.time()
    train_overall_time = train_finish_time - start_time
    # Evaluate
    print("Evaluating ...")
    model.eval()
    predicts = []
    golds = []
    predicted_proba = []
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        for idx, batch in enumerate(tqdm(test_iter, desc="Iteration")):
            inputs, labels = batch.sent, batch.label
            inputs = inputs.to(device)
            logits = model(inputs)
            predicted_proba += list(logits.data.cpu().numpy())
            predict = torch.argmax(logits, dim=1).data.cpu().numpy()
            predicts += list(predict)
            golds += list(labels.data.cpu().numpy())
    precision, recall, f1 = F1(predicts, golds)
    print("Precision: %f, Recall: %f, F1: %f" % (precision, recall, f1))
    train_time = train_overall_time
    test_time = time.time() - train_finish_time
    print(metrics.classification_report(golds, predicts))
    (precision, recall, fscore, support) = metrics.precision_recall_fscore_support(golds, predicts)
    # Append one CSV row per run: dataset, positive-class P/R/F, timings,
    # AUC and accuracy.  Index [1] picks the positive class's metrics.
    log_row = []
    log_row.append(args.dataset)
    log_row.append(precision[1])
    log_row.append(recall[1])
    log_row.append(fscore[1])
    log_row.append(train_time)
    log_row.append(test_time)
    # NOTE(review): raw logits (not softmax probabilities) feed roc_auc_score;
    # AUC is rank-based so this is still valid, but confirm it is intentional.
    pos_predicted = transform.map_func(predicted_proba, lambda p : p[1])
    auc = metrics.roc_auc_score(golds, pos_predicted)
    log_row.append(auc)
    accuracy = metrics.accuracy_score(golds, predicts)
    log_row.append(accuracy)
    csv_handler.append_row(args.log_file, log_row)
d3ce6f7210df816909e214cda327fef650ba334a | 1,566 | py | Python | setup.py | teamproserve/pinkopy | 48842ac26aff90728482f7cac2977f56d5fc579f | [
"MIT"
] | null | null | null | setup.py | teamproserve/pinkopy | 48842ac26aff90728482f7cac2977f56d5fc579f | [
"MIT"
] | null | null | null | setup.py | teamproserve/pinkopy | 48842ac26aff90728482f7cac2977f56d5fc579f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
import sys
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
with open('README.md') as f:
readme = f.read()
install_requires = [
'cachetools>=1.1.5',
'requests>=2.7.0',
'xmltodict>=0.9.2',
]
tests_require = [
'pytest',
'requests-mock==0.7.0'
]
setup(
name='pinkopy',
version='2.1.3-dev',
description='Python wrapper for Commvault api',
long_description=readme,
author='Herkermer Sherwood',
author_email='theherk@gmail.com',
url='https://github.com/teamproserve/pinkopy',
download_url='https://github.com/teamproserve/pinkopy/archive/2.1.3-dev.zip',
packages=find_packages(),
platforms=['all'],
license='MIT',
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=tests_require,
classifiers=[
'Development Status :: 4 - Beta',
'License :: Other/Proprietary License',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
],
)
| 27.964286 | 81 | 0.621328 |
d3cf16476f10f947ae96e115903c726cd418feaf | 24,050 | py | Python | scss/extension/core.py | xen0n/pyScss | 86712d21fe7c3abdd7e593973fb35010422f1a41 | [
"MIT"
] | null | null | null | scss/extension/core.py | xen0n/pyScss | 86712d21fe7c3abdd7e593973fb35010422f1a41 | [
"MIT"
] | null | null | null | scss/extension/core.py | xen0n/pyScss | 86712d21fe7c3abdd7e593973fb35010422f1a41 | [
"MIT"
] | null | null | null | """Extension for built-in Sass functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from itertools import product
import math
import os.path
from pathlib import PurePosixPath
from six.moves import xrange
from scss.extension import Extension
from scss.namespace import Namespace
from scss.source import SourceFile
from scss.types import (
Arglist, Boolean, Color, List, Null, Number, String, Map, expect_type)
# Alias to make the below declarations less noisy
ns = CoreExtension.namespace
# ------------------------------------------------------------------------------
# Color creation
# ------------------------------------------------------------------------------
# Color inspection
# ------------------------------------------------------------------------------
# Color modification
def _scale_channel(channel, scaleby):
    """Scale a [0, 1] color channel toward 1 (positive %) or toward 0
    (negative %); a None `scaleby` leaves the channel untouched."""
    if scaleby is None:
        return channel
    expect_type(scaleby, Number)
    if not scaleby.is_simple_unit('%'):
        raise ValueError("Expected percentage, got %r" % (scaleby,))
    fraction = scaleby.value / 100
    if fraction > 0:
        # Positive: close `fraction` of the gap between the channel and 1.
        return channel + (1 - channel) * fraction
    # Non-positive: keep (1 + fraction) of the existing value; adding works
    # because `fraction` is already negative (or zero).
    return channel * (1 + fraction)
# ------------------------------------------------------------------------------
# String functions
# TODO this and several others should probably also require integers
# TODO and assert that the indexes are valid
# ------------------------------------------------------------------------------
# Number functions
# Register Sass's one-argument numeric functions, each delegating to the
# corresponding Python builtin / math routine via Number.wrap_python_function.
ns.set_function('abs', 1, Number.wrap_python_function(abs))
ns.set_function('round', 1, Number.wrap_python_function(round))
ns.set_function('ceil', 1, Number.wrap_python_function(math.ceil))
ns.set_function('floor', 1, Number.wrap_python_function(math.floor))
# ------------------------------------------------------------------------------
# List functions
# TODO get the compass bit outta here
# TODO get the compass bit outta here
# TODO need a way to use "list" as the arg name without shadowing the builtin
# ------------------------------------------------------------------------------
# Map functions
# DEVIATIONS: these do not exist in ruby sass
# ------------------------------------------------------------------------------
# Meta functions
# ------------------------------------------------------------------------------
# Miscellaneous
| 27.052868 | 80 | 0.614012 |
d3d01a6d5b6d4e91e1847f49e77097d90f67ce9c | 906 | py | Python | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | 34 | 2015-07-09T04:53:27.000Z | 2021-07-19T05:22:27.000Z | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | 6 | 2015-05-30T17:20:45.000Z | 2017-06-12T14:29:23.000Z | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | 11 | 2015-09-07T14:26:08.000Z | 2020-04-10T07:20:41.000Z | from pypy.module.cpyext.test.test_api import BaseApiTest
| 39.391304 | 62 | 0.684327 |
d3d143abd1287d1ebf9fec072e925b1f0bce15d1 | 21,407 | py | Python | capsule_em/experiment.py | jrmendeshurb/google-research | f9fa8cdd2fb77975b524371fd29df008b9dc6cf4 | [
"Apache-2.0"
] | 6 | 2019-12-16T04:23:57.000Z | 2021-12-09T14:17:14.000Z | capsule_em/experiment.py | jrmendeshurb/google-research | f9fa8cdd2fb77975b524371fd29df008b9dc6cf4 | [
"Apache-2.0"
] | 13 | 2020-01-28T22:19:53.000Z | 2022-02-10T00:39:26.000Z | capsule_em/experiment.py | ZachT1711/google-research | 662e6837a3efa0c40b11cb4122447c4b028d2115 | [
"Apache-2.0"
] | 1 | 2020-03-05T09:24:01.000Z | 2020-03-05T09:24:01.000Z | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The runners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
import tensorflow as tf
from capsule_em import model as f_model
from capsule_em.mnist \
import mnist_record
from capsule_em.norb \
import norb_record
from tensorflow.contrib import tfprof as contrib_tfprof
from tensorflow.python import debug as tf_debug
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_prime_capsules', 32,
'Number of first layer capsules.')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate')
tf.app.flags.DEFINE_integer('routing_iteration', 3,
'Number of iterations for softmax routing')
tf.app.flags.DEFINE_float(
'routing_rate', 1,
'ratio for combining routing logits and routing feedback')
tf.app.flags.DEFINE_float('decay_rate', 0.96, 'ratio for learning rate decay')
tf.app.flags.DEFINE_integer('decay_steps', 20000,
'number of steps for learning rate decay')
tf.app.flags.DEFINE_bool('normalize_kernels', False,
'Normalize the capsule weight kernels')
tf.app.flags.DEFINE_integer('num_second_atoms', 16,
'number of capsule atoms for the second layer')
tf.app.flags.DEFINE_integer('num_primary_atoms', 16,
'number of capsule atoms for the first layer')
tf.app.flags.DEFINE_integer('num_start_conv', 32,
'number of channels for the start layer')
tf.app.flags.DEFINE_integer('kernel_size', 5,
'kernel size for the start layer.')
tf.app.flags.DEFINE_integer(
'routing_iteration_prime', 1,
'number of routing iterations for primary capsules.')
tf.app.flags.DEFINE_integer('max_steps', 2000000,
'Number of steps to run trainer.')
tf.app.flags.DEFINE_string('data_dir', '/datasets/mnist/',
'Directory for storing input data')
tf.app.flags.DEFINE_string('summary_dir',
'/tmp/tensorflow/mnist/logs/mnist_with_summaries',
'Summaries log directory')
tf.app.flags.DEFINE_bool('train', True, 'train or test.')
tf.app.flags.DEFINE_integer(
'checkpoint_steps', 1500,
'number of steps before saving a training checkpoint.')
tf.app.flags.DEFINE_bool('verbose_image', False, 'whether to show images.')
tf.app.flags.DEFINE_bool('multi', True,
'whether to use multiple digit dataset.')
tf.app.flags.DEFINE_bool('eval_once', False,
'whether to evaluate once on the ckpnt file.')
tf.app.flags.DEFINE_integer('eval_size', 24300,
'number of examples to evaluate.')
tf.app.flags.DEFINE_string(
'ckpnt',
'/tmp/tensorflow/mnist/logs/mnist_with_summaries/train/model.ckpnt',
'The checkpoint to load and evaluate once.')
tf.app.flags.DEFINE_integer('keep_ckpt', 5, 'number of examples to evaluate.')
tf.app.flags.DEFINE_bool(
'clip_lr', False, 'whether to clip learning rate to not go bellow 1e-5.')
tf.app.flags.DEFINE_integer('stride_1', 2,
'stride for the first convolutinal layer.')
tf.app.flags.DEFINE_integer('kernel_2', 9,
'kernel size for the secon convolutinal layer.')
tf.app.flags.DEFINE_integer('stride_2', 2,
'stride for the second convolutinal layer.')
tf.app.flags.DEFINE_string('padding', 'VALID',
'the padding method for conv layers.')
tf.app.flags.DEFINE_integer('extra_caps', 2, 'number of extra conv capsules.')
tf.app.flags.DEFINE_string('caps_dims', '32,32',
'output dim for extra conv capsules.')
tf.app.flags.DEFINE_string('caps_strides', '2,1',
'stride for extra conv capsules.')
tf.app.flags.DEFINE_string('caps_kernels', '3,3',
'kernel size for extra conv capsuls.')
tf.app.flags.DEFINE_integer('extra_conv', 0, 'number of extra conv layers.')
tf.app.flags.DEFINE_string('conv_dims', '', 'output dim for extra conv layers.')
tf.app.flags.DEFINE_string('conv_strides', '', 'stride for extra conv layers.')
tf.app.flags.DEFINE_string('conv_kernels', '',
'kernel size for extra conv layers.')
tf.app.flags.DEFINE_bool('leaky', False, 'Use leaky routing.')
tf.app.flags.DEFINE_bool('staircase', False, 'Use staircase decay.')
tf.app.flags.DEFINE_integer('num_gpus', 1, 'number of gpus to train.')
tf.app.flags.DEFINE_bool('adam', True, 'Use Adam optimizer.')
tf.app.flags.DEFINE_bool('pooling', False, 'Pooling after convolution.')
tf.app.flags.DEFINE_bool('use_caps', True, 'Use capsule layers.')
tf.app.flags.DEFINE_integer(
'extra_fc', 512, 'number of units in the extra fc layer in no caps mode.')
tf.app.flags.DEFINE_bool('dropout', False, 'Dropout before last layer.')
tf.app.flags.DEFINE_bool('tweak', False, 'During eval recons from tweaked rep.')
tf.app.flags.DEFINE_bool('softmax', False, 'softmax loss in no caps.')
tf.app.flags.DEFINE_bool('c_dropout', False, 'dropout after conv capsules.')
tf.app.flags.DEFINE_bool(
'distort', True,
'distort mnist images by cropping to 24 * 24 and rotating by 15 degrees.')
tf.app.flags.DEFINE_bool('restart', False, 'Clean train checkpoints.')
tf.app.flags.DEFINE_bool('use_em', True,
'If set use em capsules with em routing.')
tf.app.flags.DEFINE_float('final_beta', 0.01, 'Temperature at the sigmoid.')
tf.app.flags.DEFINE_bool('eval_ensemble', False, 'eval over aggregated logits.')
tf.app.flags.DEFINE_string('part1', 'ok', 'ok')
tf.app.flags.DEFINE_string('part2', 'ok', 'ok')
tf.app.flags.DEFINE_bool('debug', False, 'If set use tfdbg wrapper.')
tf.app.flags.DEFINE_bool('reduce_mean', False,
'If set normalize mean of each image.')
tf.app.flags.DEFINE_float('loss_rate', 1.0,
'classification to regularization rate.')
tf.app.flags.DEFINE_integer('batch_size', 64, 'Batch size.')
tf.app.flags.DEFINE_integer('norb_pixel', 48, 'Batch size.')
tf.app.flags.DEFINE_bool('patching', True, 'If set use patching for eval.')
tf.app.flags.DEFINE_string('data_set', 'norb', 'the data set to use.')
tf.app.flags.DEFINE_string('cifar_data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_string('norb_data_dir', '/tmp/smallNORB/',
"""Path to the norb data directory.""")
tf.app.flags.DEFINE_string('affnist_data_dir', '/tmp/affnist_data',
"""Path to the affnist data directory.""")
# Number of output classes for each value accepted by the data_set flag.
num_classes = {
    'mnist': 10,
    'cifar10': 10,
    'mnist_multi': 10,
    'svhn': 10,
    'affnist': 10,
    'expanded_mnist': 10,
    'norb': 5,
}
def get_features(train, total_batch):
  """Build one queue-based input pipeline per GPU tower.

  The total batch is split evenly across towers, each tower's input ops are
  pinned to the CPU under its own name scope, and the dataset-specific
  reader is selected from FLAGS.data_set. Returns the list of per-tower
  feature dicts.
  """
  print(FLAGS.data_set)
  per_tower = total_batch // max(1, FLAGS.num_gpus)
  split = 'train' if train else 'test'

  def _tower_batch():
    # Select the reader matching FLAGS.data_set; every argument mirrors
    # the original call sites exactly.
    if FLAGS.data_set == 'norb':
      return norb_record.inputs(
          train_dir=FLAGS.norb_data_dir,
          batch_size=per_tower,
          split=split,
          multi=FLAGS.multi,
          image_pixel=FLAGS.norb_pixel,
          distort=FLAGS.distort,
          patching=FLAGS.patching,
      )
    if FLAGS.data_set == 'affnist':
      return mnist_record.inputs(
          train_dir=FLAGS.affnist_data_dir,
          batch_size=per_tower,
          split=split,
          multi=FLAGS.multi,
          shift=0,
          height=40,
          train_file='test.tfrecords')
    if FLAGS.data_set == 'expanded_mnist':
      return mnist_record.inputs(
          train_dir=FLAGS.data_dir,
          batch_size=per_tower,
          split=split,
          multi=FLAGS.multi,
          height=40,
          train_file='train_6shifted_6padded_mnist.tfrecords',
          shift=6)
    # Plain MNIST: shift by 2 pixels during training unless distortion
    # already provides the augmentation.
    shift = 2 if (train and not FLAGS.distort) else 0
    return mnist_record.inputs(
        train_dir=FLAGS.data_dir,
        batch_size=per_tower,
        split=split,
        multi=FLAGS.multi,
        shift=shift,
        distort=FLAGS.distort)

  features = []
  for tower in xrange(FLAGS.num_gpus):
    with tf.device('/cpu:0'):
      with tf.name_scope('input_tower_%d' % (tower)):
        features.append(_tower_batch())
  print(features)
  return features
def run_training():
  """Train the model, checkpointing and summarizing as it goes.

  Builds the multi-GPU graph from get_features/f_model.multi_gpu_model,
  prints tfprof parameter and FLOP statistics, then runs the train op in a
  queue-runner loop. Checkpoints go to FLAGS.summary_dir + '/train' every
  FLAGS.checkpoint_steps steps; training resumes from the latest checkpoint
  unless FLAGS.restart is set, in which case the train dir is wiped.
  """
  with tf.Graph().as_default():
    # Input images and labels.
    features = get_features(True, FLAGS.batch_size)
    model = f_model.multi_gpu_model
    print('so far so good!')
    result = model(features)
    # Report trainable-parameter counts via tfprof.
    param_stats = contrib_tfprof.model_analyzer.print_model_analysis(
        tf.get_default_graph(),
        tfprof_options=contrib_tfprof.model_analyzer
        .TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
    sys.stdout.write('total_params: %d\n' % param_stats.total_parameters)
    # Report FLOP counts via tfprof.
    contrib_tfprof.model_analyzer.print_model_analysis(
        tf.get_default_graph(),
        tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
    merged = result['summary']
    train_step = result['train']
    # test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test')
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    # Optionally wrap the session in the tfdbg curses CLI debugger.
    if FLAGS.debug:
      sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type='curses')
      sess.add_tensor_filter('has_inf_or_nan', tf_debug.has_inf_or_nan)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    saver = tf.train.Saver(max_to_keep=FLAGS.keep_ckpt)
    # Resume from an existing checkpoint unless FLAGS.restart is set.
    if tf.gfile.Exists(FLAGS.summary_dir + '/train'):
      ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
      print(ckpt)
      if (not FLAGS.restart) and ckpt and ckpt.model_checkpoint_path:
        # NOTE(review): 'hesllo' / 'what??' below are leftover debug prints.
        print('hesllo')
        saver.restore(sess, ckpt.model_checkpoint_path)
        # The global step is encoded after the last '-' in the ckpt path
        # (e.g. .../model.ckpt-12345).
        prev_step = int(
            ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
      else:
        print('what??')
        # Restart requested (or no usable checkpoint): wipe the train dir.
        tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/train')
        tf.gfile.MakeDirs(FLAGS.summary_dir + '/train')
        prev_step = 0
    else:
      tf.gfile.MakeDirs(FLAGS.summary_dir + '/train')
      prev_step = 0
    train_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/train',
                                         sess.graph)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      # step counts iterations performed in this run; i is the global step.
      step = 0
      for i in range(prev_step, FLAGS.max_steps):
        step += 1
        summary, _ = sess.run([merged, train_step])
        train_writer.add_summary(summary, i)
        if (i + 1) % FLAGS.checkpoint_steps == 0:
          saver.save(
              sess,
              os.path.join(FLAGS.summary_dir + '/train', 'model.ckpt'),
              global_step=i + 1)
    except tf.errors.OutOfRangeError:
      print('Done training for %d steps.' % step)
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
      train_writer.close()
    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def run_eval():
  """Continuously evaluate the latest training checkpoint.

  Polls FLAGS.summary_dir + '/train/' for new checkpoints, sleeping 2
  minutes between polls; `paused` accumulates minutes spent waiting and the
  loop gives up once it exceeds 360 (i.e. ~6 hours without a new
  checkpoint). For each new checkpoint, runs FLAGS.eval_size examples
  (batches of 5, matching get_features(False, 5)) and writes aggregate
  correct/wrong counts to the '/test' summary dir.
  """
  with tf.Graph().as_default():
    # Input images and labels.
    features = get_features(False, 5)
    model = f_model.multi_gpu_model
    result = model(features)
    merged = result['summary']
    correct_prediction_sum = result['correct']
    almost_correct_sum = result['almost']
    saver = tf.train.Saver()
    test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test')
    # seen_step tracks the last evaluated checkpoint's global step.
    seen_step = -1
    # Give training a head start before the first poll.
    time.sleep(3 * 60)
    paused = 0
    while paused < 360:
      ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
      if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoin
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      else:
        time.sleep(2 * 60)
        paused += 2
        continue
      # Wait until a checkpoint newer than the last evaluated one appears.
      while seen_step == int(global_step):
        time.sleep(2 * 60)
        ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        paused += 2
        if paused > 360:
          test_writer.close()
          return
      # New checkpoint found: reset the wait counter and evaluate it.
      paused = 0
      seen_step = int(global_step)
      print(seen_step)
      sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
      saver.restore(sess, ckpt.model_checkpoint_path)
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      try:
        total_tp = 0
        total_almost = 0
        for i in range(FLAGS.eval_size // 5):
          summary_j, tp, almost = sess.run(
              [merged, correct_prediction_sum, almost_correct_sum])
          total_tp += tp
          total_almost += almost
        total_false = FLAGS.eval_size - total_tp
        total_almost_false = FLAGS.eval_size - total_almost
        # Append the aggregate counters to the last batch's summary proto.
        summary_tp = tf.Summary.FromString(summary_j)
        summary_tp.value.add(tag='correct_prediction', simple_value=total_tp)
        summary_tp.value.add(tag='wrong_prediction', simple_value=total_false)
        summary_tp.value.add(
            tag='almost_wrong_prediction', simple_value=total_almost_false)
        test_writer.add_summary(summary_tp, global_step)
        print('write done')
      except tf.errors.OutOfRangeError:
        print('Done eval for %d steps.' % i)
      finally:
        # When done, ask the threads to stop.
        coord.request_stop()
      # Wait for threads to finish.
      coord.join(threads)
      sess.close()
    test_writer.close()
def softmax(x, axis=None):
  """Compute softmax values for the scores in x.

  Args:
    x: array-like of scores.
    axis: axis along which to normalize. The default (None) keeps the
      original behaviour of normalizing over *all* elements jointly, which
      is a per-set softmax only for 1-D input. Pass axis=-1 (or another
      axis) to get an independent softmax per row/set of a 2-D+ array —
      the original docstring promised this but the implementation did not
      deliver it for multi-dimensional input.

  Returns:
    An array with the same shape as x, non-negative, summing to one over
    the chosen axis (or over the whole array for axis=None).
  """
  x = np.asarray(x)
  # Subtracting the max is the standard numerical-stability trick: it
  # cancels in the ratio and prevents overflow in np.exp.
  e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
  return e_x / e_x.sum(axis=axis, keepdims=True)
def eval_ensemble(ckpnts):
  """Evaluate an ensemble by summing logits over several checkpoints.

  First materializes FLAGS.eval_size examples (in batches of 100) into
  NumPy buffers so every checkpoint sees exactly the same data, then for
  each checkpoint feeds those buffers through placeholder versions of the
  features and accumulates the raw logits. The final prediction is the
  argmax of the summed logits; prints the number of wrong predictions.

  Note: `Ellipsis` in subscripts below is identical to the `...` literal
  (an artifact of how this source was exported).

  Args:
    ckpnts: iterable of checkpoint paths to restore in turn.
  """
  with tf.Graph().as_default():
    # Use one real input pipeline only to discover image geometry and to
    # pull the fixed evaluation set into memory.
    first_features = get_features(False, 100)[0]
    h = first_features['height']
    d = first_features['depth']
    # Placeholder-backed feature dict so each checkpoint can be fed the
    # same cached batches.
    features = {
        'images': tf.placeholder(tf.float32, shape=(100, d, h, h)),
        'labels': tf.placeholder(tf.float32, shape=(100, 10)),
        'recons_image': tf.placeholder(tf.float32, shape=(100, d, h, h)),
        'recons_label': tf.placeholder(tf.int32, shape=(100)),
        'height': first_features['height'],
        'depth': first_features['depth']
    }
    model = f_model.multi_gpu_model
    result = model([features])
    logits = result['logits']
    config = tf.ConfigProto(allow_soft_placement=True)
    # saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpnt))
    # Accumulators, one row of batches per 100-example chunk.
    batch_logits = np.zeros((FLAGS.eval_size // 100, 100, 10), dtype=np.float32)
    batch_recons_label = np.zeros((FLAGS.eval_size // 100, 100),
                                  dtype=np.float32)
    batch_labels = np.zeros((FLAGS.eval_size // 100, 100, 10), dtype=np.float32)
    batch_images = np.zeros((FLAGS.eval_size // 100, 100, d, h, h),
                            dtype=np.float32)
    batch_recons_image = np.zeros((FLAGS.eval_size // 100, 100, d, h, h),
                                  dtype=np.float32)
    saver = tf.train.Saver()
    sess = tf.Session(config=config)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      # Cache the whole evaluation set once, from the queue pipeline.
      for i in range(FLAGS.eval_size // 100):
        (batch_recons_label[i, Ellipsis], batch_labels[i, Ellipsis], batch_images[i, Ellipsis],
         batch_recons_image[i, Ellipsis]) = sess.run([
             first_features['recons_label'], first_features['labels'],
             first_features['images'], first_features['recons_image']
         ])
      # Sum raw logits over all ensemble members.
      for ckpnt in ckpnts:
        saver.restore(sess, ckpnt)
        for i in range(FLAGS.eval_size // 100):
          logits_i = sess.run(
              logits,
              feed_dict={
                  features['recons_label']: batch_recons_label[i, Ellipsis],
                  features['labels']: batch_labels[i, Ellipsis],
                  features['images']: batch_images[i, Ellipsis],
                  features['recons_image']: batch_recons_image[i, Ellipsis]
              })
          # batch_logits[i, ...] += softmax(logits_i)
          batch_logits[i, Ellipsis] += logits_i
    except tf.errors.OutOfRangeError:
      print('Done eval for %d steps.' % i)
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
      # Wait for threads to finish.
      coord.join(threads)
      sess.close()
    batch_pred = np.argmax(batch_logits, axis=2)
    total_wrong = np.sum(np.not_equal(batch_pred, batch_recons_label))
    print(total_wrong)
def eval_once(ckpnt):
  """Evaluate a single checkpoint once over FLAGS.eval_size examples.

  When FLAGS.patching is set, each example arrives as a stack of cropped
  variants ('cc_*' features); their per-patch logit distributions are
  normalized and averaged before taking the argmax.

  Args:
    ckpnt: path of the checkpoint to restore.
  """
  # Build 14x14 binary 5x5-window masks over a 32x32 image (stride 2).
  # NOTE(review): `ptches` is computed but never used below — it appears to
  # be leftover from an earlier patching implementation; confirm before
  # deleting.
  ptches = np.zeros((14, 14, 32, 32))
  for i in range(14):
    for j in range(14):
      ind_x = i * 2
      ind_y = j * 2
      for k in range(5):
        for h in range(5):
          ptches[i, j, ind_x + k, ind_y + h] = 1
  ptches = np.reshape(ptches, (14 * 14, 32, 32))
  with tf.Graph().as_default():
    features = get_features(False, 1)[0]
    if FLAGS.patching:
      # Substitute the cropped-copy ('cc') variants of the inputs.
      features['images'] = features['cc_images']
      features['recons_label'] = features['cc_recons_label']
      features['labels'] = features['cc_labels']
    model = f_model.multi_gpu_model
    result = model([features])
    # merged = result['summary']
    correct_prediction_sum = result['correct']
    # almost_correct_sum = result['almost']
    # mid_act = result['mid_act']
    logits = result['logits']
    saver = tf.train.Saver()
    test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test_once')
    config = tf.ConfigProto(allow_soft_placement=True)
    # Cap GPU memory so evaluation can run beside training.
    config.gpu_options.per_process_gpu_memory_fraction = 0.3
    sess = tf.Session(config=config)
    # saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpnt))
    saver.restore(sess, ckpnt)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    i = 0
    try:
      total_tp = 0
      for i in range(FLAGS.eval_size):
        #, g_ac, ac
        lb, tp, lg = sess.run([
            features['recons_label'],
            correct_prediction_sum,
            logits,
        ])
        if FLAGS.patching:
          # Average normalized per-patch logits, then predict; this
          # overrides the per-patch correctness count `tp`.
          batched_lg = np.sum(lg / np.sum(lg, axis=1, keepdims=True), axis=0)
          batch_pred = np.argmax(batched_lg)
          tp = np.equal(batch_pred, lb[0])
        total_tp += tp
      total_false = FLAGS.eval_size - total_tp
      print('false:{}, true:{}'.format(total_false, total_tp))
      # summary_tp = tf.Summary.FromString(summary_j)
      # summary_tp.value.add(tag='correct_prediction', simple_value=total_tp)
      # summary_tp.value.add(tag='wrong_prediction', simple_value=total_false)
      # summary_tp.value.add(
      # tag='almost_wrong_prediction', simple_value=total_almost_false)
      # test_writer.add_summary(summary_tp, i + 1)
    except tf.errors.OutOfRangeError:
      print('Done eval for %d steps.' % i)
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
      # Wait for threads to finish.
      coord.join(threads)
      sess.close()
    test_writer.close()
if __name__ == '__main__':
  # tf.app.run() parses flags, then calls the module-level main();
  # main() is presumably defined earlier in this file — not visible in
  # this excerpt, so confirm it exists before running.
  tf.app.run()
| 40.390566 | 95 | 0.633998 |
d3d3a5b087e35b140a4cca72077a3d96a9f4d93b | 42,865 | py | Python | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | MikeAT/visualizer | 946b98d82eaf7ec508861115585afd683fc49e5c | [
"MIT"
] | 6 | 2021-03-03T17:52:24.000Z | 2022-02-10T11:45:22.000Z | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | Acidburn0zzz/visualizer | 20fba91f0d26b98531f97f643c8329640d1c0d11 | [
"MIT"
] | 1 | 2021-04-29T12:34:04.000Z | 2021-04-29T14:50:17.000Z | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | Acidburn0zzz/visualizer | 20fba91f0d26b98531f97f643c8329640d1c0d11 | [
"MIT"
] | 2 | 2021-04-27T14:02:03.000Z | 2021-11-12T10:34:32.000Z | # Copyright 2021 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
#
# Aggregation client subnet statistics
import textwrap
import grafanalib.core as GCore
import grafanacommon as GCommon
| 46.04189 | 133 | 0.30778 |
d3d41214e53cc3ba9f42c3c82841438366d8ce1d | 2,812 | py | Python | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | 1 | 2017-10-29T06:18:35.000Z | 2017-10-29T06:18:35.000Z | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | null | null | null | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | null | null | null | """
Module to train a simple MLP for demo.
"""
from jobman.tools import expand
from jobman.tools import flatten
import logging
import nice_experiment
import numpy as np
from os import path
from pylearn2.config import yaml_parse
from pylearn2.neuroimaging_utils.datasets import MRI
from pylearn2.neuroimaging_utils.dataset_utils import mri_nifti
from pylearn2.scripts.jobman.experiment import ydict
from pylearn2.utils import serial
# Log lines look like "[module:LEVEL]:message".
logging.basicConfig(format="[%(module)s:%(levelname)s]:%(message)s")
logger = logging.getLogger(__name__)
# Alias the experiment's YAML template path at module level.
yaml_file = nice_experiment.yaml_file
if __name__ == "__main__":
    parser = nice_experiment.make_argument_parser()
    args = parser.parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    # NOTE(review): main() is not defined in this excerpt — presumably
    # provided elsewhere in this module; confirm before running.
    main(args)
| 34.292683 | 91 | 0.629445 |
d3d4d6dda5b57fc2a5aa6672a9cd0393e4d62ee6 | 6,199 | py | Python | _Framework/Layer.py | isfopo/MacroPushScript | 46c440aa3f6325d8767e88252c5520d76a9fa634 | [
"MIT"
] | null | null | null | _Framework/Layer.py | isfopo/MacroPushScript | 46c440aa3f6325d8767e88252c5520d76a9fa634 | [
"MIT"
] | null | null | null | _Framework/Layer.py | isfopo/MacroPushScript | 46c440aa3f6325d8767e88252c5520d76a9fa634 | [
"MIT"
] | null | null | null | #Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/_Framework/Layer.py
u"""
Module implementing a way to resource-based access to controls in an
unified interface dynamic.
"""
from __future__ import absolute_import, print_function, unicode_literals
from builtins import str
from builtins import object
from future.utils import raise_
from itertools import repeat
from .ControlElement import ControlElementClient
from .Util import nop
from .Resource import ExclusiveResource, CompoundResource
from .Disconnectable import Disconnectable
| 33.874317 | 139 | 0.658816 |
d3d6cda09c480bcbc5eba0a289993557df421803 | 1,529 | py | Python | src/retrieve_exons_sequence_genomes.py | naturalis/brassicaceae-hybseq-pipeline | b71462d308b8a4adbc370691bf085d44666914d7 | [
"MIT"
] | 5 | 2020-04-22T12:08:07.000Z | 2021-09-03T01:56:44.000Z | src/retrieve_exons_sequence_genomes.py | naturalis/brassicaceae-hybseq-pipeline | b71462d308b8a4adbc370691bf085d44666914d7 | [
"MIT"
] | 1 | 2020-09-16T11:29:15.000Z | 2020-09-16T11:29:15.000Z | src/retrieve_exons_sequence_genomes.py | naturalis/brassicaceae-hybseq-pipeline | b71462d308b8a4adbc370691bf085d44666914d7 | [
"MIT"
] | 1 | 2020-09-16T14:05:08.000Z | 2020-09-16T14:05:08.000Z | # retrieve_exons_sequence_genomes.py
# This script is to retrieve exons from sequenced genomes which are also present in the reference genome (A. thaliana).
# To identify the contigs from the sequenced genomes, each contig has to be retrieved from A. thaliana first.
# Then, for each sequence query of A. thaliana, the query can be BLAT against the database reference.
# In this case, the database reference will be S. irio and A. lyrata.
# Made by: Elfy Ly
# Date: 19 May 2020
import os
from Bio import SeqIO
# Absolute paths to the output directory and the A. thaliana reference FASTA.
path_to_at_exons_dir = "/mnt/c/Users/elfyl/PycharmProjects/brassicaceae-hybseq-pipeline-offline/results/exons"
path_to_at_dir = "/mnt/c/Users/elfyl/PycharmProjects/brassicaceae-hybseq-pipeline-offline/data/reference_genomes"
path_to_at_reference = path_to_at_dir + "/ref-at.fasta"
# Create exons_AT directory if it doesn't exist
if not os.path.exists(path_to_at_exons_dir):
    os.mkdir(path_to_at_exons_dir)
    print("Directory ", path_to_at_exons_dir, " Created ")
else:
    print("Directory ", path_to_at_exons_dir, " already exists")
# Write one single-record FASTA-formatted file per sequence of the
# A. thaliana reference genome.
count_id = 0
for seq_record in SeqIO.parse(path_to_at_reference, "fasta"):
    # Bug fix: the message previously claimed a '.fa' file although the
    # file actually created ends in '.txt'.
    print("New text file created: " + seq_record.id + ".txt")
    # 'with' guarantees the handle is closed even if the write fails.
    with open(path_to_at_exons_dir + "/" + seq_record.id + ".txt", "w") as f:
        f.write(">" + seq_record.id + "\n" + str(seq_record.seq))
    count_id += 1
print("Number of sequence records: " + str(count_id))
| 41.324324 | 119 | 0.743623 |
d3d7b8b121c459940512749ce36dcfbad947c964 | 1,136 | py | Python | lexical/lexical.py | xmeng17/Malicious-URL-Detection | f286aeb50570455486b470cbc2db9aa0fae99b8f | [
"MIT"
] | null | null | null | lexical/lexical.py | xmeng17/Malicious-URL-Detection | f286aeb50570455486b470cbc2db9aa0fae99b8f | [
"MIT"
] | null | null | null | lexical/lexical.py | xmeng17/Malicious-URL-Detection | f286aeb50570455486b470cbc2db9aa0fae99b8f | [
"MIT"
] | null | null | null | import re
| 26.418605 | 70 | 0.617958 |
d3d87b798d29e52210031306b4e2f4fee10a8cd2 | 992 | py | Python | stacker/tests/providers/aws/test_interactive.py | GoodRx/stacker | 0cf1df67b4ae5aeda5845442c84905909101c238 | [
"BSD-2-Clause"
] | 1 | 2021-11-06T17:01:01.000Z | 2021-11-06T17:01:01.000Z | stacker/tests/providers/aws/test_interactive.py | GoodRx/stacker | 0cf1df67b4ae5aeda5845442c84905909101c238 | [
"BSD-2-Clause"
] | null | null | null | stacker/tests/providers/aws/test_interactive.py | GoodRx/stacker | 0cf1df67b4ae5aeda5845442c84905909101c238 | [
"BSD-2-Clause"
] | 1 | 2021-11-06T17:00:53.000Z | 2021-11-06T17:00:53.000Z | import unittest
from ....providers.aws.interactive import requires_replacement
| 30.060606 | 79 | 0.634073 |
d3d9152a0002e3e05bd42184c7b5ca8570672123 | 1,046 | py | Python | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
] | 2 | 2017-03-03T20:37:29.000Z | 2018-06-01T22:22:15.000Z | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
] | null | null | null | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
] | 2 | 2018-01-26T07:11:42.000Z | 2019-03-06T23:30:39.000Z | from setuptools import setup, find_packages
# Package metadata and dependencies for the digicert-express installer.
setup(
    name='digicert-express',
    version='1.1dev2',
    description='Express Install for DigiCert, Inc.',
    # NOTE(review): readme() is not defined in this excerpt — presumably a
    # helper that reads the README file; confirm it exists in this module.
    long_description=readme(),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: MIT License',
        'Topic :: Security',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    url='https://github.com/digicert/digicert_express',
    author='DigiCert, Inc.',
    author_email='support@digicert.com',
    license='MIT',
    zip_safe=False,
    # Ship everything except test and script packages.
    packages=find_packages(exclude=['tests.*', '*.tests.*', '*.tests', 'tests', 'scripts']),
    include_package_data=True,
    install_requires=[
        'python-augeas',
        'requests>=2.8.1',
        'ndg-httpsclient',
        'pyasn1',
        'pyOpenSSL' # prefer OS install but we can try here, too
    ],
)
| 29.885714 | 92 | 0.605163 |
d3d9c66d64ebac5543f8099d6066695442fe0072 | 11,392 | py | Python | pytorch/plane.py | NunoEdgarGFlowHub/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
] | 83 | 2019-04-12T10:23:23.000Z | 2022-03-30T12:40:43.000Z | pytorch/plane.py | sten2lu/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
] | null | null | null | pytorch/plane.py | sten2lu/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
] | 11 | 2019-04-12T11:26:00.000Z | 2020-05-12T01:14:21.000Z | import argparse
import json
import numpy as np
import os
import torch
import data_
import models
import utils
from matplotlib import cm, pyplot as plt
from tensorboardX import SummaryWriter
from torch import optim
from torch.utils import data
from tqdm import tqdm
from utils import io
# ---------------------------------------------------------------------------
# Command-line configuration.
#
# Fix: arguments with numeric defaults previously omitted `type=...`, so any
# value passed on the command line arrived as a *string* (argparse applies
# `type` only to CLI strings; defaults are untouched). Explicit types are now
# declared; default-run behaviour is unchanged.
#
# NOTE(review): `type=bool` on --use_gpu (and the untyped boolean flags
# below) is fragile — bool('False') is True. The proper fix is
# action='store_true'-style flags, but that changes the CLI, so it is only
# flagged here.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()

# CUDA
parser.add_argument('--use_gpu', type=bool, default=True, help='Whether to use GPU.')

# data
parser.add_argument('--dataset_name', type=str, default='spirals',
                    help='Name of dataset to use.')
parser.add_argument('--n_data_points', type=int, default=int(1e6),
                    help='Number of unique data points in training set.')
parser.add_argument('--batch_size', type=int, default=256,
                    help='Size of batch used for training.')
parser.add_argument('--num_workers', type=int, default=0,
                    help='Number of workers used in data loaders.')

# MADE
parser.add_argument('--n_residual_blocks_made', type=int, default=4,
                    help='Number of residual blocks in MADE.')
parser.add_argument('--hidden_dim_made', type=int, default=256,
                    help='Dimensionality of hidden layers in MADE.')
parser.add_argument('--activation_made', type=str, default='relu',
                    help='Activation function for MADE.')
parser.add_argument('--use_batch_norm_made', default=False,
                    help='Whether to use batch norm in MADE.')
parser.add_argument('--dropout_probability_made', type=float, default=None,
                    help='Dropout probability for MADE.')

# energy net
parser.add_argument('--context_dim', type=int, default=64,
                    help='Dimensionality of context vector.')
parser.add_argument('--n_residual_blocks_energy_net', type=int, default=4,
                    help='Number of residual blocks in energy net.')
parser.add_argument('--hidden_dim_energy_net', type=int, default=128,
                    help='Dimensionality of hidden layers in energy net.')
parser.add_argument('--energy_upper_bound', type=float, default=0,
                    help='Max value for output of energy net.')
parser.add_argument('--activation_energy_net', type=str, default='relu',
                    help='Activation function for energy net.')
parser.add_argument('--use_batch_norm_energy_net', default=False,
                    help='Whether to use batch norm in energy net.')
parser.add_argument('--dropout_probability_energy_net', type=float, default=None,
                    help='Dropout probability for energy net.')
parser.add_argument('--scale_activation', type=str, default='softplus',
                    help='Activation to use for scales in proposal mixture components.')
parser.add_argument('--apply_context_activation', default=False,
                    help='Whether to apply activation to context vector.')

# proposal
parser.add_argument('--n_mixture_components', type=int, default=10,
                    help='Number of proposal mixture components (per dimension).')
parser.add_argument('--proposal_component', type=str, default='gaussian',
                    help='Type of location-scale family distribution '
                         'to use in proposal mixture.')
parser.add_argument('--n_proposal_samples_per_input', type=int, default=20,
                    help='Number of proposal samples used to estimate '
                         'normalizing constant during training.')
parser.add_argument('--n_proposal_samples_per_input_validation', type=int, default=100,
                    help='Number of proposal samples used to estimate '
                         'normalizing constant during validation.')
parser.add_argument('--mixture_component_min_scale', type=float, default=1e-3,
                    help='Minimum scale for proposal mixture components.')

# optimization
parser.add_argument('--learning_rate', type=float, default=5e-4,
                    help='Learning rate for Adam.')
parser.add_argument('--n_total_steps', type=int, default=int(4e5),
                    help='Number of total training steps.')
parser.add_argument('--alpha_warm_up_steps', type=int, default=5000,
                    help='Number of warm-up steps for AEM density.')
parser.add_argument('--hard_alpha_warm_up', default=True,
                    help='Whether to use a hard warm up for alpha')

# logging and checkpoints
parser.add_argument('--monitor_interval', type=int, default=100,
                    help='Interval in steps at which to report training stats.')
parser.add_argument('--visualize_interval', type=int, default=10000,
                    help='Interval in steps at which to report training stats.')
parser.add_argument('--save_interval', type=int, default=10000,
                    help='Interval in steps at which to save model.')

# reproducibility
parser.add_argument('--seed', type=int, default=1638128,
                    help='Random seed for PyTorch and NumPy.')

args = parser.parse_args()

# Seed both frameworks so runs are reproducible.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Select the compute device; on GPU, make CUDA float tensors the default so
# new tensors/modules land on the GPU automatically.
if args.use_gpu and torch.cuda.is_available():
    device = torch.device('cuda')
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    device = torch.device('cpu')
# Generate data
train_dataset = data_.load_plane_dataset(args.dataset_name, args.n_data_points)
# InfiniteLoader yields batches forever (num_epochs=None), so the training
# loop can simply call next() once per step.
train_loader = data_.InfiniteLoader(
    dataset=train_dataset,
    batch_size=args.batch_size,
    shuffle=True,
    drop_last=True,
    num_epochs=None
)
# Generate test grid data
# A 512x512 evaluation grid over [-4, 4]^2, used only for visualization.
n_points_per_axis = 512
bounds = np.array([
    [-4, 4],
    [-4, 4]
])
grid_dataset = data_.TestGridDataset(n_points_per_axis=n_points_per_axis, bounds=bounds)
grid_loader = data.DataLoader(
    dataset=grid_dataset,
    batch_size=1000,
    drop_last=False
)
# various dimensions for autoregressive and energy nets
dim = 2 # D
# Per input dimension the MADE emits a context vector (K values) plus
# 3 values (weight, location, scale) per proposal mixture component (M).
output_dim_multiplier = args.context_dim + 3 * args.n_mixture_components # K + 3M
# Create MADE
made = models.ResidualMADE(
    input_dim=dim,
    n_residual_blocks=args.n_residual_blocks_made,
    hidden_dim=args.hidden_dim_made,
    output_dim_multiplier=output_dim_multiplier,
    conditional=False,
    activation=utils.parse_activation(args.activation_made),
    use_batch_norm=args.use_batch_norm_made,
    dropout_probability=args.dropout_probability_made
).to(device)
# create energy net
# Input is the context vector concatenated with the scalar coordinate (+1).
energy_net = models.ResidualEnergyNet(
    input_dim=(args.context_dim + 1),
    n_residual_blocks=args.n_residual_blocks_energy_net,
    hidden_dim=args.hidden_dim_energy_net,
    energy_upper_bound=args.energy_upper_bound,
    activation=utils.parse_activation(args.activation_energy_net),
    use_batch_norm=args.use_batch_norm_energy_net,
    dropout_probability=args.dropout_probability_energy_net
).to(device)
# create AEM
aem = models.AEM(
    autoregressive_net=made,
    energy_net=energy_net,
    context_dim=args.context_dim,
    n_proposal_mixture_components=args.n_mixture_components,
    proposal_component_family=args.proposal_component,
    n_proposal_samples_per_input=args.n_proposal_samples_per_input,
    mixture_component_min_scale=args.mixture_component_min_scale,
    apply_context_activation=args.apply_context_activation
).to(device)
# make optimizer
# Both sub-networks are optimized jointly with cosine-annealed Adam.
parameters = list(made.parameters()) + list(energy_net.parameters())
optimizer = optim.Adam(parameters, lr=args.learning_rate)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_total_steps)
# create summary writer and write to log directory
timestamp = io.get_timestamp()
log_dir = os.path.join(io.get_log_root(), args.dataset_name, timestamp)
writer = SummaryWriter(log_dir=log_dir)
# Persist the full run configuration next to the logs.
filename = os.path.join(log_dir, 'config.json')
with open(filename, 'w') as file:
    json.dump(vars(args), file)
tbar = tqdm(range(args.n_total_steps))
alpha = 0
for step in tbar:
aem.train()
scheduler.step(step)
optimizer.zero_grad()
# training step
batch = next(train_loader).to(device)
log_density, log_proposal_density, _, log_normalizer = aem(batch)
mean_log_density = torch.mean(log_density)
mean_log_proposal_density = torch.mean(log_proposal_density)
mean_log_normalizer = torch.mean(log_normalizer)
if args.alpha_warm_up_steps is not None:
if args.hard_alpha_warm_up:
alpha = float(step > args.alpha_warm_up_steps)
else:
alpha = torch.Tensor([min(step / args.alpha_warm_up_steps, 1)])
loss = - (alpha * mean_log_density + mean_log_proposal_density)
else:
loss = - (mean_log_density + mean_log_proposal_density)
loss.backward()
optimizer.step()
if (step + 1) % args.monitor_interval == 0:
s = 'Loss: {:.4f}, log p: {:.4f}, log q: {:.4f}'.format(
loss.item(),
mean_log_density.item(),
mean_log_proposal_density.item()
)
tbar.set_description(s)
# write summaries
summaries = {
'loss': loss.detach(),
'log-prob-aem': mean_log_density.detach(),
'log-prob-proposal': mean_log_proposal_density.detach(),
'log-normalizer': mean_log_normalizer.detach(),
'learning-rate': torch.Tensor(scheduler.get_lr()),
}
for summary, value in summaries.items():
writer.add_scalar(tag=summary, scalar_value=value, global_step=step)
if (step + 1) % args.visualize_interval == 0:
# Plotting
aem.eval()
aem.set_n_proposal_samples_per_input_validation(
args.n_proposal_samples_per_input_validation)
log_density_np = []
log_proposal_density_np = []
for batch in grid_loader:
batch = batch.to(device)
log_density, log_proposal_density, unnormalized_log_density, log_normalizer = aem(
batch)
log_density_np = np.concatenate((
log_density_np, utils.tensor2numpy(log_density)
))
log_proposal_density_np = np.concatenate((
log_proposal_density_np, utils.tensor2numpy(log_proposal_density)
))
fig, axs = plt.subplots(1, 3, figsize=(7.5, 2.5))
axs[0].hist2d(train_dataset.data[:, 0], train_dataset.data[:, 1],
range=bounds, bins=512, cmap=cm.viridis, rasterized=False)
axs[0].set_xticks([])
axs[0].set_yticks([])
axs[1].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_proposal_density_np).reshape(grid_dataset.X.shape))
axs[1].set_xlim(bounds[0])
axs[1].set_ylim(bounds[1])
axs[1].set_xticks([])
axs[1].set_yticks([])
axs[2].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_density_np).reshape(grid_dataset.X.shape))
axs[2].set_xlim(bounds[0])
axs[2].set_ylim(bounds[1])
axs[2].set_xticks([])
axs[2].set_yticks([])
plt.tight_layout()
path = os.path.join(io.get_output_root(), 'pytorch', '{}.png'.format(args.dataset_name))
if not os.path.exists(path):
os.makedirs(io.get_output_root())
plt.savefig(path, dpi=300)
writer.add_figure(tag='test-grid', figure=fig, global_step=step)
plt.close()
if (step + 1) % args.save_interval == 0:
path = os.path.join(io.get_checkpoint_root(), 'pytorch', '{}.t'.format(args.dataset_name))
if not os.path.exists(path):
os.makedirs(io.get_checkpoint_root())
torch.save(aem.state_dict(), path)
path = os.path.join(io.get_checkpoint_root(),
'pytorch', '{}-{}.t'.format(args.dataset_name, timestamp))
torch.save(aem.state_dict(), path)
| 39.147766 | 98 | 0.675298 |
d3d9fd962e6f2a91b7a5a73a99c81d54531258d8 | 2,266 | py | Python | music/music.py | spacerunaway/world_recoder | fcafe0d910511cfd043735cf451564febb558e40 | [
"MIT"
] | null | null | null | music/music.py | spacerunaway/world_recoder | fcafe0d910511cfd043735cf451564febb558e40 | [
"MIT"
] | null | null | null | music/music.py | spacerunaway/world_recoder | fcafe0d910511cfd043735cf451564febb558e40 | [
"MIT"
] | null | null | null | import sys
sys.path.append('../utils')
from utils import *
from doubly_linkedlist import *
def link_chords(chordprogression):
    """Convert a chord progression into a doubly linked list of LinkedChord nodes.

    A chord progression is a flat sequence whose items are either:

    1. a chord name (str) in the CHORD dict — or any other marker such as
       START/END, which is kept verbatim as the chord; or
    2. a key (type Major_Scale / minor_Scale), which is not appended but
       becomes the current key of every following chord until the next key.

    A full piece is normally delimited by START and END markers.

    >>> c_p1 = [START,C_Major,'C','Am','F','G','C','Am','F','G7',END]
    >>> c_p2 = [START,C_Major,'C','Am','F','G','C','Am','F','G',G_Major,'Em','C','D','D7','G',END]
    >>> l1 = link_chords(c_p1)
    >>> l1
    start - C - Am - F - G - C - Am - F - G7 - end
    >>> l2 = link_chords(c_p2)
    >>> l2
    start - C - Am - F - G - C - Am - F - G - Em - C - D - D7 - G - end
    >>> l2[8].key is C_Major
    True
    >>> l2[8].chord == CHORD['G']
    True
    >>> l2[9].key is G_Major
    True
    >>> l2[9].chord == CHORD['Em']
    True
    >>> c_p3 = [C_Major,C_Major,START,'C',END,START,START,END,'F',G_Major]
    >>> l3 = link_chords(c_p3)
    >>> l3
    start - C - end - start - start - end - F
    """
    linked = LinkedList()
    current_key = None
    for item in chordprogression:
        # Exact type checks (not isinstance) mirror the original behaviour:
        # only genuine key objects update the current key.
        if type(item) in (Major_Scale, minor_Scale):
            current_key = item
            continue
        # Known chord names resolve through CHORD; anything else (START,
        # END, unknown symbols) is stored as-is.
        linked.append(LinkedChord(CHORD.get(item, item), current_key, item))
    return linked
| 29.815789 | 98 | 0.573698 |
d3da2efce64cb5f88a134e97d2a4092ee8ea80bb | 5,777 | py | Python | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.documentdb
from msrestazure.azure_exceptions import CloudError
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
import logging
#logging.basicConfig(level=logging.DEBUG)
#------------------------------------------------------------------------------
# Entry point: run this module's test cases (the TestCase classes are defined
# elsewhere in the full file; this fragment shows only the guard) when the
# file is executed directly rather than imported.
if __name__ == '__main__':
    unittest.main()
| 37.512987 | 101 | 0.570885 |
d3da88558c778364e49a959d5f0d8f942db1cc43 | 3,758 | py | Python | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | null | null | null | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | null | null | null | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | null | null | null | import argparse
# Per-machine absolute paths to the project checkout and the SPEED+ dataset.
# NOTE(review): these are hard-coded user-specific paths; only the 'linux'
# entries are used as defaults below -- adjust per deployment.
PROJROOTDIR = {'mac': '/Users/taehapark/SLAB/speedplusbaseline',
               'linux': '/home/somrita/Documents/Satellite_Pose_Estimation/speedplusbaseline'}
DATAROOTDIR = {'mac': '/Users/taehapark/SLAB/speedplus/data/datasets',
               'linux': '/home/somrita/Documents/Satellite_Pose_Estimation/dataset'}
# Command-line configuration for the SPEED+ baseline study; the parsed
# namespace is exported as `cfg` at the end of this module.
parser = argparse.ArgumentParser('Configurations for SPEED+ Baseline Study')
# ------------------------------------------------------------------------------------------
# Basic directories and names
parser.add_argument('--seed', type=int, default=2021)
parser.add_argument('--projroot', type=str, default=PROJROOTDIR['linux'])
parser.add_argument('--dataroot', type=str, default=DATAROOTDIR['linux'])
parser.add_argument('--dataname', type=str, default='speedplus')
parser.add_argument('--savedir', type=str, default='checkpoints/synthetic/krn')
parser.add_argument('--resultfn', type=str, default='')
parser.add_argument('--logdir', type=str, default='log/synthetic/krn')
parser.add_argument('--pretrained', type=str, default='')
# ------------------------------------------------------------------------------------------
# Model config.
parser.add_argument('--model_name', type=str, default='krn')
# NOTE(review): the default is a tuple, but values given on the command line
# arrive as a list (nargs='+') -- downstream code should accept either.
parser.add_argument('--input_shape', nargs='+', type=int, default=(224, 224))
parser.add_argument('--num_keypoints', type=int, default=11) # KRN-specific
parser.add_argument('--num_classes', type=int, default=5000) # SPN-specific
parser.add_argument('--num_neighbors', type=int, default=5) # SPN-specific
parser.add_argument('--keypts_3d_model', type=str, default='src/utils/tangoPoints.mat')
parser.add_argument('--attitude_class', type=str, default='src/utils/attitudeClasses.mat')
# ------------------------------------------------------------------------------------------
# Training config.
# Boolean switches below store into differently-named dest fields
# (e.g. --start_over clears cfg.auto_resume).
parser.add_argument('--start_over', dest='auto_resume', action='store_false', default=True)
parser.add_argument('--randomize_texture', dest='randomize_texture', action='store_true', default=False)
parser.add_argument('--perform_dann', dest='dann', action='store_true', default=False)
parser.add_argument('--texture_alpha', type=float, default=0.5)
parser.add_argument('--texture_ratio', type=float, default=0.5)
parser.add_argument('--use_fp16', dest='fp16', action='store_true', default=False)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=75)
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--test_epoch', type=int, default=-1)
parser.add_argument('--optimizer', type=str, default='rmsprop')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-5)
parser.add_argument('--lr_decay_alpha', type=float, default=0.96)
parser.add_argument('--lr_decay_step', type=int, default=1)
# ------------------------------------------------------------------------------------------
# Dataset-related inputs
parser.add_argument('--train_domain', type=str, default='synthetic')
parser.add_argument('--test_domain', type=str, default='lightbox')
parser.add_argument('--train_csv', type=str, default='train.csv')
parser.add_argument('--test_csv', type=str, default='lightbox.csv')
# ------------------------------------------------------------------------------------------
# Other miscellaneous settings
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--no_cuda', dest='use_cuda', action='store_false', default=True)
# End
cfg = parser.parse_args() | 58.71875 | 104 | 0.631453 |
d3dacb32ea41d2fb0546ec04640a3b17315faa08 | 118,963 | py | Python | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.event import Event
from h1.model.inline_response400 import InlineResponse400
from h1.model.insight_project_journal_create import InsightProjectJournalCreate
from h1.model.insight_project_journal_credential_patch import InsightProjectJournalCredentialPatch
from h1.model.insight_project_journal_transfer import InsightProjectJournalTransfer
from h1.model.insight_project_journal_update import InsightProjectJournalUpdate
from h1.model.journal import Journal
from h1.model.journal_credential import JournalCredential
from h1.model.resource_service import ResourceService
from h1.model.tag import Tag
from h1.model.tag_array import TagArray
| 37.362751 | 179 | 0.451342 |
d3db48db20a20bc47e28f0062af79ebd64f3fa41 | 811 | py | Python | forms/views.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
] | null | null | null | forms/views.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
] | null | null | null | forms/views.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.utils.module_loading import import_string
def pdf(request):
    """Build an inline PDF response for the form selected by ``?type=``.

    The ``type`` GET parameter is a decimal-style form number ``GGG.NN``:
    the three characters before the dot name the form-group module
    (``forms.formsGGG``) and the two characters after the dot name the form
    function (``form_NN``).  The minor part must always be two digits
    (01, 02, ... 31) because the database stores the number in a fixed
    ``xxx.yy`` decimal format.

    :param request: Django request whose GET dict carries ``type`` plus any
        extra parameters forwarded to the form renderer.
    :return: ``HttpResponse`` containing the generated PDF, displayed inline.
    """
    # NOTE(review): ``type`` is taken from the query string unvalidated; a
    # missing or malformed value raises here -- confirm the URL config
    # guarantees the GGG.NN shape before this view is reached.
    form_number = request.GET.get("type")
    # Resolve 'forms.formsGGG.form_NN' to the concrete renderer callable.
    renderer = import_string('forms.forms' + form_number[0:3] + '.form_' + form_number[4:6])
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'inline; filename="form-' + form_number + '.pdf"'
    response.write(renderer(request_data=request.GET))
    return response
| 38.619048 | 103 | 0.670777 |
d3dcc92c42ee28b5565e1b1bdf3f0bd8727161d9 | 5,087 | py | Python | main.py | code-aifarmer/Python-EXE-maker | 4b7436353c9a0d46b52543304209b057dcac51c1 | [
"MIT"
] | 2 | 2021-01-26T10:19:15.000Z | 2021-06-27T03:38:00.000Z | main.py | code-aifarmer/Python-EXE-maker | 4b7436353c9a0d46b52543304209b057dcac51c1 | [
"MIT"
] | null | null | null | main.py | code-aifarmer/Python-EXE-maker | 4b7436353c9a0d46b52543304209b057dcac51c1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import PySimpleGUI as sg
import cv2
import subprocess
import shutil
import os
import sys
# Demonstrates a number of PySimpleGUI features including:
# Default element size
# auto_size_buttons
# Button
# Dictionary return values
# update of elements in form (Text, Input)
def runCommand(cmd, timeout=None, window=None):
    """Execute *cmd* in a shell, echoing each output line as it arrives.

    stdout and stderr are merged into one stream and decoded with a lossy
    error handler.  Every line is printed (and the optional PySimpleGUI
    *window* refreshed so the GUI stays responsive while the command runs),
    and all lines are concatenated -- with no separators -- into the
    returned output string.

    :param cmd: shell command line to run (executed with ``shell=True``)
    :param timeout: optional seconds to wait for the process to finish
    :param window: optional PySimpleGUI window to refresh after each line
    :return: tuple of (process return code, concatenated command output)
    """
    # 'backslashreplace' for decode only exists from Python 3.5 on.
    decode_errors = 'backslashreplace' if sys.version_info >= (3, 5) else 'replace'
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    collected = []
    for raw in proc.stdout:
        text = raw.decode(errors=decode_errors).rstrip()
        collected.append(text)
        print(text)
        if window:
            window.Refresh()
    rc = proc.wait(timeout)
    return (rc, ''.join(collected))
# --- Numeric-keypad passcode window ----------------------------------------
# Seven rows: a prompt, the (visible, un-masked) passcode input, a 3x4 grid
# of digit/action buttons, and a red status line keyed 'out'.
layout = [[sg.Text('Enter Your Passcode')],
          [sg.Input('', size=(10, 1), key='input')],
          [sg.Button('1'), sg.Button('2'), sg.Button('3')],
          [sg.Button('4'), sg.Button('5'), sg.Button('6')],
          [sg.Button('7'), sg.Button('8'), sg.Button('9')],
          [sg.Button('Submit'), sg.Button('0'), sg.Button('Clear')],
          [sg.Text('', size=(15, 1), font=('Helvetica', 18),
                   text_color='red', key='out')],
          ]
window = sg.Window('Keypad', layout,
                   default_button_element_size=(5, 2),
                   auto_size_buttons=False,
                   grab_anywhere=False)
# Loop forever reading the form's values, updating the Input field
keys_entered = ''
while True:
    event, values = window.read() # read the form
    if event == sg.WIN_CLOSED: # if the X button clicked, just exit
        break
    if event == 'Clear': # clear keys if clear button
        keys_entered = ''
    elif event in '1234567890':
        keys_entered = values['input'] # get what's been entered so far
        keys_entered += event # add the new digit
    elif event == 'Submit':
        keys_entered = values['input']
        # NOTE(review): the passcode '123456' is hard-coded in plain text.
        if values['input']=='123456':
            sg.popup('')
            # NOTE(review): w() is not defined in this fragment -- presumably
            # it launches the main application after a correct passcode;
            # confirm where it is defined in the full file.
            w()
        else:
            # Wrong passcode: both branches currently show an empty popup.
            sg.popup('')
    window['out'].update(keys_entered) # output the final string
    # change the form to reflect current key string
    window['input'].update(keys_entered)
window.close()
| 38.832061 | 120 | 0.569294 |
d3dd8c075e4b425fd099e7113200bcfa2c88d3c5 | 152 | py | Python | seisflows/system/lsf_sm.py | jpvantassel/seisflows | 5155ec177b5df0218e1fb5204f1fcd6969c66f20 | [
"BSD-2-Clause"
] | 97 | 2016-11-18T21:19:28.000Z | 2022-03-31T15:02:42.000Z | seisflows/system/lsf_sm.py | SuwenJunliu/seisflows | 14d246691acf8e8549487a5db7c7cd877d23a2ae | [
"BSD-2-Clause"
] | 30 | 2017-02-21T14:54:14.000Z | 2021-08-30T01:44:39.000Z | seisflows/system/lsf_sm.py | SuwenJunliu/seisflows | 14d246691acf8e8549487a5db7c7cd877d23a2ae | [
"BSD-2-Clause"
] | 78 | 2017-03-01T15:32:29.000Z | 2022-01-31T09:09:17.000Z | #
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
raise NotImplementedError
| 16.888889 | 79 | 0.348684 |
d3ddd574dde8899b673c876fe79246ef6fe9f23e | 938 | py | Python | data/objects/sample.py | predictive-analytics-lab/tiny-comparison-framework | 8ae482a2e69aa5affe94bcd7982e53ad69228d43 | [
"Apache-2.0"
] | null | null | null | data/objects/sample.py | predictive-analytics-lab/tiny-comparison-framework | 8ae482a2e69aa5affe94bcd7982e53ad69228d43 | [
"Apache-2.0"
] | null | null | null | data/objects/sample.py | predictive-analytics-lab/tiny-comparison-framework | 8ae482a2e69aa5affe94bcd7982e53ad69228d43 | [
"Apache-2.0"
] | null | null | null | from data.objects.data import Data
| 42.636364 | 80 | 0.711087 |
d3de757442c04a58c632f23911d3bb3230eadbab | 572 | py | Python | parkrundata/views.py | remarkablerocket/parkrundata | c717b59771629d6308ec093e29fd373981726fde | [
"BSD-3-Clause"
] | null | null | null | parkrundata/views.py | remarkablerocket/parkrundata | c717b59771629d6308ec093e29fd373981726fde | [
"BSD-3-Clause"
] | null | null | null | parkrundata/views.py | remarkablerocket/parkrundata | c717b59771629d6308ec093e29fd373981726fde | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .models import Country, Event
from .serializers import CountrySerializer, EventSerializer
| 28.6 | 64 | 0.798951 |
d3df3020e02d0033dd7ab9554f7528acd2742527 | 21,764 | py | Python | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | null | null | null | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | null | null | null | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | null | null | null | # -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the Software). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (Toronto), and Hugo Larochelle at the
# Universit de Sherbrooke (Sherbrooke), which assigned its rights
# in the Software to Socpra Sciences et Gnie
# S.E.C. (Socpra). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (Harvard).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (Agreement). Absent your agreement
# to the terms below, you (the End User) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# Users internal, non-commercial research and academic use at End
# Users academic or not-for-profit research institution
# (Institution) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End Users Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End Users use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End Users
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End Users patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USERS OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End Users breach of this
# Agreement or its Institutions use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms Harvard or
# University of Toronto or Universit de Sherbrooke or Socpra
# Sciences et Gnie S.E.C. (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import copy
import sys, logging
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.optimize as spo
import scipy.io as sio
import scipy.stats as sps
try:
import scipy.weave as weave
except ImportError:
import weave
from .gp import GP
from ..utils.param import Param as Hyperparameter
from ..kernels import Matern52, Noise, Scale, SumKernel, TransformKernel
from ..sampling.slice_sampler import SliceSampler
from ..sampling.whitened_prior_slice_sampler import WhitenedPriorSliceSampler
from ..sampling.elliptical_slice_sampler import EllipticalSliceSampler
from ..utils import priors
from ..transformations import BetaWarp, Transformer
# Name the module-level logger after the entry-point script when one exists,
# falling back to the root logger (e.g. interactive/embedded use).
# NOTE(review): the bare `except:` also swallows KeyboardInterrupt/SystemExit;
# `except Exception:` (or catching AttributeError specifically) would be safer.
# NOTE(review): the `print` statement below is Python 2 syntax -- this module
# does not run under Python 3 as written.
try:
    module = sys.modules['__main__'].__file__
    log = logging.getLogger(module)
except:
    log = logging.getLogger()
    print 'Not running from main.'
| 43.354582 | 142 | 0.684111 |
d3e0c86fec4a82ec7ab6e46d19afaf6635bc9e88 | 1,125 | py | Python | pycom_lopy4_LoRaBattMonitor/transmitter/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
] | null | null | null | pycom_lopy4_LoRaBattMonitor/transmitter/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
] | null | null | null | pycom_lopy4_LoRaBattMonitor/transmitter/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
] | null | null | null | from machine import Pin, ADC
from network import LoRa
import socket
from utime import sleep
# Use a pin for a 'config' mode
configPin = Pin('P21', Pin.IN, Pin.PULL_UP)
# Create an ADC object
adc = ADC()
# vbatt pin:
vbatt = adc.channel(attn=1, pin='P16')
# Initialise LoRa in LoRa mode
# For Europe, use LoRa.EU868
lora = LoRa(mode=LoRa.LORA, region=LoRa.EU868)
# Create a raw LoRa socket
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
# Check the Config pin:
configMode = not configPin()
if not configMode:
print('Reading Battery')
pycom.rgbled(0x0000FF)
message = 'Battery Status: {}'.format(battConversion())
print(message)
sleep(2)
print('Sending battery status estimate...')
pycom.rgbled(0xFF0000)
sleep(2)
s.setblocking(True)
# Send some data
s.send(message)
print('Message Sent!')
pycom.rgbled(0x00FF00)
sleep(2)
print('Going to sleep')
machine.deepsleep(300000)
# Otherwise, we are in 'config' so exit to REPL
print('Config Mode')
| 20.454545 | 59 | 0.686222 |