hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a7955298b73d5f2579d3f326020afc0190964952 | 6,150 | py | Python | python/plugins/db_manager/db_plugins/oracle/data_model.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | python/plugins/db_manager/db_plugins/oracle/data_model.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | python/plugins/db_manager/db_plugins/oracle/data_model.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS (Oracle)
Date : Aug 27, 2014
copyright : (C) 2014 by Mdric RIBREUX
email : mederic.ribreux@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias <wonder.sk@gmail.com> (GPLv2 license)
- DB Manager by Giuseppe Sucameli <brush.tyler@gmail.com> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from qgis.PyQt.QtCore import QTime
from qgis.core import QgsMessageLog
from ..data_model import (TableDataModel,
SqlResultModel,
SqlResultModelAsync,
SqlResultModelTask,
BaseTableModel)
from ..plugin import DbError
from ..plugin import BaseError
| 33.064516 | 77 | 0.532195 |
a795b31c14f03e20f3936fc4c94e68f3ce593493 | 1,711 | py | Python | Opencv1/prayog4.py | priyanshgupta1998/Image_Processing | d1848838ff1acd6dfcf551b99380a8bbf9c879fa | [
"MIT"
] | null | null | null | Opencv1/prayog4.py | priyanshgupta1998/Image_Processing | d1848838ff1acd6dfcf551b99380a8bbf9c879fa | [
"MIT"
] | null | null | null | Opencv1/prayog4.py | priyanshgupta1998/Image_Processing | d1848838ff1acd6dfcf551b99380a8bbf9c879fa | [
"MIT"
] | null | null | null | # with the TRACKBAR gui component
# we can perform some action my moving cursor
import cv2
import numpy as np
if __name__ == "__main__":
main()
| 39.790698 | 157 | 0.64173 |
a795b8548eef5888745b44763ff2a163b2d8f0d7 | 9,814 | py | Python | qiushaoyi/study_practices/qiushaoyi_study_path/L2_Module.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 2 | 2018-03-29T08:26:17.000Z | 2019-06-17T10:56:19.000Z | qiushaoyi/study_practices/qiushaoyi_study_path/L2_Module.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 1 | 2022-03-22T20:26:08.000Z | 2022-03-22T20:26:08.000Z | qiushaoyi/study_practices/qiushaoyi_study_path/L2_Module.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 1 | 2019-02-18T10:44:20.000Z | 2019-02-18T10:44:20.000Z | import sys
from PIL import Image
from enum import Enum, unique
# : cd ls , ,: ./
im = Image.open('test.png')
print(im.format, im.size, im.mode)
#
# (Object Oriented ProgrammingOOP)()
std1 = {'name': 'Machileg', 'score': 89}
std2 = {'name': 'mahfljg', 'score': 67}
print_score(std1)
std666 = Student(std2['name'], std2['score'])
std666.print_score()
print(std666.get_grade())
# class: __private,,getset
# 1 self.name 2 self.__name 3 self.__name__
human1 = Human('Borjglj', '', 160)
human1.print_hunman()
print(human1.set_name(''), \
human1.get_name(), human1.get_sex(), human1.get_height())
# ,python :
'''
bridge,
funcfunc
'''
dog = Dog()
cat = Cat()
dog.run()
dog.eat()
#
dog.invokeAnimalSubclass(Cat())
cat.run()
cat.eat()
cat.invokeAnimalSubclass(Dog())
# type(),isinstance(),issubclass()
if issubclass(Dog, Animal):
print('Dog Animal')
if isinstance(dog, Dog):
print('dog Dog')
if isinstance(dog, Animal):
print('dog Animal')
if type(dog) == Dog:
print('typedog Dog')
if type(dog) == Animal:
print('typedog Animal')
if type(cat) == Cat:
print('typecat Cat,', dir(cat))
__repr__ = __str__
# list
# list
myDog = MyDog(150)
myDog.typeName = 'type'
myDog.score = '100'
myDog.set_name('')
print(myDog, hasattr(myDog, 'name'), setattr(myDog, 'name', 'name'), getattr(myDog, 'name'))
for n in myDog:
print(n)
print(myDog[4])
print(myDog[0:5])
print(myDog.qsy()) # myDog.qsyNo()
myDog()
print('', callable(myDog), callable(dog))
# SDKURLAPI,__getattr__
chain = Chain()
print(chain.status.user.timeline.list)
#
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
for name, member in Month.__members__.items():
print(name, '=>', member, ',', member.value)
day1 = Weekday.Mon
day2 = Weekday.Sun
print(day1.value, day2)
#
# 1class
# Helloclasstype. hclass Hello
h = Hello()
h.hello()
# 2typeclass
Hello1 = type('Hello', (object,), dict(hello=fn, sex=vars))
h3 = Hello1()
h3.hello()
# metaclassmetaclass
L = MyList()
L.add(12)
L.add(150)
print(L)
# metaclassORM:\
# ORMObject Relational Mapping-
# testing code: User
u1 = User(id=123456, name='Michael', email='1129331905@qq.com', password='my-pwdTest')
u2 = User(id=78910, name='Jack', email='1279487948@qq.com', password='you-pwdTest')
u1.add()
u2.add()
| 23.99511 | 135 | 0.598431 |
a795e8692fbeff0ef48524f2158106ba51c6886a | 1,404 | py | Python | 2019/day-03/3.py | Valokoodari/advent-of-code | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | 2 | 2021-12-27T18:59:11.000Z | 2022-01-10T02:31:36.000Z | 2019/day-03/3.py | Valokoodari/advent-of-code-2019 | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | null | null | null | 2019/day-03/3.py | Valokoodari/advent-of-code-2019 | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | 2 | 2021-12-23T17:29:10.000Z | 2021-12-24T03:21:49.000Z | inputFile = "3-input"
outputFile = "3-output"
dir = {'L': [-1,0],'R': [1,0],'U': [0,1],'D': [0,-1]}
if __name__ == '__main__': main() | 24.206897 | 63 | 0.491453 |
a795f6ac4554f8ba46a00dda59b825a3e846ebb5 | 942 | py | Python | notebook/3_tst_3dep.py | ACWI-SSWD/nldi_el_serv | 35276014728662b1a9bdbbbf4790c562c1df760c | [
"CC0-1.0"
] | null | null | null | notebook/3_tst_3dep.py | ACWI-SSWD/nldi_el_serv | 35276014728662b1a9bdbbbf4790c562c1df760c | [
"CC0-1.0"
] | 1 | 2021-03-04T18:50:19.000Z | 2021-03-04T18:50:19.000Z | notebook/3_tst_3dep.py | ACWI-SSWD/nldi_el_serv | 35276014728662b1a9bdbbbf4790c562c1df760c | [
"CC0-1.0"
] | null | null | null | from nldi_el_serv.XSGen import XSGen
from nldi_el_serv.dem_query import query_dems_shape
import py3dep
from pynhd import NLDI
gagebasin = NLDI().get_basins("06721000").to_crs('epsg:3857')
gageloc = NLDI().getfeature_byid("nwissite", "USGS-06721000").to_crs('epsg:3857')
cid = gageloc.comid.values.astype(str)
print(cid, gageloc.comid.values.astype(int)[0])
# strmseg_basin = NLDI().getfeature_byid("comid", cid[0], basin=True).to_crs('epsg:3857')
strmseg_loc = NLDI().getfeature_byid("comid", cid[0]).to_crs('epsg:3857')
xs = XSGen(point=gageloc, cl_geom=strmseg_loc, ny=101, width=1000)
xs_line = xs.get_xs()
xs_line_geom = xs_line.to_crs('epsg:4326')
print(xs_line_geom)
bbox = xs_line_geom.geometry[0].envelope.bounds
print(bbox)
query = query_dems_shape(bbox)
print(query)
t1 = (xs_line.total_bounds) + ((-100., -100., 100., 100.))
dem = py3dep.get_map("DEM", tuple(t1), resolution=10, geo_crs="EPSG:3857", crs="epsg:3857")
tmp = 0
| 33.642857 | 91 | 0.740977 |
a7970ccf14ddf7d03195daa316d4ddd0d08e404f | 3,437 | py | Python | Python/Product/Pyvot/Pyvot/setup.py | mikiec84/PTVS | 6cbeadd70a4438d6e6ea4d22a465d678eacf5eb2 | [
"Apache-2.0"
] | 3 | 2015-04-09T03:57:26.000Z | 2016-07-25T10:00:34.000Z | Python/Product/Pyvot/Pyvot/setup.py | anstkosh/PTVS | 1854ab3d6397e66765697bb63d898aec16d0406e | [
"Apache-2.0"
] | 2 | 2019-06-05T18:13:41.000Z | 2019-06-05T20:13:16.000Z | Python/Product/Pyvot/Pyvot/setup.py | RaymonGulati1/PTVS | ee1d09f2a94be4e21016f7579205bb65ec82c616 | [
"Apache-2.0"
] | 2 | 2018-03-02T19:55:14.000Z | 2019-02-14T22:37:28.000Z | # PyVot
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
# This file must work in both Python 2 and 3 as-is (without applying 2to3)
#
import sys
# If setuptools available, we normally want to install dependencies. The --no-downloads flag
# allows the PTVS installer to prevent this, to avoid network-related failure cases
allow_downloads = True
no_downloads_flag = '--no-downloads'
if no_downloads_flag in sys.argv:
sys.argv.remove(no_downloads_flag)
allow_downloads = False
try:
from setuptools import setup, Distribution
use_setuptools = True
except ImportError:
from distutils.core import setup, Distribution
use_setuptools = False
running_python3 = sys.version_info.major > 2
# Sets __version__ as a global without importing xl's __init__. We might not have pywin32 yet.
with open(r'.\xl\version.py') as version_file:
exec(version_file.read(), globals())
long_description = \
"""Pyvot connects familiar data-exploration and visualization tools in Excel with the powerful data analysis
and transformation capabilities of Python, with an emphasis on tabular data. It provides a minimal and Pythonic
interface to Excel, smoothing over the pain points in using the existing Excel object model as exposed via COM."""
setup_options = dict(
name="Pyvot",
version=__version__,
author="Microsoft Corporation",
author_email="ptvshelp@microsoft.com",
license="Apache License 2.0",
description="Pythonic interface for data exploration in Excel",
long_description=long_description,
download_url="http://pypi.python.org/pypi/Pyvot",
url="http://pytools.codeplex.com/wikipage?title=Pyvot",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Win32 (MS Windows)',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Office/Business :: Financial :: Spreadsheet',
'License :: OSI Approved :: Apache Software License'],
packages=['xl', 'xl._impl'],
distclass=PyvotDistribution
)
if running_python3:
use_2to3 = True
from distutils.command.build_py import build_py_2to3
setup_options.update(dict(
cmdclass={'build_py': build_py_2to3}
))
if use_setuptools:
setup_options.update(dict(
zip_safe=True
))
if use_setuptools and allow_downloads:
setup_options.update(dict(
setup_requires=["Sphinx"],
))
setup(**setup_options)
| 35.802083 | 118 | 0.719814 |
a797c1cadcd8bbf1b052c2b8f77c7e0d396cdcfb | 271 | py | Python | src/ralph/api/__init__.py | DoNnMyTh/ralph | 97b91639fa68965ad3fd9d0d2652a6545a2a5b72 | [
"Apache-2.0"
] | 1,668 | 2015-01-01T12:51:20.000Z | 2022-03-29T09:05:35.000Z | src/ralph/api/__init__.py | hq-git/ralph | e2448caf02d6e5abfd81da2cff92aefe0a534883 | [
"Apache-2.0"
] | 2,314 | 2015-01-02T13:26:26.000Z | 2022-03-29T04:06:03.000Z | src/ralph/api/__init__.py | hq-git/ralph | e2448caf02d6e5abfd81da2cff92aefe0a534883 | [
"Apache-2.0"
] | 534 | 2015-01-05T12:40:28.000Z | 2022-03-29T21:10:12.000Z | from ralph.api.serializers import RalphAPISerializer
from ralph.api.viewsets import RalphAPIViewSet, RalphReadOnlyAPIViewSet
from ralph.api.routers import router
__all__ = [
'RalphAPISerializer',
'RalphAPIViewSet',
'RalphReadOnlyAPIViewSet',
'router',
]
| 24.636364 | 71 | 0.778598 |
a799e70dc24ceb42c8e876b81ace1c8f5d0f6ceb | 727 | py | Python | demo_odoo_tutorial_wizard/models/models.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 57 | 2020-06-22T05:28:11.000Z | 2022-03-25T08:15:08.000Z | demo_odoo_tutorial_wizard/models/models.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 2 | 2020-11-20T07:11:27.000Z | 2022-03-30T00:20:29.000Z | demo_odoo_tutorial_wizard/models/models.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 29 | 2020-07-04T15:24:01.000Z | 2022-03-28T01:29:03.000Z | from odoo import models, fields, api
from odoo.exceptions import ValidationError | 34.619048 | 72 | 0.696011 |
a79daf8941b0f06f1e88f279de06585e5430d9d8 | 659 | py | Python | eaa_donations/donations/models/partner_charity.py | andrewbird2/eaa_donations | 40a2cb2431130b330130f101c89bd3f8c503d2e2 | [
"MIT"
] | null | null | null | eaa_donations/donations/models/partner_charity.py | andrewbird2/eaa_donations | 40a2cb2431130b330130f101c89bd3f8c503d2e2 | [
"MIT"
] | 13 | 2020-06-05T19:27:58.000Z | 2022-02-26T13:40:54.000Z | eaa_donations/donations/models/partner_charity.py | andrewbird2/eaa_donations | 40a2cb2431130b330130f101c89bd3f8c503d2e2 | [
"MIT"
] | null | null | null | from django.db import models
| 36.611111 | 101 | 0.732929 |
a7a100722ec7bb48d5749ae19f7101fc66740935 | 2,931 | py | Python | python_data_utils/spark/ml/lightgbm.py | surajiyer/python-data-utils | d6e9bf81204a01545a3edb165c5724eb24f37c18 | [
"MIT"
] | 4 | 2019-01-06T00:09:21.000Z | 2022-01-28T06:03:13.000Z | python_data_utils/spark/ml/lightgbm.py | surajiyer/python-data-utils | d6e9bf81204a01545a3edb165c5724eb24f37c18 | [
"MIT"
] | null | null | null | python_data_utils/spark/ml/lightgbm.py | surajiyer/python-data-utils | d6e9bf81204a01545a3edb165c5724eb24f37c18 | [
"MIT"
] | null | null | null | __all__ = ['LightGBMRegressorModel']
from mmlspark.lightgbm.LightGBMRegressor import LightGBMRegressor, LightGBMRegressionModel
from mmlspark.train import ComputeModelStatistics
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql import DataFrame
import pyspark.sql.functions as F
from python_data_utils.spark.ml.base import BinaryClassCVModel, Metrics, RegressionCVModel
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
| 37.101266 | 90 | 0.643466 |
a7a176d1745c365cc7c57002a3194eb70a8c838f | 80 | py | Python | UDTherapy/__init__.py | JonSn0w/Urban-Dictionary-Therapy | 8257cd3883bcef31207c2b089197ee9b0788727f | [
"MIT"
] | 3 | 2017-05-08T11:59:51.000Z | 2017-06-20T22:36:07.000Z | UDTherapy/__init__.py | JonSn0w/Urban-Dictionary-Therapy | 8257cd3883bcef31207c2b089197ee9b0788727f | [
"MIT"
] | null | null | null | UDTherapy/__init__.py | JonSn0w/Urban-Dictionary-Therapy | 8257cd3883bcef31207c2b089197ee9b0788727f | [
"MIT"
] | null | null | null | name = 'Urban Dictionary Therapy'
__all__ = ['UDTherapy',
'helper']
| 16 | 33 | 0.6 |
a7a1b1b2f4da0731134b12b8f90d117d800a2c6f | 2,453 | py | Python | torch-test/mpich-3.4.3/modules/libfabric/contrib/intel/jenkins/runtests.py | alchemy315/NoPFS | f3901e963e2301e8a6f1c7aac0511d0cf9a1889d | [
"BSD-3-Clause"
] | null | null | null | torch-test/mpich-3.4.3/modules/libfabric/contrib/intel/jenkins/runtests.py | alchemy315/NoPFS | f3901e963e2301e8a6f1c7aac0511d0cf9a1889d | [
"BSD-3-Clause"
] | null | null | null | torch-test/mpich-3.4.3/modules/libfabric/contrib/intel/jenkins/runtests.py | alchemy315/NoPFS | f3901e963e2301e8a6f1c7aac0511d0cf9a1889d | [
"BSD-3-Clause"
] | null | null | null | import argparse
import os
import sys
sys.path.append(os.environ['CI_SITE_CONFIG'])
import ci_site_config
import run
import common
parser = argparse.ArgumentParser()
parser.add_argument("--prov", help="core provider", choices=["psm2", "verbs", \
"tcp", "udp", "sockets", "shm"])
parser.add_argument("--util", help="utility provider", choices=["rxd", "rxm"])
parser.add_argument("--ofi_build_mode", help="specify the build configuration", \
choices = ["dbg", "dl"])
args = parser.parse_args()
args_core = args.prov
args_util = args.util
if (args.ofi_build_mode):
ofi_build_mode = args.ofi_build_mode
else:
ofi_build_mode='reg'
node = (os.environ['NODE_NAME']).split('-')[0]
hosts = [node]
# Note: Temporarily disabling all mpich testing
# due to mpich options issues which is causing
# multiple tests to fail.
#mpilist = ['impi', 'mpich', 'ompi']
mpilist = ['impi', 'ompi']
#this script is executed from /tmp
#this is done since some mpi tests
#look for a valid location before running
# the test on the secondary host(client)
# but jenkins only creates a valid path on
# the primary host (server/test node)
os.chdir('/tmp/')
if(args_core):
for host in ci_site_config.node_map[node]:
hosts.append(host)
if (args_util == None):
run.fi_info_test(args_core, hosts, ofi_build_mode)
run.fabtests(args_core, hosts, ofi_build_mode)
run.shmemtest(args_core, hosts, ofi_build_mode)
for mpi in mpilist:
run.intel_mpi_benchmark(args_core, hosts, mpi, ofi_build_mode)
run.mpistress_benchmark(args_core, hosts, mpi, ofi_build_mode)
run.osu_benchmark(args_core, hosts, mpi, ofi_build_mode)
else:
run.fi_info_test(args_core, hosts, ofi_build_mode, util=args_util)
run.fabtests(args_core, hosts, ofi_build_mode, util=args_util)
run.shmemtest(args_core, hosts, ofi_build_mode, util=args_util)
for mpi in mpilist:
run.intel_mpi_benchmark(args_core, hosts, mpi, ofi_build_mode, \
util=args_util,)
run.mpistress_benchmark(args_core, hosts, mpi, ofi_build_mode, \
util=args_util)
run.osu_benchmark(args_core, hosts, mpi, ofi_build_mode, \
util=args_util)
else:
print("Error : Specify a core provider to run tests")
| 35.042857 | 81 | 0.653078 |
a7a23de4cab3d9668dde589a3d12beefa58de55c | 664 | py | Python | interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0075_sort_colors.py | mrinalini-m/data_structures_and_algorithms | f9bebcca8002064e26ba5b46e47b8abedac39c3e | [
"MIT"
] | 2 | 2020-12-18T21:42:05.000Z | 2020-12-21T06:07:33.000Z | interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0075_sort_colors.py | mrinalini-m/data_structures_and_algorithms | f9bebcca8002064e26ba5b46e47b8abedac39c3e | [
"MIT"
] | null | null | null | interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0075_sort_colors.py | mrinalini-m/data_structures_and_algorithms | f9bebcca8002064e26ba5b46e47b8abedac39c3e | [
"MIT"
] | 2 | 2020-07-04T20:30:19.000Z | 2021-08-31T08:32:36.000Z | from typing import List
list = [1, 2, 0, 1, 0, 2, 0, 0]
Solution().sortColors(list)
print(list)
| 18.971429 | 61 | 0.414157 |
a7a370eb2ff4da9ac60e7150eb94a68c2fab78e2 | 14,452 | py | Python | bnpy/data/GroupXData.py | raphael-group/bnpy | b11dc6f5689b06fc967bab6dffe7e01551d84667 | [
"BSD-3-Clause"
] | 184 | 2016-12-13T21:05:48.000Z | 2022-02-28T11:47:23.000Z | bnpy/data/GroupXData.py | raphael-group/bnpy | b11dc6f5689b06fc967bab6dffe7e01551d84667 | [
"BSD-3-Clause"
] | 37 | 2016-12-18T14:07:53.000Z | 2022-03-13T10:58:14.000Z | bnpy/data/GroupXData.py | raphael-group/bnpy | b11dc6f5689b06fc967bab6dffe7e01551d84667 | [
"BSD-3-Clause"
] | 50 | 2017-01-25T19:44:34.000Z | 2022-03-15T10:22:01.000Z | '''
Classes
-----
GroupXData
Data object for holding a dense matrix X of real 64-bit floats,
organized contiguously based on provided group structure.
'''
import numpy as np
from collections import namedtuple
from bnpy.data.XData import XData
from bnpy.util import as1D, as2D, as3D, toCArray
from bnpy.util import numpyToSharedMemArray, sharedMemToNumpyArray
def get_text_summary(self):
''' Returns human-readable description of this dataset
'''
if hasattr(self, 'summary'):
s = self.summary
else:
s = 'GroupXData'
return s
def get_stats_summary(self):
''' Returns human-readable summary of this dataset's basic properties
'''
s = ' size: %d units (documents)\n' % (self.get_size())
s += ' dimension: %d' % (self.get_dim())
return s
def toXData(self):
''' Return simplified XData instance, losing group structure
'''
if hasattr(self, 'TrueParams'):
TParams = self.TrueParams
else:
TParams=None
if hasattr(self, 'Xprev'):
return XData(self.X, Xprev=self.Xprev, TrueParams=TParams)
else:
return XData(self.X, TrueParams=TParams)
# Create Subset
#########################################################
def make_subset(self,
docMask=None,
atomMask=None,
doTrackTruth=False,
doTrackFullSize=True):
""" Get subset of this dataset identified by provided unit IDs.
Parameters
-------
docMask : 1D array_like of ints
Identifies units (documents) to use to build subset.
doTrackFullSize : boolean, optional
default=True
If True, return DataObj with same nDocTotal value as this
dataset. If False, returned DataObj has smaller size.
atomMask : 1D array_like of ints, optional
default=None
If present, identifies rows of X to return as XData
Returns
-------
Dchunk : bnpy.data.GroupXData instance
"""
if atomMask is not None:
return self.toXData().select_subset_by_mask(atomMask)
if len(docMask) < 1:
raise ValueError('Cannot select empty subset')
newXList = list()
newXPrevList = list()
newDocRange = np.zeros(len(docMask) + 1)
newPos = 1
for d in range(len(docMask)):
start = self.doc_range[docMask[d]]
stop = self.doc_range[docMask[d] + 1]
newXList.append(self.X[start:stop])
if hasattr(self, 'Xprev'):
newXPrevList.append(self.Xprev[start:stop])
newDocRange[newPos] = newDocRange[newPos - 1] + stop - start
newPos += 1
newX = np.vstack(newXList)
if hasattr(self, 'Xprev'):
newXprev = np.vstack(newXPrevList)
else:
newXprev = None
if doTrackFullSize:
nDocTotal = self.nDocTotal
else:
nDocTotal = None
if hasattr(self, 'alwaysTrackTruth'):
doTrackTruth = doTrackTruth or self.alwaysTrackTruth
hasTrueZ = hasattr(self, 'TrueParams') and 'Z' in self.TrueParams
if doTrackTruth and hasTrueZ:
TrueZ = self.TrueParams['Z']
newTrueZList = list()
for d in range(len(docMask)):
start = self.doc_range[docMask[d]]
stop = self.doc_range[docMask[d] + 1]
newTrueZList.append(TrueZ[start:stop])
newTrueZ = np.hstack(newTrueZList)
assert newTrueZ.size == newDocRange[-1]
else:
newTrueZ = None
return GroupXData(newX, newDocRange,
Xprev=newXprev,
nDocTotal=nDocTotal,
TrueZ=newTrueZ)
def add_data(self, XDataObj):
""" Appends (in-place) provided dataset to this dataset.
Post Condition
-------
self.Data grows by adding all units from provided DataObj.
"""
if not self.dim == XDataObj.dim:
raise ValueError("Dimensions must match!")
self.nObs += XDataObj.nObs
self.nDocTotal += XDataObj.nDocTotal
self.nDoc += XDataObj.nDoc
self.X = np.vstack([self.X, XDataObj.X])
if hasattr(self, 'Xprev'):
self.Xprev = np.vstack([self.Xprev, XDataObj.Xprev])
new_doc_range = XDataObj.doc_range[1:] + self.doc_range[-1]
self.doc_range = np.hstack([self.doc_range, new_doc_range])
self._check_dims()
def getRawDataAsSharedMemDict(self):
''' Create dict with copies of raw data as shared memory arrays
'''
dataShMemDict = dict()
dataShMemDict['X'] = numpyToSharedMemArray(self.X)
dataShMemDict['doc_range'] = numpyToSharedMemArray(self.doc_range)
dataShMemDict['nDocTotal'] = self.nDocTotal
if hasattr(self, 'Xprev'):
dataShMemDict['Xprev'] = numpyToSharedMemArray(self.Xprev)
return dataShMemDict
def getDataSliceFunctionHandle(self):
""" Return function handle that can make data slice objects.
Useful with parallelized algorithms,
when we need to use shared memory.
Returns
-------
f : function handle
"""
return makeDataSliceFromSharedMem
def makeDataSliceFromSharedMem(dataShMemDict,
cslice=(0, None),
batchID=None):
""" Create data slice from provided raw arrays and slice indicators.
Returns
-------
Dslice : namedtuple with same fields as GroupXData object
* X
* nObs
* nObsTotal
* dim
Represents subset of documents identified by cslice tuple.
Example
-------
>>> Data = GroupXData(np.random.rand(25,2), doc_range=[0,4,12,25])
>>> shMemDict = Data.getRawDataAsSharedMemDict()
>>> Dslice = makeDataSliceFromSharedMem(shMemDict)
>>> np.allclose(Data.X, Dslice.X)
True
>>> np.allclose(Data.nObs, Dslice.nObs)
True
>>> Data.dim == Dslice.dim
True
>>> Aslice = makeDataSliceFromSharedMem(shMemDict, (0, 2))
>>> Aslice.nDoc
2
>>> np.allclose(Aslice.doc_range, Dslice.doc_range[0:(2+1)])
True
"""
if batchID is not None and batchID in dataShMemDict:
dataShMemDict = dataShMemDict[batchID]
# Make local views (NOT copies) to shared mem arrays
doc_range = sharedMemToNumpyArray(dataShMemDict['doc_range'])
X = sharedMemToNumpyArray(dataShMemDict['X'])
nDocTotal = int(dataShMemDict['nDocTotal'])
dim = X.shape[1]
if cslice is None:
cslice = (0, doc_range.size - 1)
elif cslice[1] is None:
cslice = (0, doc_range.size - 1)
tstart = doc_range[cslice[0]]
tstop = doc_range[cslice[1]]
keys = ['X', 'Xprev', 'doc_range', 'nDoc', 'nObs', 'dim', 'nDocTotal']
if 'Xprev' in dataShMemDict:
Xprev = sharedMemToNumpyArray(dataShMemDict['Xprev'])[tstart:tstop]
else:
Xprev = None
Dslice = namedtuple("GroupXDataTuple", keys)(
X=X[tstart:tstop],
Xprev=Xprev,
doc_range=doc_range[cslice[0]:cslice[1] + 1] - doc_range[cslice[0]],
nDoc=cslice[1] - cslice[0],
nObs=tstop - tstart,
dim=dim,
nDocTotal=nDocTotal,
)
return Dslice
| 34.657074 | 77 | 0.588846 |
a7a3c07297bdc5a9d9dc9e8e2723b1d3e587876e | 915 | py | Python | sghymnal/users/models.py | shortnd/sghymnal | c10d9a7e2fda803dcb5046b9f7bc099f32b6c603 | [
"MIT"
] | null | null | null | sghymnal/users/models.py | shortnd/sghymnal | c10d9a7e2fda803dcb5046b9f7bc099f32b6c603 | [
"MIT"
] | null | null | null | sghymnal/users/models.py | shortnd/sghymnal | c10d9a7e2fda803dcb5046b9f7bc099f32b6c603 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db.models import BooleanField, CharField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
| 39.782609 | 74 | 0.748634 |
a7a3c324c55d54b727b474911571c79dbd56bbdd | 3,285 | py | Python | GNetLMM/pycore/mtSet/linalg/linalg_matrix.py | PMBio/GNetLMM | 103d6433ff6d4a13b5787c116032fda268dc4302 | [
"Apache-2.0"
] | 4 | 2016-02-25T18:40:36.000Z | 2019-05-06T06:15:47.000Z | GNetLMM/pycore/mtSet/linalg/linalg_matrix.py | PMBio/GNetLMM | 103d6433ff6d4a13b5787c116032fda268dc4302 | [
"Apache-2.0"
] | 6 | 2016-03-29T02:55:17.000Z | 2017-11-27T19:30:04.000Z | GNetLMM/pycore/mtSet/linalg/linalg_matrix.py | PMBio/GNetLMM | 103d6433ff6d4a13b5787c116032fda268dc4302 | [
"Apache-2.0"
] | 2 | 2017-05-09T05:23:50.000Z | 2019-07-27T13:19:22.000Z | """Matrix linear algebra routines needed for GP models"""
import scipy as SP
import scipy.linalg as linalg
import logging
def solve_chol(A,B):
"""
Solve cholesky decomposition::
return A\(A'\B)
"""
# X = linalg.solve(A,linalg.solve(A.transpose(),B))
# much faster version
X = linalg.cho_solve((A, True), B)
return X
def jitChol(A, maxTries=10, warning=True):
"""Do a Cholesky decomposition with jitter.
Description:
U, jitter = jitChol(A, maxTries, warning) attempts a Cholesky
decomposition on the given matrix, if matrix isn't positive
definite the function adds 'jitter' and tries again. Thereafter
the amount of jitter is multiplied by 10 each time it is added
again. This is continued for a maximum of 10 times. The amount of
jitter added is returned.
Returns:
U - the Cholesky decomposition for the matrix.
jitter - the amount of jitter that was added to the matrix.
Arguments:
A - the matrix for which the Cholesky decomposition is required.
maxTries - the maximum number of times that jitter is added before
giving up (default 10).
warning - whether to give a warning for adding jitter (default is True)
See also
CHOL, PDINV, LOGDET
Copyright (c) 2005, 2006 Neil D. Lawrence
"""
jitter = 0
i = 0
while(True):
try:
# Try --- need to check A is positive definite
if jitter == 0:
jitter = abs(SP.trace(A))/A.shape[0]*1e-6
LC = linalg.cholesky(A, lower=True)
return LC.T, 0.0
else:
if warning:
# pdb.set_trace()
# plt.figure()
# plt.imshow(A, interpolation="nearest")
# plt.colorbar()
# plt.show()
logging.error("Adding jitter of %f in jitChol()." % jitter)
LC = linalg.cholesky(A+jitter*SP.eye(A.shape[0]), lower=True)
return LC.T, jitter
except linalg.LinAlgError:
# Seems to have been non-positive definite.
if i<maxTries:
jitter = jitter*10
else:
raise linalg.LinAlgError, "Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials."
i += 1
return LC
def jitEigh(A,maxTries=10,warning=True):
"""
Do a Eigenvalue Decompsition with Jitter,
works as jitChol
"""
warning = True
jitter = 0
i = 0
while(True):
if jitter == 0:
jitter = abs(SP.trace(A))/A.shape[0]*1e-6
S,U = linalg.eigh(A)
else:
if warning:
# pdb.set_trace()
# plt.figure()
# plt.imshow(A, interpolation="nearest")
# plt.colorbar()
# plt.show()
logging.error("Adding jitter of %f in jitEigh()." % jitter)
S,U = linalg.eigh(A+jitter*SP.eye(A.shape[0]))
if S.min()>1E-10:
return S,U
if i<maxTries:
jitter = jitter*10
i += 1
raise linalg.LinAlgError, "Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials."
| 26.92623 | 150 | 0.565297 |
a7a51a41fbb112ee2ecd860311e6db4a6211d1fb | 12,504 | py | Python | WebDev/Task 3/we-poll/flaskapp/main.py | vigneshd332/delta-inductions-2021-master | 70d4e7ecd92d69a2521df72ca99ac0ef2f135a23 | [
"MIT"
] | null | null | null | WebDev/Task 3/we-poll/flaskapp/main.py | vigneshd332/delta-inductions-2021-master | 70d4e7ecd92d69a2521df72ca99ac0ef2f135a23 | [
"MIT"
] | null | null | null | WebDev/Task 3/we-poll/flaskapp/main.py | vigneshd332/delta-inductions-2021-master | 70d4e7ecd92d69a2521df72ca99ac0ef2f135a23 | [
"MIT"
] | null | null | null | import ast
from flask import Flask, request
from flaskext.mysql import MySQL
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
mysql=MySQL()
app.config['MYSQL_DATABASE_USER'] = 'admin'
app.config['MYSQL_DATABASE_PASSWORD'] = 'noobmaster69'
app.config['MYSQL_DATABASE_DB'] = 'wepoll'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
if __name__ == "__main__":
app.run(debug=True)
| 37.437126 | 193 | 0.610125 |
a7a53397912424502a2174602b255501283824ca | 3,514 | py | Python | src/models/participants/participants.py | jfblg/Tracktime-UZE | f43666ac62d6e6450d0fb7082b5e68bae360547f | [
"MIT"
] | null | null | null | src/models/participants/participants.py | jfblg/Tracktime-UZE | f43666ac62d6e6450d0fb7082b5e68bae360547f | [
"MIT"
] | null | null | null | src/models/participants/participants.py | jfblg/Tracktime-UZE | f43666ac62d6e6450d0fb7082b5e68bae360547f | [
"MIT"
] | null | null | null | from wtforms import Form, BooleanField, IntegerField, StringField, PasswordField, validators
from wtforms.fields.html5 import EmailField
from src.common.database import db
from sqlalchemy import exc
def delete_from_db(self, db=db):
    """Delete this model instance from the database and commit immediately.

    Uses the shared SQLAlchemy session imported from src.common.database.
    NOTE(review): this method appears to belong to a model class whose header
    is not visible in this excerpt; commit failures (sqlalchemy.exc) are not
    handled here and propagate to the caller.
    """
    db.session.delete(self)
    db.session.commit()
| 31.945455 | 100 | 0.615822 |
a7a5cb2262b40e9f0b4ed01f3aebd41690c8929f | 901 | py | Python | demo.py | ruixuantan/FourParts | d28f7fa87830406a08e618bdfcb25d5d87e3e2a1 | [
"MIT"
] | null | null | null | demo.py | ruixuantan/FourParts | d28f7fa87830406a08e618bdfcb25d5d87e3e2a1 | [
"MIT"
] | 6 | 2020-10-09T04:25:07.000Z | 2021-04-20T20:29:03.000Z | demo.py | ruixuantan/FourParts | d28f7fa87830406a08e618bdfcb25d5d87e3e2a1 | [
"MIT"
] | null | null | null | import fourparts as fp
import pandas as pd
file_name = 'chorale_F'
df = fp.midi_to_df('samples/' + file_name + '.mid', save=True)
chords = fp.PreProcessor(4).get_progression(df)
chord_progression = fp.ChordProgression(chords)
# gets pitch class sets
pitch_class_sets = chord_progression.get_pitch_class_sets()
pd.DataFrame(pitch_class_sets).to_csv(file_name + '_pitch_class_sets.csv')
# check parallels
result = chord_progression.check_parallels()
pd.DataFrame(result).to_csv(file_name + '_parallel_results.csv')
# demonstration for 2 parts
file_name = 'chorale_G_2parts'
df = fp.midi_to_df('samples/' + file_name + '.mid', save=True)
dyads = fp.PreProcessor(2).get_progression(df)
dyad_progression = fp.DyadProgression(dyads)
# gets intervals between each dyad
dyad_intervals = dyad_progression.get_harmonic_intervals()
pd.DataFrame(dyad_intervals).to_csv(file_name + '_dyad_intervals.csv')
| 30.033333 | 74 | 0.789123 |
a7a5eea8dc1da0b751835f89feabfe81cc8149c8 | 14,677 | py | Python | jenkins_status.py | tektronix/obsidian | d7ee7cd1a8511ecea8d3b475c2b308bcd81c7706 | [
"Apache-2.0"
] | 2 | 2019-05-03T19:45:22.000Z | 2019-08-01T18:33:49.000Z | jenkins_status.py | tektronix/obsidian | d7ee7cd1a8511ecea8d3b475c2b308bcd81c7706 | [
"Apache-2.0"
] | 5 | 2019-09-05T15:09:24.000Z | 2019-10-02T20:36:35.000Z | jenkins_status.py | tektronix/obsidian | d7ee7cd1a8511ecea8d3b475c2b308bcd81c7706 | [
"Apache-2.0"
] | 1 | 2019-05-13T20:13:00.000Z | 2019-05-13T20:13:00.000Z | #!/usr/bin/env python3
# Display a Jenkins build job status and progress
# Re-use animation functions from https://github.com/jgarff/rpi_ws281x/blob/master/python/examples/strandtest.py
import argparse
import random
import sys
import datetime
import time
from rpi_ws281x import Adafruit_NeoPixel, Color
from requests import get
MAX_LED_COUNT = 10000
POLL_PERIOD_SECONDS = 10
CHANNEL_1_PWM_PINS = (13, 19, 41, 45, 53)
# LED strip default configuration:
LED_COUNT = 144 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
# LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 55 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
JENKINS_FAILURE = 'FAILURE'
JENKINS_SUCCESS = 'SUCCESS'
JENKINS_ABORTED = 'ABORTED'
JENKINS_NO_RESULT = None
COLOR_RED = Color(255, 0, 0)
COLOR_GREEN = Color(0, 255, 0)
COLOR_BLUE = Color(0, 0, 255)
COLOR_WHITE = Color(255, 255, 255)
COLOR_BLACK = Color(0, 0, 0)
def color_wipe(strip, color, wait_ms=50):
    """Fill the strip with *color* one pixel at a time.

    Pauses ``wait_ms`` milliseconds after lighting each pixel, so the fill
    is visible as a sweep from index 0 to the end of the strip.
    """
    delay_s = wait_ms / 1000.0
    for index in range(strip.numPixels()):
        strip.setPixelColor(index, color)
        strip.show()
        time.sleep(delay_s)
def theater_chase(strip, color, wait_ms=50, iterations=10):
    """Movie-theater marquee style chaser animation.

    For each of three phase offsets, lights every third pixel, shows the
    frame, waits ``wait_ms`` milliseconds, then turns those pixels off
    again.  The whole cycle repeats ``iterations`` times.
    """
    delay_s = wait_ms / 1000.0
    pixel_count = strip.numPixels()
    for _ in range(iterations):
        for offset in range(3):
            for base in range(0, pixel_count, 3):
                strip.setPixelColor(base + offset, color)
            strip.show()
            time.sleep(delay_s)
            for base in range(0, pixel_count, 3):
                strip.setPixelColor(base + offset, 0)
def wheel(pos):
    """Map a position 0-255 onto a color around the rainbow wheel.

    The wheel is split into three 85-step arcs: red->green, green->blue
    and blue->red.  Returns a packed rpi_ws281x Color value.
    """
    if pos < 85:
        red, green, blue = pos * 3, 255 - pos * 3, 0
    elif pos < 170:
        arc = pos - 85
        red, green, blue = 255 - arc * 3, 0, arc * 3
    else:
        arc = pos - 170
        red, green, blue = 0, arc * 3, 255 - arc * 3
    return Color(red, green, blue)
def rainbow(strip, wait_ms=20, iterations=1):
    """Draw a rainbow that fades across all pixels at once.

    Each frame shifts every pixel's wheel position by one step; a full
    pass covers 256 steps per iteration.
    """
    delay_s = wait_ms / 1000.0
    pixel_count = strip.numPixels()
    for step in range(256 * iterations):
        for index in range(pixel_count):
            strip.setPixelColor(index, wheel((index + step) & 255))
        strip.show()
        time.sleep(delay_s)
def rainbow_cycle(strip, wait_ms=20, iterations=5):
    """Draw a rainbow that is uniformly distributed across all pixels.

    Unlike rainbow(), the 256 wheel positions are spread over the whole
    strip, then rotated one step per frame.
    """
    delay_s = wait_ms / 1000.0
    pixel_count = strip.numPixels()
    for step in range(256 * iterations):
        for index in range(pixel_count):
            position = (int(index * 256 / pixel_count) + step) & 255
            strip.setPixelColor(index, wheel(position))
        strip.show()
        time.sleep(delay_s)
def theater_chase_rainbow(strip, wait_ms=50):
    """Rainbow-colored movie-theater marquee chaser animation.

    Same chase pattern as theater_chase(), but each lit pixel's color is
    taken from the rainbow wheel and drifts one step per outer frame.
    """
    delay_s = wait_ms / 1000.0
    pixel_count = strip.numPixels()
    for step in range(256):
        for offset in range(3):
            for base in range(0, pixel_count, 3):
                strip.setPixelColor(base + offset, wheel((base + step) % 255))
            strip.show()
            time.sleep(delay_s)
            for base in range(0, pixel_count, 3):
                strip.setPixelColor(base + offset, 0)
def color_shuffle(strip, color, wait_ms=50):
    """Set every pixel to *color*, visiting the pixels in random order.

    Shows the strip and waits ``wait_ms`` milliseconds after each pixel,
    producing a twinkling fill effect.
    """
    order = list(range(strip.numPixels()))
    random.shuffle(order)
    delay_s = wait_ms / 1000.0
    for index in order:
        strip.setPixelColor(index, color)
        strip.show()
        time.sleep(delay_s)
def tail_entry(strip, pixel, color, bg_color=COLOR_BLACK, travel_time_ms=100):
    """Animate a dot entering from the tail of the strip down to *pixel*.

    The dot starts past the last index and walks down to ``pixel``,
    repainting everything behind it with ``bg_color``.  ``travel_time_ms``
    is spread evenly over the distance travelled (or used whole when the
    target is at/after the end of the strip).
    """
    pixel_count = strip.numPixels()
    distance = pixel_count - pixel
    wait_ms = travel_time_ms / float(distance) if distance > 0 else travel_time_ms
    delay_s = wait_ms / 1000.0
    for position in range(pixel_count, pixel - 1, -1):
        strip.setPixelColor(position, color)
        for behind in range(position + 1, pixel_count):
            strip.setPixelColor(behind, bg_color)
        strip.show()
        time.sleep(delay_s)
def head_entry(strip, pixel, color, bg_color=COLOR_BLACK, travel_time_ms=1000):
    """Animate a dot entering from the head of the strip up to *pixel*.

    The dot starts at index 0 and walks up toward ``pixel``, repainting the
    pixels it has passed with ``bg_color``.  ``travel_time_ms`` is divided
    evenly over the travelled distance.

    Fix: the original computed ``travel_time_ms / pixel`` unconditionally,
    which raised ZeroDivisionError when ``pixel == 0`` even though the loop
    body would not have run at all; a guard now returns early instead.
    """
    if pixel <= 0:
        # Nothing to travel; avoid dividing by zero below.
        return
    wait_ms = travel_time_ms / pixel
    for j in range(pixel):
        strip.setPixelColor(j, color)
        for i in range(j):
            strip.setPixelColor(i, bg_color)
        strip.show()
        time.sleep(wait_ms / 1000.0)
def head_solid(strip, pixel, color):
    """Paint indices 0..pixel-1 with *color* and show the result once."""
    for index in range(pixel):
        strip.setPixelColor(index, color)
    strip.show()
def tail_solid(strip, pixel, color):
    """Set solid color from the specified pixel to the end of the strip.

    Paints indices ``pixel``..``numPixels()-1`` with *color* and shows the
    result once.

    Fix: the original iterated ``range(strip.numPixels, pixel - 1)`` --
    passing the bound *method* (missing call parentheses) as the range
    start and omitting the negative step -- so it raised TypeError and
    could never paint anything.
    """
    for index in range(pixel, strip.numPixels()):
        strip.setPixelColor(index, color)
    strip.show()
def tail_fill(strip, color, bg_color=COLOR_BLACK, travel_time_ms=100):
    """Fill the whole strip by repeatedly animating a tail entry.

    For each target index in order, the already-filled head is repainted
    solid, then one more dot is animated in from the tail.
    """
    for target in range(strip.numPixels()):
        tail_entry(strip, target, color=color, bg_color=bg_color,
                   travel_time_ms=travel_time_ms)
        head_solid(strip, target, color=color)
def progress_bar_tail_entry(strip, pixel, color, bg_color=COLOR_BLACK, travel_time_ms=100):
    """Animate one progress step: repaint the finished head solid, then
    animate the next pixel in from the tail end up to *pixel*."""
    head_solid(strip, pixel, color=color)
    tail_entry(strip, pixel, color=color, bg_color=bg_color, travel_time_ms=travel_time_ms)
def progress_bar_tail_fill(strip, pixel, color, bg_color=COLOR_BLACK, travel_time_ms=100):
    """Animate a progress bar filling from the start up to *pixel*, one
    tail-entry animation per pixel."""
    for i in range(pixel):
        head_solid(strip, i, color=color)
        tail_entry(strip, i, color=color, bg_color=bg_color, travel_time_ms=travel_time_ms)
def progress_bar(strip, percentage, progressColor, remainColor=COLOR_BLACK, wait_ms=10):
    """Render a progress bar at *percentage* (0-100) and animate its tip.

    Lights the leading fraction of the strip in ``progressColor``, the rest
    in ``remainColor``, shows the frame, then cycles a rainbow on the pixel
    at the boundary.

    Fix: ``strip.numPixels() * percentage / 100`` is a float under
    Python 3 (this script's shebang is python3), and ``range()`` rejects
    floats with TypeError -- floor division is used instead.
    """
    finished_pixels = strip.numPixels() * percentage // 100
    for index in range(finished_pixels):
        strip.setPixelColor(index, progressColor)
    for index in range(finished_pixels, strip.numPixels()):
        strip.setPixelColor(index, remainColor)
    strip.show()
    rainbow_pixel(strip, finished_pixels, wait_ms=wait_ms)
def rainbow_pixel(strip, pixel, wait_ms=100):
    """Cycle a single pixel through all 256 rainbow-wheel colors."""
    delay_s = wait_ms / 1000.0
    for hue in range(256):
        strip.setPixelColor(pixel, wheel(hue))
        strip.show()
        time.sleep(delay_s)
def rainbow_pixel_duration(strip, pixel, duration=10):
    """Animate a rainbow on one pixel for roughly *duration* seconds.

    Repeats full wheel cycles until the wall-clock deadline has passed, so
    the actual run time is rounded up to a whole cycle.
    """
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=duration)
    while datetime.datetime.now() < deadline:
        rainbow_pixel(strip, pixel, wait_ms=10)
def show_success(strip):
    """Animate build result success: wipe the whole strip green."""
    color_wipe(strip, COLOR_GREEN, 10)
def show_failure(strip):
    """Animate build result failure: wipe the whole strip red."""
    color_wipe(strip, COLOR_RED, 10)
def show_aborted(strip):
    """Animate build result aborted: wipe the whole strip light grey."""
    color_wipe(strip, Color(200, 200, 200), 10)
def show_build_started(strip):
    """Animate build started: blank all pixels in random order."""
    color_shuffle(strip, color=COLOR_BLACK, wait_ms=10)
def show_build_in_progress(strip, progress, travel_time_s=POLL_PERIOD_SECONDS):
    """Animate the current build progress as a blue bar.

    Maps *progress* (0-100) onto a pixel index, logs the mapping, and
    animates one tail-entry step toward that pixel.  A full bar uses a
    fast 1-second travel; otherwise the animation is stretched over
    ``travel_time_s`` so it spans one polling period.
    """
    pixel = int(progress * strip.numPixels() / 100)
    print("progress=%s%% => pixel=%s" % (progress, pixel))
    travel_time_ms = 1000 if pixel == strip.numPixels() else travel_time_s * 1000
    progress_bar_tail_entry(strip, pixel, color=COLOR_BLUE, travel_time_ms=travel_time_ms)
def show_build_finished(strip):
    """Animate build completion: white theater-chase for 20 iterations."""
    theater_chase(strip, COLOR_WHITE, iterations=20)
def light_check(strip):
    """Exercise every pixel in each primary color plus white.

    First a dot travels up and then back down the strip in red, green,
    blue and white; then each of those colors is shuffled on and off with
    one-second holds.  The strip is blanked before and after the check.
    ``solid`` is provided elsewhere in this module (not shown in this
    excerpt).
    """
    travel_time = 100
    solid(strip, color=COLOR_BLACK)
    for check_color in (COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_WHITE):
        head_entry(strip, strip.numPixels(), color=check_color, travel_time_ms=travel_time)
        tail_entry(strip, 0, color=check_color, travel_time_ms=travel_time)
    for check_color in (COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_WHITE):
        color_shuffle(strip, color=check_color)
        time.sleep(1)
        color_shuffle(strip, color=COLOR_BLACK)
        time.sleep(1)
    solid(strip, color=COLOR_BLACK)
def validate_brightness_value(value):
    """argparse type-checker: parse *value* as an int in 0..255 (inclusive).

    Delegates to validate_range(), which raises argparse.ArgumentTypeError
    for out-of-range input.
    """
    error_message = "The value of brightness must be between %d and %d."
    return validate_range(value, 0, 255, error_message)
def validate_range(value, min_value, max_value, error_message):
    """Parse *value* as an int and check it lies in [min_value, max_value].

    Returns the parsed int on success; raises argparse.ArgumentTypeError
    (formatting *error_message* with the bounds) otherwise.
    """
    number = int(value)
    if not (min_value <= number <= max_value):
        raise argparse.ArgumentTypeError(error_message % (min_value, max_value))
    return number
def validate_led_count(value):
    """argparse type-checker: parse *value* as an int in 1..MAX_LED_COUNT."""
    error_message = "The number of LED on a single strip should be between %d and %d"
    return validate_range(value, 1, MAX_LED_COUNT, error_message)
def validate_poll_period(value):
    """argparse type-checker: parse the polling period as an int number of
    seconds between 1 and one day (inclusive).

    Fix: the original error message read "should be between between %d and
    %d" -- the duplicated word is removed.
    """
    seconds_per_day = 60 * 60 * 24
    error_message = "The period to poll for status change should be between %d and %d"
    return validate_range(value, 1, seconds_per_day, error_message)
if __name__ == '__main__':
    # Parse CLI options; process_args() is defined elsewhere in this module
    # (not shown in this excerpt).  Expected attributes: length, pin,
    # brightness, check, job, verifyssl, pollperiod, donotclear.
    args = process_args()
    if args.length <= 0:
        print("Not enough LED to work with!")
        sys.exit()
    print("Pin used %d" % args.pin)
    # Pins 13/19/41/45/53 are driven by PWM channel 1; everything else uses 0.
    pwm_channel = 0
    if args.pin in CHANNEL_1_PWM_PINS:
        pwm_channel = 1
    strip = Adafruit_NeoPixel(args.length, args.pin, LED_FREQ_HZ, LED_DMA, LED_INVERT, args.brightness, pwm_channel)
    strip.begin()
    # --check mode: run the pixel self-test and exit without polling Jenkins.
    if args.check:
        try:
            light_check(strip)
        except KeyboardInterrupt:
            print("\nKeyboard Interrupt signal received.")
            if not args.donotclear:
                print("Clearing all LEDs...")
                color_wipe(strip, COLOR_BLACK, wait_ms=5)
        finally:
            sys.exit()
    if not args.job:
        print("A Jenkins Job URL is required to query for its status. "
              "Run this command again with the -h or --help on how to specify them.")
        sys.exit()
    # Jenkins JSON endpoints: last build result, and the executor progress %.
    job_url = args.job + "/lastBuild/api/json"
    progress_url = job_url + "?tree=executor[progress]"
    print('Monitor job: %s' % job_url)
    print("")
    print('Press Ctrl-C to quit.')
    is_building = True
    # Outer loop recovers from transient errors (network, bad JSON); inner
    # loop polls the job status forever.
    while True:
        try:
            while True:
                response = get(job_url, verify=args.verifyssl)
                job_status = response.json()
                if job_status["result"] == JENKINS_NO_RESULT:
                    # result == None means a build is currently running.
                    if not is_building:
                        show_build_started(strip)
                        is_building = True
                    response = get(progress_url, verify=args.verifyssl)
                    progress = int(response.json()["executor"]["progress"])
                    show_build_in_progress(strip, progress, travel_time_s=args.pollperiod)
                else:
                    # Build finished: animate once, then idle on the result color.
                    if is_building:
                        show_build_in_progress(strip, 100, travel_time_s=1)
                        show_build_finished(strip)
                        print("Done with status: %s" % job_status["result"])
                        if job_status["result"] == JENKINS_FAILURE:
                            show_failure(strip)
                        elif job_status["result"] == JENKINS_SUCCESS:
                            show_success(strip)
                        elif job_status["result"] == JENKINS_ABORTED:
                            show_aborted(strip)
                        is_building = False
                    time.sleep(5)
        except (KeyboardInterrupt, SystemExit):
            print("\nKeyboard Interrupt signal received.")
            if not args.donotclear:
                print("Clearing all LEDs...")
                color_wipe(strip, COLOR_BLACK, wait_ms=5)
            sys.exit()
        except Exception as e:
            # Best-effort recovery: report the error, show a rainbow for a
            # minute, then retry the polling loop.
            print(e)
            print("\nSleep 1 minutes and will try again")
            rainbow_pixel_duration(strip, 1, 60)
| 36.239506 | 119 | 0.644614 |
a7a6463865702f264ac1badc4373232d010a238b | 3,649 | py | Python | RQ1_RQ2/Thermostat_case_study/EVALUATION/Pymoo_GA/MyTcMutation.py | dgumenyuk/Environment_generation | 092fbecdc208f84aa58f2ccd3522262984e79cda | [
"MIT"
] | null | null | null | RQ1_RQ2/Thermostat_case_study/EVALUATION/Pymoo_GA/MyTcMutation.py | dgumenyuk/Environment_generation | 092fbecdc208f84aa58f2ccd3522262984e79cda | [
"MIT"
] | null | null | null | RQ1_RQ2/Thermostat_case_study/EVALUATION/Pymoo_GA/MyTcMutation.py | dgumenyuk/Environment_generation | 092fbecdc208f84aa58f2ccd3522262984e79cda | [
"MIT"
] | null | null | null | import numpy as np
from pymoo.model.mutation import Mutation
import copy
import config as cf
import random as rm
| 34.424528 | 87 | 0.417923 |
a7a7a93a99b79c8510b21680dab63b937c097a0a | 6,609 | py | Python | yufeng_code/models.py | BrandonThaiTran/stressed_emotion | 72a24ae66a41a9cdf811145ada5f4d5ecbe3c680 | [
"MIT"
] | null | null | null | yufeng_code/models.py | BrandonThaiTran/stressed_emotion | 72a24ae66a41a9cdf811145ada5f4d5ecbe3c680 | [
"MIT"
] | null | null | null | yufeng_code/models.py | BrandonThaiTran/stressed_emotion | 72a24ae66a41a9cdf811145ada5f4d5ecbe3c680 | [
"MIT"
] | null | null | null | """
Defines models
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
| 35.342246 | 80 | 0.55606 |
a7a883b149b66dd30bd8e6cfd41ec11cd9ae06f6 | 1,007 | py | Python | intercom/api_operations/find.py | orikalinski/python-intercom | 7acd881ac9fa042e88a31f540040be7027edafa1 | [
"MIT"
] | null | null | null | intercom/api_operations/find.py | orikalinski/python-intercom | 7acd881ac9fa042e88a31f540040be7027edafa1 | [
"MIT"
] | null | null | null | intercom/api_operations/find.py | orikalinski/python-intercom | 7acd881ac9fa042e88a31f540040be7027edafa1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Operation to find an instance of a particular resource."""
from intercom import HttpError
from intercom import utils
| 34.724138 | 84 | 0.606753 |
a7aa730f8abab95fb0a5ea5e1812ce4926111dc2 | 4,743 | py | Python | Python/maximum-number-of-occurrences-of-a-substring.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2022-01-30T06:55:28.000Z | 2022-01-30T06:55:28.000Z | Python/maximum-number-of-occurrences-of-a-substring.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | null | null | null | Python/maximum-number-of-occurrences-of-a-substring.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2021-12-31T03:56:39.000Z | 2021-12-31T03:56:39.000Z | # Time: O(n)
# Space: O(n)
# 1297 weekly contest 168 12/21/2019
# Given a string s, return the maximum number of ocurrences of any substring under the following rules:
#
# The number of unique characters in the substring must be less than or equal to maxLetters.
# The substring size must be between minSize and maxSize inclusive.
# Constraints:
#
# 1 <= s.length <= 10^5
# 1 <= maxLetters <= 26
# 1 <= minSize <= maxSize <= min(26, s.length)
import collections
# rolling hash (Rabin-Karp Algorithm)
# Time: O(m * n), m = 26
# Space: O(m * n)
print(Solution().maxFreq("babcbceccaaacddbdaedbadcddcbdbcbaaddbcabcccbacebda",1,1,1)) # 13
print(Solution().maxFreq("aababcaab", 2,3,4)) # 2
print(Solution().maxFreq("aaaa", 1,3,3)) #2
print(Solution().maxFreq("aabcabcab",2,2,3)) # 3
print(Solution().maxFreq("abcde",2,3,3)) # 0
| 35.395522 | 103 | 0.527093 |
a7ab53160a52a5d1c5533255c3c66a5f638a75da | 2,899 | py | Python | source/protocol/image_0203_protocol.py | chopin1993/protocolmaster-20210731 | e23e235ee00b940a4161c606415574d2a52c701c | [
"Apache-2.0"
] | null | null | null | source/protocol/image_0203_protocol.py | chopin1993/protocolmaster-20210731 | e23e235ee00b940a4161c606415574d2a52c701c | [
"Apache-2.0"
] | null | null | null | source/protocol/image_0203_protocol.py | chopin1993/protocolmaster-20210731 | e23e235ee00b940a4161c606415574d2a52c701c | [
"Apache-2.0"
] | null | null | null | # encoding:utf-8
from .protocol import Protocol
from .protocol import find_head
from .codec import BinaryEncoder
from tools.converter import hexstr2bytes, str2hexstr
from .data_container import *
import time
import struct
from protocol.data_container import DataStruct
IMG0203_HEAD = bytes([0x54,0x17,0xfe,0x02])
IMG0203_TAIL = 0x03
# 'u8:STC=0x02 u8:CMD u32:Length byte[Length]:Data u8:CS u8:END=0x03' | 30.840426 | 80 | 0.600552 |
a7ac0fd2c72d5a1ba2a439615b6e2a8a3fe255b0 | 936 | py | Python | ucscsdk/methodmeta/ConfigUCEstimateImpactMeta.py | parag-may4/ucscsdk | 2ea762fa070330e3a4e2c21b46b157469555405b | [
"Apache-2.0"
] | 9 | 2016-12-22T08:39:25.000Z | 2019-09-10T15:36:19.000Z | ucscsdk/methodmeta/ConfigUCEstimateImpactMeta.py | parag-may4/ucscsdk | 2ea762fa070330e3a4e2c21b46b157469555405b | [
"Apache-2.0"
] | 10 | 2017-01-31T06:59:56.000Z | 2021-11-09T09:14:37.000Z | ucscsdk/methodmeta/ConfigUCEstimateImpactMeta.py | parag-may4/ucscsdk | 2ea762fa070330e3a4e2c21b46b157469555405b | [
"Apache-2.0"
] | 13 | 2016-11-14T07:42:58.000Z | 2022-02-10T17:32:05.000Z | """This module contains the meta information of ConfigUCEstimateImpact ExternalMethod."""
from ..ucsccoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("ConfigUCEstimateImpact", "configUCEstimateImpact", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"in_configs": MethodPropertyMeta("InConfigs", "inConfigs", "ConfigMap", "Version142b", "Input", True),
"in_impact_analyzer_id": MethodPropertyMeta("InImpactAnalyzerId", "inImpactAnalyzerId", "Xs:unsignedLong", "Version142b", "Input", False),
"out_impact_analyzer_dn": MethodPropertyMeta("OutImpactAnalyzerDn", "outImpactAnalyzerDn", "ReferenceObject", "Version142b", "Output", False),
}
prop_map = {
"cookie": "cookie",
"inConfigs": "in_configs",
"inImpactAnalyzerId": "in_impact_analyzer_id",
"outImpactAnalyzerDn": "out_impact_analyzer_dn",
}
| 44.571429 | 146 | 0.746795 |
a7ad127304af82024c33b51ab7d7e16625068796 | 2,315 | py | Python | src/midiutil/midiosc.py | neonkingfr/VizBench | e41f559cb6e761d717f2f5b202482d5d8dacd2d8 | [
"MIT"
] | 7 | 2015-01-05T06:32:49.000Z | 2020-10-30T19:29:07.000Z | src/midiutil/midiosc.py | neonkingfr/VizBench | e41f559cb6e761d717f2f5b202482d5d8dacd2d8 | [
"MIT"
] | null | null | null | src/midiutil/midiosc.py | neonkingfr/VizBench | e41f559cb6e761d717f2f5b202482d5d8dacd2d8 | [
"MIT"
] | 4 | 2016-03-09T22:29:26.000Z | 2021-04-07T13:52:28.000Z | """
This module provides an interface to MIDI things for OSC
"""
import sys
import time
import traceback
import thread
import threading
import copy
import string
import re
from threading import Thread,Lock
from math import sqrt
from ctypes import *
from time import sleep
from traceback import format_exc
from array import array
from nosuch.midiutil import *
from nosuch.oscutil import *
"""
This is executed when module is loaded
"""
| 21.238532 | 59 | 0.72959 |
a7ae12f206db8056cd85668443dc8c96311de3a2 | 7,128 | py | Python | argentum-api/api/tests/test_guest_view.py | devium/argentum | 2bbb0f663fe9be78d106b1afa409b094da449519 | [
"MIT"
] | 1 | 2019-10-07T09:47:08.000Z | 2019-10-07T09:47:08.000Z | argentum-api/api/tests/test_guest_view.py | devium/argentum | 2bbb0f663fe9be78d106b1afa409b094da449519 | [
"MIT"
] | null | null | null | argentum-api/api/tests/test_guest_view.py | devium/argentum | 2bbb0f663fe9be78d106b1afa409b094da449519 | [
"MIT"
] | null | null | null | import copy
import logging
from api.models import Transaction, BonusTransaction, Order, Tag, OrderItem
from api.models.guest import Guest
from api.models.label import Label
from api.tests.data.guests import TestGuests
from api.tests.data.statuses import TestStatuses
from api.tests.data.users import TestUsers
from api.tests.utils.combined_test_case import CombinedTestCase
LOG = logging.getLogger(__name__)
| 40.271186 | 104 | 0.658951 |
a7ae40689370a8b6c40572fb34aab6f86b6f10fd | 5,106 | py | Python | nonebot/adapters/qqguild/message.py | nonebot/adapter-qqguild | a3e4d353bfdaafb296743bc0f15ed5d643c64d85 | [
"MIT"
] | 39 | 2021-12-23T14:26:41.000Z | 2022-03-22T14:11:19.000Z | nonebot/adapters/qqguild/message.py | nonebot/adapter-qqguild | a3e4d353bfdaafb296743bc0f15ed5d643c64d85 | [
"MIT"
] | 4 | 2022-01-22T17:59:50.000Z | 2022-03-22T12:40:10.000Z | nonebot/adapters/qqguild/message.py | nonebot/adapter-qqguild | a3e4d353bfdaafb296743bc0f15ed5d643c64d85 | [
"MIT"
] | 2 | 2022-01-16T02:38:51.000Z | 2022-03-01T15:48:36.000Z | import re
from typing import Any, Type, Tuple, Union, Iterable
from nonebot.typing import overrides
from nonebot.adapters import Message as BaseMessage
from nonebot.adapters import MessageSegment as BaseMessageSegment
from .utils import escape, unescape
from .api import Message as GuildMessage
from .api import MessageArk, MessageEmbed
| 29.859649 | 87 | 0.607521 |
a7ae54fa73c46e74acbf26edef75a93f9daec057 | 2,689 | py | Python | creme/metrics/__init__.py | Raul9595/creme | 39cec7ac27ccd40ff0a7bdd6bceaf7ce25c1a8da | [
"BSD-3-Clause"
] | 1 | 2020-07-27T03:06:46.000Z | 2020-07-27T03:06:46.000Z | creme/metrics/__init__.py | 2torus/creme | bcc5e2a0155663a1f0ba779c68f23456695bcb54 | [
"BSD-3-Clause"
] | 1 | 2022-02-10T06:24:42.000Z | 2022-02-10T06:24:42.000Z | creme/metrics/__init__.py | igorol/creme | 60977c4accfdca08cfd76a162095ff738ef87281 | [
"BSD-3-Clause"
] | 1 | 2021-04-16T08:27:14.000Z | 2021-04-16T08:27:14.000Z | """
A set of metrics used in machine learning that can be computed in a streaming fashion, without any
loss in precision.
"""
from .accuracy import Accuracy
from .accuracy import RollingAccuracy
from .confusion import ConfusionMatrix
from .confusion import RollingConfusionMatrix
from .cross_entropy import CrossEntropy
from .cross_entropy import RollingCrossEntropy
from .fbeta import F1
from .fbeta import FBeta
from .fbeta import MacroF1
from .fbeta import MacroFBeta
from .fbeta import MicroF1
from .fbeta import MicroFBeta
from .fbeta import MultiFBeta
from .fbeta import RollingF1
from .fbeta import RollingFBeta
from .fbeta import RollingMacroF1
from .fbeta import RollingMacroFBeta
from .fbeta import RollingMicroF1
from .fbeta import RollingMicroFBeta
from .fbeta import RollingMultiFBeta
from .jaccard import Jaccard
from .log_loss import LogLoss
from .log_loss import RollingLogLoss
from .mae import MAE
from .mae import RollingMAE
from .mcc import MCC
from .mcc import RollingMCC
from .mse import MSE
from .mse import RollingMSE
from .multioutput import RegressionMultiOutput
from .precision import MacroPrecision
from .precision import MicroPrecision
from .precision import Precision
from .precision import RollingMacroPrecision
from .precision import RollingMicroPrecision
from .precision import RollingPrecision
from .recall import MacroRecall
from .recall import MicroRecall
from .recall import Recall
from .recall import RollingMacroRecall
from .recall import RollingMicroRecall
from .recall import RollingRecall
from .rmse import RMSE
from .rmse import RollingRMSE
from .rmsle import RMSLE
from .rmsle import RollingRMSLE
from .roc_auc import ROCAUC
from .smape import RollingSMAPE
from .smape import SMAPE
__all__ = [
'Accuracy',
'ConfusionMatrix',
'CrossEntropy',
'F1',
'FBeta',
'Jaccard',
'LogLoss',
'MAE',
'MacroF1',
'MacroFBeta',
'MacroPrecision',
'MacroRecall',
'MCC',
'MicroF1',
'MicroFBeta',
'MicroPrecision',
'MicroRecall',
'MSE',
'MultiFBeta',
'Precision',
'Recall',
'RegressionMultiOutput',
'RMSE',
'RMSLE',
'ROCAUC',
'RollingAccuracy',
'RollingConfusionMatrix',
'RollingCrossEntropy',
'RollingF1',
'RollingFBeta',
'RollingLogLoss',
'RollingMAE',
'RollingMacroF1',
'RollingMacroFBeta',
'RollingMacroPrecision',
'RollingMacroRecall',
'RollingMCC',
'RollingMicroF1',
'RollingMicroFBeta',
'RollingMicroPrecision',
'RollingMicroRecall',
'RollingMSE',
'RollingMultiFBeta',
'RollingPrecision',
'RollingRecall',
'RollingRMSE',
'RollingRMSLE',
'RollingSMAPE',
'SMAPE'
]
| 25.130841 | 98 | 0.748605 |
a7aefa18f5080501a0e0759b55fb0c060403255f | 628 | py | Python | cd/checks/is_player_connected.py | Axelware/CD-bot | 8f923c09c3c8cfcff48813c6dd11ac50c410af72 | [
"MIT"
] | 2 | 2021-12-10T00:36:59.000Z | 2021-12-11T09:11:46.000Z | cd/checks/is_player_connected.py | Axelware/CD-bot | 8f923c09c3c8cfcff48813c6dd11ac50c410af72 | [
"MIT"
] | 2 | 2021-12-10T01:53:10.000Z | 2021-12-10T09:06:01.000Z | cd/checks/is_player_connected.py | Axelware/CD-bot | 8f923c09c3c8cfcff48813c6dd11ac50c410af72 | [
"MIT"
] | 1 | 2021-12-10T00:37:07.000Z | 2021-12-10T00:37:07.000Z | # Future
from __future__ import annotations
# Standard Library
from collections.abc import Callable
from typing import Literal, TypeVar
# Packages
from discord.ext import commands
# Local
from cd import custom, exceptions
__all__ = (
"is_player_connected",
)
T = TypeVar("T")
| 19.030303 | 95 | 0.719745 |
a7af30cf8c221d4921a6b97b3773efaa600307b2 | 986 | py | Python | src/test/base.py | vincent-lg/levantine | 21d5296eec2161d9fae404bbfd9d538ac7970f2c | [
"BSD-3-Clause"
] | null | null | null | src/test/base.py | vincent-lg/levantine | 21d5296eec2161d9fae404bbfd9d538ac7970f2c | [
"BSD-3-Clause"
] | null | null | null | src/test/base.py | vincent-lg/levantine | 21d5296eec2161d9fae404bbfd9d538ac7970f2c | [
"BSD-3-Clause"
] | null | null | null | """Base test for TalisMUD tests.
It creates an in-memory database for each test, so they run in independent
environments.
"""
import unittest
from pony.orm import db_session
from data.base import db
from data.properties import LazyPropertyDescriptor
# Bind to a temporary database
db.bind(provider="sqlite", filename=":memory:")
db.generate_mapping(create_tables=True)
| 25.947368 | 74 | 0.662272 |
a7b03e4ffb4bbb414e62639fa7c3f4af65273269 | 4,074 | py | Python | setup.py | zkbt/henrietta | 653d798b241ad5591b704967a0413a2457a4e734 | [
"MIT"
] | null | null | null | setup.py | zkbt/henrietta | 653d798b241ad5591b704967a0413a2457a4e734 | [
"MIT"
] | 12 | 2018-09-12T03:56:04.000Z | 2019-02-15T04:12:53.000Z | setup.py | zkbt/henrietta | 653d798b241ad5591b704967a0413a2457a4e734 | [
"MIT"
] | null | null | null | '''
This setup.py file sets up our package to be installable on any computer,
so that folks can `import henrietta` from within any directory.
Thanks to this file, you can...
...tell python to look for `henrietta` in the current directory (which you
can continue to edit), by typing *one* of the following commands:
`pip install -e .`
or
`python setup.py develop`
...move a copy of this code to your site-packages directory, where python will
be able to find it (but you won't be able to keep editing it), by typing *one*
of the following commands:
`pip install .`
or
`python setup.py install`
...upload the entire package to the Python Package Index, so that other folks
will be able to install your package via the simple `pip install henrietta`, by
running the following command:
`python setup.py release`
The template for this setup.py was pieced together with help from
barentsen, christinahedges, timothydmorton, and dfm. Check them out on github
for more neat tricks!
[`python-packaging`](https://python-packaging.readthedocs.io/en/latest/index.html)
is a pretty useful resource too!
'''
# import our basic setup ingredients
from setuptools import setup, find_packages
import os,sys
# running `python setup.py release` from the command line will post to PyPI
if "release" in sys.argv[-1]:
    # Build a source distribution, upload it with twine, then clean up.
    os.system("python setup.py sdist")
    # uncomment the next line to test out on test.pypi.com/project/tess-zap
    #os.system("twine upload --repository-url https://test.pypi.org/legacy/ dist/*")
    os.system("twine upload dist/*")
    os.system("rm -rf dist/henrietta*")
    sys.exit()
# a little kludge to get the version number from __version__
# (executes henrietta/version.py so __version__ is defined here without
# importing the package, which may have unmet dependencies at install time)
exec(open('henrietta/version.py').read())
# run the setup function
setup(
# people can type `import henrietta` to access this package
name = "henrietta",
# this package will only be installed if the current version doesn't exist
version = __version__,
# what's a short description of the package?
description = "Python toolkit playing with stellar brightness measurements, for ASTR3400 at CU Boulder.",
# what's a more detailed description?
long_description = open('README.md').read(),
# who's the main author?
author = "Zach Berta-Thompson",
# what's the main author's email?
author_email = "zach.bertathompson@colorado.edu",
# what's the URL for the repository?
url = "https://github.com/zkbt/henrietta",
# this figures out what subdirectories to include
packages = find_packages(),
# are the directories of data that should be accessible when installed?
include_package_data=False,
# where are those data directories?
package_data = {'henrietta':[]},
# any scripts will be copied into your $PATH, so that can run from the command line
scripts = [],
# some descriptions about this package (for searchability?)
classifiers=[
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Astronomy'
],
# what other packages are required. these must be pip-installable
install_requires=['numpy',
'astropy',
'scipy',
'ipython',
'matplotlib',
'lightkurve>=1.0b26',
'tqdm',
'thefriendlystars>=0.0.2',
'illumination>=0.0.3',
'ipywidgets',
'jupyter',
'photutils',
'ipympl',
'scikit-image',
'emcee',
'corner'],
# the packages in `key` will be installed if folks run `pip install henrietta[key]`
extras_require={'models':['batman-package', ],
'docs':['sphinx', 'nbsphinx', 'sphinx_rtd_theme', 'numpydoc']},
# (I think just leave this set to False)
zip_safe=False,
# under what license is this code released?
license='MIT')
| 37.376147 | 109 | 0.650957 |
a7b07918e04c601018fdac4669f2078472d113b2 | 18,739 | py | Python | zedenv/plugins/systemdboot.py | slicer69/zedenv | 415397b48fdc8ec137cc8f99d2d9a70e8c8981f1 | [
"BSD-3-Clause"
] | null | null | null | zedenv/plugins/systemdboot.py | slicer69/zedenv | 415397b48fdc8ec137cc8f99d2d9a70e8c8981f1 | [
"BSD-3-Clause"
] | null | null | null | zedenv/plugins/systemdboot.py | slicer69/zedenv | 415397b48fdc8ec137cc8f99d2d9a70e8c8981f1 | [
"BSD-3-Clause"
] | null | null | null | import shutil
import os
import re
import tempfile
import click
import zedenv.lib.be
import zedenv.plugins.configuration as plugin_config
import zedenv.lib.system
from zedenv.lib.logger import ZELogger
| 40.560606 | 99 | 0.509365 |
a7b14f4594c0e55bb15609c8994038940d71c37f | 1,158 | py | Python | dockerfile/web/mailman-web/main.py | TommyLike/kubernetes-mailman | abd50e2798f2c4417e71302ff64b8a52d0d5878a | [
"MIT"
] | null | null | null | dockerfile/web/mailman-web/main.py | TommyLike/kubernetes-mailman | abd50e2798f2c4417e71302ff64b8a52d0d5878a | [
"MIT"
] | null | null | null | dockerfile/web/mailman-web/main.py | TommyLike/kubernetes-mailman | abd50e2798f2c4417e71302ff64b8a52d0d5878a | [
"MIT"
] | null | null | null | import os
import socket
import ipaddress
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# NOTE: this is the MTA host, we need to update it.
EMAIL_HOST = 'mailman-exim4-0.mail-suit-service.default.svc.cluster.local'
EMAIL_PORT = 25
mailman_ip_address = socket.gethostbyname(
os.environ.get('MAILMAN_HOST_IP')).split('.')
mailman_ip_cidr = "{0}.{1}.0.0/16".format(mailman_ip_address[0],
mailman_ip_address[1])
MAILMAN_ARCHIVER_FROM = [str(ip) for ip in
ipaddress.IPv4Network(mailman_ip_cidr)]
ALLOWED_HOSTS = [
"localhost", # Archiving API from Mailman, keep it.
# Add here all production URLs you may have.
"mailman-database-0.mail-suit-service.default.svc.cluster.local",
"mailman-core-0.mail-suit-service.default.svc.cluster.local",
"mailman-web-0.mail-suit-service.default.svc.cluster.local",
"mail-web-service.default.svc.cluster.local",
# NOTE: This is the public ip address of the served host
"159.138.26.163",
"tommylike.me",
os.environ.get('SERVE_FROM_DOMAIN'),
os.environ.get('DJANGO_ALLOWED_HOSTS'),
] | 37.354839 | 74 | 0.692573 |
a7b26907d2d76169184d6413917d51a6f4b07b22 | 16,381 | py | Python | mask_functions.py | jhh37/wearmask3d | 67c9beba323a34d0d207d7b6897f97cd59145e63 | [
"MIT"
] | 6 | 2021-09-25T04:59:16.000Z | 2022-03-02T15:39:13.000Z | mask_functions.py | jhh37/wearmask3d | 67c9beba323a34d0d207d7b6897f97cd59145e63 | [
"MIT"
] | null | null | null | mask_functions.py | jhh37/wearmask3d | 67c9beba323a34d0d207d7b6897f97cd59145e63 | [
"MIT"
] | 1 | 2021-09-26T02:35:44.000Z | 2021-09-26T02:35:44.000Z | # WearMask3D
# Copyright 2021 Hanjo Kim and Minsoo Kim. All rights reserved.
# http://github.com/jhh37/wearmask3d
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: rlakswh@gmail.com (Hanjo Kim)
# devkim1102@gmail.com (Minsoo Kim)
import cv2
import geomdl
import torch
import torchvision.transforms as transforms
from PIL import Image
from geomdl import exchange, NURBS
from pygame.constants import *
from misc_utils import get_models, mask_transformation
from obj_loader import *
from utils.ddfa import ToTensorGjz, NormalizeGjz
from utils.inference import parse_roi_box_from_landmark, crop_img, predict_68pts, predict_dense
| 31.807767 | 110 | 0.565777 |
a7b2d4bcebc84c01285d54f2bcd39c69c67e7a6d | 249 | py | Python | adminapp/admin.py | gabyxbinnaeah/Bus-Booking | 51d2a521f890986e4e7e17775708cec3cd71d2b4 | [
"MIT"
] | null | null | null | adminapp/admin.py | gabyxbinnaeah/Bus-Booking | 51d2a521f890986e4e7e17775708cec3cd71d2b4 | [
"MIT"
] | null | null | null | adminapp/admin.py | gabyxbinnaeah/Bus-Booking | 51d2a521f890986e4e7e17775708cec3cd71d2b4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Admin, Profile
# from userapp.models import Book
# from driverapp.models import Bus
admin.site.register(Admin)
admin.site.register(Profile)
# admin.site.register(Bus)
# admin.site.register(Book) | 27.666667 | 35 | 0.795181 |
a7b37c8008c4aac92350512db834814661a375e1 | 41,807 | py | Python | zstackwoodpecker/zstackwoodpecker/operations/hybrid_operations.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | zstackwoodpecker/zstackwoodpecker/operations/hybrid_operations.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | zstackwoodpecker/zstackwoodpecker/operations/hybrid_operations.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | '''
All ldap operations for test.
@author: quarkonics
'''
from apibinding.api import ApiError
import apibinding.inventory as inventory
import apibinding.api_actions as api_actions
import zstackwoodpecker.test_util as test_util
import account_operations
import config_operations
import os
import inspect
| 50.73665 | 288 | 0.75107 |
a7b426f2174df82614170a394fb020de8df61abf | 293 | py | Python | datasets_example/populate_elastic.py | aleksbobic/csx | 151ac6644694ac585bf5d070bae7146e94c30aec | [
"MIT"
] | null | null | null | datasets_example/populate_elastic.py | aleksbobic/csx | 151ac6644694ac585bf5d070bae7146e94c30aec | [
"MIT"
] | null | null | null | datasets_example/populate_elastic.py | aleksbobic/csx | 151ac6644694ac585bf5d070bae7146e94c30aec | [
"MIT"
] | null | null | null | import requests
import sys
requests.put(f"http://localhost:9200/{sys.argv[1]}?pretty")
headers = {"Content-Type": "application/x-ndjson"}
data = open(sys.argv[2], "rb").read()
requests.post(
f"http://localhost:9200/{sys.argv[1]}/_bulk?pretty", headers=headers, data=data
)
| 22.538462 | 84 | 0.665529 |
a7b495865e605d6301abb8d08c5cad2ee915a172 | 7,369 | py | Python | tests/unit/utils/test_attributes.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | tests/unit/utils/test_attributes.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | tests/unit/utils/test_attributes.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Unit tests for Attributes.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import os
import shutil
import tempfile
from unittest import TestCase, SkipTest
from unittest.mock import MagicMock
from qgis.PyQt.QtCore import QVariant
from qgis.core import (
QgsWkbTypes, QgsProject, QgsVectorLayer, QgsField,
QgsVectorDataProvider
)
from qgis_plutil.utils.attributes import (
variant_ctor_for_object, fields_from_data,
merge_fields_in_provider,
)
logger = logging.getLogger('tests.attributes')
| 39.832432 | 75 | 0.639707 |
a7b4c6f5823d6451d0e2b584c552a30bc5d41136 | 191 | py | Python | portal/template.py | SlapBass/nx-portal | ee262079db1e5230a24ebbc205e44926f11f8da9 | [
"Apache-2.0"
] | 5 | 2019-10-04T04:46:44.000Z | 2019-10-09T10:02:01.000Z | portal/template.py | SlapBass/nx-portal | ee262079db1e5230a24ebbc205e44926f11f8da9 | [
"Apache-2.0"
] | 9 | 2019-10-06T07:15:09.000Z | 2020-09-24T02:19:40.000Z | portal/template.py | SlapBass/nx-portal | ee262079db1e5230a24ebbc205e44926f11f8da9 | [
"Apache-2.0"
] | 1 | 2020-06-19T13:26:08.000Z | 2020-06-19T13:26:08.000Z | from django.conf import settings
| 21.222222 | 54 | 0.712042 |
a7b57910267fcbe3aaeef29154600de509e91fa3 | 12,923 | py | Python | app.py | Tesfa-eth/online_book_store | b3a9b24065f3952f2ea26107a72a96fe6c1988e8 | [
"Apache-2.0"
] | null | null | null | app.py | Tesfa-eth/online_book_store | b3a9b24065f3952f2ea26107a72a96fe6c1988e8 | [
"Apache-2.0"
] | null | null | null | app.py | Tesfa-eth/online_book_store | b3a9b24065f3952f2ea26107a72a96fe6c1988e8 | [
"Apache-2.0"
] | null | null | null | from enum import unique
from typing import Reversible
from flask import Flask, app, render_template, url_for, redirect, request
#import flask
from flask.helpers import flash
from flask_login.utils import login_fresh
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, login_manager, login_user, LoginManager, login_required,logout_user, current_user
from flask_wtf.form import FlaskForm
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Length, ValidationError
from flask_bcrypt import Bcrypt
import requests
app = Flask(__name__)
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db' # connects to the db
app.config['SECRET_KEY'] = 'thisisasecretekey'
# log in manager
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view="login"
global_username = ''
store = ''
count = 0
# register and log in forms
def clean_data(data, i):
#global book_title, author_names, publish_year_first, isbn, subject
try:
book_title = data['docs'][i]['title']
except:
book_title = 'no book title'
try:
author_names = data['docs'][i]['author_name']
except:
author_names = 'no author name'
try:
publish_year_first = data['docs'][i]['publish_year'][0]
except:
publish_year_first = 'no published date'
try: # some may not have isbn
isbn = data['docs'][i]['isbn'][1] # set this to empty string later
except:
isbn = ''
try: # some may not have subject
subject = data['docs'][0]['subject'][0] # can take as many as needed
except:
subject = 'No subject available'
return book_title, author_names, publish_year_first, isbn, subject
def get_url(isbn):
if isbn:
try:
img_url = 'https://covers.openlibrary.org/b/isbn/' + str(isbn) + '-L.jpg'
except:
img_url = 'https://leadershiftinsights.com/wp-content/uploads/2019/07/no-book-cover-available.jpg'
else:
img_url = 'https://leadershiftinsights.com/wp-content/uploads/2019/07/no-book-cover-available.jpg'
return img_url
if __name__ == '__main__':
db.create_all()
app.run(debug=True) | 37.676385 | 137 | 0.686141 |
a7b6c1b55e93ba5383e18ba855c03c57d497af3c | 5,560 | py | Python | aei_net.py | ilyakava/faceshifter | aa48f9eb79991bae4ee9d8c69bb3b51891a9f721 | [
"BSD-3-Clause"
] | null | null | null | aei_net.py | ilyakava/faceshifter | aa48f9eb79991bae4ee9d8c69bb3b51891a9f721 | [
"BSD-3-Clause"
] | null | null | null | aei_net.py | ilyakava/faceshifter | aa48f9eb79991bae4ee9d8c69bb3b51891a9f721 | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import DataLoader
import torchvision
from torchvision import transforms
from torchvision.models import resnet101
import pytorch_lightning as pl
from model.AEINet import ADDGenerator, MultilevelAttributesEncoder
from model.MultiScaleDiscriminator import MultiscaleDiscriminator
from model.loss import GANLoss, AEI_Loss
from dataset import *
| 40 | 140 | 0.660252 |
a7b811fd3215225ba11bbaec1274776a54d75f89 | 3,744 | py | Python | luna/gateware/usb/usb3/link/timers.py | macdaliot/luna | 97e725d0af001a6d4c4811eefb43b6c31a9b45e1 | [
"BSD-3-Clause"
] | 2 | 2020-11-04T10:54:15.000Z | 2022-03-17T20:38:21.000Z | luna/gateware/usb/usb3/link/timers.py | macdaliot/luna | 97e725d0af001a6d4c4811eefb43b6c31a9b45e1 | [
"BSD-3-Clause"
] | 4 | 2020-11-11T17:32:33.000Z | 2020-11-30T13:08:05.000Z | luna/gateware/usb/usb3/link/timers.py | macdaliot/luna | 97e725d0af001a6d4c4811eefb43b6c31a9b45e1 | [
"BSD-3-Clause"
] | 2 | 2021-06-26T06:06:52.000Z | 2022-01-19T22:36:19.000Z | #
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" U0 link-maintenance timers. """
from nmigen import *
| 34.036364 | 97 | 0.6461 |
a7bac68130322804ca0f8658958c0c6e65364b14 | 769 | py | Python | S4/S4 Library/simulation/ensemble/ensemble_interactions.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/ensemble/ensemble_interactions.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/ensemble/ensemble_interactions.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from objects.base_interactions import ProxyInteraction
from sims4.utils import classproperty, flexmethod
| 33.434783 | 89 | 0.723017 |
a7bb2b9d40a5d41ccee76669339046f57bb5ef22 | 742 | py | Python | setup.py | ryderdamen/phonetic-alphabet | 23239a532b05c5708629011dd02a228fc11f71e7 | [
"MIT"
] | 3 | 2020-05-01T22:23:38.000Z | 2021-03-30T17:47:40.000Z | setup.py | ryderdamen/phonetic-alphabet | 23239a532b05c5708629011dd02a228fc11f71e7 | [
"MIT"
] | null | null | null | setup.py | ryderdamen/phonetic-alphabet | 23239a532b05c5708629011dd02a228fc11f71e7 | [
"MIT"
] | null | null | null | import setuptools
INSTALL_REQUIRES = []
TESTS_REQUIRE = ['pytest']
setuptools.setup(
name='phonetic_alphabet',
version='0.1.0',
description='Convert characters and digits to phonetic alphabet equivalents.',
long_description=get_readme(),
long_description_content_type="text/markdown",
keywords='phonetic alphabet aviation flight alpha bravo charlie niner',
url='http://github.com/ryderdamen/phonetic-alphabet',
author='Ryder Damen',
author_email='dev@ryderdamen.com',
license='MIT',
packages=setuptools.find_packages(),
install_requires=INSTALL_REQUIRES,
test_suite='pytest',
tests_require=TESTS_REQUIRE,
) | 27.481481 | 82 | 0.719677 |
a7bbd5a285c6bb9093667ccc2d4fc99f883f732d | 1,573 | py | Python | tests/fixtures.py | vfxetc/sgcache | 670bfac2904373e19c2dac7504d2d7f87018833d | [
"BSD-3-Clause"
] | 13 | 2017-09-06T21:48:57.000Z | 2022-02-08T20:50:52.000Z | tests/fixtures.py | vfxetc/sgcache | 670bfac2904373e19c2dac7504d2d7f87018833d | [
"BSD-3-Clause"
] | 1 | 2021-04-04T18:07:04.000Z | 2021-04-04T18:07:04.000Z | tests/fixtures.py | vfxetc/sgcache | 670bfac2904373e19c2dac7504d2d7f87018833d | [
"BSD-3-Clause"
] | 1 | 2019-07-19T01:23:19.000Z | 2019-07-19T01:23:19.000Z | from . import uuid
| 34.195652 | 96 | 0.590591 |
a7bbfcd8b016b1703e3c92ed7c1cef2bc74d76e6 | 3,098 | py | Python | config.py | ricsonc/aptools | c6ff775a7ae5a7ea7b27235748fd45769d037ae8 | [
"MIT"
] | 1 | 2020-11-09T18:02:36.000Z | 2020-11-09T18:02:36.000Z | config.py | ricsonc/aptools | c6ff775a7ae5a7ea7b27235748fd45769d037ae8 | [
"MIT"
] | null | null | null | config.py | ricsonc/aptools | c6ff775a7ae5a7ea7b27235748fd45769d037ae8 | [
"MIT"
] | null | null | null | from munch import Munch as M
cores = 20
demosaic_params = M(
# at most one of use_flat or use_lens_profile should be True
# strongly recommended to have at least 1 be True
use_flat = False,
use_lens_profile = True,
alg = 'DCB', #alternatively, use LMMSE
camera = 'auto', # alternatively, specify something like "Canon EOS 6D Mark II"
lens_make = 'auto', # alternatively, specify somnething like 'Nikon'
lens = 'Canon EF 70-200mm f/2.8L IS II USM', #'Nikkor 80-200mm f/2.8 ED',
)
detection_params = M(
Nsig = 3, # number of kernel sizes to try
min_sig = 1.0, # smallest kernel in px/std
max_sig = 6.0, # largest kernel in px/std
# only consider star candidates above this percentile of pixel luminosity
# 99.5 good for HA images, 99 for dark skies, 90 for typical use
lum_pthresh = 99.5,
# only consider candidates with an aspect ratio of no more than
unround_threshold = 2,
)
registration_params = M(
max_stars = 500, # use this many stars to register at most.
nneighbors = 500,
ba_max_ratio = 0.99,
cb_max_ratio = 0.99,
epsilon = 1E-3, # match tolerance.
min_abs_diff = 1, #abs and rel diff for match success
min_rel_diff = 1.4,
# we discard outliers from the registration via a ransac process
ransac_iters = 50,
ransac_keep_percentile = 99,
# a point is an outlier if it's more than this many pixels from the linear fit
linear_fit_tol = 2.0,
)
warping_params = M(
coarseness = 10,
use_thinplate = False, # recommend only for multi-spectra
thinplate_smoothing=0,
min_stars = 20, # don't attempt warp with fewer stars
)
stacking_params = M(
# higher noise mul = more denoising, less robust to registration errors
# lower noise mul = more robust, less denoising
noise_mul = 32.0, # could also try 4, 16, 64, usually looks the same
patch_size = 32,
cache_path = '.cache', # a large np array is temporarily stored here
)
postprocess_params = M(
# crop this many pixels from the edge of the image before any processing
border_crop = 400,
# parameters for removing background "gradient".
gradient_window = 32+1, # size of the median kernel (odd)
dilation = 16, # dilation factor for median kernel
gradient_max = 90, # all pixels more luminous than this threshold are not counted as bkg
# excl_box is either None, or a list of 4 integers [miny, maxy, minx, maxx]
# this region will be ignored for the purposes of estimating background
excl_box = None,
# alternatively, you can pass in a path to a mask file, to ignore non-box regions
mask_file = None, #
# a pair of (input, output) pairs for the tone curve.
tone_curve = [
(0.05, -0.02),
(0.3, 0.0),
],
curve_type = 'thin-plate', # can also choose "cubic" for less overshoot
# if output border is given, the saved output will be the excl box, plus output border
# otherwise, you can manually specify the [miny, maxy, minx, maxx] for the output
output_border = 400,
output_box = None,
)
| 34.422222 | 92 | 0.679471 |
a7bc5dad7ba38fc7552e7848f03173e599033cdb | 10,809 | py | Python | src/cogs/ide/dialogs/navigated_saved.py | boopdev/Jarvide | 10920d53e4193c7e5526012b572c00f26536cd6c | [
"MIT"
] | null | null | null | src/cogs/ide/dialogs/navigated_saved.py | boopdev/Jarvide | 10920d53e4193c7e5526012b572c00f26536cd6c | [
"MIT"
] | null | null | null | src/cogs/ide/dialogs/navigated_saved.py | boopdev/Jarvide | 10920d53e4193c7e5526012b572c00f26536cd6c | [
"MIT"
] | null | null | null | import disnake
import time
from disnake.ext import commands
from typing import Optional
from odmantic import Model
from src.utils import ExitButton, EmbedFactory, File, get_info
| 33.258462 | 195 | 0.569248 |
a7be5a9cf5c8b15026fbcff5a02db179d5654ed0 | 6,098 | py | Python | BiBloSA/exp_SICK/src/evaluator.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 116 | 2018-02-01T08:33:35.000Z | 2021-08-04T05:28:04.000Z | BiBloSA/exp_SICK/src/evaluator.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 2 | 2019-02-23T18:54:22.000Z | 2019-11-09T01:30:32.000Z | BiBloSA/exp_SICK/src/evaluator.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 35 | 2019-02-08T02:00:31.000Z | 2022-03-01T23:17:00.000Z | from configs import cfg
from src.utils.record_log import _logger
import numpy as np
import tensorflow as tf
import scipy.stats as stats
| 51.677966 | 120 | 0.642178 |
a7c14fda1fe9509c8caa57724445fdcaee4171b6 | 3,696 | py | Python | pytorch_translate/tasks/translation_from_pretrained_xlm.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
] | 748 | 2018-05-02T17:12:53.000Z | 2022-03-26T04:44:44.000Z | pytorch_translate/tasks/translation_from_pretrained_xlm.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
] | 352 | 2018-05-02T19:05:59.000Z | 2022-02-25T16:54:27.000Z | pytorch_translate/tasks/translation_from_pretrained_xlm.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
] | 193 | 2018-05-02T17:14:56.000Z | 2022-02-24T21:10:56.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq import options, tokenizer
from fairseq.tasks import register_task
from pytorch_translate import constants
from pytorch_translate.data.masked_lm_dictionary import MaskedLMDictionary
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask
| 39.319149 | 85 | 0.688582 |
a7c26519485c15577e8b274b47dd3a678ef542d3 | 5,084 | py | Python | drugresnet/seya/layers/memnn2.py | Naghipourfar/CCLE | cd557928a003200c683861b29c607942029bffb1 | [
"MIT"
] | 429 | 2015-08-11T09:48:34.000Z | 2021-07-31T15:13:23.000Z | drugresnet/seya/layers/memnn2.py | Naghipourfar/CCLE | cd557928a003200c683861b29c607942029bffb1 | [
"MIT"
] | 55 | 2015-09-10T11:57:58.000Z | 2021-04-24T14:13:31.000Z | drugresnet/seya/layers/memnn2.py | Naghipourfar/CCLE | cd557928a003200c683861b29c607942029bffb1 | [
"MIT"
] | 135 | 2015-08-31T17:52:26.000Z | 2022-02-07T05:31:12.000Z | import theano.tensor as T
import keras.backend as K
from keras.layers.core import LambdaMerge
from keras import initializations
| 39.71875 | 78 | 0.54288 |
a7c2682f5396348598753e892a05447ab558cc24 | 419 | py | Python | nyan/utils/io.py | TWRogers/nyan | c224279467475c9ce81e4709dd357897e0e5c963 | [
"Apache-2.0"
] | 2 | 2019-10-11T16:41:16.000Z | 2019-10-11T16:42:08.000Z | nyan/utils/io.py | TWRogers/nyan | c224279467475c9ce81e4709dd357897e0e5c963 | [
"Apache-2.0"
] | null | null | null | nyan/utils/io.py | TWRogers/nyan | c224279467475c9ce81e4709dd357897e0e5c963 | [
"Apache-2.0"
] | null | null | null | import cv2
from PIL import Image
import os
import numpy as np
IMAGE_BE = os.environ.get('NYAN_IMAGE_BE', 'PIL')
if IMAGE_BE == 'PIL':
elif IMAGE_BE == 'cv2':
else:
raise NotImplementedError('IMAGE_BE {} not implemented'.format(IMAGE_BE))
| 23.277778 | 77 | 0.668258 |
a7c28ef23aa63e38de4b879a5620a01103796308 | 164 | py | Python | src/server.py | sqweelygig/a-pi-api | 7c83bf5d1a00e01a45edc7fda9b4887bf02b064a | [
"Apache-2.0"
] | null | null | null | src/server.py | sqweelygig/a-pi-api | 7c83bf5d1a00e01a45edc7fda9b4887bf02b064a | [
"Apache-2.0"
] | null | null | null | src/server.py | sqweelygig/a-pi-api | 7c83bf5d1a00e01a45edc7fda9b4887bf02b064a | [
"Apache-2.0"
] | null | null | null | import RPi.GPIO as GPIO
import connexion
if __name__ == '__main__':
app = connexion.App('a-pi-api')
app.add_api('v0/spec.yml')
app.run(host='0.0.0.0', port=80)
| 20.5 | 33 | 0.682927 |
a7c34c83fcf273716cc8990f3dfde892e307229c | 661 | py | Python | detect_fraud_email_enron/tools/k_best_selector.py | gotamist/other_machine_learning | 70c7f5367ed5cf9b6fd4818cda16add24a2b468d | [
"MIT"
] | null | null | null | detect_fraud_email_enron/tools/k_best_selector.py | gotamist/other_machine_learning | 70c7f5367ed5cf9b6fd4818cda16add24a2b468d | [
"MIT"
] | null | null | null | detect_fraud_email_enron/tools/k_best_selector.py | gotamist/other_machine_learning | 70c7f5367ed5cf9b6fd4818cda16add24a2b468d | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 18:26:45 2018
@author: gotamist
"""
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from sklearn.feature_selection import SelectKBest | 28.73913 | 69 | 0.721634 |
a7c3a82db91bce9cfb29cde148a1916bca6eaffc | 697 | py | Python | Python/179.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | Python/179.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | 1 | 2018-04-16T21:01:50.000Z | 2018-04-16T21:01:50.000Z | Python/179.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 179 - Consecutive positive divisors
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
if __name__ == "__main__":
print(run())
| 19.914286 | 79 | 0.573888 |
a7c3c43019bf1d3df920d62ddaf471e6b921d131 | 7,417 | py | Python | tweetx/environment.py | Druid-of-Luhn/TweetX | 15a2dc0ad619f846b40880ad5fc3ab690d835e0d | [
"MIT"
] | null | null | null | tweetx/environment.py | Druid-of-Luhn/TweetX | 15a2dc0ad619f846b40880ad5fc3ab690d835e0d | [
"MIT"
] | null | null | null | tweetx/environment.py | Druid-of-Luhn/TweetX | 15a2dc0ad619f846b40880ad5fc3ab690d835e0d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import asyncio, entity, io, json, logging, queue, random, threading, time, websockets, whale
from bot import bot
from random import randrange
logging.basicConfig()
log = logging.getLogger('tweetx')
log.setLevel(logging.DEBUG)
if __name__ == "__main__":
sim = Game()
try:
sim.run()
except KeyboardInterrupt:
sim.stop()
raise
except:
sim.stop(crashed=True)
raise
| 32.674009 | 145 | 0.556829 |
a7c3c5875178bbdc7bd8d2fd3aaff1ff122b86d9 | 682 | py | Python | retired/example_process_discharge_simulation.py | changliao1025/pyswat | cdcda1375be8c0f71459a78438b1e9f8a22a77bc | [
"MIT"
] | 2 | 2021-12-11T01:39:00.000Z | 2022-02-15T17:57:45.000Z | retired/example_process_discharge_simulation.py | changliao1025/pyswat | cdcda1375be8c0f71459a78438b1e9f8a22a77bc | [
"MIT"
] | 5 | 2022-03-10T16:38:30.000Z | 2022-03-28T17:31:20.000Z | retired/example_process_discharge_simulation.py | changliao1025/pyswat | cdcda1375be8c0f71459a78438b1e9f8a22a77bc | [
"MIT"
] | null | null | null | from swaty.simulation.swat_main import swat_main
from swaty.swaty_read_model_configuration_file import swat_read_model_configuration_file
from swaty.classes.pycase import swaty
from swaty.postprocess.extract.swat_extract_stream_discharge import swat_extract_stream_discharge
sFilename_configuration_in = '/global/homes/l/liao313/workspace/python/swaty/swaty/shared/swat_simulation.xml'
#step 1
aConfig = swat_read_model_configuration_file(sFilename_configuration_in)
# iCase_index_in=iCase_index_in, sJob_in=sJob_in, iFlag_mode_in=iFlag_mode_in)
aConfig['sFilename_model_configuration'] = sFilename_configuration_in
oModel = swaty(aConfig)
swat_extract_stream_discharge(oModel) | 52.461538 | 110 | 0.879765 |
a7c4df68721fac6742030901c0c135b22a7c5979 | 861 | py | Python | ThinkPython/chap9/ex9.py | sokolowskik/Tutorials | d2681d4f18b03e00f90f9132c77f0b23b74d2629 | [
"MIT"
] | null | null | null | ThinkPython/chap9/ex9.py | sokolowskik/Tutorials | d2681d4f18b03e00f90f9132c77f0b23b74d2629 | [
"MIT"
] | null | null | null | ThinkPython/chap9/ex9.py | sokolowskik/Tutorials | d2681d4f18b03e00f90f9132c77f0b23b74d2629 | [
"MIT"
] | null | null | null | def is_reverse(i, j):
"""
Convert 2-digit numbers to strings and check if they are palindromic.
If one of the numbers has less then 2 digits, fill with zeros.
"""
str_i = str(i)
str_j = str(j)
if len(str_i) < 2:
str_i = str_i.zfill(2)
if len(str_j) < 2:
str_j = str_j.zfill(2)
return str_j[::-1] == str_i
age_diff = 15
d_age = 0
while age_diff <= 50:
reversible = 0
for d_age in range(0,80):
m_age = d_age + age_diff
if is_reverse(d_age, m_age):
reversible += 1
if reversible == 6:
print 'The daughter is', d_age, 'years old'
if reversible == 8:
print 'At the 8th time the daughter will be', d_age, 'years old and the mother will be', m_age, 'years old'
break
d_age += 1
age_diff += 1
| 26.90625 | 123 | 0.551684 |
a7c5b09df90f26ab3c6cd5143e35bfba31e9f2b0 | 10,237 | py | Python | Segnet/训练.py | 1044197988/- | 5c3ee7c9431ae85c68f418901378326b91c6f00d | [
"Apache-2.0"
] | 186 | 2019-03-20T08:54:08.000Z | 2022-03-30T04:34:37.000Z | Segnet/训练.py | sunkaiyue0/Semantic-segmentation-of-remote-sensing-images | 5c3ee7c9431ae85c68f418901378326b91c6f00d | [
"Apache-2.0"
] | 6 | 2019-08-29T08:18:13.000Z | 2021-10-09T10:29:58.000Z | Segnet/训练.py | sunkaiyue0/Semantic-segmentation-of-remote-sensing-images | 5c3ee7c9431ae85c68f418901378326b91c6f00d | [
"Apache-2.0"
] | 60 | 2019-10-23T03:50:36.000Z | 2022-03-25T03:16:25.000Z | #coding=utf-8
import matplotlib
matplotlib.use("Agg")
import tensorflow as tf
import argparse
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,UpSampling2D,BatchNormalization,Reshape,Permute,Activation
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import random
import os
from tqdm import tqdm
seed = 7
np.random.seed(seed)
#
img_w = 32
img_h = 32
#
n_label=6
classes=[0.0,17.0,34.0,51.0,68.0,255.0]
labelencoder = LabelEncoder()
labelencoder.fit(classes)
#
EPOCHS = 5
BS = 32
#
divisor=255.0
#
filepath ='C:\\Users\Administrator\Desktop\Project\src\\'
#
#
#
#
#-
def SegNet():
model = Sequential()
#encoder
model.add(Conv2D(64,(3,3),strides=(1,1),input_shape=(img_w,img_h,3),padding='same',activation='relu',data_format='channels_last'))
model.add(BatchNormalization())
model.add(Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
#(128,128)
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
#(64,64)
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#(32,32)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#(16,16)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#(8,8)
#decoder
model.add(UpSampling2D(size=(2,2)))
#(16,16)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
#(32,32)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
#(64,64)
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
#(128,128)
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
#(256,256)
model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(img_w, img_h,3), padding='same', activation='relu',data_format='channels_last'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])
model.summary()
return model
#
#
#
if __name__=='__main__':
    # Entry point: parse CLI arguments and run training.
    # Prediction is currently disabled (see commented call below).
    args = args_parse()
    train(args)
    print("")
    #predict()
| 40.623016 | 143 | 0.602911 |
a7ca81e026978ae19cb5d85995f5f61a2647b878 | 1,465 | py | Python | Python/295. FindMedianFromDataStream.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 263 | 2020-10-05T18:47:29.000Z | 2022-03-31T19:44:46.000Z | Python/295. FindMedianFromDataStream.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 1,264 | 2020-10-05T18:13:05.000Z | 2022-03-31T23:16:35.000Z | Python/295. FindMedianFromDataStream.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 760 | 2020-10-05T18:22:51.000Z | 2022-03-29T06:06:20.000Z | """
Problem:
--------
Design a data structure that supports the following two operations:
- `void addNum(int num)`: Add a integer number from the data stream to the data structure.
- `double findMedian()`: Return the median of all elements so far.
"""
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
| 29.897959 | 90 | 0.595222 |
a7cab894219f6b6cb8d8b7237bfbc980ae9c6023 | 1,089 | py | Python | openproblems/data/human_blood_nestorowa2016.py | bendemeo/SingleCellOpenProblems | e4c009f8c232bdae4c9e20b8e435d0fe474b3daf | [
"MIT"
] | 134 | 2020-08-19T07:35:56.000Z | 2021-05-19T11:37:50.000Z | openproblems/data/human_blood_nestorowa2016.py | bendemeo/SingleCellOpenProblems | e4c009f8c232bdae4c9e20b8e435d0fe474b3daf | [
"MIT"
] | 175 | 2020-08-17T15:26:06.000Z | 2021-05-14T11:03:46.000Z | openproblems/data/human_blood_nestorowa2016.py | LuckyMD/SingleCellOpenProblems | 0ae39db494557e1dd9f28e59dda765527191eee1 | [
"MIT"
] | 46 | 2020-10-08T21:11:37.000Z | 2021-04-25T07:05:28.000Z | from . import utils
import os
import scanpy as sc
import scprep
import tempfile
URL = "https://ndownloader.figshare.com/files/25555751"
| 27.225 | 78 | 0.663912 |
a7cb29cc32a2319fccf961ffb48796199a5ff0d3 | 1,110 | py | Python | jskparser/ast/stmt/ifstmt.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 15 | 2015-12-15T18:33:50.000Z | 2021-09-29T11:48:54.000Z | jskparser/ast/stmt/ifstmt.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 11 | 2015-11-16T22:14:58.000Z | 2021-09-23T05:28:40.000Z | jskparser/ast/stmt/ifstmt.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 8 | 2015-11-16T21:50:08.000Z | 2021-03-23T15:15:34.000Z | #!/usr/bin/env python
from .statement import Statement
from . import _import
| 27.75 | 74 | 0.621622 |
a7cb442e6c3a091d70b52f85f03d36c21282e2fd | 8,501 | py | Python | stdpages/profiling.py | nhartland/dashengine | 7e9f68f0fb1c447fa438eb18b2430cd9095ab17a | [
"MIT"
] | 12 | 2020-02-06T02:55:32.000Z | 2021-11-08T13:50:42.000Z | stdpages/profiling.py | nhartland/dashengine | 7e9f68f0fb1c447fa438eb18b2430cd9095ab17a | [
"MIT"
] | 1 | 2020-01-31T10:20:51.000Z | 2020-01-31T10:20:51.000Z | stdpages/profiling.py | nhartland/dashengine | 7e9f68f0fb1c447fa438eb18b2430cd9095ab17a | [
"MIT"
] | 2 | 2020-06-19T01:35:11.000Z | 2021-06-07T09:01:18.000Z | """ Page for the monitoring of query performance characteristics. """
import json
# Plotly
import plotly.graph_objs as go
# Dash
import dash_table as dt
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# DashEngine
from dashengine.dashapp import dashapp
import dashengine.bigquery as bigquery
# Route for profiling page
ROUTE = "/profile"
# Name used when linking, for example in the navigation bar
LINKNAME = "Profiling"
# Helper functions #################################################
def __fetch_query_from_uuid(uuid: str) -> bigquery.BigQueryResult:
    """ Fetches a cached BigQuery result from its UUID.

    Args:
        uuid (str): The UUID of the query to be retrieved.
    Returns:
        (BigQueryResult): The corresponding BigQuery result object.
    Raises:
        RuntimeError: If no cached query carries the given UUID.
    """
    # Return the first match and stop scanning; the original walked the
    # entire cache even after finding the query. UUIDs are assumed unique,
    # so first-match and last-match are equivalent.
    for query in bigquery.fetch_cached_queries():
        if query.uuid == uuid:
            return query
    raise RuntimeError(f"Cannot find query with UUID {uuid}")
def __index_query(query, key: str) -> float:
    """ Returns a property of the query class, keyed by a string.

    The key must be one of:
    ['Memory', 'Duration', 'Bytes Processed', 'Bytes Billed']
    Args:
        query (BigQueryResult): A BigQuery result class
        key (string): A key of the BigQueryResult object
    Returns:
        (float): The value in `query` corresponding to the key.
    Raises:
        KeyError: If `key` is not one of the four metric names
        (same failure mode as the original dict lookup).
    """
    # Dispatch through zero-argument accessors so only the requested metric
    # is evaluated. The original built a dict of all four values on every
    # call, which invoked query.memory_usage() even when another key was
    # requested.
    accessors = {
        "Memory": query.memory_usage,
        "Duration": lambda: query.duration,
        "Bytes Processed": lambda: query.bytes_processed,
        "Bytes Billed": lambda: query.bytes_billed,
    }
    return accessors[key]()
def __normalising_constants(cached_queries: list):
    """ Computes totals over the full set of cached queries to normalise the summary chart. """
    metric_keys = ("Memory", "Duration", "Bytes Processed", "Bytes Billed")
    # Sum each metric across every cached query.
    totals = {
        key: sum(__index_query(query, key) for query in cached_queries)
        for key in metric_keys
    }
    # Substitute 1 for any zero total so callers can divide safely.
    return {key: (value if value != 0 else 1) for key, value in totals.items()}
# Dash callbacks #################################################
def _query_profile_body(selected_query) -> dcc.Markdown:
    """ Returns the formatted SQL body of the selected query. """
    # Wrap the raw SQL in a markdown fenced code block for rendering.
    fenced_sql = " ``` \n " + selected_query.source.body + " \n ```"
    return dcc.Markdown(fenced_sql)
def _query_profile_parameters(selected_query):
    """ Returns a table of the selected query's parameters, or a heading when there are none. """
    params = selected_query.parameters
    if not params:
        return html.H6("No parameters")
    # One table row per (parameter, value) pair; values are stringified
    # so the DataTable can render arbitrary parameter types.
    rows = []
    for name, value in params.items():
        rows.append({"Parameter": name, "Value": str(value)})
    header = [
        {"name": "Parameter", "id": "Parameter"},
        {"name": "Value", "id": "Value"},
    ]
    return dt.DataTable(
        id="query-profile-parameter-table",
        columns=header,
        data=rows,
        style_table={"margin-bottom": "30px"},
        style_cell={"minWidth": "0px", "maxWidth": "180px", "whiteSpace": "normal"},
    )
def _query_profile_preview(selected_query) -> dt.DataTable:
    """ Returns a table previewing the first rows of the selected query's result. """
    # .head() keeps the preview small regardless of result size.
    preview_frame = selected_query.result.head()
    column_spec = [{"name": col, "id": col} for col in preview_frame.columns]
    return dt.DataTable(
        id="query-profile-preview-table",
        columns=column_spec,
        style_table={"margin-bottom": "30px"},
        data=preview_frame.to_dict("records"),
    )
# Layout #################################################################
def layout() -> list:
    """ Generates the layout for the query profiling page.

    Returns a single "empty cache" heading when no queries are cached,
    otherwise a list of components: page title, a loading-wrapped summary
    chart, a placeholder div for the per-query table, and a loading-wrapped
    details section. The chart/table contents are presumably filled in by
    callbacks registered elsewhere in this module — confirm against the
    "Dash callbacks" section.
    """
    # No queries cached
    if bigquery.fetch_num_cached_queries() == 0:
        return html.H4(
            "No queries in cache", style={"textAlign": "center", "margin-top": "30px"}
        )
    return [
        html.H3(
            "Cached Query Profiler", style={"textAlign": "center", "margin-top": "30px"}
        ),
        dcc.Loading(
            id="query-profile-loading",
            children=[
                # Hidden trigger div (display:none) — likely a callback input.
                html.Div(id="profile-trigger", children=[], style={"display": "none"}),
                dcc.Graph(id="query-profile-summary-chart"),
            ],
            type="graph",
            fullscreen=True,
        ),
        html.Div(id="query-profile-table-div"),
        dcc.Loading(
            id="query-details-loading", children=[html.Div(id="query-profile-details")]
        ),
    ]
| 33.337255 | 95 | 0.606399 |
a7cd3abce5d928c3da35821e7b78b76d44e1ec29 | 2,465 | py | Python | trial_inputs_pb2.py | adeandrade/bayesian-optimizer | 30427943d69130179f7ccb32f63a08a1c57462f8 | [
"Apache-2.0"
] | null | null | null | trial_inputs_pb2.py | adeandrade/bayesian-optimizer | 30427943d69130179f7ccb32f63a08a1c57462f8 | [
"Apache-2.0"
] | null | null | null | trial_inputs_pb2.py | adeandrade/bayesian-optimizer | 30427943d69130179f7ccb32f63a08a1c57462f8 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: trial_inputs.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='trial_inputs.proto',
package='com.wattpad.bayesian_optimizer',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x12trial_inputs.proto\x12\x1e\x63om.wattpad.bayesian_optimizer\".\n\x0bTrialInputs\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x0e\n\x06inputs\x18\x02 \x03(\x01\x62\x06proto3')
)
_TRIALINPUTS = _descriptor.Descriptor(
name='TrialInputs',
full_name='com.wattpad.bayesian_optimizer.TrialInputs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='com.wattpad.bayesian_optimizer.TrialInputs.version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='com.wattpad.bayesian_optimizer.TrialInputs.inputs', index=1,
number=2, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=54,
serialized_end=100,
)
DESCRIPTOR.message_types_by_name['TrialInputs'] = _TRIALINPUTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrialInputs = _reflection.GeneratedProtocolMessageType('TrialInputs', (_message.Message,), dict(
DESCRIPTOR = _TRIALINPUTS,
__module__ = 'trial_inputs_pb2'
# @@protoc_insertion_point(class_scope:com.wattpad.bayesian_optimizer.TrialInputs)
))
_sym_db.RegisterMessage(TrialInputs)
# @@protoc_insertion_point(module_scope)
| 32.012987 | 196 | 0.76146 |
a7cdd2df8475207fb1659d9d8f61d9efb7105a47 | 22,348 | py | Python | neutron_plugin_contrail/plugins/opencontrail/vnc_client/router_res_handler.py | alexelshamouty/tf-neutron-plugin | 3effc5e80f3fa0d8d0252d5f994a36386b987f7f | [
"Apache-2.0"
] | 3 | 2021-09-07T05:02:24.000Z | 2022-02-11T04:25:43.000Z | neutron_plugin_contrail/plugins/opencontrail/vnc_client/router_res_handler.py | alexelshamouty/tf-neutron-plugin | 3effc5e80f3fa0d8d0252d5f994a36386b987f7f | [
"Apache-2.0"
] | 1 | 2021-09-27T08:05:08.000Z | 2021-09-27T08:05:08.000Z | neutron_plugin_contrail/plugins/opencontrail/vnc_client/router_res_handler.py | alexelshamouty/tf-neutron-plugin | 3effc5e80f3fa0d8d0252d5f994a36386b987f7f | [
"Apache-2.0"
] | 5 | 2020-07-14T07:52:05.000Z | 2022-03-24T15:08:02.000Z | # Copyright 2015. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
try:
from neutron_lib import constants
except ImportError:
from neutron.plugins.common import constants
from neutron_plugin_contrail.common.utils import get_tenant_id
from vnc_api import vnc_api
from vnc_api import exceptions as vnc_exc
from neutron_plugin_contrail.plugins.opencontrail.vnc_client.contrail_res_handler import (
ResourceCreateHandler,
ResourceDeleteHandler,
ResourceGetHandler,
ResourceUpdateHandler,
)
| 43.05973 | 113 | 0.593297 |
a7d11260063260bb345e5b5925deed0ee559e5c2 | 725 | py | Python | ffmpeg-3.2.5/tools/zmqshell.py | huyu0415/FFmpeg | 7a3f75791cb3255805bf17126d4074a328f46c8c | [
"Apache-2.0"
] | 3,645 | 2016-08-25T09:31:17.000Z | 2022-03-25T06:28:34.000Z | ffmpeg-3.2.5/tools/zmqshell.py | huyu0415/FFmpeg | 7a3f75791cb3255805bf17126d4074a328f46c8c | [
"Apache-2.0"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | ffmpeg-3.2.5/tools/zmqshell.py | huyu0415/FFmpeg | 7a3f75791cb3255805bf17126d4074a328f46c8c | [
"Apache-2.0"
] | 764 | 2016-08-26T09:19:00.000Z | 2022-03-22T12:07:16.000Z | #!/usr/bin/env python2
import sys, zmq, cmd
try:
bind_address = sys.argv[1] if len(sys.argv) > 1 else "tcp://localhost:5555"
LavfiCmd(bind_address).cmdloop('FFmpeg libavfilter interactive shell')
except KeyboardInterrupt:
pass
| 26.851852 | 79 | 0.627586 |
a7d18ac67c2f08b3162719b80096d5cd8f93412f | 984 | py | Python | minidoc/minidoc.py | ihgazni2/minidoc | b2859069bad5e718692b57d5498389473c66bd2e | [
"MIT"
] | null | null | null | minidoc/minidoc.py | ihgazni2/minidoc | b2859069bad5e718692b57d5498389473c66bd2e | [
"MIT"
] | null | null | null | minidoc/minidoc.py | ihgazni2/minidoc | b2859069bad5e718692b57d5498389473c66bd2e | [
"MIT"
] | null | null | null | from minidoc import svg
from minidoc import tst
from efdir import fs
import shutil
import os
#still_frames
#rownums
#colnums
####
####
| 20.081633 | 55 | 0.602642 |
a7d2785f99402cef40bc5312be1781d2a6eaf683 | 3,843 | py | Python | qinhaifang/src/evalTools/script/convert_label_map_to_geojson.py | SpaceNetChallenge/BuildingFootprintDetectors | 3def3c44b5847c744cd2f3356182892d92496579 | [
"Apache-2.0"
] | 161 | 2017-02-03T05:33:01.000Z | 2022-03-31T02:11:21.000Z | qinhaifang/src/evalTools/script/convert_label_map_to_geojson.py | SpaceNetChallenge/BuildingFootprintDetectors | 3def3c44b5847c744cd2f3356182892d92496579 | [
"Apache-2.0"
] | 5 | 2017-02-03T05:51:38.000Z | 2019-06-18T18:54:00.000Z | qinhaifang/src/evalTools/script/convert_label_map_to_geojson.py | SpaceNetChallenge/BuildingFootprintDetectors | 3def3c44b5847c744cd2f3356182892d92496579 | [
"Apache-2.0"
] | 76 | 2017-03-23T23:15:46.000Z | 2022-02-10T21:58:18.000Z | #!/usr/bin/env python
# encoding=gbk
"""
Convert mask to geojson format
"""
import os
import os.path
import re
import logging
import logging.config
from multiprocessing import Pool
import skimage.io as sk
import numpy as np
import scipy.io as sio
import setting
from spaceNet import geoTools as gT
import spaceNet.image_util as img_util
def process_convert_mask_to_geojson():
    """Convert every label-map file in the configured directory to geojson
    using a pool of worker processes, logging progress every 100 files."""
    # CONVERT_RES == 1 selects predicted label maps, otherwise ground truth.
    if setting.CONVERT_RES == 1:
        source_dir = setting.PREDICT_LABEL_MAP_DIR
    else:
        source_dir = setting.LABEL_MAP_DIR_4X
    mat_files = os.listdir(source_dir)
    pool = Pool(8)
    # imap_unordered yields results as workers finish, in arbitrary order.
    for done_count, convert_res in enumerate(
            pool.imap_unordered(convert_worker, mat_files), start=1):
        if done_count % 100 == 0:
            logging.info('Convert {}'.format(done_count))
        image_id, msg = convert_res
    pool.close()
    pool.join()
def convert_worker(mat_file):
    """Convert one label-map .mat file into a geojson building list.

    Args:
        mat_file (str): File name of the label map inside the configured
            source directory (predicted or ground-truth, per CONVERT_RES).
    Returns:
        tuple: (image_id, 'Done') on success, or (image_id, exception)
        on failure.
    """
    # Bind a fallback id up-front: in the original, an exception raised
    # before image_id was assigned made the except-branch itself crash
    # with NameError, masking the real error.
    image_id = mat_file
    try:
        if setting.CONVERT_RES == 1:
            # Predicted maps: strip the leading prefix token from the name.
            image_id = '_'.join(mat_file.split('.')[0].split('_')[1:])
            print('image_id:{}'.format(image_id))
            mat_path = os.path.join(setting.PREDICT_LABEL_MAP_DIR, mat_file)
            mat = sio.loadmat(mat_path)
            label_map = mat['inst_img']
            building_list = img_util.create_buildinglist_from_label_map(image_id, label_map)
            geojson_file = os.path.join(
                setting.PREDICT_PIXEL_GEO_JSON_DIR, '{}_predict.geojson'.format(image_id))
        else:
            # Ground-truth maps: keep the full base name as the image id.
            image_id = '_'.join(mat_file.split('.')[0].split('_')[:])
            mat_path = os.path.join(setting.LABEL_MAP_DIR_4X, mat_file)
            mat = sio.loadmat(mat_path)
            # Ground truth stores the segmentation inside a nested struct.
            label_map = mat['GTinst']['Segmentation'][0][0]
            building_list = img_util.create_buildinglist_from_label_map(image_id, label_map)
            geojson_file = os.path.join(
                setting.PIXEL_GEO_JSON_DIR_4X, '{}_Pixel.geojson'.format(image_id))
        gT.exporttogeojson(geojson_file, building_list)
        return image_id, 'Done'
    except Exception as e:
        logging.warning('Convert Exception[{}] image_id[{}]'.format(e, image_id))
        return image_id, e
def test_geojson():
    """Visually validate geojson output by rasterising it back onto imagery.

    For every predicted label map, rasterises both the predicted geojson
    and the ground-truth Pixel geojson onto the matching 3band tif, and
    writes the overlay PNGs into setting.TMP_DIR for inspection.
    """
    label_map_file_list = os.listdir(setting.PREDICT_LABEL_MAP_DIR)
    for mat_file in label_map_file_list:
        # Image id = base name minus the leading prefix token.
        image_id = '_'.join(mat_file.split('.')[0].split('_')[1:])
        predict_geojson_file = os.path.join(setting.PREDICT_PIXEL_GEO_JSON_DIR, '{}_predict.geojson'.format(image_id))
        image_name = os.path.join(setting.PIC_3BAND_DIR, '3band_{}.tif'.format(image_id))
        img = sk.imread(image_name, True)
        # Predicted overlay.
        label_map = np.zeros(img.shape, dtype=np.uint8)
        label_map = img_util.create_label_map_from_polygons(gT.importgeojson(predict_geojson_file),
                                                            label_map)
        label_img = img_util.create_label_img(img, label_map)
        save_file = os.path.join(setting.TMP_DIR, '{}_predict.png'.format(image_id))
        sk.imsave(save_file, label_img)
        # Ground-truth overlay, rendered the same way for comparison.
        truth_geojson_file = os.path.join(setting.PIXEL_GEO_JSON_DIR, '{}_Pixel.geojson'.format(image_id))
        print('{}'.format(truth_geojson_file))
        label_map = np.zeros(img.shape, dtype=np.uint8)
        print('label_map shape{}'.format(label_map.shape))
        label_map = img_util.create_label_map_from_polygons(gT.importgeojson(truth_geojson_file), label_map)
        label_img = img_util.create_label_img(img, label_map)
        save_file = os.path.join(setting.TMP_DIR, '{}_Pixel.png'.format(image_id))
        sk.imsave(save_file, label_img)
sk.imsave(save_file, label_img)
if __name__ == '__main__':
    # Batch-convert all label maps; switch to test_geojson() to visually
    # validate the generated geojson instead.
    process_convert_mask_to_geojson()
    #test_geojson()
| 40.03125 | 118 | 0.674993 |
a7d285c6e1ae9ac1ca025fdba430e5dba345f5fd | 412 | py | Python | core/migrations/0008_touristspot_photo.py | isnardsilva/django-attractions-api | feade087d840b72b603d2a4bf538b8c362aa91bd | [
"MIT"
] | 1 | 2021-12-31T12:59:49.000Z | 2021-12-31T12:59:49.000Z | core/migrations/0008_touristspot_photo.py | isnardsilva/django-attractions-api | feade087d840b72b603d2a4bf538b8c362aa91bd | [
"MIT"
] | null | null | null | core/migrations/0008_touristspot_photo.py | isnardsilva/django-attractions-api | feade087d840b72b603d2a4bf538b8c362aa91bd | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-19 03:55
from django.db import migrations, models
| 21.684211 | 77 | 0.604369 |
a7d331c87b462d04d58d991edd7603c1f1659a3c | 712 | py | Python | tests/integration/test_user_invite.py | donovan-PNW/dwellinglybackend | 448df61f6ea81f00dde7dab751f8b2106f0eb7b1 | [
"MIT"
] | 15 | 2020-07-09T20:51:09.000Z | 2021-11-28T21:59:02.000Z | tests/integration/test_user_invite.py | donovan-PNW/dwellinglybackend | 448df61f6ea81f00dde7dab751f8b2106f0eb7b1 | [
"MIT"
] | 148 | 2020-03-28T22:10:30.000Z | 2021-12-19T09:22:59.000Z | tests/integration/test_user_invite.py | donovan-PNW/dwellinglybackend | 448df61f6ea81f00dde7dab751f8b2106f0eb7b1 | [
"MIT"
] | 30 | 2020-03-12T02:31:27.000Z | 2021-07-29T02:40:36.000Z | import pytest
from models.user import RoleEnum
from unittest.mock import patch
from resources.email import Email
| 30.956522 | 84 | 0.695225 |
a7d350ee5dfc4cbea31fc30d8d13f43745b214cc | 778 | py | Python | setup.py | Louis-Navarro/decorators | 6339a32aae66608cbf7fd8a1221b47ae88992d53 | [
"MIT"
] | null | null | null | setup.py | Louis-Navarro/decorators | 6339a32aae66608cbf7fd8a1221b47ae88992d53 | [
"MIT"
] | null | null | null | setup.py | Louis-Navarro/decorators | 6339a32aae66608cbf7fd8a1221b47ae88992d53 | [
"MIT"
] | null | null | null | import setuptools
from decorators.__init__ import __version__ as v
with open('README.md') as fp:
long_description = fp.read()
setuptools.setup(
name='decorators-LOUIS-NAVARRO',
version=v,
author='Louis Navarro',
description='Function decorators I made',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Louis-Navarro/decorators",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Environment :: Plugins",
"Intended Audience :: Developers",
"Natural Language:: English",
],
python_requires='>=3.6',
)
| 29.923077 | 54 | 0.669666 |
a7d4fbe6bb2f93758e5bbd91a20e5e655a876720 | 7,386 | py | Python | test_undirect_graf.py | rodrigondec/Grafos | dd3bb7ffd56909395cc211c6d68f9e3eaa5fa9ba | [
"Unlicense"
] | null | null | null | test_undirect_graf.py | rodrigondec/Grafos | dd3bb7ffd56909395cc211c6d68f9e3eaa5fa9ba | [
"Unlicense"
] | null | null | null | test_undirect_graf.py | rodrigondec/Grafos | dd3bb7ffd56909395cc211c6d68f9e3eaa5fa9ba | [
"Unlicense"
] | null | null | null | from grafo import Grafo, DiGrafo
from no import No
from aresta import Aresta
import unittest
if __name__ == '__main__':
unittest.main() | 29.544 | 58 | 0.716084 |
a7d69ea8dc41116a3648dee50adba54f390698c0 | 2,476 | py | Python | fancy/config/option.py | susautw/fancy-config | 25a3bd51a40df071d00327640caa05b6288bd970 | [
"MIT"
] | 1 | 2022-03-17T04:47:52.000Z | 2022-03-17T04:47:52.000Z | fancy/config/option.py | susautw/fancy-config | 25a3bd51a40df071d00327640caa05b6288bd970 | [
"MIT"
] | 9 | 2021-12-01T08:01:52.000Z | 2022-03-16T13:05:07.000Z | fancy/config/option.py | susautw/fancy-config | 25a3bd51a40df071d00327640caa05b6288bd970 | [
"MIT"
] | 1 | 2022-03-07T09:29:06.000Z | 2022-03-07T09:29:06.000Z | import warnings
from typing import Any, Callable, TYPE_CHECKING
from . import ConfigStructure
from .process import auto_process_typ
from ..config import identical
if TYPE_CHECKING:
from ..config import BaseConfig
| 26.913043 | 96 | 0.631664 |
a7d7e86babb70ea3a37edccf05c803e8419f09d7 | 7,128 | py | Python | data_loader_manual.py | Chen-Yifan/DEM_building_segmentation | 1e9a41e87ec0ab1777a65146c5b31d88938480b7 | [
"MIT"
] | null | null | null | data_loader_manual.py | Chen-Yifan/DEM_building_segmentation | 1e9a41e87ec0ab1777a65146c5b31d88938480b7 | [
"MIT"
] | null | null | null | data_loader_manual.py | Chen-Yifan/DEM_building_segmentation | 1e9a41e87ec0ab1777a65146c5b31d88938480b7 | [
"MIT"
] | null | null | null | from PIL import Image
import numpy as np
import os
import re
import scipy.misc
import random
import sys
import csv
def load_feature_data(dataroot, frame_dir, mask_dir, feature_type='erosion', dim=128):
    '''load frames and masks into two numpy array respectively
    -----
    condition: with feature
    arguments:
        frame_dir, mask_dir,
        feature_type: str, either erosion or building
        dim: width and height of the image
    process: always resize to 128x128 as model input
            normalize on local image maxx and minn
    -----
    '''
    # Target range for per-image min/max normalisation of non-zero pixels.
    low=0.1
    hi=1.0
    # Test split: tiles whose MS labels contain only class 2 and that have
    # a matching manually-labelled mask file.
    test_frames = []
    test_masks = []
    test_masks_ext = []
    test_masks_MS = []
    # Train/val pool.
    frames = []
    masks = []
    name_list = []
    frame_names = os.listdir(frame_dir)
    # Natural sort: split names into digit/non-digit runs so e.g. tile_2
    # orders before tile_10.
    frame_names.sort(key=lambda var:[int(x) if x.isdigit() else x
                        for x in re.findall(r'[^0-9]|[0-9]+', var)]) # sort frame_names
    print("** load image from directory loop starts:")
    for i in range(len(frame_names)):
        frame_file = frame_names[i]
        # if len(frames)>1000:
        #     break
        """find mapped frame and mask path"""
        frame_path = os.path.join(frame_dir, frame_file)
        """load image from tif and remove useless data"""
        if feature_type=='erosion':
            # Erosion data ships as .npy stacks; the mask shares the frame's
            # file name and the frame is the stack's last channel.
            mask_path = os.path.join(mask_dir, frame_file)
            x = np.load(frame_path)
            # frame_array = np.concatenate((x[:,:,0:2], np.expand_dims(x[:,:,-1], axis=2)),axis=-1)
            frame_array = x[:,:,-1]
            label_array = np.load(mask_path)
        else: # building
            # Building data ships as GeoTIFFs; the mask name drops the
            # 'mclean_fillnodata_' prefix of the frame name.
            mask_file = frame_file.replace('mclean_fillnodata_','')
            mask_path = os.path.join(mask_dir, mask_file)
            #### for 128_0ver
            # mask_path = os.path.join(mask_dir, frame_file.replace('DEM','label'))
            if(frame_file[-3:]=='tif'):
                if not os.path.exists(mask_path):
                    print('rm mask_path', mask_path)
                    # os.remove(frame_path)
                    continue
                frame_array = np.array(Image.open(frame_path))
                label_array = np.array(Image.open(mask_path))
            else:
                # Non-tif files are skipped (deletion code left disabled).
                # os.remove(frame_path)
                # if os.path.exists(mask_path):
                #     os.remove(mask_path)
                # print('remove1',frame_file)
                continue
        # check the dimension, if dimension wrong, remove
        # NOTE(review): this deletes the mask file on disk as a side effect
        # while leaving the frame in place — confirm that is intentional.
        dims = frame_array.shape
        if dims[0]!=dim or dims[1]!=dim or (len(np.unique(frame_array))<3): # remove the file if the frame has less than 3 unique data
            os.remove(mask_path)
            # os.remove(frame_path)
            print('remove2',frame_file)
            continue
        # both erosion and builiding, we check if feature is present
        if not is_feature_present(label_array):
            continue
        """Resize to dim"""
        # Bilinear for the frame, nearest-neighbour for the mask so label
        # values are never interpolated.
        if frame_array.shape[0]!=dim:
            frame_array = np.array(Image.fromarray(frame_array).resize((dim,dim), Image.BILINEAR))
            label_array = np.array(Image.fromarray(label_array).resize((dim,dim), Image.NEAREST))
        """Try preprocess : Normalization"""
        try:
            # Rescale the positive pixels to [low, hi]; zero (nodata) stays 0.
            minn, maxx = np.min(frame_array[frame_array > 0]), np.max(frame_array[frame_array > 0])
            frame_array[frame_array > 0] = low + (frame_array[frame_array > 0] - minn) * (hi - low) / (maxx - minn)
        except:
            # No positive pixels (np.min on an empty selection raises) -> skip.
            continue
        # check label 0 1 2
        unique_labels = np.unique(label_array)
        # Collapse class 2 into class 1 for the MS labels.
        label_array = np.where(label_array==2, 1, label_array)
        if 2 in unique_labels and 1 not in unique_labels:
            # load the manual labels
            # NOTE(review): mask_file is only bound in the 'building' branch
            # above; reaching this path with feature_type='erosion' would
            # raise NameError — confirm this branch is building-only.
            manual_mask_path = os.path.join(dataroot, "label_manual_test/", mask_file)
            if not os.path.exists(manual_mask_path):
                continue
            test_frames.append(frame_array)
            # add the MS labels
            test_masks_MS.append(label_array)
            label_array = np.array(Image.open(manual_mask_path))
            test_masks_ext.append(label_array)
            label_array = np.where(label_array==2, 0, label_array) # only care the label 1
            test_masks.append(label_array)
        else:
            frames.append(frame_array)
            masks.append(label_array)
            # NOTE(review): name_list only records train/val frames; test
            # frame names are not tracked.
            name_list.append(frame_names[i])
    """Form array and name_list"""
    frames, masks, test_frames, test_masks, test_masks_ext, test_masks_MS = np.array(frames), np.array(masks), np.array(test_frames), np.array(test_masks), \
                    np.array(test_masks_ext), np.array(test_masks_MS)
    print("meta data: training feature/bkground ratio",np.sum(masks), np.sum(1-masks))
    """Extend to 4 dimensions for training """
    # Add a trailing channel axis when the images are single-channel.
    if(frames.ndim != 4):
        frames = np.expand_dims(frames, -1)
        test_frames = np.expand_dims(test_frames, -1)
        masks = np.expand_dims(masks, -1)
        test_masks = np.expand_dims(test_masks, -1)
        test_masks_ext = np.expand_dims(test_masks_ext, -1)
        test_masks_MS = np.expand_dims(test_masks_MS, -1)
    assert(test_masks.shape == test_masks_ext.shape)
    assert(test_masks.shape == test_masks_MS.shape)
    print("test_masks.shape = ", test_masks.shape)
    # split frames/masks to train:val = 5:1
    a = int(len(frames)*5/6)
    train_frames, train_masks = frames[:a], masks[:a]
    val_frames, val_masks = frames[a:], masks[a:]
    return train_frames, val_frames, test_frames, train_masks, val_masks, test_masks, test_masks_ext, test_masks_MS, name_list
def load_data(opt):
    """
    Load data to a dictionary containing train, val, test
    (plus the raw MS and extended test masks), all cast to float32.
    Return: Data_dict
    """
    (train_frames, val_frames, test_frames,
     train_masks, val_masks, test_masks,
     test_masks_ext, test_masks_MS, name_list) = load_feature_data(
        opt.dataroot, opt.frame_path, opt.mask_path, opt.dataset, opt.dim)
    n_train = len(train_frames)
    n_test = len(test_frames)
    n_val = len(val_frames)
    print('***** #train: #test: #val = %d : %d :%d ******'%(n_train, n_test, n_val))
    # Frames/masks pairs; the MS and ext entries carry masks only.
    def _f32(arr):
        return arr.astype('float32')
    return {
        'train': [_f32(train_frames), _f32(train_masks)],
        'val': [_f32(val_frames), _f32(val_masks)],
        'test': [_f32(test_frames), _f32(test_masks)],
        'test_MS': [None, _f32(test_masks_MS)],
        'test_ext': [None, _f32(test_masks_ext)],
    }
| 40.271186 | 157 | 0.571409 |
a7d7fa784412398969563696b9937bee43c267bd | 5,010 | py | Python | autotest/osr/osr_micoordsys.py | robe2/gdal | 78573efe69f1506c112209501068c0b043438295 | [
"MIT"
] | null | null | null | autotest/osr/osr_micoordsys.py | robe2/gdal | 78573efe69f1506c112209501068c0b043438295 | [
"MIT"
] | null | null | null | autotest/osr/osr_micoordsys.py | robe2/gdal | 78573efe69f1506c112209501068c0b043438295 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test some MITAB specific translation issues.
# Author: Even Rouault, <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2010, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
from osgeo import osr
###############################################################################
# Test the osr.SpatialReference.ImportFromMICoordSys() function.
#
###############################################################################
# Test the osr.SpatialReference.ExportToMICoordSys() function.
#
###############################################################################
# Test EPSG:3857
#
gdaltest_list = [
osr_micoordsys_1,
osr_micoordsys_2,
osr_micoordsys_3 ]
if __name__ == '__main__':
gdaltest.setup_run( 'osr_micoordsys' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 36.838235 | 138 | 0.644311 |
a7d8ff63a50736d94af9bb52ce5984969f4467d7 | 2,194 | py | Python | maintain.py | keioni/ink_01 | 5c87dcf16924dfa7bf9d0e3a7250ec1542045e72 | [
"MIT"
] | null | null | null | maintain.py | keioni/ink_01 | 5c87dcf16924dfa7bf9d0e3a7250ec1542045e72 | [
"MIT"
] | null | null | null | maintain.py | keioni/ink_01 | 5c87dcf16924dfa7bf9d0e3a7250ec1542045e72 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
from ink.maintainer import make_pickle, DatabaseMaintainer
from ink.sys.config import CONF
from ink.sys.database.connector.mysql import MySQLConnector
from ink.sys.database.connector.null import NullConnector
CONF.load()
cmd = sys.argv[1]
args = sys.argv[1:]
if cmd == 'debug':
cmd = 'dbm'
args = [cmd, 'c']
if cmd == 'mp':
cmd_mp()
elif cmd == 'dbm':
cmd_dbm()
elif cmd == 't_dbm':
cmd_t_dbm()
elif cmd == 'dbrs':
cmd_dbrs()
elif cmd == 'cc':
cmd_cc()
else:
print('Bad command: {}'.format(cmd))
| 23.094737 | 70 | 0.635369 |
a7da1b3b72d6032aefdb12dd63739b250c1938d8 | 5,261 | py | Python | serpentmonkee/UtilsMonkee.py | anthromorphic-ai/serpentmonkee | 505bbe03fd8a77bc7737e46b8e27e0d91b87835b | [
"MIT"
] | null | null | null | serpentmonkee/UtilsMonkee.py | anthromorphic-ai/serpentmonkee | 505bbe03fd8a77bc7737e46b8e27e0d91b87835b | [
"MIT"
] | null | null | null | serpentmonkee/UtilsMonkee.py | anthromorphic-ai/serpentmonkee | 505bbe03fd8a77bc7737e46b8e27e0d91b87835b | [
"MIT"
] | null | null | null |
import requests
from dateutil import parser
import json
from datetime import datetime, timezone
import time
import sys
import random
import uuid
import copy
# --------------------------------------------------------------------
def call_cloud_function(url, headers, data):
    """
    POSTs `data` with `headers` to the cloud function at `url`.
    Returns the JSON response decoded through RoundTripDecoder, or None
    when the request or the decoding fails (the error is printed, not raised).
    """
    try:
        response = requests.post(url=url, data=data, headers=headers)
        return json.loads(response.text, cls=RoundTripDecoder)
    except Exception as e:
        print("ERROR in call_cloud_function: {}".format(str(e)))
        return None
def get_size(json_obj):
"""
returns the size of the JSON object in bytes
"""
dumps = json.dumps(json_obj, cls=RoundTripEncoder)
size_bytes = sys.getsizeof(dumps)
return size_bytes
def dateDiff(unit, ts1, ts2):
"""
returns the time delta between ts1 and ts2 in the provided unit.
Unit in: ['second','minute','hour','day']
"""
elapsedTime = ts2 - ts1
totalSeconds = elapsedTime.total_seconds()
if unit in ["s", "sec", "second"]:
return totalSeconds
elif unit in ["mn", "min", "minute"]:
return totalSeconds / 60
elif unit in ["hr", "hour"]:
return totalSeconds / 60 / 60
elif unit in ["d", "day"]:
return totalSeconds / 60 / 60 / 24
def makeAscendingUid():
"""
Creates a uid such that new uids are always alphabetically in front of older ones.
For typical use in creating FB doc UIDs such that new docs will show up at the top of the collection
"""
docUid = str(1625607464 * 3 - int(time.time()))
uuid = get_uuid()
return f'{docUid}_{uuid}'
| 27.984043 | 104 | 0.553317 |
a7dac603aaf8f14d5ec261bf9ee335b205d9767b | 201 | py | Python | backend/app/bucket.py | thanet-s/subme-selected-topics-project | fac1630839c580bbd66b93f2dc9004c8637a7b15 | [
"MIT"
] | null | null | null | backend/app/bucket.py | thanet-s/subme-selected-topics-project | fac1630839c580bbd66b93f2dc9004c8637a7b15 | [
"MIT"
] | null | null | null | backend/app/bucket.py | thanet-s/subme-selected-topics-project | fac1630839c580bbd66b93f2dc9004c8637a7b15 | [
"MIT"
] | null | null | null | from minio import Minio
import os
minio_client = Minio(
os.environ['MINIO_HOST'],
access_key=os.environ['MINIO_ROOT_USER'],
secret_key=os.environ['MINIO_ROOT_PASSWORD'],
secure=False
) | 22.333333 | 49 | 0.731343 |
a7db0d9490bcb10d7f38e66a3fcc8a697cd073d0 | 2,642 | py | Python | publications/PrADA/experiments/income_census/train_config.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 49 | 2020-11-04T03:15:59.000Z | 2022-03-23T12:21:15.000Z | publications/PrADA/experiments/income_census/train_config.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 2 | 2021-09-12T02:36:42.000Z | 2021-11-25T13:19:58.000Z | publications/PrADA/experiments/income_census/train_config.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 11 | 2020-11-11T12:14:49.000Z | 2022-03-08T16:17:05.000Z | from data_process.census_process.census_data_creation_config import census_data_creation
fg_feature_extractor_architecture_list = [[28, 56, 28, 14],
[25, 50, 25, 12],
[56, 86, 56, 18],
[27, 54, 27, 13]]
intr_fg_feature_extractor_for_architecture_list = [[53, 78, 53, 15],
[84, 120, 84, 20],
[55, 81, 55, 15],
[81, 120, 81, 20],
[52, 78, 52, 15],
[83, 120, 83, 20]]
no_fg_feature_extractor_architecture = [136, 150, 60, 20]
pre_train_hyperparameters = {
"using_interaction": False,
"momentum": 0.99,
"weight_decay": 0.00001,
"lr": 5e-4,
"batch_size": 128,
"max_epochs": 600,
"epoch_patience": 2,
"valid_metric": ('ks', 'auc')
}
fine_tune_hyperparameters = {
"using_interaction": False,
"load_global_classifier": False,
"momentum": 0.99,
"weight_decay": 0.0,
"lr": 8e-4,
"batch_size": 128,
"valid_metric": ('ks', 'auc')
}
no_adaptation_hyperparameters = {
"apply_feature_group": False,
"train_data_tag": 'all', # can be either 'all' or 'tgt'
"momentum": 0.99,
"weight_decay": 0.00001,
"lr": 5e-4,
"batch_size": 128,
"max_epochs": 600,
"epoch_patience": 2,
"valid_metric": ('ks', 'auc')
}
data_dir = census_data_creation['processed_data_dir']
data_tag = 'all4000pos004'
data_hyperparameters = {
"source_ad_train_file_name": data_dir + f'undergrad_census9495_ad_{data_tag}_train.csv',
"source_ad_valid_file_name": data_dir + f'undergrad_census9495_ad_{data_tag}_valid.csv',
"src_tgt_train_file_name": data_dir + f'degree_src_tgt_census9495_{data_tag}_train.csv',
"target_ad_train_file_name": data_dir + f'grad_census9495_ad_{data_tag}_train.csv',
"target_ft_train_file_name": data_dir + f'grad_census9495_ft_{data_tag}_train.csv',
"target_ft_valid_file_name": data_dir + f'grad_census9495_ft_{data_tag}_valid.csv',
"target_ft_test_file_name": data_dir + f'grad_census9495_ft_{data_tag}_test.csv',
"census_fg_pretrained_model_dir": "census_fg_pretrained_model",
"census_fg_ft_target_model_dir": "census_fg_ft_target_model",
"census_no-fg_pretrained_model_dir": "census_no-fg_pretrained_model",
"census_no-fg_ft_target_model_dir": "census_no-fg_ft_target_model",
"census_no-ad_model_dir": "census_no-ad_model"
}
| 38.289855 | 92 | 0.608251 |
a7dbae6b6e0c89662cba5d9864585c9b7e89ef3a | 444 | py | Python | tools/create_transmit_grouped_command_cron.py | Vayel/GUCEM-BVC | e5645dec332756d3c9db083abf2c8f3625a10d4d | [
"WTFPL"
] | 2 | 2016-09-23T18:02:40.000Z | 2017-04-28T18:35:59.000Z | tools/create_transmit_grouped_command_cron.py | Vayel/GUCEM-BVC | e5645dec332756d3c9db083abf2c8f3625a10d4d | [
"WTFPL"
] | 82 | 2016-09-26T14:38:31.000Z | 2018-02-12T18:47:12.000Z | tools/create_transmit_grouped_command_cron.py | Vayel/GUCEM-BVC | e5645dec332756d3c9db083abf2c8f3625a10d4d | [
"WTFPL"
] | null | null | null | import os
from cron_helper import create
JOB_COMMENT = 'BVC transmit grouped command reminder'
HERE = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
create(create_job, JOB_COMMENT)
| 21.142857 | 82 | 0.684685 |
a7ddce06c356fe1ffbf7a25faa291f2561e6dd85 | 2,157 | py | Python | network/plot_along_subunits.py | AspirinCode/MD-analysis-tools-scripts | dfc0d282c9a844f5b8b1935a3ae74b1aff577ff9 | [
"MIT"
] | 5 | 2020-01-29T01:01:54.000Z | 2022-02-11T09:19:20.000Z | network/plot_along_subunits.py | AspirinCode/MD-analysis-tools-scripts | dfc0d282c9a844f5b8b1935a3ae74b1aff577ff9 | [
"MIT"
] | null | null | null | network/plot_along_subunits.py | AspirinCode/MD-analysis-tools-scripts | dfc0d282c9a844f5b8b1935a3ae74b1aff577ff9 | [
"MIT"
] | null | null | null | import pickle
import numpy as np
import matplotlib.pyplot as plt
i = 1
fig = plt.figure(figsize=(30,30))
for pickle_file in pickle_list:
c_B = pickle.load(open(pickle_file, "rb"))
#plot_c_B(c_B, f"bet_centrality_with_{pickle_file[19:-4]}.png")
ax = fig.add_subplot(5,1,i)
#label = figname[-6:-4].upper()
cb, = ax.plot(c_B[:,0], c_B[:,1], label=f"frame-{i}") # Label: 3-Angs
#break
ss_range = {
"ANK-H1": (4, 13),
"ANK-H2": (17, 27),
"ANK-H3": (46, 53),
"ANK-H4": (61, 71),
"ANK-H5": (81, 86),
"ANK-H6": (103, 111),
"ANK-H7": (139, 146),
"ANK-H8": (151, 157),
"ANK-H9": (175, 181),
"ANK-H10": (187, 208),
"CP1": (223, 231),
"CP2": (235, 242),
"Beta-1": (255, 262),
"Beta-2": (264, 271),
"PreS1-H1": (282, 290),
"PreS1-H2": (296, 299),
"S1": (302, 336),
"S2": (357, 384),
"S3": (397, 418),
"S4": (424, 443),
"S5": (446, 485),
"S6": (545, 575),
"Turret": (486, 519),
"Pore-H": (520, 530),
"TRP-H": (579, 597),
"Beta-3": (613, 636)
}
helix = ["S1", "S2", "S3", "S4", "S5", "S6"]
auxillary_helix = ["PreS1-H1", "PreS1-H2", "Pore-H", "TRP-H"]
ank_repeat = ["ANK-H1", "ANK-H2", "ANK-H3", "ANK-H4", "ANK-H5", "ANK-H6", "ANK-H7",
"ANK-H8", "ANK-H9", "ANK-H10"]
beta_sheet = ["Beta-1", "Beta-2", "Beta-3"]
for hel in helix:
ax.axvspan(ss_range[hel][0], ss_range[hel][1], alpha=0.4, color='#8dd3c7')
for hel in auxillary_helix:
ax.axvspan(ss_range[hel][0], ss_range[hel][1], alpha=0.4, color='#ffffb3')
for repeat in ank_repeat:
ax.axvspan(ss_range[repeat][0], ss_range[repeat][1], alpha=0.4, color='#bebada')
for beta in beta_sheet:
ax.axvspan(ss_range[beta][0], ss_range[beta][1], alpha=0.4, color='#fb8072')
ax.set_xlim(0, 654)
ax.set_xlabel("Residue id of TRPV2", fontsize=20)
ax.set_ylabel("Betweenness centrality", fontsize=20)
plt.legend(fontsize="xx-large", handles=[cb], loc="upper right")
#break
i += 1
| 31.26087 | 88 | 0.51414 |
38eb4d628bf96b1cec0ba5a9060d8732e87f164b | 276 | py | Python | runme.py | AndreWohnsland/Cocktailmaker_AW | 30efdcb85d7fb58ac2980c873c611d7b9c2b37b1 | [
"MIT"
] | 37 | 2019-07-06T11:54:08.000Z | 2022-01-21T12:26:16.000Z | runme.py | AndreWohnsland/Cocktailmaker_AW | 30efdcb85d7fb58ac2980c873c611d7b9c2b37b1 | [
"MIT"
] | 5 | 2019-12-09T07:44:08.000Z | 2022-02-01T12:00:24.000Z | runme.py | AndreWohnsland/Cocktailmaker_AW | 30efdcb85d7fb58ac2980c873c611d7b9c2b37b1 | [
"MIT"
] | 4 | 2019-07-06T12:45:01.000Z | 2021-12-29T17:09:44.000Z | import sys
from PyQt5.QtWidgets import QApplication
import src_ui.setup_mainwindow as setupui
if __name__ == "__main__":
app = QApplication(sys.argv)
w = setupui.MainScreen()
w.showFullScreen()
w.setFixedSize(800, 480)
sys.exit(app.exec_())
| 21.230769 | 42 | 0.684783 |
38ec2565ef6e55d70bfb76a776cd7b6192708820 | 8,271 | py | Python | src/ellalgo/ell_stable.py | luk036/ellalgo | 8e83587b271f35c906c0d0aa4175dac153e5e29b | [
"MIT"
] | null | null | null | src/ellalgo/ell_stable.py | luk036/ellalgo | 8e83587b271f35c906c0d0aa4175dac153e5e29b | [
"MIT"
] | null | null | null | src/ellalgo/ell_stable.py | luk036/ellalgo | 8e83587b271f35c906c0d0aa4175dac153e5e29b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import math
from typing import Tuple, Union
import numpy as np
from .cutting_plane import CUTStatus
Arr = Union[np.ndarray]
def _calc_ll(self, beta) -> CUTStatus:
"""parallel or deep cut
Arguments:
beta ([type]): [description]
Returns:
int: [description]
"""
if np.isscalar(beta):
return self._calc_dc(beta)
if len(beta) < 2: # unlikely
return self._calc_dc(beta[0])
return self._calc_ll_core(beta[0], beta[1])
def _calc_ll_core(self, b0: float, b1: float) -> CUTStatus:
"""Calculate new ellipsoid under Parallel Cut
g' (x xc) + 0 0
g' (x xc) + 1 0
Arguments:
b0 (float): [description]
b1 (float): [description]
Returns:
int: [description]
"""
b1sqn = b1 * (b1 / self._tsq)
t1n = 1 - b1sqn
if t1n < 0 or not self.use_parallel_cut:
return self._calc_dc(b0)
bdiff = b1 - b0
if bdiff < 0:
return CUTStatus.nosoln # no sol'n
if b0 == 0:
self._calc_ll_cc(b1, b1sqn)
return CUTStatus.success
b0b1n = b0 * (b1 / self._tsq)
if self._n * b0b1n < -1: # unlikely
return CUTStatus.noeffect # no effect
# parallel cut
t0n = 1.0 - b0 * (b0 / self._tsq)
# t1 = self._tsq - b1sq
bsum = b0 + b1
bsumn = bsum / self._tsq
bav = bsum / 2.0
tempn = self._halfN * bsumn * bdiff
xi = math.sqrt(t0n * t1n + tempn * tempn)
self._sigma = self._c3 + (1.0 - b0b1n - xi) / (bsumn * bav * self._nPlus1)
self._rho = self._sigma * bav
self._delta = self._c1 * ((t0n + t1n) / 2 + xi / self._n)
return CUTStatus.success
def _calc_ll_cc(self, b1: float, b1sqn: float):
"""Calculate new ellipsoid under Parallel Cut, one of them is central
g' (x xc) 0
g' (x xc) + 1 0
Arguments:
b1 (float): [description]
b1sq (float): [description]
"""
n = self._n
xi = math.sqrt(1 - b1sqn + (self._halfN * b1sqn) ** 2)
self._sigma = self._c3 + self._c2 * (1.0 - xi) / b1sqn
self._rho = self._sigma * b1 / 2.0
self._delta = self._c1 * (1.0 - b1sqn / 2.0 + xi / n)
def _calc_dc(self, beta: float) -> CUTStatus:
"""Calculate new ellipsoid under Deep Cut
g' (x xc) + 0
Arguments:
beta (float): [description]
Returns:
int: [description]
"""
try:
tau = math.sqrt(self._tsq)
except ValueError:
print("Warning: tsq is negative: {}".format(self._tsq))
self._tsq = 0.0
tau = 0.0
bdiff = tau - beta
if bdiff < 0.0:
return CUTStatus.nosoln # no sol'n
if beta == 0.0:
self._calc_cc(tau)
return CUTStatus.success
n = self._n
gamma = tau + n * beta
if gamma < 0.0:
return CUTStatus.noeffect # no effect, unlikely
self._mu = (bdiff / gamma) * self._halfNminus1
self._rho = gamma / self._nPlus1
self._sigma = 2.0 * self._rho / (tau + beta)
self._delta = self._c1 * (1.0 - beta * (beta / self._tsq))
return CUTStatus.success
def _calc_cc(self, tau: float):
"""Calculate new ellipsoid under Central Cut
Arguments:
tau (float): [description]
"""
self._mu = self._halfNminus1
self._sigma = self._c2
self._rho = tau / self._nPlus1
self._delta = self._c1
| 28.42268 | 82 | 0.489784 |
38ec28f788cf955ae54138334251cf48eba69a0b | 855 | py | Python | app/auth/forms.py | PhysicsUofRAUI/lifeLongLearning | 36e098d4319d3500509861454fa3e27a67416802 | [
"MIT"
] | null | null | null | app/auth/forms.py | PhysicsUofRAUI/lifeLongLearning | 36e098d4319d3500509861454fa3e27a67416802 | [
"MIT"
] | 38 | 2020-06-09T00:07:09.000Z | 2021-02-06T17:18:20.000Z | app/auth/forms.py | PhysicsUofRAUI/lifeLongLearning | 36e098d4319d3500509861454fa3e27a67416802 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField, SubmitField
from wtforms.validators import DataRequired
#
# Purpose: This from will be used to collect the information for the user logging
# and logging out.
#
# Fields:
# Password: The password to validate the user
# Username: This contains the name that a user has chosen to represent them
# Submit: This is the field that the user uses to signal that everything has been
# filled out.
#
# Returns:
# All the material that the user filled out (bassically all the fields but filled
# out).
#
| 32.884615 | 85 | 0.71462 |
38ec8de985f6a5e8fed887f162aa2455ff186416 | 1,365 | py | Python | netmiko/f5/f5_tmsh_ssh.py | josephwhite13/netmiko | c08c5ebb3484383f034e22b9576f88be07525f72 | [
"MIT"
] | 1 | 2021-04-13T19:18:57.000Z | 2021-04-13T19:18:57.000Z | netmiko/f5/f5_tmsh_ssh.py | josephwhite13/netmiko | c08c5ebb3484383f034e22b9576f88be07525f72 | [
"MIT"
] | null | null | null | netmiko/f5/f5_tmsh_ssh.py | josephwhite13/netmiko | c08c5ebb3484383f034e22b9576f88be07525f72 | [
"MIT"
] | null | null | null | import time
from netmiko.base_connection import BaseConnection
| 33.292683 | 78 | 0.641026 |
38eda790aa9bd3615e42c068cced417ca94aa56a | 2,099 | py | Python | tools/database_tool.py | noahzhy/qumaishou | f776e5c750b350ca3b741fccf3e5dfd199c1296b | [
"Apache-2.0"
] | null | null | null | tools/database_tool.py | noahzhy/qumaishou | f776e5c750b350ca3b741fccf3e5dfd199c1296b | [
"Apache-2.0"
] | null | null | null | tools/database_tool.py | noahzhy/qumaishou | f776e5c750b350ca3b741fccf3e5dfd199c1296b | [
"Apache-2.0"
] | null | null | null | import os
import pandas as pd
import sys
import glob
#
sys.path.append("./")
db_dir_path = 'database'
def intersection_db_brand():
''''''
d1 = pd.read_csv(os.path.join(db_dir_path, 'db_brand_eng.csv'))
d2 = pd.read_csv(os.path.join(db_dir_path, 'db_brand_chn.csv'))
df = pd.merge(d1, d2, how='left', on='brand_name')
df = remove_repetition(df, 'brand_name')
df = df.loc[:, ['dispShopNo_x', 'brand_name', 'brand_url_x']]
db_save('db_brand_final', df)
print('df_merged:', df.shape[0])
return df
if __name__ == "__main__":
main() | 23.852273 | 92 | 0.666508 |
38ef272433c8c121f27894e2882710bf38e90294 | 1,331 | py | Python | flika/tests/test_settings.py | flika-org/flika | 68b87e8f75f77f4b59344e418c7783b24184adaa | [
"MIT"
] | 19 | 2016-08-11T21:17:17.000Z | 2021-04-30T19:21:03.000Z | flika/tests/test_settings.py | flika-org/flika | 68b87e8f75f77f4b59344e418c7783b24184adaa | [
"MIT"
] | 28 | 2017-03-15T18:40:33.000Z | 2021-06-01T20:35:50.000Z | flika/tests/test_settings.py | flika-org/flika | 68b87e8f75f77f4b59344e418c7783b24184adaa | [
"MIT"
] | 2 | 2019-03-08T18:51:12.000Z | 2019-05-05T16:31:15.000Z | from .. import global_vars as g
from ..window import Window
import numpy as np
from ..roi import makeROI
| 34.128205 | 139 | 0.661908 |
38f003c85d91841bc389c08c6a91fa5429cad832 | 40,888 | py | Python | tests/test_runner.py | varunvarma/panoptes | 733e1b17e01d47fe0a399e2fe635f614cc5a0b88 | [
"Apache-2.0"
] | null | null | null | tests/test_runner.py | varunvarma/panoptes | 733e1b17e01d47fe0a399e2fe635f614cc5a0b88 | [
"Apache-2.0"
] | null | null | null | tests/test_runner.py | varunvarma/panoptes | 733e1b17e01d47fe0a399e2fe635f614cc5a0b88 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2018, Oath Inc.
Licensed under the terms of the Apache 2.0 license. See LICENSE file in project root for terms.
"""
import re
import unittest
from mock import patch, MagicMock, Mock, PropertyMock
from testfixtures import LogCapture
from yahoo_panoptes.framework.plugins.panoptes_base_plugin import PanoptesPluginInfo, PanoptesBasePlugin
from yahoo_panoptes.polling.polling_plugin import PanoptesPollingPlugin
from yahoo_panoptes.polling.polling_plugin_agent import polling_plugin_task, PanoptesPollingPluginKeyValueStore, \
PanoptesSecretsStore, PanoptesPollingPluginAgentKeyValueStore
from yahoo_panoptes.discovery.discovery_plugin_agent import PanoptesDiscoveryPluginAgentKeyValueStore, \
PanoptesDiscoveryPluginKeyValueStore, PanoptesSecretsStore, discovery_plugin_task
from yahoo_panoptes.framework.resources import PanoptesContext, PanoptesResource, PanoptesResourcesKeyValueStore
from yahoo_panoptes.framework.plugins.runner import PanoptesPluginRunner, PanoptesPluginWithEnrichmentRunner
from yahoo_panoptes.framework.metrics import PanoptesMetric, PanoptesMetricsGroupSet
from tests.mock_panoptes_producer import MockPanoptesMessageProducer
from test_framework import PanoptesTestKeyValueStore, panoptes_mock_kazoo_client, panoptes_mock_redis_strict_client
from helpers import get_test_conf_file
_TIMESTAMP = 1
_, global_panoptes_test_conf_file = get_test_conf_file()
class TestPanoptesPluginWithEnrichmentRunner(TestPanoptesPluginRunner):
# 'pass' is needed for these methods because the only difference in their logging output from
# TestPanoptesPluginRunner is the presence of the PanoptesResource in some log messages.
class TestPanoptesPollingPluginRunner(TestPanoptesPluginWithEnrichmentRunner):
| 60.574815 | 123 | 0.531183 |
38f0f18dc070774e4c59dd082f508779d0e46e34 | 940 | py | Python | root/tpd_near_trainstops_per_line.py | transitanalystisarel/TransitAnalystIsrael | 341de9272b352c18333ff136a00de0b97cd82216 | [
"MIT"
] | null | null | null | root/tpd_near_trainstops_per_line.py | transitanalystisarel/TransitAnalystIsrael | 341de9272b352c18333ff136a00de0b97cd82216 | [
"MIT"
] | null | null | null | root/tpd_near_trainstops_per_line.py | transitanalystisarel/TransitAnalystIsrael | 341de9272b352c18333ff136a00de0b97cd82216 | [
"MIT"
] | 3 | 2019-05-08T04:36:03.000Z | 2020-11-23T19:46:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# collect a set of trip_id s at all stops in a GTFS file over the selected week of the service period starting at serviceweekstartdate
# filter stops near trainstations based on input txt file - stopsneartrainstop_post_edit
# merge sets of trips at stops near each trainstation to count trips per hour and per day
#
#
import transitanalystisrael_config as cfg
import process_date
import trip_ids_at_stops_merge_near_trainstops_perday_v3
import stopswtrainstopidsandtpdperline_v1
import time
#
print("Local current time :", time.asctime( time.localtime(time.time()) ))
#
processdate = process_date.get_date_now()
trip_ids_at_stops_merge_near_trainstops_perday_v3.main(processdate, cfg.gtfspath, cfg.gtfsdirbase, cfg.processedpath, processdate)
stopswtrainstopidsandtpdperline_v1.main(processdate, cfg.processedpath)
print("Local current time :", time.asctime( time.localtime(time.time()) )) | 40.869565 | 134 | 0.811702 |
38f18e910ceb0b6a8c30ade6eeea28431583e6f1 | 5,252 | py | Python | merlin/modules/normalization.py | ethereon/merlin | 0babfed51e65197086d74479a1ca9150259b4f7f | [
"BSD-3-Clause"
] | 1 | 2019-08-15T16:22:20.000Z | 2019-08-15T16:22:20.000Z | merlin/modules/normalization.py | ethereon/merlin | 0babfed51e65197086d74479a1ca9150259b4f7f | [
"BSD-3-Clause"
] | null | null | null | merlin/modules/normalization.py | ethereon/merlin | 0babfed51e65197086d74479a1ca9150259b4f7f | [
"BSD-3-Clause"
] | null | null | null | from typing import Callable, Optional, Union
import tensorflow as tf
from merlin.initializers import Init
from merlin.modules.keras import KerasAdapter
from merlin.shape import Axis
from merlin.spec import DynamicSpec, Spec
| 41.354331 | 83 | 0.653465 |
38f2cb2b1272511384ed4a1d9e959be7afec0fff | 3,095 | py | Python | Plots/Bar/NCL_bar_2.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | 1 | 2021-05-09T02:54:10.000Z | 2021-05-09T02:54:10.000Z | Plots/Bar/NCL_bar_2.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | null | null | null | Plots/Bar/NCL_bar_2.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | null | null | null | """
NCL_bar_2.py
===============
This script illustrates the following concepts:
- Drawing bars instead of curves in an XY plot
- Changing the aspect ratio of a bar plot
- Drawing filled bars up or down based on a Y reference value
- Setting the minimum/maximum value of the Y axis in a bar plot
- Using named colors to indicate a fill color
- Creating array of dates to use as x-axis tick labels
- Creating a main title
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/bar_2.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/bar_2_lg.png
"""
import geocat.datafiles as gdf
import matplotlib.pyplot as plt
###############################################################################
# Import packages:
import numpy as np
import xarray as xr
from geocat.viz import util as gvutil
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/soi.nc"))
dsoik = ds.DSOI_KET
date = ds.date
num_months = np.shape(date)[0]
# Dates in the file are represented by year and month (YYYYMM)
# representing them fractionally will make ploting the data easier
# This produces the same results as NCL's yyyymm_to_yyyyfrac() function
date_frac = np.empty_like(date)
for n in np.arange(0, num_months, 1):
yyyy = int(date[n] / 100)
mon = (date[n] / 100 - yyyy) * 100
date_frac[n] = yyyy + (mon - 1) / 12
###############################################################################
# Plot
# Generate figure (set its size (width, height) in inches) and axes
plt.figure(figsize=(12, 6))
ax = plt.axes()
# Create a list of colors based on the color bar values
colors = ['red' if (value > 0) else 'blue' for value in dsoik[::8]]
plt.bar(date_frac[::8],
dsoik[::8],
align='edge',
edgecolor='black',
color=colors,
width=8 / 12,
linewidth=.6)
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=20)
# Use geocat.viz.util convenience function to set axes parameters
gvutil.set_axes_limits_and_ticks(ax,
ylim=(-3, 3),
yticks=np.linspace(-3, 3, 7),
yticklabels=np.linspace(-3, 3, 7),
xlim=(date_frac[40], date_frac[-16]),
xticks=np.linspace(1900, 1980, 5))
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax,
maintitle="Darwin Southern Oscillation Index",
ylabel='Anomalies',
maintitlefontsize=28,
labelfontsize=20)
plt.show()
| 37.289157 | 84 | 0.577383 |
38f35744d4413bb5e881ac526e9d7f661c57bec0 | 5,248 | py | Python | gabbi/tests/test_driver.py | scottwallacesh/gabbi | 5e76332ac06cd075c11477b384ad5da1d2eaa9d5 | [
"Apache-2.0"
] | 145 | 2015-01-16T23:19:35.000Z | 2022-03-15T00:21:54.000Z | gabbi/tests/test_driver.py | scottwallacesh/gabbi | 5e76332ac06cd075c11477b384ad5da1d2eaa9d5 | [
"Apache-2.0"
] | 250 | 2015-01-02T11:20:06.000Z | 2022-03-22T19:55:18.000Z | gabbi/tests/test_driver.py | scottwallacesh/gabbi | 5e76332ac06cd075c11477b384ad5da1d2eaa9d5 | [
"Apache-2.0"
] | 49 | 2015-01-14T16:14:52.000Z | 2022-03-21T11:37:26.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test that the driver can build tests effectively."""
import os
import unittest
from gabbi import driver
TESTS_DIR = 'test_gabbits'
| 44.10084 | 75 | 0.598895 |
38f399316a049f820f54f6ac2696a2ab3406ec0f | 4,779 | py | Python | mindspore/ops/operations/_inner_ops.py | ZephyrChenzf/mindspore | 8f191847cf71e12715ced96bc3575914f980127a | [
"Apache-2.0"
] | null | null | null | mindspore/ops/operations/_inner_ops.py | ZephyrChenzf/mindspore | 8f191847cf71e12715ced96bc3575914f980127a | [
"Apache-2.0"
] | null | null | null | mindspore/ops/operations/_inner_ops.py | ZephyrChenzf/mindspore | 8f191847cf71e12715ced96bc3575914f980127a | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Inner operators."""
from ..._checkparam import Validator as validator
from ...common import dtype as mstype
from ..primitive import PrimitiveWithInfer, prim_attr_register
| 48.272727 | 120 | 0.623352 |
38f39bc63224a405d7dddb6afc4bd67e4b1fcae8 | 434 | py | Python | migrations/versions/7f447c94347a_.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | migrations/versions/7f447c94347a_.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | migrations/versions/7f447c94347a_.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | """
Revision ID: 7f447c94347a
Revises: a78f4b5d7dee
Create Date: 2017-11-17 14:59:36.177805
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "7f447c94347a"
down_revision = "a78f4b5d7dee"
| 18.083333 | 80 | 0.714286 |
38f4002d6f513a5cbe6493011e435271c5396522 | 5,308 | py | Python | abcvoting/preferences.py | pbatko/abcvoting | 55a8e7e23e35a3620921e3f5426a09925e83640e | [
"MIT"
] | null | null | null | abcvoting/preferences.py | pbatko/abcvoting | 55a8e7e23e35a3620921e3f5426a09925e83640e | [
"MIT"
] | null | null | null | abcvoting/preferences.py | pbatko/abcvoting | 55a8e7e23e35a3620921e3f5426a09925e83640e | [
"MIT"
] | null | null | null | """
Dichotomous (approval) preferences and preference profiles
Voters are indexed by 0, ..., len(profile)
Candidates are indexed by 0, ..., profile.num_cand
"""
from abcvoting.misc import str_candset
from collections import OrderedDict
| 35.152318 | 75 | 0.527882 |
38f53f3c9f560f0765e57f3e8c5f7abb2dffb240 | 1,035 | py | Python | java/create_solution.py | hermantai/kata | 1b1d7af2f46bca994bede3f8a937dff96015e415 | [
"Apache-2.0"
] | null | null | null | java/create_solution.py | hermantai/kata | 1b1d7af2f46bca994bede3f8a937dff96015e415 | [
"Apache-2.0"
] | null | null | null | java/create_solution.py | hermantai/kata | 1b1d7af2f46bca994bede3f8a937dff96015e415 | [
"Apache-2.0"
] | null | null | null | import os
import sys
templ = """package kata;
import static kata.Printer.*;
import java.util.*;
/**
* Cracking the coding interview 6th ed. p.XX(TODO)
*/
public class %(classname)s {
static int %(methodname)s(String str) {
return 0;
}
public static void main(String args[]) {
runSample("abcabcdd");
}
static void runSample(String s, int ans) {
System.out.printf(
"%%s = %%s(%%s)\\n",
s,
%(methodname)s(s),
ans);
}
}
"""
if __name__ == '__main__':
main()
| 19.903846 | 76 | 0.575845 |
38f5e5531d57aca7c42b9394241ccc224319e068 | 310 | py | Python | tests/unit/helpers_test/test_password.py | alefeans/flask-base | e3daa4ce1020ba3711908c3ba5ef88b0cc599dfe | [
"MIT"
] | 11 | 2019-10-03T18:47:49.000Z | 2022-02-01T10:42:02.000Z | tests/unit/helpers_test/test_password.py | alefeans/flask-base | e3daa4ce1020ba3711908c3ba5ef88b0cc599dfe | [
"MIT"
] | null | null | null | tests/unit/helpers_test/test_password.py | alefeans/flask-base | e3daa4ce1020ba3711908c3ba5ef88b0cc599dfe | [
"MIT"
] | 8 | 2019-10-03T18:47:53.000Z | 2021-06-07T14:47:51.000Z | import pytest
from app.helpers import check_password, encrypt_password
| 23.846154 | 69 | 0.745161 |
38f71f66b4d3452d83d78d02e5968d474fc84f07 | 1,717 | py | Python | tests/output/test_pdf_to_png.py | ynikitenko/lena | d0fbae47f21007685edbd4e77bc91413421bebd1 | [
"Apache-2.0"
] | 4 | 2020-03-01T14:01:48.000Z | 2021-02-23T19:33:36.000Z | tests/output/test_pdf_to_png.py | ynikitenko/lena | d0fbae47f21007685edbd4e77bc91413421bebd1 | [
"Apache-2.0"
] | 1 | 2021-05-09T15:47:17.000Z | 2021-05-09T16:12:03.000Z | tests/output/test_pdf_to_png.py | ynikitenko/lena | d0fbae47f21007685edbd4e77bc91413421bebd1 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import pytest
import subprocess
import sys
import lena.core
from lena.output import PDFToPNG
| 35.040816 | 86 | 0.608037 |
38f9a10a36632913442b3f03652d751ae087dae5 | 4,162 | py | Python | third_party/maya/lib/usdMaya/testenv/testUsdExportSkeleton.py | navefx/YuksUSD | 56c2e1def36ee07121f4ecb349c1626472b3c338 | [
"AML"
] | 6 | 2018-08-26T13:27:22.000Z | 2021-08-14T23:57:38.000Z | third_party/maya/lib/usdMaya/testenv/testUsdExportSkeleton.py | navefx/YuksUSD | 56c2e1def36ee07121f4ecb349c1626472b3c338 | [
"AML"
] | 1 | 2021-08-14T23:57:51.000Z | 2021-08-14T23:57:51.000Z | third_party/maya/lib/usdMaya/testenv/testUsdExportSkeleton.py | navefx/YuksUSD | 56c2e1def36ee07121f4ecb349c1626472b3c338 | [
"AML"
] | 4 | 2018-06-14T18:14:59.000Z | 2021-09-13T22:20:50.000Z | #!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import os
import unittest
from maya import cmds
from maya import standalone
from maya.api import OpenMaya as OM
from pxr import Gf, Usd, UsdSkel, Vt
def testSkeletonTopology(self):
"""Tests that the joint topology is correct."""
usdFile = os.path.abspath('UsdExportSkeleton.usda')
cmds.usdExport(mergeTransformAndShape=True, file=usdFile,
shadingMode='none')
stage = Usd.Stage.Open(usdFile)
skeleton = UsdSkel.Skeleton.Get(stage, '/skeleton_Hip')
self.assertTrue(skeleton)
joints = skeleton.GetJointsAttr().Get()
self.assertEqual(joints, Vt.TokenArray([
"Hip",
"Hip/Spine",
"Hip/Spine/Neck",
"Hip/Spine/Neck/Head",
"Hip/Spine/Neck/LArm",
"Hip/Spine/Neck/LArm/LHand",
# note: skips ExtraJoints because it's not a joint
"Hip/Spine/Neck/LArm/LHand/ExtraJoints/ExtraJoint1",
"Hip/Spine/Neck/LArm/LHand/ExtraJoints/ExtraJoint1/ExtraJoint2",
"Hip/Spine/Neck/RArm",
"Hip/Spine/Neck/RArm/RHand",
"Hip/RLeg",
"Hip/RLeg/RFoot",
"Hip/LLeg",
"Hip/LLeg/LFoot"
]))
def testSkelTransformDecomposition(self):
"""
Tests that the decomposed transform values, when recomposed, recreate
the correct Maya transformation matrix.
"""
usdFile = os.path.abspath('UsdExportSkeleton.usda')
cmds.usdExport(mergeTransformAndShape=True, file=usdFile,
shadingMode='none', frameRange=[1, 30])
stage = Usd.Stage.Open(usdFile)
anim = UsdSkel.PackedJointAnimation.Get(stage,
'/skeleton_Hip/Animation')
self.assertEqual(anim.GetJointsAttr().Get()[8],
"Hip/Spine/Neck/RArm")
animT = anim.GetTranslationsAttr()
animR = anim.GetRotationsAttr()
animS = anim.GetScalesAttr()
selList = OM.MSelectionList()
selList.add("RArm")
rArmDagPath = selList.getDagPath(0)
fnTransform = OM.MFnTransform(rArmDagPath)
for i in xrange(1, 31):
cmds.currentTime(i, edit=True)
mayaXf = fnTransform.transformation().asMatrix()
usdT = animT.Get(i)[8]
usdR = animR.Get(i)[8]
usdS = animS.Get(i)[8]
usdXf = UsdSkel.MakeTransform(usdT, usdR, usdS)
self._AssertMatricesClose(usdXf, Gf.Matrix4d(*mayaXf))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 34.683333 | 77 | 0.645603 |