hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c51c826bcad9a887b4123c7790037f70e652cfae | 1,250 | py | Python | fix_ccJSON.py | boada/wmh | f2abe5ff2aeeae6eebab2e8c40803b3fcec9ac3a | [
"MIT"
] | null | null | null | fix_ccJSON.py | boada/wmh | f2abe5ff2aeeae6eebab2e8c40803b3fcec9ac3a | [
"MIT"
] | null | null | null | fix_ccJSON.py | boada/wmh | f2abe5ff2aeeae6eebab2e8c40803b3fcec9ac3a | [
"MIT"
] | null | null | null | import pandas as pd
import sys
if __name__ == "__main__":
fix(sys.argv[1])
| 24.038462 | 60 | 0.508 |
c51deae5ca13775e3c2ef4b77c14c0ca5e33d193 | 1,184 | py | Python | 01-Lesson-Plans/03-Python-Pandas/1/Activities/12-functions-02/Unsolved/functions-02.py | tatianegercina/FinTech | b40687aa362d78674e223eb15ecf14bc59f90b62 | [
"ADSL"
] | 1 | 2021-04-13T07:14:34.000Z | 2021-04-13T07:14:34.000Z | 01-Lesson-Plans/03-Python-Pandas/1/Activities/12-functions-02/Unsolved/functions-02.py | tatianegercina/FinTech | b40687aa362d78674e223eb15ecf14bc59f90b62 | [
"ADSL"
] | 2 | 2021-06-02T03:14:19.000Z | 2022-02-11T23:21:24.000Z | 01-Lesson-Plans/03-Python-Pandas/1/Activities/12-functions-02/Unsolved/functions-02.py | tatianegercina/FinTech | b40687aa362d78674e223eb15ecf14bc59f90b62 | [
"ADSL"
] | 1 | 2021-05-07T13:26:50.000Z | 2021-05-07T13:26:50.000Z | # Define a function "warble" that takes in a string as an argument, adds " arglebargle" to the end of it, and returns the result.
# Print the result of calling your "warble" function with the argument "hello".
# Define a function "wibble" that takes a string as an argument, prints the argument, prepends "wibbly " to the argument, and returns the result
# Print the result of calling your "wibble" function with the argument "bibbly"
# Define a function "print_sum" that takes in two numbers as arguments and prints the sum of those two numbers.
# Define a function "return_sum" that takes in two numbers as arguments and returns the sum of those two numbers
# Using either "return_sum" and no mathematical operators, define a function "triple_sum" that takes in 3 arguments and returns the sum of those 3 numbers.
# Define a function "dance_party" that takes in a string as an argument, that prints "dance!", updates the string from calling "wibble" function with that argument, updates the string from calling "warble" function with that argument, returns the updated string
# Print the result of calling your "dance_party" function with your name as the argument
| 43.851852 | 261 | 0.771115 |
c51e6be205213ab9c3f0f822b11808c56b8e2982 | 1,003 | py | Python | Section 6 - Modular Programming/Green eggs and ham v4.py | gitjot/python-for-lccs | a8a4ae8847abbc33361f80183c06d57b20523382 | [
"CC0-1.0"
] | 10 | 2020-02-14T14:28:15.000Z | 2022-02-02T18:44:11.000Z | Section 6 - Modular Programming/Green eggs and ham v4.py | gitjot/python-for-lccs | a8a4ae8847abbc33361f80183c06d57b20523382 | [
"CC0-1.0"
] | null | null | null | Section 6 - Modular Programming/Green eggs and ham v4.py | gitjot/python-for-lccs | a8a4ae8847abbc33361f80183c06d57b20523382 | [
"CC0-1.0"
] | 8 | 2020-03-25T09:27:42.000Z | 2021-11-03T15:24:38.000Z | # Event: LCCS Python Fundamental Skills Workshop
# Date: Dec 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: To find (and fix) two syntax errors
# A program to display Green Eggs and Ham (v4)
# Program execution starts here
showChorus()
displayVerse1() # SYNTAX ERROR 1 - function 'displayVerse1' does not exist
showChorus()
showVerse2() # SYNTAX ERROR 2 - function 'showVerse2' does not exist
showChorus()
| 30.393939 | 75 | 0.658026 |
c51e94a6e708f618911c4ecc6deceed3e193e44e | 1,107 | py | Python | internal/handlers/singapore.py | fillingthemoon/cartogram-web | 58b645bca0c22b9bccdb2a5a8213a5a24a7e5958 | [
"MIT"
] | null | null | null | internal/handlers/singapore.py | fillingthemoon/cartogram-web | 58b645bca0c22b9bccdb2a5a8213a5a24a7e5958 | [
"MIT"
] | null | null | null | internal/handlers/singapore.py | fillingthemoon/cartogram-web | 58b645bca0c22b9bccdb2a5a8213a5a24a7e5958 | [
"MIT"
] | null | null | null | import settings
import handlers.base_handler
import csv
| 29.918919 | 292 | 0.641373 |
c51f18d40b89343f5d2cfddd15750839af888439 | 1,247 | py | Python | code.py | aashray18521/parallelModifiedGrepPython | afad79662e59e1e6fc5f491ba988995a312dc205 | [
"MIT"
] | null | null | null | code.py | aashray18521/parallelModifiedGrepPython | afad79662e59e1e6fc5f491ba988995a312dc205 | [
"MIT"
] | 3 | 2020-11-23T15:37:43.000Z | 2020-11-23T15:38:51.000Z | code.py | aashray18521/parallelModifiedGrepPython | afad79662e59e1e6fc5f491ba988995a312dc205 | [
"MIT"
] | null | null | null | import multiprocessing
import os
import time
rootdir = input()
keyword = input()
batch_size = 1
the_queue = multiprocessing.Queue()
walk_dirs(rootdir, batch_size)
the_pool = multiprocessing.Pool(3, worker_main,(the_queue,))
| 31.974359 | 87 | 0.644747 |
c51f3d08b27846ef7d07616f6d207a8d88638159 | 1,316 | py | Python | flask_youku/__init__.py | xiaoyh121/program | 6826f024cce7a4250a1dab8dba145c1f0d713286 | [
"Apache-2.0"
] | 176 | 2016-12-11T03:24:41.000Z | 2021-12-10T11:44:37.000Z | flask_youku/__init__.py | xiaoyh121/program | 6826f024cce7a4250a1dab8dba145c1f0d713286 | [
"Apache-2.0"
] | 4 | 2018-02-07T03:31:13.000Z | 2021-12-25T13:03:49.000Z | flask_youku/__init__.py | xiaoyh121/program | 6826f024cce7a4250a1dab8dba145c1f0d713286 | [
"Apache-2.0"
] | 76 | 2016-11-13T08:57:38.000Z | 2021-12-25T12:02:05.000Z | from flask import Blueprint, Markup
from flask import render_template
def youku(*args, **kwargs):
"""Define the Jinja function."""
video = Video(*args, **kwargs)
return video.html
| 24.830189 | 59 | 0.609422 |
c51fefbd501d6ac95a99920e7040a7192440ef23 | 26,061 | py | Python | main.py | DasAnish/TutorMatch | 1b2cf3a71e859f519d645dc33edf72a975661066 | [
"MIT"
] | null | null | null | main.py | DasAnish/TutorMatch | 1b2cf3a71e859f519d645dc33edf72a975661066 | [
"MIT"
] | null | null | null | main.py | DasAnish/TutorMatch | 1b2cf3a71e859f519d645dc33edf72a975661066 | [
"MIT"
] | 1 | 2021-09-19T15:00:59.000Z | 2021-09-19T15:00:59.000Z | from backend import Backend, Tutor, Parent
from kivy.app import App
from kivy.base import Builder
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.properties import ObjectProperty
from kivy.core.window import Window
from kivy.uix.image import Image
from kivy.config import Config
from kivy.graphics import *
from kivy.animation import *
from kivy.graphics import RoundedRectangle
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.slider import Slider
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.popup import Popup
from backend import Backend, Match, Level
import os
#Builder.load_file("kivyFiles/main.kv")
photoHeight = 550
photoWidth = 340
parent = {'username':'kelvincfleung', 'password':'hello123', 'fname':'Kelvin', 'lname':'Leung1', 'rateMin':10, 'rateMax':20, 'subject':'maths', 'level':1}
parentObj = Parent('61467ec2c2c5a2e917994d69')
parentObj.updateInfo(parent)
# KELVIN GO HERE
#360*640
if __name__ == '__main__':
Config.set('graphics', 'width', '360')
Config.set('graphics', 'height', '640')
Config.set('graphics', 'resizable', False)
MainApp().run() | 40.784038 | 192 | 0.610683 |
c520331bf38c88e653e41aa4b2d7c402d30d7649 | 374 | py | Python | routes/routes.py | aryan9600/SimpleMath-Flask | 855120ba7e7f36435045840ab1c6672308fae7e5 | [
"MIT"
] | null | null | null | routes/routes.py | aryan9600/SimpleMath-Flask | 855120ba7e7f36435045840ab1c6672308fae7e5 | [
"MIT"
] | null | null | null | routes/routes.py | aryan9600/SimpleMath-Flask | 855120ba7e7f36435045840ab1c6672308fae7e5 | [
"MIT"
] | null | null | null | from flask import Blueprint, request
router = Blueprint("router", __name__)
| 22 | 48 | 0.697861 |
c5203ec4fd880de88723d9ad07ee74058b1d23cf | 1,592 | py | Python | configs/repdet/repdet_repvgg_b1g2_nanopan_nanohead_1x_coco.py | karthiksharma98/mmdetection | 295145d41a74598db98a037224f0f82c074f3fff | [
"Apache-2.0"
] | null | null | null | configs/repdet/repdet_repvgg_b1g2_nanopan_nanohead_1x_coco.py | karthiksharma98/mmdetection | 295145d41a74598db98a037224f0f82c074f3fff | [
"Apache-2.0"
] | null | null | null | configs/repdet/repdet_repvgg_b1g2_nanopan_nanohead_1x_coco.py | karthiksharma98/mmdetection | 295145d41a74598db98a037224f0f82c074f3fff | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/repdet_repvgg_pafpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_poly.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='RepDet',
pretrained='/data/kartikes/repvgg_models/repvgg_b1g2.pth',
backbone=dict(
type='RepVGG',
arch='B1g2',
out_stages=[1, 2, 3, 4],
activation='ReLU',
last_channel=1024,
deploy=False),
neck=dict(
type='NanoPAN',
in_channels=[128, 256, 512, 1024],
out_channels=256,
num_outs=5,
start_level=1,
add_extra_convs='on_input'),
bbox_head=dict(
type='NanoDetHead',
num_classes=80,
in_channels=256,
stacked_convs=2,
feat_channels=256,
share_cls_reg=True,
reg_max=10,
norm_cfg=dict(type='BN', requires_grad=True),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
)
optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0001)
data = dict(
samples_per_gpu=4,
workers_per_gpu=2)
find_unused_parameters=True
runner = dict(type='EpochBasedRunner', max_epochs=12)
| 28.945455 | 74 | 0.594849 |
c5206e72ad25192f5a2ed7316aa7ced0c3105161 | 436 | py | Python | tests/test_calculate_branch.py | ivergara/python-abc | b5bb87b80315f8e5ecd2d6f35b7208f0a7df9c3a | [
"Unlicense"
] | 2 | 2021-07-25T20:12:21.000Z | 2021-07-25T21:19:23.000Z | tests/test_calculate_branch.py | ivergara/python-abc | b5bb87b80315f8e5ecd2d6f35b7208f0a7df9c3a | [
"Unlicense"
] | 1 | 2021-12-28T22:07:05.000Z | 2021-12-28T22:07:05.000Z | tests/test_calculate_branch.py | ivergara/python-abc | b5bb87b80315f8e5ecd2d6f35b7208f0a7df9c3a | [
"Unlicense"
] | 1 | 2021-12-07T19:53:45.000Z | 2021-12-07T19:53:45.000Z | import pytest
from tests import assert_source_returns_expected
BRANCH_CASES = [
# Call
('print("hello world")', 'b | print("hello world")'),
# Await
("await noop()", "b | await noop()"),
# Class instantiation
("Noop()", "b | Noop()"),
]
| 22.947368 | 68 | 0.669725 |
c52074b71855ef72867102bc5564df2ba1896c19 | 4,619 | py | Python | client/src/obc.py | estcube/telemetry-forwarding-client | be659c8dd8e4bd26d1d1974d63f90acffd150e34 | [
"MIT"
] | 3 | 2020-06-11T12:34:25.000Z | 2020-09-16T12:06:32.000Z | client/src/obc.py | estcube/telemetry-forwarding-client | be659c8dd8e4bd26d1d1974d63f90acffd150e34 | [
"MIT"
] | 57 | 2020-09-16T09:11:04.000Z | 2022-02-28T01:32:13.000Z | client/src/obc.py | estcube/Telemetry-Forwarding-Client | be659c8dd8e4bd26d1d1974d63f90acffd150e34 | [
"MIT"
] | null | null | null | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
| 52.488636 | 118 | 0.657285 |
c522238afd1828d1190c7360573f7b8dc442a5a0 | 1,537 | py | Python | SourceWatch/buffer.py | spezifanta/SourceWatch | aaf2cf1ba00015947689181daf77b80bde9b4feb | [
"MIT"
] | 6 | 2019-07-09T19:40:01.000Z | 2022-01-24T12:01:37.000Z | SourceWatch/buffer.py | spezifanta/SourceWatch | aaf2cf1ba00015947689181daf77b80bde9b4feb | [
"MIT"
] | null | null | null | SourceWatch/buffer.py | spezifanta/SourceWatch | aaf2cf1ba00015947689181daf77b80bde9b4feb | [
"MIT"
] | 1 | 2020-11-07T13:06:58.000Z | 2020-11-07T13:06:58.000Z | import io
import struct
| 25.616667 | 74 | 0.573845 |
c52372bcbf3ae907ef32ccf5713d1759604af330 | 483 | py | Python | scripts/npc/holyStone.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/npc/holyStone.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/npc/holyStone.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # Holy Stone - Holy Ground at the Snowfield (3rd job)
questIDs = [1431, 1432, 1433, 1435, 1436, 1437, 1439, 1440, 1442, 1443, 1445, 1446, 1447, 1448]
hasQuest = False
for qid in questIDs:
if sm.hasQuest(qid):
hasQuest = True
break
if hasQuest:
if sm.sendAskYesNo("#b(A mysterious energy surrounds this stone. Do you want to investigate?)"):
sm.warpInstanceIn(910540000, 0)
else:
sm.sendSayOkay("#b(A mysterious energy surrounds this stone)#k")
| 30.1875 | 100 | 0.679089 |
c523effb8f36813f8d45730c0dbdd83679d7448e | 16,256 | py | Python | pyvmodule/expr.py | tanhongze/pyvmodule | b88cd35e57893024071306d238ce601341ce3bb4 | [
"MIT"
] | null | null | null | pyvmodule/expr.py | tanhongze/pyvmodule | b88cd35e57893024071306d238ce601341ce3bb4 | [
"MIT"
] | null | null | null | pyvmodule/expr.py | tanhongze/pyvmodule | b88cd35e57893024071306d238ce601341ce3bb4 | [
"MIT"
] | 1 | 2020-01-20T07:25:40.000Z | 2020-01-20T07:25:40.000Z | #-- coding:utf-8
from .ast import ASTNode
from .compute.value import expr_value_calc_funcs,expr_value_prop_funcs
from .compute.width import expr_width_calc_funcs,expr_width_fix_funcs
from .compute.width import expr_match_width,expr_calc_width
from .tools.utility import count_one
import warnings
__all__ = ['Mux','Concatenate','Expr','wrap_expr',
'BinaryOperator',
'ConstExpr','Hexadecimal','Decimal','Octal','Binary']
class UnaryOperator(Expr):
class BinaryOperator(Expr):
class Mux(Expr):
class MultilineAlignOperator(Expr):
def fix_slice(key,width):
start = (0 if key.step is None else key.stop-key.step) if key.start is None else key.start
stop = (width if key.step is None else key.start+key.start) if key.stop is None else key.stop
width = stop - start
return start,stop,width
class ConstExpr(Expr):
_radix_fmtstrs = {
16:lambda width,value:("%d'h{:0>%dx}"%(width,(width+3)//4)).format(value),
10:lambda width,value:("%d'd{:0>d}" % width ).format(value),
8 :lambda width,value:("%d'o{:0>%do}"%(width,(width+2)//3)).format(value),
2 :lambda width,value:("%d'b{:0>%db}"%(width, width )).format(value)}
def _set_default(self,typename,n_childs=0):
self.comments = []
self.childs = [None]*n_childs
self.value = None
self.width = None
def __init__(self,value,width=None,radix=10):
self.typename = 'const'
self._value = 0
self._width = None
if not width is None:self.width = width
self.radix = radix
self.value = value
def __getitem__(self,key):
if isinstance(key,slice):
for a in {'start','stop','step'}:
if not isinstance(getattr(key,a),(int,type(None))):
raise SyntaxError('Invalid fetch format from constant expression.')
start = 0 if key.start is None else key.start
if not key.step is None:return Hexadecimal(int(self)>>start,width=key.step)
elif not key.stop is None:return Hexadecimal(int(self)>>start,width=key.stop-start)
return Hexadecimal(int(self)>>start,width=self.width-start)
elif isinstance(key,(int,ConstExpr)):
loc = int(key)
if loc<0:loc += len(self)
return Binary((self.value>>loc)&1,width=1)
elif isinstance(key,Expr):
n = 1<<len(key)
v = self.value
m = count_one(v)
if m==0:return Binary(0,width=1)
elif m==n:return Binary(1,width=1)
else:
if m<=(n>>1):
expr = 0
for i in range(n):
if ((v>>i)&1)==1:expr|=key//i
else:
expr = 1
for i in range(n):
if ((v>>i)&1)==0:expr&=~(key//i)
return expr
else:raise TypeError(type(key))
def __str__(self):
width = self.width
value = self.value
if value is None:
if width is None:return "'bz"
else:return "%d'bz"%width
if width is None:
if value<0:warnings.warn('Negative value without width declared.')
return str(value)
result = self._radix_fmtstr(width,value)
return result
def __int__(self):return self.value
def Hexadecimal(x,width=None):
if width==0:return None
else:return ConstExpr(x,width=width,radix=16)
def Binary (x,width=None):
if width==0:return None
else:return ConstExpr(x,width=width,radix=2 )
def Octal (x,width=None):
if width==0:return None
else:return ConstExpr(x,width=width,radix=8 )
def Decimal (x,width=None):
if width==0:return None
else:return ConstExpr(x,width=width,radix=10)
| 40.237624 | 123 | 0.62906 |
c529b4e8440b64034ec82bd0b0da8014712c8c78 | 13,936 | py | Python | Common_3/Tools/ForgeShadingLanguage/generators/d3d.py | divecoder/The-Forge | e882fbc000b2915b52c98fe3a8c791930490dd3c | [
"Apache-2.0"
] | 3,058 | 2017-10-03T01:33:22.000Z | 2022-03-30T22:04:23.000Z | Common_3/Tools/ForgeShadingLanguage/generators/d3d.py | juteman/The-Forge | e882fbc000b2915b52c98fe3a8c791930490dd3c | [
"Apache-2.0"
] | 157 | 2018-01-26T10:18:33.000Z | 2022-03-06T10:59:23.000Z | Common_3/Tools/ForgeShadingLanguage/generators/d3d.py | juteman/The-Forge | e882fbc000b2915b52c98fe3a8c791930490dd3c | [
"Apache-2.0"
] | 388 | 2017-12-21T10:52:32.000Z | 2022-03-31T18:25:49.000Z | """ GLSL shader generation """
from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace
from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl
import os, sys, importlib, re
from shutil import copyfile | 39.703704 | 130 | 0.564581 |
c52ada24bea0c59c6a12c8a2a1dea577b379a815 | 1,673 | py | Python | test/relationships/test_minhash.py | bateman-research/search-sifter | 78b05beac5ca21862d2773609dc4b9395a4982a5 | [
"MIT"
] | 1 | 2020-07-20T13:20:00.000Z | 2020-07-20T13:20:00.000Z | test/relationships/test_minhash.py | bateman-research/search-sifter | 78b05beac5ca21862d2773609dc4b9395a4982a5 | [
"MIT"
] | null | null | null | test/relationships/test_minhash.py | bateman-research/search-sifter | 78b05beac5ca21862d2773609dc4b9395a4982a5 | [
"MIT"
] | null | null | null | import pytest
import searchsifter.relationships.minhash as mh
import searchsifter.relationships.jaccard as jc
def test_intersection(a, b, c):
assert mh.intersection_signature(a, b) == set(range(50, 100))
assert mh.intersection_signature(a, b, c) == set(range(75, 100))
def test_union(a, b):
assert mh.union_signature(a, b, 100) == a
assert len(mh.union_signature(a, b, 20)) == 20
| 22.306667 | 68 | 0.550508 |
c52efabf8d8724ff1df4180be0a678f90bbcc559 | 1,672 | py | Python | tests/integration/test_between_tags.py | liorbass/pydriller | 26e6b594102e1f0a3e1029c5389fedec3cc55471 | [
"Apache-2.0"
] | 583 | 2018-04-09T09:48:47.000Z | 2022-03-23T17:27:10.000Z | tests/integration/test_between_tags.py | liorbass/pydriller | 26e6b594102e1f0a3e1029c5389fedec3cc55471 | [
"Apache-2.0"
] | 195 | 2018-05-25T08:10:58.000Z | 2022-03-29T09:28:37.000Z | tests/integration/test_between_tags.py | liorbass/pydriller | 26e6b594102e1f0a3e1029c5389fedec3cc55471 | [
"Apache-2.0"
] | 134 | 2018-04-10T12:57:34.000Z | 2022-03-29T13:40:35.000Z | # Copyright 2018 Davide Spadini
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pydriller.repository import Repository
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
| 34.122449 | 91 | 0.684211 |
c52f836bfe409a72332984d1519b1c551dfb66b2 | 847 | py | Python | tests/modules/command/button/test_wa_url_parameter.py | d3no/mocean-sdk-python | cbc215a0eb8aa26c04afb940eab6482f23150c75 | [
"MIT"
] | null | null | null | tests/modules/command/button/test_wa_url_parameter.py | d3no/mocean-sdk-python | cbc215a0eb8aa26c04afb940eab6482f23150c75 | [
"MIT"
] | null | null | null | tests/modules/command/button/test_wa_url_parameter.py | d3no/mocean-sdk-python | cbc215a0eb8aa26c04afb940eab6482f23150c75 | [
"MIT"
] | null | null | null | from unittest import TestCase
from moceansdk.modules.command.button.wa_url_parameter_button import (
WaUrlParameterButton,
)
| 27.322581 | 81 | 0.670602 |
c52fa39e205177e471e16b57a23781f02f1d2a0d | 7,345 | py | Python | 2019/day21_input.py | coingraham/adventofcode | 52b5b3f049242881285d0c2704f44cc1ee2a821e | [
"MIT"
] | 5 | 2020-12-04T04:30:17.000Z | 2021-11-12T11:26:22.000Z | 2019/day21_input.py | coingraham/adventofcode | 52b5b3f049242881285d0c2704f44cc1ee2a821e | [
"MIT"
] | null | null | null | 2019/day21_input.py | coingraham/adventofcode | 52b5b3f049242881285d0c2704f44cc1ee2a821e | [
"MIT"
] | null | null | null | input_data = """109,2050,21101,0,966,1,21101,13,0,0,1106,0,1378,21101,20,0,0,1105,1,1337,21101,0,27,0,1105,1,1279,1208,1,65,748,1005,748,73,1208,1,79,748,1005,748,110,1208,1,78,748,1005,748,132,1208,1,87,748,1005,748,169,1208,1,82,748,1005,748,239,21101,0,1041,1,21102,1,73,0,1105,1,1421,21101,0,78,1,21101,1041,0,2,21102,88,1,0,1106,0,1301,21101,0,68,1,21102,1041,1,2,21102,1,103,0,1106,0,1301,1101,0,1,750,1105,1,298,21102,1,82,1,21102,1,1041,2,21102,1,125,0,1106,0,1301,1102,1,2,750,1106,0,298,21101,0,79,1,21102,1041,1,2,21102,147,1,0,1105,1,1301,21102,84,1,1,21102,1,1041,2,21101,162,0,0,1106,0,1301,1101,0,3,750,1105,1,298,21102,1,65,1,21101,1041,0,2,21101,184,0,0,1106,0,1301,21102,76,1,1,21102,1041,1,2,21101,199,0,0,1106,0,1301,21101,75,0,1,21102,1,1041,2,21101,0,214,0,1105,1,1301,21102,221,1,0,1106,0,1337,21101,10,0,1,21101,0,1041,2,21101,236,0,0,1106,0,1301,1106,0,553,21102,1,85,1,21101,1041,0,2,21101,254,0,0,1106,0,1301,21102,1,78,1,21101,0,1041,2,21102,269,1,0,1106,0,1301,21102,276,1,0,1105,1,1337,21102,1,10,1,21101,1041,0,2,21102,291,1,0,1106,0,1301,1102,1,1,755,1105,1,553,21102,32,1,1,21102,1041,1,2,21101,313,0,0,1105,1,1301,21102,320,1,0,1105,1,1337,21102,1,327,0,1106,0,1279,1202,1,1,749,21102,1,65,2,21102,1,73,3,21101,0,346,0,1105,1,1889,1206,1,367,1007,749,69,748,1005,748,360,1102,1,1,756,1001,749,-64,751,1106,0,406,1008,749,74,748,1006,748,381,1101,-1,0,751,1105,1,406,1008,749,84,748,1006,748,395,1101,0,-2,751,1105,1,406,21102,1100,1,1,21102,1,406,0,1105,1,1421,21102,32,1,1,21101,0,1100,2,21101,421,0,0,1105,1,1301,21101,0,428,0,1106,0,1337,21101,435,0,0,1105,1,1279,2102,1,1,749,1008,749,74,748,1006,748,453,1102,-1,1,752,1105,1,478,1008,749,84,748,1006,748,467,1101,-2,0,752,1105,1,478,21101,1168,0,1,21101,0,478,0,1105,1,1421,21102,485,1,0,1105,1,1337,21101,0,10,1,21101,0,1168,2,21102,500,1,0,1105,1,1301,1007,920,15,748,1005,748,518,21102,1,1209,1,21101,0,518,0,1105,1,1421,1002,920,3,529,1001,529,921,529,101,0,750,0,1001,529,1,537,100
2,751,1,0,1001,537,1,545,1001,752,0,0,1001,920,1,920,1105,1,13,1005,755,577,1006,756,570,21102,1,1100,1,21102,1,570,0,1106,0,1421,21101,987,0,1,1105,1,581,21101,1001,0,1,21101,0,588,0,1105,1,1378,1101,758,0,594,101,0,0,753,1006,753,654,21001,753,0,1,21102,610,1,0,1105,1,667,21102,0,1,1,21101,621,0,0,1106,0,1463,1205,1,647,21101,0,1015,1,21102,1,635,0,1106,0,1378,21102,1,1,1,21101,646,0,0,1106,0,1463,99,1001,594,1,594,1105,1,592,1006,755,664,1101,0,0,755,1106,0,647,4,754,99,109,2,1102,726,1,757,22102,1,-1,1,21102,9,1,2,21102,1,697,3,21101,692,0,0,1106,0,1913,109,-2,2105,1,0,109,2,101,0,757,706,2101,0,-1,0,1001,757,1,757,109,-2,2105,1,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,255,63,191,159,95,127,223,0,163,166,217,200,238,34,117,94,155,62,55,60,69,46,103,172,98,186,252,79,107,56,171,214,241,220,175,87,61,70,53,113,232,250,246,245,249,174,86,253,78,108,236,137,244,102,162,84,243,213,126,111,77,212,156,158,222,219,35,239,116,120,190,47,100,221,198,118,205,136,185,187,227,123,119,110,121,43,189,143,188,109,138,177,233,57,226,170,202,248,237,152,196,92,114,167,168,229,234,125,157,169,242,59,182,247,99,216,142,42,183,173,106,39,215,207,201,49,115,54,204,76,71,124,178,181,199,38,179,231,228,85,122,154,50,197,139,218,140,58,153,235,206,251,254,184,203,101,68,93,51,230,141,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,20,73,110,112,117,116,32,105,110,115,116,114,117,99,116,105,111,110,115,58,10,13,10,87,97,108,107,105,110,103,46,46,46,10,10,13,10,82,117,110,110,105,110,103,46,46,46,10,10,25,10,68,105,100,110,39,116,32,109,97,107,101,32,105,116,32,97,99,114,111,115,115,58,10,10,58,73,110,118,97,108,105,100,32,111,112,101,114,97,116,105,111,110,59,32,101,120,112,101,99,116,101,100,32,115,111,109,101,116,104,105,110,103,32,108,105,107,101,32,65,78,68,44,32,79,82,44,32,111,114,32,78,79,84,67,73,110,118,97,108,105,100,32,102,105,114,115,116,32,97,114,103,117,109,101,110,116,59,32,101,120,1
12,101,99,116,101,100,32,115,111,109,101,116,104,105,110,103,32,108,105,107,101,32,65,44,32,66,44,32,67,44,32,68,44,32,74,44,32,111,114,32,84,40,73,110,118,97,108,105,100,32,115,101,99,111,110,100,32,97,114,103,117,109,101,110,116,59,32,101,120,112,101,99,116,101,100,32,74,32,111,114,32,84,52,79,117,116,32,111,102,32,109,101,109,111,114,121,59,32,97,116,32,109,111,115,116,32,49,53,32,105,110,115,116,114,117,99,116,105,111,110,115,32,99,97,110,32,98,101,32,115,116,111,114,101,100,0,109,1,1005,1262,1270,3,1262,20101,0,1262,0,109,-1,2105,1,0,109,1,21102,1288,1,0,1106,0,1263,21002,1262,1,0,1102,1,0,1262,109,-1,2106,0,0,109,5,21101,0,1310,0,1106,0,1279,22102,1,1,-2,22208,-2,-4,-1,1205,-1,1332,21201,-3,0,1,21102,1332,1,0,1106,0,1421,109,-5,2105,1,0,109,2,21102,1,1346,0,1106,0,1263,21208,1,32,-1,1205,-1,1363,21208,1,9,-1,1205,-1,1363,1106,0,1373,21102,1370,1,0,1106,0,1279,1106,0,1339,109,-2,2106,0,0,109,5,2102,1,-4,1386,20101,0,0,-2,22101,1,-4,-4,21102,1,0,-3,22208,-3,-2,-1,1205,-1,1416,2201,-4,-3,1408,4,0,21201,-3,1,-3,1105,1,1396,109,-5,2105,1,0,109,2,104,10,21201,-1,0,1,21102,1436,1,0,1105,1,1378,104,10,99,109,-2,2106,0,0,109,3,20002,594,753,-1,22202,-1,-2,-1,201,-1,754,754,109,-3,2105,1,0,109,10,21101,5,0,-5,21101,1,0,-4,21102,1,0,-3,1206,-9,1555,21101,3,0,-6,21101,0,5,-7,22208,-7,-5,-8,1206,-8,1507,22208,-6,-4,-8,1206,-8,1507,104,64,1105,1,1529,1205,-6,1527,1201,-7,716,1515,21002,0,-11,-8,21201,-8,46,-8,204,-8,1105,1,1529,104,46,21201,-7,1,-7,21207,-7,22,-8,1205,-8,1488,104,10,21201,-6,-1,-6,21207,-6,0,-8,1206,-8,1484,104,10,21207,-4,1,-8,1206,-8,1569,21102,0,1,-9,1105,1,1689,21208,-5,21,-8,1206,-8,1583,21101,1,0,-9,1106,0,1689,1201,-5,716,1588,21001,0,0,-2,21208,-4,1,-1,22202,-2,-1,-1,1205,-2,1613,21201,-5,0,1,21101,1613,0,0,1105,1,1444,1206,-1,1634,22101,0,-5,1,21102,1,1627,0,1106,0,1694,1206,1,1634,21101,0,2,-3,22107,1,-4,-8,22201,-1,-8,-8,1206,-8,1649,21201,-5,1,-5,1206,-3,1663,21201,-3,-1,-3,21201,-4,1,-4,1106,0,1667,21201,-4,-1,-4,21208,-4,0,-1,1201,-5,716,1676,
22002,0,-1,-1,1206,-1,1686,21101,0,1,-4,1106,0,1477,109,-10,2106,0,0,109,11,21102,1,0,-6,21102,1,0,-8,21102,0,1,-7,20208,-6,920,-9,1205,-9,1880,21202,-6,3,-9,1201,-9,921,1725,20101,0,0,-5,1001,1725,1,1733,20102,1,0,-4,22101,0,-4,1,21102,1,1,2,21101,9,0,3,21102,1,1754,0,1106,0,1889,1206,1,1772,2201,-10,-4,1766,1001,1766,716,1766,21002,0,1,-3,1105,1,1790,21208,-4,-1,-9,1206,-9,1786,21201,-8,0,-3,1105,1,1790,21202,-7,1,-3,1001,1733,1,1795,21001,0,0,-2,21208,-2,-1,-9,1206,-9,1812,21201,-8,0,-1,1105,1,1816,22101,0,-7,-1,21208,-5,1,-9,1205,-9,1837,21208,-5,2,-9,1205,-9,1844,21208,-3,0,-1,1106,0,1855,22202,-3,-1,-1,1105,1,1855,22201,-3,-1,-1,22107,0,-1,-1,1106,0,1855,21208,-2,-1,-9,1206,-9,1869,22102,1,-1,-8,1105,1,1873,22102,1,-1,-7,21201,-6,1,-6,1105,1,1708,21202,-8,1,-10,109,-11,2105,1,0,109,7,22207,-6,-5,-3,22207,-4,-6,-2,22201,-3,-2,-1,21208,-1,0,-6,109,-7,2106,0,0,0,109,5,1202,-2,1,1912,21207,-4,0,-1,1206,-1,1930,21101,0,0,-4,21202,-4,1,1,22101,0,-3,2,21102,1,1,3,21102,1949,1,0,1106,0,1954,109,-5,2106,0,0,109,6,21207,-4,1,-1,1206,-1,1977,22207,-5,-3,-1,1206,-1,1977,22102,1,-5,-5,1105,1,2045,21201,-5,0,1,21201,-4,-1,2,21202,-3,2,3,21101,1996,0,0,1105,1,1954,21201,1,0,-5,21102,1,1,-2,22207,-5,-3,-1,1206,-1,2015,21101,0,0,-2,22202,-3,-2,-3,22107,0,-4,-1,1206,-1,2037,21202,-2,1,1,21102,1,2037,0,106,0,1912,21202,-3,-1,-3,22201,-5,-3,-5,109,-6,2105,1,0""" | 7,345 | 7,345 | 0.687543 |
c52fa5fe46da648c33fa7618314fa8e93cc98a14 | 10,860 | py | Python | src/python/pants/backend/go/target_types.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/go/target_types.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | 12 | 2022-01-06T23:20:22.000Z | 2022-03-17T05:06:37.000Z | src/python/pants/backend/go/target_types.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Sequence
from pants.core.goals.package import OutputPathField
from pants.core.goals.run import RestartableField
from pants.engine.addresses import Address
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import GlobExpansionConjunction, GlobMatchErrorBehavior, PathGlobs
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
Dependencies,
InvalidFieldException,
InvalidTargetException,
MultipleSourcesField,
StringField,
StringSequenceField,
Target,
)
from pants.option.global_options import FilesNotFoundBehavior
# -----------------------------------------------------------------------------------------------
# `go_mod` target generator
# -----------------------------------------------------------------------------------------------
# TODO: This field probably shouldn't be registered.
# TODO(#12953): generalize this?
# -----------------------------------------------------------------------------------------------
# `go_first_party_package` target
# -----------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------
# `go_third_party_package` target
# -----------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------
# `go_binary` target
# -----------------------------------------------------------------------------------------------
class GoBinaryDependenciesField(Dependencies):
    """Hidden dependencies field for `go_binary` targets.

    The leading-underscore alias keeps the field out of user-facing
    BUILD files; it exists only for machinery to inject dependencies.
    """

    # This is only used to inject a dependency from the `GoBinaryMainPackageField`. Users should
    # add any explicit dependencies to the `go_package`.
    alias = "_dependencies"
class GoBinaryTarget(Target):
    """Target type for a compiled Go binary.

    `GoBinaryMainPackageField` is declared elsewhere in this module (not
    visible here); `core_fields` order mirrors the other target types.
    """

    alias = "go_binary"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        OutputPathField,
        GoBinaryMainPackageField,
        GoBinaryDependenciesField,
        RestartableField,
    )
    help = "A Go binary."
| 36.813559 | 98 | 0.617127 |
c52fbd848e1acb3cd166434a5aa79fb5ec3b969e | 9,623 | py | Python | iris/src/iris/main.py | headma5ter/wall-e | da7624cd58ee3e61b847af6a389cc919e1f2a8d1 | [
"MIT"
] | null | null | null | iris/src/iris/main.py | headma5ter/wall-e | da7624cd58ee3e61b847af6a389cc919e1f2a8d1 | [
"MIT"
] | null | null | null | iris/src/iris/main.py | headma5ter/wall-e | da7624cd58ee3e61b847af6a389cc919e1f2a8d1 | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
import matplotlib.lines as lines
from statistics import mode, StatisticsError
from csv import QUOTE_ALL
import pandas as pd
import pathlib
import json
from iris import logger
from iris import config
from iris import classifier
from iris.helpers.utils import log_function # TODO: change to ceres
# Feature column labels used when reading the CSV data set.
COLUMN_NAMES = ["w", "x", "y", "z"]
# Entry point: run the clustering pipeline in either "training" or
# "testing" mode, driven by the package-level `config` object.  The
# helpers (read_data, classify_clusters, map_cluster_to_species,
# serialize_data, ...) are defined elsewhere in this module.
if __name__ == "__main__":
    # Get relevant paths
    data_path = getattr(config, f"{config.stage}_data_path")
    centroid_path = config.centroid_serial_path
    mapping_path = config.mapping_serial_path
    centroids = None
    mapping = dict()
    if config.stage == "testing":
        if not centroid_path.is_file() or not mapping_path.is_file():
            # NOTE(review): logger.warn is the deprecated spelling of warning()
            logger.warn(
                "No training data to be read -- could result in poor model performance"
            )
        else:
            # Get centroids and species mapping from training data
            centroids = read_data(serial_path=centroid_path)
            mapping = read_data(serial_path=mapping_path)
    # Classify data set
    data = read_data(csv_path=data_path)
    data, centroids = classify_clusters(data, initial_centroids=centroids)
    if config.stage == "training":
        # Map species to cluster
        mapping = map_cluster_to_species(data)
        if config.serialize:
            # Save data
            serialize_data(centroids, config.centroid_serial_path)
            serialize_data(mapping, config.mapping_serial_path)
    # Add the model's species classification to data
    data = map_species_onto_data(data, mapping)
    if config.save:
        # Save testing results to files
        write_to_csv(data, config.results_path)
    if config.visualize:
        # Plot data
        plot_clusters(data, mapping)
        calculate_statistics(data, mapping)
    logger.info(f"Process complete\n\t{config.summary}")
| 32.731293 | 103 | 0.618622 |
c530c5e8d4407688c79bec94a667aec813211585 | 2,328 | py | Python | data_loader/util.py | lixiaoyu0575/physionet_challenge2020_pytorch | 39b5aeeead440eaa88d6fdaf4a8a70c15373e062 | [
"MIT"
] | 1 | 2021-05-24T08:09:30.000Z | 2021-05-24T08:09:30.000Z | data_loader/util.py | lixiaoyu0575/physionet_challenge2020_pytorch | 39b5aeeead440eaa88d6fdaf4a8a70c15373e062 | [
"MIT"
] | null | null | null | data_loader/util.py | lixiaoyu0575/physionet_challenge2020_pytorch | 39b5aeeead440eaa88d6fdaf4a8a70c15373e062 | [
"MIT"
] | null | null | null | from scipy.io import loadmat
import numpy as np
import os
import torch
from torch.utils.data import Dataset, TensorDataset
from torchvision import transforms
# Find unique classes.
# Load challenge data.
# Customed TensorDataset
def custom_collate_fn(batch):
    """Collate a batch into ``[inputs, targets]``.

    Each sample's first two elements get a new leading axis of size 1
    via ``unsqueeze(0)``.  The results are returned as two parallel
    lists (not stacked into single tensors).
    """
    inputs, targets = [], []
    for sample in batch:
        inputs.append(sample[0].unsqueeze(0))
        targets.append(sample[1].unsqueeze(0))
    return [inputs, targets]
| 28.390244 | 78 | 0.608247 |
c538cf5b43e938d74b89e921d97d1ef0493292ec | 317 | py | Python | solutions/binarysearch.io/hard/collecting-coins/main.py | zwliew/ctci | 871f4fc957be96c6d0749d205549b7b35dc53d9e | [
"MIT"
] | 4 | 2020-11-07T14:38:02.000Z | 2022-01-03T19:02:36.000Z | solutions/binarysearch.io/hard/collecting-coins/main.py | zwliew/ctci | 871f4fc957be96c6d0749d205549b7b35dc53d9e | [
"MIT"
] | 1 | 2019-04-17T06:55:14.000Z | 2019-04-17T06:55:14.000Z | solutions/binarysearch.io/hard/collecting-coins/main.py | zwliew/ctci | 871f4fc957be96c6d0749d205549b7b35dc53d9e | [
"MIT"
] | null | null | null | 3 from functools import lru_cache
4 @lru_cache(None)
6 if i < 0 or j < 0:
7 return 0
8 return max(dp(i - 1, j), dp(i, j - 1)) + matrix[i][j]
9 return dp(len(matrix) - 1, len(matrix[0]) - 1)
| 31.7 | 66 | 0.498423 |
c53adeb9103721e86c1c98d5836be2d9b0c044bf | 12,074 | py | Python | Python 3/First_steps_on_machine_learning/Maze_using_Bellman_equation/Test_Maze.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | null | null | null | Python 3/First_steps_on_machine_learning/Maze_using_Bellman_equation/Test_Maze.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | null | null | null | Python 3/First_steps_on_machine_learning/Maze_using_Bellman_equation/Test_Maze.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | 1 | 2020-08-19T17:25:22.000Z | 2020-08-19T17:25:22.000Z | import pygame, sys, maze_builder, random
def move(a=(0, 0), b=(0, 0)):
    """Return the component-wise sum of two 2-D coordinate tuples."""
    return (a[0] + b[0], a[1] + b[1])
# Hand-build a 3x3 test maze and run it.  `Maze_runner`, `Maze` and the
# `maze_builder` module are defined elsewhere (not visible here).
# Sketch legend below: s=start, e=end, g=good, b=bad, blank/p=plain.
runner = Maze_runner()
# s p p
# b g
# e p p
# Each Node declares which sides are open (up/down/left/right), its
# (column, row) grid position, and an optional special role.
maze = Maze( 1000, 1000, (3, 3))
test_node_00 = maze_builder.Node(position=(0, 0), right = True, special="start")
test_node_10 = maze_builder.Node(position=(1, 0), left = True, down = True, right = True)
test_node_20 = maze_builder.Node(position=(2, 0), down = True, left = True)
test_node_11 = maze_builder.Node(position=(1, 1), down = True, up = True, right = True, special="bad")
test_node_21 = maze_builder.Node(position=(2, 1), left = True, up=True, down=True, special="good")
test_node_22 = maze_builder.Node(position=(2, 2), up = True, left = True)
test_node_12 = maze_builder.Node(position=(1, 2), up = True, left = True, right = True)
test_node_02 = maze_builder.Node(position=(0, 2), right = True, special="end")
maze.add_node(test_node_00)
maze.add_node(test_node_20)
maze.add_node(test_node_10)
maze.add_node(test_node_11)
maze.add_node(test_node_21)
maze.add_node(test_node_22)
maze.add_node(test_node_12)
maze.add_node(test_node_02)
maze.add_runner(runner)
# maze.work_out_values(maze.end)
# maze.get_value(maze.start)
maze.run()
# maze.solve_random()
# Alternative (disabled) 2x2 maze used for earlier experiments:
# x x
# x x
# maze = Maze( 1000, 1000, (2, 2))
# test_node_00 = maze_builder.Node(position=(0, 0), right = True, up = True, special="start")
# test_node_10 = maze_builder.Node(position=(1, 0), left = True, down = True)
# test_node_11 = maze_builder.Node(position=(1, 1), up = True, left = True)
# test_node_01 = maze_builder.Node(position=(0, 1), right = True, down = True, special="end")
# maze.add_node(test_node_00)
# maze.add_node(test_node_10)
# maze.add_node(test_node_11)
# maze.add_node(test_node_01)
# maze.add_runner(runner)
# maze.run()
| 46.79845 | 265 | 0.595826 |
c53b663532da343a9e761b6ebf1b05f4670a34a6 | 13,679 | py | Python | powderday/nebular_emission/abund.py | mccbc/powderday | 604b4a242216db0e93dc2e50a77bc20dc5cfb10f | [
"BSD-3-Clause"
] | null | null | null | powderday/nebular_emission/abund.py | mccbc/powderday | 604b4a242216db0e93dc2e50a77bc20dc5cfb10f | [
"BSD-3-Clause"
] | null | null | null | powderday/nebular_emission/abund.py | mccbc/powderday | 604b4a242216db0e93dc2e50a77bc20dc5cfb10f | [
"BSD-3-Clause"
] | null | null | null | from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as InterpUS
from powderday.nebular_emission.cloudy_tools import sym_to_name
"""
------------------------------------------------------------------------------------------
From cloudyfsps written by Nell Byler.
(Source https://github.com/nell-byler/cloudyfsps/blob/master/cloudyfsps/nebAbundTools.py
retrieved in October 2019)
------------------------------------------------------------------------------------------
"""
def getNebAbunds(set_name, logZ, dust=True, re_z=False, **kwargs):
    """Dispatch to the abundance-set function named by ``set_name``.

    Parameters
    ----------
    set_name : str
        One of 'dopita', 'newdopita', 'cl01', 'yeh', 'varyNO', 'gutkin',
        'UVbyler', 'varyCO' -- each the name of a function defined in
        this module.
    logZ : float
        Log metallicity, forwarded to the abundance-set constructor.
    dust, re_z : bool
        Forwarded keyword options.

    Raises
    ------
    IOError
        If ``set_name`` is not one of the allowed set names.
    """
    allowed_names = ['dopita', 'newdopita', 'cl01', 'yeh',
                     'varyNO', 'gutkin', 'UVbyler', 'varyCO']
    if set_name not in allowed_names:
        raise IOError(allowed_names)
    # Look the target function up by name instead of building a source
    # string for eval(): same dispatch, but no code-injection surface and
    # logZ is passed directly (no float -> str -> float round trip).
    return globals()[set_name](logZ, dust=dust, re_z=re_z)
def load_abund(set_name):
    """Return gas-phase element abundances for a named abundance set.

    Parameters
    ----------
    set_name : str
        'dopita', 'newdopita', 'UVbyler' or 'gutkin'.

    Returns
    -------
    dict
        Element symbol -> log10 abundance relative to hydrogen.

    Raises
    ------
    ValueError
        For any other set name.  (Previously an unsupported name fell
        through every branch and crashed with a NameError on ``adict``.)
    """
    if set_name == 'dopita':
        adict = dict(He=-1.01,
                     C=-3.44,
                     N=-3.95,
                     O=-3.07,
                     Ne=-3.91,
                     Mg=-4.42,
                     Si=-4.45,
                     S=-4.79,
                     Ar=-5.44,
                     Ca=-5.64,
                     Fe=-4.33,
                     F=-7.52,
                     Na=-5.69,
                     Al=-5.53,
                     P=-6.43,
                     Cl=-6.73,
                     K=-6.87,
                     Ti=-6.96,
                     Cr=-6.32,
                     Mn=-6.47,
                     Co=-7.08,
                     Ni=-5.75,
                     Cu=-7.73,
                     Zn=-7.34)
    elif set_name == 'newdopita':
        adict = dict(He=-1.01,
                     C=-3.57,
                     N=-4.60,
                     O=-3.31,
                     Ne=-4.07,
                     Na=-5.75,
                     Mg=-4.40,
                     Al=-5.55,
                     Si=-4.49,
                     S=-4.86,
                     Cl=-6.63,
                     Ar=-5.60,
                     Ca=-5.66,
                     Fe=-4.50,
                     Ni=-5.78,
                     F=-7.44,
                     P=-6.59,
                     K=-6.97,
                     Cr=-6.36,
                     Ti=-7.05,
                     Mn=-6.57,
                     Co=-7.01,
                     Cu=-7.81,
                     Zn=-7.44)
    elif set_name == 'UVbyler':
        # Identical to 'newdopita' except for the nitrogen abundance.
        adict = dict(He=-1.01,
                     C=-3.57,
                     N=-4.17,
                     O=-3.31,
                     Ne=-4.07,
                     Na=-5.75,
                     Mg=-4.40,
                     Al=-5.55,
                     Si=-4.49,
                     S=-4.86,
                     Cl=-6.63,
                     Ar=-5.60,
                     Ca=-5.66,
                     Fe=-4.50,
                     Ni=-5.78,
                     F=-7.44,
                     P=-6.59,
                     K=-6.97,
                     Cr=-6.36,
                     Ti=-7.05,
                     Mn=-6.57,
                     Co=-7.01,
                     Cu=-7.81,
                     Zn=-7.44)
    elif set_name == 'gutkin':
        adict = dict(He=-1.01,
                     C=-3.53,
                     N=-4.32,
                     O=-3.17,
                     F=-7.47,
                     Ne=-4.01,
                     Na=-5.70,
                     Mg=-4.45,
                     Al=-5.56,
                     Si=-4.48,
                     P=-6.57,
                     S=-4.87,
                     Cl=-6.53,
                     Ar=-5.63,
                     K=-6.92,
                     Ca=-5.67,
                     Sc=-8.86,
                     Ti=-7.01,
                     V=-8.03,
                     Cr=-6.36,
                     Mn=-6.64,
                     Fe=-4.51,
                     Co=-7.11,
                     Ni=-5.78,
                     Cu=-7.82,
                     Zn=-7.43)
    else:
        raise ValueError("unknown abundance set: {0!r}".format(set_name))
    return adict
def load_depl(set_name):
    """Return dust depletion factors for a named abundance set.

    Mirrors load_abund(): a dict mapping element symbol -> log10
    depletion onto grains (0.0 means no depletion).

    Raises
    ------
    ValueError
        For any set name without a depletion table ('dopita',
        'newdopita', 'UVbyler' and 'gutkin' are supported; previously
        other names fell through to a NameError on ``ddict``).
    """
    if set_name == 'dopita':
        ddict = dict(C=-0.30,
                     N=-0.22,
                     O=-0.22,
                     Ne=0.0,
                     Mg=-0.70,
                     Si=-1.0,
                     S=0.0,
                     Ar=0.0,
                     Ca=-2.52,
                     Fe=-2.0,
                     F=0.0,
                     Na=0.0,
                     Al=0.0,
                     P=0.0,
                     Cl=0.0,
                     K=0.0,
                     Ti=0.0,
                     Cr=0.0,
                     Mn=0.0,
                     Co=0.0,
                     Ni=0.0,
                     Cu=0.0,
                     Zn=0.0)
    elif set_name == 'newdopita':
        ddict = dict(He=0.00,
                     C=-0.30,
                     N=-0.05,
                     O=-0.07,
                     Ne=0.00,
                     Na=-1.00,
                     Mg=-1.08,
                     Al=-1.39,
                     Si=-0.81,
                     S=0.00,
                     Cl=-1.00,
                     Ar=0.00,
                     Ca=-2.52,
                     Fe=-1.31,
                     Ni=-2.00,
                     F=0.0,
                     P=0.0,
                     K=0.0,
                     Cr=0.0,
                     Ti=0.0,
                     Mn=0.0,
                     Co=0.0,
                     Cu=0.0,
                     Zn=0.0)
    elif set_name == 'UVbyler':
        # Same depletion pattern as 'newdopita'.
        ddict = dict(He=0.00,
                     C=-0.30,
                     N=-0.05,
                     O=-0.07,
                     Ne=0.00,
                     Na=-1.00,
                     Mg=-1.08,
                     Al=-1.39,
                     Si=-0.81,
                     S=0.00,
                     Cl=-1.00,
                     Ar=0.00,
                     Ca=-2.52,
                     Fe=-1.31,
                     Ni=-2.00,
                     F=0.0,
                     P=0.0,
                     K=0.0,
                     Cr=0.0,
                     Ti=0.0,
                     Mn=0.0,
                     Co=0.0,
                     Cu=0.0,
                     Zn=0.0)
    elif set_name == 'gutkin':
        ddict = dict(He=0.00,
                     Li=-0.8,
                     C=-0.30,
                     O=-0.15,
                     Na=-0.60,
                     Mg=-0.70,
                     Al=-1.70,
                     Si=-1.00,
                     Cl=-0.30,
                     Ca=-2.52,
                     Fe=-2.00,
                     Ni=-1.40)
    else:
        raise ValueError("no depletion table for set: {0!r}".format(set_name))
    return ddict
| 31.159453 | 90 | 0.370934 |
c53b92a47fb947f6f8b829b01647aa8c055f8973 | 644 | py | Python | character/migrations/0004_alter_character_alignment.py | scottBowles/dnd | a1ef333f1a865d51b5426dc4b3493e8437584565 | [
"MIT"
] | null | null | null | character/migrations/0004_alter_character_alignment.py | scottBowles/dnd | a1ef333f1a865d51b5426dc4b3493e8437584565 | [
"MIT"
] | null | null | null | character/migrations/0004_alter_character_alignment.py | scottBowles/dnd | a1ef333f1a865d51b5426dc4b3493e8437584565 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-12 02:00
from django.db import migrations, models
| 33.894737 | 300 | 0.591615 |
c53bcf309d42be5b0611b4932b04593b5fb3c79b | 818 | py | Python | graphs_trees/check_balance/test_check_balance.py | filippovitale/interactive-coding-challenges | 8380a7aa98618c3cc9c0271c30bd320937d431ad | [
"Apache-2.0"
] | null | null | null | graphs_trees/check_balance/test_check_balance.py | filippovitale/interactive-coding-challenges | 8380a7aa98618c3cc9c0271c30bd320937d431ad | [
"Apache-2.0"
] | null | null | null | graphs_trees/check_balance/test_check_balance.py | filippovitale/interactive-coding-challenges | 8380a7aa98618c3cc9c0271c30bd320937d431ad | [
"Apache-2.0"
] | 1 | 2019-12-13T12:57:44.000Z | 2019-12-13T12:57:44.000Z | from nose.tools import assert_equal
if __name__ == '__main__':
main() | 20.974359 | 48 | 0.570905 |
c53bd8529e678df43ecc3a88f38641a5587a1587 | 1,129 | py | Python | D_predict.py | shanqu91/microseismic_event_detection_via_CNN | ff9f0de135d14741c057a2a78e1fd69db18ae1d2 | [
"MIT"
] | null | null | null | D_predict.py | shanqu91/microseismic_event_detection_via_CNN | ff9f0de135d14741c057a2a78e1fd69db18ae1d2 | [
"MIT"
] | null | null | null | D_predict.py | shanqu91/microseismic_event_detection_via_CNN | ff9f0de135d14741c057a2a78e1fd69db18ae1d2 | [
"MIT"
] | 1 | 2021-10-05T08:41:15.000Z | 2021-10-05T08:41:15.000Z | import keras
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from scipy import io
# Load the two held-out test sets from MATLAB .mat files (scipy.io).
mat_contents = io.loadmat('Data/X_test_0.mat')
X_test_0 = mat_contents['X_test_0']
mat_contents = io.loadmat('Data/X_test_1.mat')
X_test_1 = mat_contents['X_test_1']
batch_size = 40
num_classes = 2  # NOTE(review): unused below; presumably matches the trained model's outputs -- confirm
# Append a trailing singleton axis: (N, rows, cols) -> (N, rows, cols, 1),
# i.e. channels-last layout for the saved Conv2D model.
test_datasize, patch_rows, patch_cols = X_test_0.shape[0], X_test_0.shape[1], X_test_0.shape[2]
X_test_0 = X_test_0.reshape(test_datasize, patch_rows, patch_cols, 1)
test_datasize, patch_rows, patch_cols = X_test_1.shape[0], X_test_1.shape[1], X_test_1.shape[2]
X_test_1 = X_test_1.reshape(test_datasize, patch_rows, patch_cols, 1)
print('X_test_0 shape:', X_test_0.shape)
print('X_test_1 shape:', X_test_1.shape)
# load trained model
model = load_model('Data/trained_model.h5')
# prediction
Y_test_0 = model.predict(X_test_0, batch_size=batch_size, verbose=1)
Y_test_1 = model.predict(X_test_1, batch_size=batch_size, verbose=1)
# Persist predictions back to .mat files for downstream analysis.
io.savemat('Data/Y_test_0.mat', {'Y_test_0':Y_test_0})
io.savemat('Data/Y_test_1.mat', {'Y_test_1':Y_test_1})
| 35.28125 | 95 | 0.782108 |
c53d72e1616e580f62f88e5fc1f0a262cb103728 | 94 | py | Python | app/db_manager/apps.py | PragmaticCoder/Linkedin-Analytics | a990b5cae02f0d758bc3123bde643d13a439efa3 | [
"MIT"
] | 13 | 2018-07-31T15:37:47.000Z | 2021-12-20T04:48:13.000Z | app/db_manager/apps.py | PragmaticCoder/Linkedin-Analytics | a990b5cae02f0d758bc3123bde643d13a439efa3 | [
"MIT"
] | 25 | 2019-12-10T20:03:48.000Z | 2022-03-11T23:26:11.000Z | app/db_manager/apps.py | PragmaticCoder/Linkedin-Analytics | a990b5cae02f0d758bc3123bde643d13a439efa3 | [
"MIT"
] | 4 | 2020-03-24T20:13:50.000Z | 2022-02-05T20:40:48.000Z | from django.apps import AppConfig
| 15.666667 | 33 | 0.765957 |
c53d83148b42eaa02961efd8a515c82ec643034c | 813 | py | Python | examples/dialogs.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 666 | 2016-11-14T18:17:40.000Z | 2022-03-29T03:53:22.000Z | examples/dialogs.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 598 | 2016-10-20T21:04:09.000Z | 2022-03-15T22:44:49.000Z | examples/dialogs.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 95 | 2017-01-19T12:23:58.000Z | 2022-03-06T18:16:21.000Z | from appJar import gui
app=gui()
app.addButtons(["info", "error", "warning", "yesno", "question"], press)
app.addButtons(["ok", "retry", "text", "number"], press)
app.go()
| 45.166667 | 74 | 0.607626 |
c53d9c366f6302c3f4189f86bcaf5a05f084763e | 19,136 | py | Python | src_RealData/Nets/ObjectOriented.py | XYZsake/DRFNS | 73fc5683db5e9f860846e22c8c0daf73b7103082 | [
"MIT"
] | 42 | 2018-10-07T08:19:01.000Z | 2022-02-08T17:41:24.000Z | src_RealData/Nets/ObjectOriented.py | XYZsake/DRFNS | 73fc5683db5e9f860846e22c8c0daf73b7103082 | [
"MIT"
] | 11 | 2018-12-22T00:15:46.000Z | 2021-12-03T10:29:32.000Z | src_RealData/Nets/ObjectOriented.py | XYZsake/DRFNS | 73fc5683db5e9f860846e22c8c0daf73b7103082 | [
"MIT"
] | 14 | 2018-08-26T06:47:06.000Z | 2021-07-24T11:52:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import os
from sklearn.metrics import confusion_matrix
from datetime import datetime | 38.272 | 215 | 0.570861 |
c53e560dfa34e9fcc79e711abf7084717bfce494 | 1,571 | py | Python | flaskr/test/unit/webapp/test_change_light_color.py | UnibucProjects/SmartAquarium | 6f3c16fb7a45218e763b46223568f6c3e5b66bfd | [
"MIT"
] | 6 | 2022-02-02T19:37:57.000Z | 2022-02-03T15:12:32.000Z | flaskr/test/unit/webapp/test_change_light_color.py | UnibucProjects/SmartAquarium | 6f3c16fb7a45218e763b46223568f6c3e5b66bfd | [
"MIT"
] | 18 | 2022-01-29T22:47:46.000Z | 2022-02-03T15:30:28.000Z | flaskr/test/unit/webapp/test_change_light_color.py | UnibucProjects/SmartAquarium | 6f3c16fb7a45218e763b46223568f6c3e5b66bfd | [
"MIT"
] | null | null | null | from flask import request
import pytest
import json
from app import create_app, create_rest_api
from db import get_db
from change_light import is_aquarium_id_valid
| 26.627119 | 78 | 0.706556 |
c53e6e767c955b2bf53a179312e0dc8ac8e05972 | 4,293 | py | Python | commands/inventory.py | zbylyrcxr/DennisMUD | cb9be389e3be3e267fd78b1520ed2902941742da | [
"MIT"
] | 2 | 2022-02-21T17:55:03.000Z | 2022-02-22T06:25:04.000Z | commands/inventory.py | zbylyrcxr/DennisMUD | cb9be389e3be3e267fd78b1520ed2902941742da | [
"MIT"
] | 3 | 2022-02-09T18:18:29.000Z | 2022-03-07T08:15:54.000Z | commands/inventory.py | zbylyrcxr/DennisMUD | cb9be389e3be3e267fd78b1520ed2902941742da | [
"MIT"
] | 1 | 2022-03-07T08:10:59.000Z | 2022-03-07T08:10:59.000Z | #######################
# Dennis MUD #
# inventory.py #
# Copyright 2018-2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
from lib.litnumbers import *
from lib.vigenere import *
import random
NAME = "inventory"
CATEGORIES = ["items"]
ALIASES = ["inv", "i"]
USAGE = "inventory"
DESCRIPTION = "List all of the items in your inventory."
| 39.027273 | 136 | 0.642208 |
c53ebab62d8ce95d55ec92330a072c34d445b216 | 296 | py | Python | tests/polynomials.py | mernst/cozy | d7b2c0ee575057dea4ebec201d579f0ecd785b1b | [
"Apache-2.0"
] | 188 | 2017-11-27T18:59:34.000Z | 2021-12-31T02:28:33.000Z | tests/polynomials.py | mernst/cozy | d7b2c0ee575057dea4ebec201d579f0ecd785b1b | [
"Apache-2.0"
] | 95 | 2017-11-13T01:21:48.000Z | 2020-10-30T06:38:14.000Z | tests/polynomials.py | mernst/cozy | d7b2c0ee575057dea4ebec201d579f0ecd785b1b | [
"Apache-2.0"
] | 16 | 2018-02-13T04:49:09.000Z | 2021-02-06T13:26:46.000Z | import unittest
from cozy.polynomials import Polynomial
| 29.6 | 84 | 0.712838 |
c53ef504f8c908892ab80122b5998f9150c4ae18 | 823 | py | Python | presenters/calculator_presenter.py | RamonWill/portfolio-management-project | ac8ce313f8d62f09810fc1da19d6b252f193871b | [
"MIT"
] | 14 | 2020-01-01T04:59:06.000Z | 2022-02-08T06:48:21.000Z | presenters/calculator_presenter.py | linhvien/portfolio-management-project | ac8ce313f8d62f09810fc1da19d6b252f193871b | [
"MIT"
] | null | null | null | presenters/calculator_presenter.py | linhvien/portfolio-management-project | ac8ce313f8d62f09810fc1da19d6b252f193871b | [
"MIT"
] | 8 | 2020-10-15T06:52:37.000Z | 2021-10-04T06:44:36.000Z | from custom_objects import FinanceCalculator
from tkinter import messagebox
| 34.291667 | 80 | 0.647631 |
c53f341c44f58f7cf080b91299e6c06e76e614e8 | 1,877 | py | Python | core/power_status_monitor.py | kangyifei/CloudSimPy | 45912e7ea35086b67941624102e400cb22e549ab | [
"MIT"
] | null | null | null | core/power_status_monitor.py | kangyifei/CloudSimPy | 45912e7ea35086b67941624102e400cb22e549ab | [
"MIT"
] | null | null | null | core/power_status_monitor.py | kangyifei/CloudSimPy | 45912e7ea35086b67941624102e400cb22e549ab | [
"MIT"
] | null | null | null | import json
| 33.517857 | 107 | 0.606819 |
c53f7e729f7148ea37a06ebe087c005b16755a1d | 25,133 | py | Python | maintest.py | thorsilver/ABM-for-social-care | 3a47868d2881799980a3f9f24b78c66a31eda194 | [
"MIT"
] | null | null | null | maintest.py | thorsilver/ABM-for-social-care | 3a47868d2881799980a3f9f24b78c66a31eda194 | [
"MIT"
] | null | null | null | maintest.py | thorsilver/ABM-for-social-care | 3a47868d2881799980a3f9f24b78c66a31eda194 | [
"MIT"
] | 1 | 2018-01-05T15:42:40.000Z | 2018-01-05T15:42:40.000Z |
from sim import Sim
import os
import cProfile
import pylab
import math
import matplotlib.pyplot as plt
import argparse
import json
import decimal
import numpy as np
def init_params():
    """Set up the simulation parameters.

    Returns a flat dict ``p`` of every tunable used by the model: run
    control, mortality/fertility/care rates, household mobility, the UK
    map description and GUI settings.  The ``...ModifierByDecade`` lists
    appear to be indexed by age band -- confirm exact banding against
    the consumers in Sim.
    """
    p = {}
    ## The basics: starting population and year, etc.
    p['initialPop'] = 750
    p['startYear'] = 1860
    p['endYear'] = 2050
    p['thePresent'] = 2012
    p['statsCollectFrom'] = 1960
    p['minStartAge'] = 20
    p['maxStartAge'] = 40
    p['verboseDebugging'] = False
    p['singleRunGraphs'] = True
    p['favouriteSeed'] = None
    p['numRepeats'] = 1
    p['loadFromFile'] = False
    ## Mortality statistics
    p['baseDieProb'] = 0.0001
    p['babyDieProb'] = 0.005
    p['maleAgeScaling'] = 14.0
    p['maleAgeDieProb'] = 0.00021
    p['femaleAgeScaling'] = 15.5
    p['femaleAgeDieProb'] = 0.00019
    p['num5YearAgeClasses'] = 28
    ## Transitions to care statistics
    p['baseCareProb'] = 0.0002
    p['personCareProb'] = 0.0008
    ##p['maleAgeCareProb'] = 0.0008
    p['maleAgeCareScaling'] = 18.0
    ##p['femaleAgeCareProb'] = 0.0008
    p['femaleAgeCareScaling'] = 19.0
    p['numCareLevels'] = 5
    p['cdfCareTransition'] = [ 0.7, 0.9, 0.95, 1.0 ]
    p['careLevelNames'] = ['none','low','moderate','substantial','critical']
    p['careDemandInHours'] = [ 0.0, 8.0, 16.0, 30.0, 80.0 ]
    ## Availability of care statistics
    p['childHours'] = 5.0
    p['homeAdultHours'] = 30.0
    p['workingAdultHours'] = 25.0
    p['retiredHours'] = 60.0
    p['lowCareHandicap'] = 0.5
    p['hourlyCostOfCare'] = 20.0
    ## Fertility statistics
    p['growingPopBirthProb'] = 0.215
    p['steadyPopBirthProb'] = 0.13
    p['transitionYear'] = 1965
    p['minPregnancyAge'] = 17
    p['maxPregnancyAge'] = 42
    ## Class and employment statistics
    p['numOccupationClasses'] = 3
    p['occupationClasses'] = ['lower','intermediate','higher']
    p['cdfOccupationClasses'] = [ 0.6, 0.9, 1.0 ]
    ## Age transition statistics
    p['ageOfAdulthood'] = 17
    p['ageOfRetirement'] = 65
    ## Marriage and divorce statistics (partnerships really)
    p['basicFemaleMarriageProb'] = 0.25
    p['femaleMarriageModifierByDecade'] = [ 0.0, 0.5, 1.0, 1.0, 1.0, 0.6, 0.5, 0.4, 0.1, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0 ]
    p['basicMaleMarriageProb'] = 0.3
    p['maleMarriageModifierByDecade'] = [ 0.0, 0.16, 0.5, 1.0, 0.8, 0.7, 0.66, 0.5, 0.4, 0.2, 0.1, 0.05, 0.01, 0.0, 0.0, 0.0 ]
    p['basicDivorceRate'] = 0.06
    p['variableDivorce'] = 0.06
    p['divorceModifierByDecade'] = [ 0.0, 1.0, 0.9, 0.5, 0.4, 0.2, 0.1, 0.03, 0.01, 0.001, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0 ]
    ## Leaving home and moving around statistics
    p['probApartWillMoveTogether'] = 0.3
    p['coupleMovesToExistingHousehold'] = 0.3
    p['basicProbAdultMoveOut'] = 0.22
    p['probAdultMoveOutModifierByDecade'] = [ 0.0, 0.2, 1.0, 0.6, 0.3, 0.15, 0.03, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
    p['basicProbSingleMove'] = 0.05
    p['probSingleMoveModifierByDecade'] = [ 0.0, 1.0, 1.0, 0.8, 0.4, 0.06, 0.04, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
    p['basicProbFamilyMove'] = 0.03
    p['probFamilyMoveModifierByDecade'] = [ 0.0, 0.5, 0.8, 0.5, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]
    p['agingParentsMoveInWithKids'] = 0.1
    p['variableMoveBack'] = 0.1
    ## Description of the map, towns, and houses
    p['mapGridXDimension'] = 8
    p['mapGridYDimension'] = 12
    p['townGridDimension'] = 40
    p['numHouseClasses'] = 3
    p['houseClasses'] = ['small','medium','large']
    # NOTE(review): last CDF entry is 5.0, not 1.0 -- harmless as a
    # catch-all with random()<x sampling, but looks like a typo; confirm.
    p['cdfHouseClasses'] = [ 0.6, 0.9, 5.0 ]
    # 12x8 grid of per-cell weights matching mapGridY/XDimension;
    # presumably relative population density (scaled by mapDensityModifier).
    p['ukMap'] = [ [ 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0 ],
                   [ 0.1, 0.1, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0 ],
                   [ 0.0, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0 ],
                   [ 0.0, 0.2, 1.0, 0.5, 0.0, 0.0, 0.0, 0.0 ],
                   [ 0.4, 0.0, 0.2, 0.2, 0.4, 0.0, 0.0, 0.0 ],
                   [ 0.6, 0.0, 0.0, 0.3, 0.8, 0.2, 0.0, 0.0 ],
                   [ 0.0, 0.0, 0.0, 0.6, 0.8, 0.4, 0.0, 0.0 ],
                   [ 0.0, 0.0, 0.2, 1.0, 0.8, 0.6, 0.1, 0.0 ],
                   [ 0.0, 0.0, 0.1, 0.2, 1.0, 0.6, 0.3, 0.4 ],
                   [ 0.0, 0.0, 0.5, 0.7, 0.5, 1.0, 1.0, 0.0 ],
                   [ 0.0, 0.0, 0.2, 0.4, 0.6, 1.0, 1.0, 0.0 ],
                   [ 0.0, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]
    p['mapDensityModifier'] = 0.6
    # Per-cell adjustments, same 12x8 layout as ukMap; presumably biases
    # the occupation-class draw by location -- confirm in consumers.
    p['ukClassBias'] = [
                   [ 0.0, -0.05, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
                   [ -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
                   [ 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0 ],
                   [ 0.0, -0.05, -0.05, 0.05, 0.0, 0.0, 0.0, 0.0 ],
                   [ -0.05, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
                   [ -0.05, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],
                   [ 0.0, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],
                   [ 0.0, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
                   [ 0.0, 0.0, -0.05, 0.0, -0.05, 0.0, 0.0, 0.0 ],
                   [ 0.0, 0.0, 0.0, -0.05, 0.0, 0.2, 0.15, 0.0 ],
                   [ 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.15, 0.0 ],
                   [ 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]
    ## Graphical interface details
    p['interactiveGraphics'] = True
    p['delayTime'] = 0.0
    p['screenWidth'] = 1300
    p['screenHeight'] = 700
    p['bgColour'] = 'black'
    p['mainFont'] = 'Helvetica 18'
    p['fontColour'] = 'white'
    p['dateX'] = 70
    p['dateY'] = 20
    p['popX'] = 70
    p['popY'] = 50
    p['pixelsInPopPyramid'] = 2000
    p['careLevelColour'] = ['blue','green','yellow','orange','red']
    p['houseSizeColour'] = ['brown','purple','yellow']
    p['pixelsPerTown'] = 56
    p['maxTextUpdateList'] = 22
    return p
p = init_params()
#######################################################
## A basic single run
#######################################################
## Batch run (no graphics)
#######################################################
## Retirement age run (no graphics)
#######################################################
##runs for sensitivity analysis using GEM-SA
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH
# def sensitivityLarge(runtype, ageingList, careList, retiredHList, retiredAList, baseDieList, babyDieList, personCareList, maleCareList, femaleCareList, \
# childHoursList, homeAdultList, workingAdultList, lowCareList, growingBirthList, basicDivorceList, variableDivorceList, basicMaleMarriageList, \
# basicFemaleMarriageList, probMoveList, moveHouseholdList, probMoveOutList, probMoveBackList, reps):
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH, 10 params
#######################################################
# Recurrent neural network experiments -- 10 params, outputs recorded per year
#######################################################
## A profiling run; use import pstats then p = pstats.Stats('profile.txt') then p.sort_stats('time').print_stats(10)
#cProfile.run('s.run()','profile.txt')
#######################################################
## Parse command line arguments
def loadParamFile(file, dict):
    """
    Given a JSON filename and a dictionary, return the dictionary with
    the file's fields merged into it (mutated in place and returned).

    Example: if the initial dictionary is
    dict['bobAge'] = 90 and dict['samAge']=20 and the JSON data is
    {'age':{'bob':40, 'fred':35}}
    the returned dictionary contains the following data values:
    dict['bobAge'] = 40, dict['fredAge'] = 35, dict['samAge'] = 20
    """
    # Use a context manager so the file handle is always closed (the
    # original open(file).read() leaked it), and parse the stream directly.
    with open(file) as json_file:
        data = json.load(json_file)
    for group in data:
        fields = data[group]
        # NB: the builtin `dict` type is shadowed by the parameter name,
        # so compare against type({}) rather than isinstance(..., dict).
        if isinstance(fields, type({})):
            # Group of fields - create name from item and group,
            # e.g. group 'age' + item 'bob' -> key 'bobAge'
            for item in fields:
                name = item + group[:1].upper() + group[1:]
                dict[name] = fields[item]
        else:
            # Single data value - naming is assumed to be correct case
            dict[group] = fields
    return dict
def loadCommandLine(dict):
    """Process the command line, loading params file (if required). The dict
    argument will be augmented with data from the user-specified parameters
    file (if required), otherwise will return the dict argument unchanged"""
    # NOTE(review): this function both parses the arguments AND dispatches
    # the chosen run mode.  It relies on module-level names (p, np,
    # basicRun, batchRun, retireRun, gemRun, sensitivityRun,
    # sensitivityLarge, sensitivityTenParams, RNNOutputScenario) defined
    # elsewhere in this file; the `res` assignments below are unused.
    parser = argparse.ArgumentParser(
        description='lives v1.0: complex social behaviour simulation.',
        epilog='Example: "maintest.py -f test.json -n 3" --- run 3 sims with test.json\'s params',
        formatter_class=argparse.RawTextHelpFormatter,
        prog='lives',
        usage='use "%(prog)s -h" for more information')
    # The batch-mode flags are mutually exclusive; -f/--file may combine
    # with any of them.
    group = parser.add_mutually_exclusive_group()
    parser.add_argument(
        '-f', '--file',
        help='parameters file in JSON format e.g. soylent.json')
    group.add_argument(
        '-n', '--num', metavar='N', type=int, default=0,
        help='number of runs to carry out.')
    group.add_argument('-r', '--retire', metavar='R', type=int, default=0,
                       help='retirement batch, number of iterations.')
    group.add_argument('-g', '--gem', metavar='G', type=int, default=0,
                       help='GEM-SA batch for sensitivity analysis, number of iterations.')
    group.add_argument('-l', '--lptau', metavar='L', type=int, default=0,
                       help='sensitivity analysis batch with LPtau sampling.')
    group.add_argument('-m', '--maximin', metavar='M', type=int, default=0,
                       help='sensitivity analysis batch with maximin latin hypercube sampling.')
    group.add_argument('-b', '--bigly', metavar='B', type=int, default=0,
                       help='bigly sensitivity analysis batch with maximin latin hypercube sampling.')
    group.add_argument('-t', '--tenparams', metavar='T', type=int, default=0,
                       help='10 parameter sensitivity analysis batch with maximin latin hypercube sampling.')
    group.add_argument('-c', '--recurrent', metavar='C', type=int, default=0,
                       help='10 parameter time-series run for RNN.')
    args = parser.parse_args()
    print("~ Filename: {}".format(args.file))
    print("~ Number: {}".format(args.num))
    print("~ Retire: {}".format(args.retire))
    print("~ GEM-SA: {}".format(args.gem))
    print("~ LPtau: {}".format(args.lptau))
    print("~ Maximin: {}".format(args.maximin))
    print("~ Big SA: {}".format(args.bigly))
    print("~ Ten Params: {}".format(args.tenparams))
    print("~ Ten Params RNN: {}".format(args.recurrent))
    if args.file:
        #agingParentList = json.load(retireList, parse_float=decimal.Decimal)
        res = loadParamFile (args.file, dict)
        print ("p = {}".format(dict))
        basicRun(dict)
    elif args.num >= 1:
        batchRun(args.num)
    elif args.retire:
        p['ageingParentList'] = []
        res = loadParamFile('retire.json', dict)
        print("List = {}".format(dict))
        retireRun(args.retire)
    elif args.gem:
        p['ageingParentList'] = []
        p['careProbList'] = []
        p['retiredHoursList'] = []
        p['retiredAgeList'] = []
        res = loadParamFile('gem.json', dict)
        print("List = {}".format(dict))
        gemRun(args.gem)
    elif args.lptau:
        # Columns of the LPtau design file become per-parameter settings.
        sim_array = np.genfromtxt('lptau-4params.txt', delimiter=' ')
        sim_list = list(sim_array.T)
        # print(sim_list)
        ageingParentSettings = sim_list[0]
        careProbSettings = sim_list[1]
        retiredHoursSettings = sim_list[2]
        retiredAgeSettings = sim_list[3]
        # print(ageingParentSettings)
        # print(careProbSettings)
        # print(retiredHoursSettings)
        # print(retiredAgeSettings)
        sensitivityRun('LPtau', ageingParentSettings, careProbSettings, retiredHoursSettings, retiredAgeSettings, args.lptau)
    elif args.maximin:
        sim_array = np.genfromtxt('latinhypercube-4params.txt', delimiter=' ')
        sim_list = list(sim_array.T)
        # print(sim_list)
        ageingParentSettings = sim_list[0]
        careProbSettings = sim_list[1]
        retiredHoursSettings = sim_list[2]
        retiredAgeSettings = sim_list[3]
        # print(ageingParentSettings)
        # print(careProbSettings)
        # print(retiredHoursSettings)
        # print(retiredAgeSettings)
        sensitivityRun('Maximin', ageingParentSettings, careProbSettings, retiredHoursSettings, retiredAgeSettings, args.maximin)
    elif args.bigly:
        sim_array = np.genfromtxt('latinhypercube-22params.txt', delimiter=' ')
        sim_list = list(sim_array.T)
        #print(sim_list)
        np.savetxt('hypercube22_GEMSA_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
        sensitivityLarge('hypercube22', sim_list, args.bigly)
    elif args.tenparams:
        sim_array = np.genfromtxt('LPtau-10params.txt', delimiter=' ')
        sim_list = list(sim_array.T)
        #print(sim_list)
        np.savetxt('lptau10_GEMSA_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
        sensitivityTenParams('lptau10', sim_list, args.tenparams)
    elif args.recurrent:
        sim_array = np.genfromtxt('lptau10round2_GEMSA_inputs.csv', delimiter=',')
        sim_list = list(sim_array.T)
        print(sim_list)
        np.savetxt('lptau10_recurrent_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
        RNNOutputScenario('LPtauRNN', sim_list, args.recurrent)
    else:
        basicRun(p)
    return dict
# Load the default values, overwriting and adding to the initial p values
loadParamFile("default.json", p)
# Load values based upon the command line file passed (if any).
loadCommandLine (p)
#print ("p = {}".format(p)) | 40.08453 | 188 | 0.56018 |
c53f83b4724adf9f9dc5fc23447830899cf93a99 | 2,427 | py | Python | mainapp/views.py | MelqonHovhannisyan/weather | 455ce90fd480efb6c05002a53ed478fa4014e84b | [
"MIT"
] | null | null | null | mainapp/views.py | MelqonHovhannisyan/weather | 455ce90fd480efb6c05002a53ed478fa4014e84b | [
"MIT"
] | null | null | null | mainapp/views.py | MelqonHovhannisyan/weather | 455ce90fd480efb6c05002a53ed478fa4014e84b | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from .serializers import WeatherSerializer
import requests
import json
import math
import os
import yaml
from rest_framework.decorators import action
from django.conf import settings
def api_docs(request):
    """Render the Swagger UI page for the API documentation.

    Reads the OpenAPI specification from ``api.yaml`` in the project
    base directory and embeds it into the Swagger template as a JSON
    string under the ``data`` context key.

    :param request: incoming Django HTTP request
    :return: rendered ``swagger_base.html`` response
    """
    spec_path = os.path.join(settings.BASE_DIR, 'api.yaml')
    # Open via a context manager so the handle is closed deterministically
    # (the previous version leaked the file object until garbage collection)
    # and avoid shadowing the builtin name ``file``.
    with open(spec_path, encoding='utf8') as spec_file:
        spec = yaml.safe_load(spec_file.read())
    return render(request, template_name="swagger_base.html", context={'data': json.dumps(spec)})
| 37.338462 | 180 | 0.562835 |
c53f9f1e1c994d952d8c3879b34114ccaf382fd6 | 5,420 | py | Python | tests/test_backtrack.py | nisaruj/algorithms | 1e03cd259c2d7ada113eb99843dcada9f20adf54 | [
"MIT"
] | 6 | 2018-12-12T09:14:05.000Z | 2019-04-29T22:07:28.000Z | tests/test_backtrack.py | nisaruj/algorithms | 1e03cd259c2d7ada113eb99843dcada9f20adf54 | [
"MIT"
] | null | null | null | tests/test_backtrack.py | nisaruj/algorithms | 1e03cd259c2d7ada113eb99843dcada9f20adf54 | [
"MIT"
] | 1 | 2021-07-16T16:49:35.000Z | 2021-07-16T16:49:35.000Z | from algorithms.backtrack import (
add_operators,
permute,
permute_iter,
anagram,
array_sum_combinations,
unique_array_sum_combinations,
combination_sum,
find_words,
pattern_match,
)
import unittest
from algorithms.backtrack.generate_parenthesis import *
if __name__ == '__main__':
unittest.main()
| 28.983957 | 104 | 0.474908 |
c542862715caa74d2fd3f0e9e9fcab1cbbe24d4a | 284 | py | Python | syncless/wscherry.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | 4 | 2015-11-25T09:06:44.000Z | 2019-12-11T21:35:21.000Z | syncless/wscherry.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | null | null | null | syncless/wscherry.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | 2 | 2015-11-25T09:19:38.000Z | 2016-02-26T03:54:06.000Z | import sys
sys.path.append("/usr/lib/python2.7/site-packages")
import redis
_r = redis.Redis(host='localhost', port=6379, db=0)
import cherrypy
cherrypy.quickstart(Test())
| 17.75 | 51 | 0.661972 |
c544eb603d7c0e4860f104e7e494d3ae3bdfe615 | 538 | py | Python | server.py | celinekeisja/jobmonitorservice | aaf56dd198c1275439a0f5ed27617fb458f715ac | [
"MIT"
] | null | null | null | server.py | celinekeisja/jobmonitorservice | aaf56dd198c1275439a0f5ed27617fb458f715ac | [
"MIT"
] | null | null | null | server.py | celinekeisja/jobmonitorservice | aaf56dd198c1275439a0f5ed27617fb458f715ac | [
"MIT"
] | 1 | 2019-11-11T10:26:42.000Z | 2019-11-11T10:26:42.000Z | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from config import db
import config
app = config.connex_app
app.add_api('swagger.yml')
migrate = Migrate(app=app, db=db)
manager = Manager(app=app)
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
# app.run(host='localhost', port=5000, debug=True) | 20.692308 | 54 | 0.711896 |
c54618a73487992c76ea8d3ae910cd85c832a27e | 4,939 | py | Python | website/addons/figshare/views/config.py | harrismendell/osf.io | e2727b1bb2aaa7de494f941be08cb3e9305ae624 | [
"Apache-2.0"
] | null | null | null | website/addons/figshare/views/config.py | harrismendell/osf.io | e2727b1bb2aaa7de494f941be08cb3e9305ae624 | [
"Apache-2.0"
] | null | null | null | website/addons/figshare/views/config.py | harrismendell/osf.io | e2727b1bb2aaa7de494f941be08cb3e9305ae624 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import httplib as http
from flask import request
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from website.util import web_url_for
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_valid_project
)
from ..api import Figshare
from ..utils import options_to_hgrid
###### AJAX Config
def serialize_settings(node_settings, current_user, client=None):
    """View helper that returns a dictionary representation of a
    FigshareNodeSettings record. Provides the return value for the
    figshare config endpoints.

    :param node_settings: FigshareNodeSettings record for the node
    :param current_user: the user making the request
    :param client: optional pre-built Figshare API client; when omitted
        one is created from the stored ``user_settings``
    :return: dict consumed by the figshare addon config front-end
    """
    current_user_settings = current_user.get_addon('figshare')
    user_settings = node_settings.user_settings
    # Whether the requesting user has authorized figshare on their own account.
    user_has_auth = current_user_settings is not None and current_user_settings.has_auth
    # Whether the credentials attached to this node belong to the requester.
    user_is_owner = user_settings is not None and (
        user_settings.owner._primary_key == current_user._primary_key
    )
    valid_credentials = True
    if user_settings:
        client = client or Figshare.from_settings(user_settings)
        # NOTE(review): only the HTTP status is inspected here; the article
        # list itself is discarded. A 401 marks the stored token as invalid.
        articles, status = client.articles(node_settings)
        if status == 401:
            valid_credentials = False
    result = {
        'nodeHasAuth': node_settings.has_auth,
        'userHasAuth': user_has_auth,
        'userIsOwner': user_is_owner,
        'urls': serialize_urls(node_settings),
        'validCredentials': valid_credentials,
    }
    if node_settings.has_auth:
        # Add owner's profile URL
        # (assumes has_auth implies user_settings is set -- TODO confirm)
        result['urls']['owner'] = web_url_for('profile_view_id',
                                              uid=user_settings.owner._primary_key)
        result['ownerName'] = user_settings.owner.fullname
        # Show available projects
        linked = node_settings.linked_content or {'id': None, 'type': None, 'title': None}
        result['linked'] = linked
    return result
def serialize_urls(node_settings):
    """Return the endpoint URL map used by the figshare settings widget."""
    node = node_settings.owner
    api = node.api_url_for
    return {
        'config': api('figshare_config_put'),
        'deauthorize': api('figshare_deauthorize'),
        'auth': api('figshare_oauth_start'),
        'importAuth': api('figshare_import_user_auth'),
        'options': api('figshare_get_options'),
        'folders': api('figshare_get_options'),
        'files': node.web_url_for('collect_file_trees'),
        # Endpoint for fetching only folders (including root)
        'contents': api('figshare_hgrid_data_contents'),
        'settings': web_url_for('user_addons'),
    }
| 33.371622 | 90 | 0.692853 |
c546f1f9e36c1fc60824e9adb3e2de4e63364611 | 2,290 | py | Python | orquesta/utils/dictionary.py | igcherkaev/orquesta | 2baa66d33f53cb04b660b3ce284a52d478ecc528 | [
"Apache-2.0"
] | 85 | 2018-07-26T04:29:49.000Z | 2022-03-31T10:47:50.000Z | orquesta/utils/dictionary.py | igcherkaev/orquesta | 2baa66d33f53cb04b660b3ce284a52d478ecc528 | [
"Apache-2.0"
] | 149 | 2018-07-27T22:36:45.000Z | 2022-03-31T10:54:32.000Z | orquesta/utils/dictionary.py | igcherkaev/orquesta | 2baa66d33f53cb04b660b3ce284a52d478ecc528 | [
"Apache-2.0"
] | 24 | 2018-08-07T13:37:41.000Z | 2021-12-16T18:12:43.000Z | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
| 27.590361 | 78 | 0.609607 |
c549524dfb308c9a530339a9a6c6add82b8d8653 | 9,114 | py | Python | examples/twisted/websocket/auth_persona/server.py | rapyuta-robotics/autobahn-python | c08e9e352d526a7fd0885bb94706366a432ada1a | [
"MIT"
] | 1,670 | 2015-10-12T15:46:22.000Z | 2022-03-30T22:12:53.000Z | examples/twisted/websocket/auth_persona/server.py | rapyuta-robotics/autobahn-python | c08e9e352d526a7fd0885bb94706366a432ada1a | [
"MIT"
] | 852 | 2015-10-16T22:11:03.000Z | 2022-03-27T07:57:01.000Z | examples/twisted/websocket/auth_persona/server.py | rapyuta-robotics/autobahn-python | c08e9e352d526a7fd0885bb94706366a432ada1a | [
"MIT"
] | 790 | 2015-10-15T08:46:12.000Z | 2022-03-30T12:22:13.000Z | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
import json
import urllib
import Cookie
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
import autobahn
from autobahn.util import newid, utcnow
from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
if __name__ == '__main__':
    # Demo entry point: serve the static demo page and the Persona
    # WebSocket auth endpoint from one Twisted web site on port 8080.
    log.startLogging(sys.stdout)
    print("Running Autobahn|Python {}".format(autobahn.version))
    # our WebSocket server factory
    factory = PersonaServerFactory("ws://127.0.0.1:8080")
    # we serve static files under "/" ..
    root = File(".")
    # .. and our WebSocket server under "/ws" (note that Twisted uses
    # bytes for URIs)
    resource = WebSocketResource(factory)
    root.putChild(b"ws", resource)
    # run both under one Twisted Web Site
    site = Site(root)
    site.log = lambda _: None  # disable any logging
    reactor.listenTCP(8080, site)
    reactor.run()
| 36.456 | 115 | 0.585473 |
c54a392610a02b36eccf6f7a462a2e02a2aa190a | 1,681 | py | Python | src/ggrc_risks/models/risk.py | Killswitchz/ggrc-core | 2460df94daf66727af248ad821462692917c97a9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc_risks/models/risk.py | Killswitchz/ggrc-core | 2460df94daf66727af248ad821462692917c97a9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc_risks/models/risk.py | Killswitchz/ggrc-core | 2460df94daf66727af248ad821462692917c97a9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from sqlalchemy.ext.declarative import declared_attr
from ggrc import db
from ggrc.access_control.roleable import Roleable
from ggrc.fulltext.mixin import Indexed
from ggrc.models.associationproxy import association_proxy
from ggrc.models import mixins
from ggrc.models.deferred import deferred
from ggrc.models.object_document import PublicDocumentable
from ggrc.models.object_person import Personable
from ggrc.models import reflection
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState
| 33.62 | 78 | 0.732302 |
c54a493288773ae7775619c9f9c08446cac8b3d2 | 1,191 | py | Python | booknlp/common/calc_coref_metrics.py | ishine/booknlp | 2b42ccd40dc2c62097308398d4e08f91ecab4177 | [
"MIT"
] | 539 | 2021-11-22T16:29:40.000Z | 2022-03-30T17:50:58.000Z | booknlp/common/calc_coref_metrics.py | gxxu-ml/booknlp | 2b42ccd40dc2c62097308398d4e08f91ecab4177 | [
"MIT"
] | 6 | 2021-12-12T18:21:49.000Z | 2022-03-30T20:51:40.000Z | booknlp/common/calc_coref_metrics.py | gxxu-ml/booknlp | 2b42ccd40dc2c62097308398d4e08f91ecab4177 | [
"MIT"
] | 44 | 2021-11-22T07:22:50.000Z | 2022-03-25T20:02:26.000Z | import subprocess, re, sys
if __name__ == "__main__":
goldFile=sys.argv[1]
predFile=sys.argv[2]
scorer=sys.argv[3]
bcub_f, avg=get_conll(scorer, gold=goldFile, preds=predFile)
| 31.342105 | 102 | 0.686818 |
c54b7ed70bd070a66a466b8ee7706f4673635759 | 16,878 | py | Python | apps/core/test.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | apps/core/test.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | apps/core/test.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals
from tornado.testing import AsyncTestCase
from apps.core.models import (ModelBase,
_get_master_engine,
_get_slave_engine)
from tornado.options import options
from apps.core.urlutils import urlpattens
from apps.auth.views import LoginHandler
from apps.views import IndexHandler
from apps.core.datastruct import QueryDict, lru_cache
from simplejson import loads
from tornado.testing import AsyncHTTPTestCase, gen_test
from apps.core.httpclient import (RESTfulAsyncClient, SessionClient)
from apps.core.crypto import get_random_string
from tornado.web import URLSpec
import re
from tornado.web import Application
from apps.core.cache.base import CacheBase, cache as cache_proxy
from tornado.gen import sleep
from mock import patch
from apps.core.timezone import now
from concurrent.futures import ThreadPoolExecutor
import thread
#
options.testing = True # mockbase_url
| 33.159136 | 77 | 0.583185 |
c54bfc8137e477b8a93b0291e14e014c3954ee65 | 622 | py | Python | docker/aws/update_event_mapping.py | uk-gov-mirror/nationalarchives.tdr-jenkins | 1bcbee009d4384a777247039d44b2790eba34caa | [
"MIT"
] | null | null | null | docker/aws/update_event_mapping.py | uk-gov-mirror/nationalarchives.tdr-jenkins | 1bcbee009d4384a777247039d44b2790eba34caa | [
"MIT"
] | 34 | 2020-02-03T14:20:42.000Z | 2022-01-26T09:22:09.000Z | docker/aws/update_event_mapping.py | uk-gov-mirror/nationalarchives.tdr-jenkins | 1bcbee009d4384a777247039d44b2790eba34caa | [
"MIT"
] | 1 | 2021-04-11T07:11:53.000Z | 2021-04-11T07:11:53.000Z | import sys
from sessions import get_session
account_number = sys.argv[1]
stage = sys.argv[2]
function_name = sys.argv[3]
version = sys.argv[4]
function_arn = f'arn:aws:lambda:eu-west-2:{account_number}:function:{function_name}'
boto_session = get_session(account_number, "TDRJenkinsLambdaRole" + stage.capitalize())
client = boto_session.client("lambda")
event_mappings = client.list_event_source_mappings()['EventSourceMappings']
uuid = list(filter(lambda x: x['FunctionArn'].startswith(function_arn), event_mappings))[0]['UUID']
client.update_event_source_mapping(UUID=uuid, FunctionName=function_arn + ":" + version) | 41.466667 | 99 | 0.786174 |
c54c0437171dca7cbeb276eabca7979dd5dce208 | 2,202 | py | Python | src/python/compressao_huffman.py | willisnou/Algoritmos-e-Estruturas-de-Dados | b70a2f692ccae948576177560e3628b9dece5aee | [
"MIT"
] | 653 | 2015-06-07T14:45:40.000Z | 2022-03-25T17:31:58.000Z | src/python/compressao_huffman.py | willisnou/Algoritmos-e-Estruturas-de-Dados | b70a2f692ccae948576177560e3628b9dece5aee | [
"MIT"
] | 64 | 2017-10-29T10:53:37.000Z | 2022-03-14T23:49:18.000Z | src/python/compressao_huffman.py | willisnou/Algoritmos-e-Estruturas-de-Dados | b70a2f692ccae948576177560e3628b9dece5aee | [
"MIT"
] | 224 | 2015-06-07T14:46:00.000Z | 2022-03-25T17:36:46.000Z | # rvore Huffman
def printNodes(node, val=''):
    """Walk the Huffman tree and print ``symbol -> code`` for every leaf.

    Each node's ``huff`` attribute holds the direction bit assigned when
    the tree was built (0 for a left child, 1 for a right child); ``val``
    accumulates those bits on the way down, so every leaf is printed with
    its complete Huffman code.
    """
    code = val + str(node.huff)
    # A node with no children is a leaf: emit its finished code.
    if node.left is None and node.right is None:
        print(f"{node.symbol} -> {code}")
        return
    # Otherwise descend, left subtree first, so codes print in tree order.
    if node.left:
        printNodes(node.left, code)
    if node.right:
        printNodes(node.right, code)
# characters to encode in the Huffman tree
chars = ['a', 'b', 'c', 'd', 'e', 'f']
# frequency of each character (parallel to ``chars``)
freq = [5, 9, 12, 13, 16, 45]
# working list of not-yet-merged tree nodes
nodes = []
if __name__ == '__main__':
    # turn each character/frequency pair into a Huffman tree node
    # (``node`` is the tree-node class defined elsewhere in this module)
    for x in range(len(chars)):
        nodes.append(node(freq[x], chars[x]))
    while len(nodes) > 1:
        # sort all nodes ascending by frequency
        nodes = sorted(nodes, key=lambda x: x.freq)
        # pick the two lowest-frequency nodes
        left = nodes[0]
        right = nodes[1]
        # assign each one its direction bit
        # (0 = left edge, 1 = right edge)
        left.huff = 0
        right.huff = 1
        # merge the two smallest nodes under a new parent whose
        # frequency and symbol are the sums of its children's
        newNode = node(
            left.freq +
            right.freq,
            left.symbol +
            right.symbol,
            left,
            right)
        # drop the two merged nodes and add the parent in their place
        nodes.remove(left)
        nodes.remove(right)
        nodes.append(newNode)
    # Huffman tree complete: print the code table
    printNodes(nodes[0])
| 24.741573 | 60 | 0.584469 |
c54db6fb5167c6cfc8f323c48a3a8c66fab835af | 8,927 | py | Python | optiga.py | boraozgen/personalize-optiga-trust | 2a158d9fb6cba2bfabce8f5eecb38bc7b81f5bc8 | [
"MIT"
] | 6 | 2019-09-27T13:16:29.000Z | 2021-04-19T22:00:49.000Z | optiga.py | boraozgen/personalize-optiga-trust | 2a158d9fb6cba2bfabce8f5eecb38bc7b81f5bc8 | [
"MIT"
] | 2 | 2020-07-10T12:40:59.000Z | 2020-08-13T09:26:15.000Z | optiga.py | boraozgen/personalize-optiga-trust | 2a158d9fb6cba2bfabce8f5eecb38bc7b81f5bc8 | [
"MIT"
] | 7 | 2019-08-23T09:20:52.000Z | 2021-06-14T15:01:14.000Z | import argparse
import json
import base64
import hashlib
import sys
import binascii
from optigatrust.util.types import *
from optigatrust.pk import *
from optigatrust.x509 import *
private_key_slot_map = {
'second': KeyId.ECC_KEY_E0F1,
'0xE0E1': KeyId.ECC_KEY_E0F1,
'0xE0F1': KeyId.ECC_KEY_E0F1,
'third': KeyId.ECC_KEY_E0F2,
'0xE0E2': KeyId.ECC_KEY_E0F2,
'0xE0F2': KeyId.ECC_KEY_E0F2,
'fourth': KeyId.ECC_KEY_E0F3,
'0xE0E3': KeyId.ECC_KEY_E0F3,
'0xE0F3': KeyId.ECC_KEY_E0F3,
'five': KeyId.RSA_KEY_E0FC,
'0xE0FC': KeyId.RSA_KEY_E0FC,
'six': KeyId.RSA_KEY_E0FD,
'0xE0FD': KeyId.RSA_KEY_E0FD
}
certificate_slot_map = {
'second': ObjectId.USER_CERT_1,
'0xE0E1': ObjectId.USER_CERT_1,
'0xE0F1': ObjectId.USER_CERT_1,
'third': ObjectId.USER_CERT_2,
'0xE0E2': ObjectId.USER_CERT_2,
'0xE0F2': ObjectId.USER_CERT_2,
'fourth': ObjectId.USER_CERT_3,
'0xE0E3': ObjectId.USER_CERT_3,
'0xE0F3': ObjectId.USER_CERT_3,
'0xE0E8': ObjectId.TRUST_ANCHOR_1,
'0xE0EF': ObjectId.TRUST_ANCHOR_2
}
object_slot_map = {
'0xf1d0': ObjectId.DATA_TYPE1_0,
'0xf1d1': ObjectId.DATA_TYPE1_1,
'0xf1d2': ObjectId.DATA_TYPE1_2,
'0xf1d3': ObjectId.DATA_TYPE1_3,
'0xf1d4': ObjectId.DATA_TYPE1_4,
'0xf1d5': ObjectId.DATA_TYPE1_5,
'0xf1d6': ObjectId.DATA_TYPE1_6,
'0xf1d7': ObjectId.DATA_TYPE1_7,
'0xf1d8': ObjectId.DATA_TYPE1_8,
'0xf1d9': ObjectId.DATA_TYPE1_9,
'0xf1da': ObjectId.DATA_TYPE1_A,
'0xf1db': ObjectId.DATA_TYPE1_B,
'0xf1dc': ObjectId.DATA_TYPE1_C,
'0xf1dd': ObjectId.DATA_TYPE1_D,
'0xf1de': ObjectId.DATA_TYPE1_E,
'0xf1e0': ObjectId.DATA_TYPE2_0,
'0xf1e1': ObjectId.DATA_TYPE2_1
}
allowed_object_ids = [
# Certificate Slots
'0xe0e0', '0xe0e1', '0xe0e2', '0xe0e3',
# Trust Anchor Slots
'0xe0e8', '0xe0ef',
# Arbitrary Data Objects
'0xf1d0', '0xf1d1', '0xf1d2', '0xf1d3', '0xf1d4', '0xf1d5', '0xf1d6', '0xf1d7',
'0xf1d8', '0xf1d9', '0xf1da', '0xf1db', '0xf1dc', '0xf1dd', '0xf1de',
'0xf1e0', '0xf1e1'
]
'''
#################################################################################################################
'''
parser = argparse.ArgumentParser(description="Communicate with your OPTIGA(TM) Trust sample")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("--query", nargs=1, metavar='QUERY_ARGUMENT',
help="Define the query argument you want to extract from the output")
parser.add_argument("--csr", metavar='CONFIG_FILE',
help="Instructs the script to generate a Certificate Signing Request."
"Give the script the configuration file for your CSR (fields like Common Name, "
"AWS IoT Thing Name, etc)")
parser.add_argument("--write", metavar='DATA_TO_WRITE', help="Write provided data to the chip.")
parser.add_argument("--read",
metavar='OBJECT_ID',
choices=allowed_object_ids,
help="Certificate Slots: 0xe0e0-0xe0e3\n"
"Trust Anchor slots: 0xe0e8 and 0xe0ef\n"
"100 bytes: 0xf1d0-0xf1de\n"
"1500 bytes: 0xf1e0, 0xf1e1")
parser.add_argument("--slot",
choices=[
# They all mean the same
'second', '0xe0e1', '0xe0f1',
'third', '0xe0e2', '0xe0f2',
'fourth', '0xe0e3', '0xe0f3',
'five', '0xe0fc', 'six', '0xe0fd',
'0xE0E8', '0xE0EF'
],
help="Use one the predefined slots; e.g. second, 0xe0e1, or 0xe0f1, they all mean the same")
parser.add_argument("--id",
metavar='OBJECT_ID',
choices=allowed_object_ids,
help="USe to define which ID to use with your write command \n"
"Certificate Slots: 0xe0e0-0xe0e3\n"
"Trust Anchor slots: 0xe0e8 and 0xe0ef\n"
"100 bytes: 0xf1d0-0xf1de\n"
"1500 bytes: 0xf1e0, 0xf1e1")
args = parser.parse_args()
if args.csr:
parse_csr(args)
sys.exit(0)
if args.write:
parse_write(args)
sys.exit(0)
else:
parser.print_help()
sys.exit(0)
| 39.325991 | 121 | 0.596729 |
c54de03fd28e53eb54540b034a2e8a1f2994146a | 3,532 | py | Python | graph_test.py | MathewMacDougall/Two-Faced-Type | 53fae81a151fd0689ac7328dda6b3e984c9a42e9 | [
"MIT"
] | null | null | null | graph_test.py | MathewMacDougall/Two-Faced-Type | 53fae81a151fd0689ac7328dda6b3e984c9a42e9 | [
"MIT"
] | 25 | 2020-11-15T05:30:23.000Z | 2020-12-12T22:03:35.000Z | graph_test.py | MathewMacDougall/Two-Faced-Type | 53fae81a151fd0689ac7328dda6b3e984c9a42e9 | [
"MIT"
] | null | null | null | import unittest
from graph import Graph
if __name__ == '__main__':
unittest.main()
| 34.627451 | 72 | 0.610136 |
c5508e61b45a9bd59041d4ba0c8bea652aa09b89 | 2,033 | py | Python | cfnbootstrap/construction_errors.py | roberthutto/aws-cfn-bootstrap | 801a16802a931fa4dae0eba4898fe1ccdb304924 | [
"Apache-2.0"
] | null | null | null | cfnbootstrap/construction_errors.py | roberthutto/aws-cfn-bootstrap | 801a16802a931fa4dae0eba4898fe1ccdb304924 | [
"Apache-2.0"
] | null | null | null | cfnbootstrap/construction_errors.py | roberthutto/aws-cfn-bootstrap | 801a16802a931fa4dae0eba4898fe1ccdb304924 | [
"Apache-2.0"
] | 3 | 2017-02-10T13:14:38.000Z | 2018-09-20T01:04:20.000Z | #==============================================================================
# Copyright 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
| 28.236111 | 79 | 0.619774 |
c5514806a6a0a0953948700c69152edf438355ea | 2,798 | py | Python | tests/test_variable.py | snc2/tequila | 6767ced9215408f7d055c22df7a66ccd610b00fb | [
"MIT"
] | 1 | 2021-01-11T18:40:47.000Z | 2021-01-11T18:40:47.000Z | tests/test_variable.py | snc2/tequila | 6767ced9215408f7d055c22df7a66ccd610b00fb | [
"MIT"
] | 1 | 2020-05-08T13:34:33.000Z | 2021-12-06T06:12:37.000Z | tests/test_variable.py | snc2/tequila | 6767ced9215408f7d055c22df7a66ccd610b00fb | [
"MIT"
] | null | null | null | import pytest
from tequila import numpy as np
from tequila.circuit.gradient import grad
from tequila.objective.objective import Objective, Variable
import operator
| 39.408451 | 260 | 0.550751 |
c55176ac699f36bb549a798358fd9868f0da10c3 | 7,649 | py | Python | getnear/tseries.py | edwardspeyer/getnear | 746f3cedc1aed6166423f54d32e208017f660b38 | [
"MIT"
] | null | null | null | getnear/tseries.py | edwardspeyer/getnear | 746f3cedc1aed6166423f54d32e208017f660b38 | [
"MIT"
] | null | null | null | getnear/tseries.py | edwardspeyer/getnear | 746f3cedc1aed6166423f54d32e208017f660b38 | [
"MIT"
] | null | null | null | from getnear.config import Tagged, Untagged, Ignore
from getnear.logging import info
from lxml import etree
import re
import requests
import telnetlib
| 34.61086 | 92 | 0.552098 |
c5524c8d02f3aef3cff31c032990bb8d482aaf1e | 16,945 | py | Python | Tests/subset/svg_test.py | ThomasRettig/fonttools | 629f44b8cc4ed768088b952c9e600190685a90fc | [
"Apache-2.0",
"MIT"
] | 2,705 | 2016-09-27T10:02:12.000Z | 2022-03-31T09:37:46.000Z | Tests/subset/svg_test.py | ThomasRettig/fonttools | 629f44b8cc4ed768088b952c9e600190685a90fc | [
"Apache-2.0",
"MIT"
] | 1,599 | 2016-09-27T09:07:36.000Z | 2022-03-31T23:04:51.000Z | Tests/subset/svg_test.py | ThomasRettig/fonttools | 629f44b8cc4ed768088b952c9e600190685a90fc | [
"Apache-2.0",
"MIT"
] | 352 | 2016-10-07T04:18:15.000Z | 2022-03-30T07:35:01.000Z | from string import ascii_letters
import textwrap
from fontTools.misc.testTools import getXML
from fontTools import subset
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.subset.svg import NAMESPACES, ranges
import pytest
etree = pytest.importorskip("lxml.etree")
# This contains a bunch of cross-references between glyphs, paths, gradients, etc.
# Note the path coordinates are completely made up and not meant to be rendered.
# We only care about the tree structure, not it's visual content.
COMPLEX_SVG = """\
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
<path id="p1" d="M3,3"/>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph3">
<use xlink:href="#p1"/>
</g>
<use id="glyph4" xlink:href="#glyph1" x="10"/>
<use id="glyph5" xlink:href="#glyph2" y="-10"/>
<g id="glyph6">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
<g id="group1">
<g id="glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph7">
<path d="M4,4"/>
</g>
<g id="glyph8">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
<path id="M6,6"/>
</g>
<path d="M7,7"/>
</g>
<g id="glyph9">
<use xlink:href="#p2"/>
</g>
<g id="glyph10">
<use xlink:href="#p3"/>
</g>
</g>
<g id="glyph11">
<path d="M7,7" fill="url(#rg4)"/>
</g>
<g id="glyph12">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
"""
| 34.652352 | 110 | 0.477781 |
c552f157bcec716a7f87d20bd21cf1b7b813d8da | 211 | py | Python | models/dl-weights.py | diegoinacio/object-detection-flask-opencv | bc012e884138e9ead04115b8550e833bed134074 | [
"MIT"
] | 16 | 2020-03-01T07:35:35.000Z | 2022-02-01T16:34:24.000Z | models/dl-weights.py | girish008/Real-Time-Object-Detection-Using-YOLOv3-OpenCV | 6af4c550f6128768b646f5923af87c2f654cd1bd | [
"MIT"
] | 6 | 2020-02-13T12:50:24.000Z | 2022-02-02T03:22:30.000Z | models/dl-weights.py | girish008/Real-Time-Object-Detection-Using-YOLOv3-OpenCV | 6af4c550f6128768b646f5923af87c2f654cd1bd | [
"MIT"
] | 8 | 2020-06-22T10:23:58.000Z | 2022-01-14T21:17:50.000Z | """
This script downloads the weight file
"""
import requests
URL = "https://pjreddie.com/media/files/yolov3.weights"
r = requests.get(URL, allow_redirects=True)
open('yolov3_t.weights', 'wb').write(r.content)
| 23.444444 | 55 | 0.739336 |
c55412d74acd62e5e8c97c0f510ea4a9a80e5595 | 1,786 | py | Python | utim-esp32/modules/utim/utilities/process_device.py | connax-utim/utim-micropython | 23c30f134af701a44a8736b09c8c201e13760d18 | [
"Apache-2.0"
] | null | null | null | utim-esp32/modules/utim/utilities/process_device.py | connax-utim/utim-micropython | 23c30f134af701a44a8736b09c8c201e13760d18 | [
"Apache-2.0"
] | null | null | null | utim-esp32/modules/utim/utilities/process_device.py | connax-utim/utim-micropython | 23c30f134af701a44a8736b09c8c201e13760d18 | [
"Apache-2.0"
] | null | null | null | """
Subprocessor for device messages
"""
import logging
from ..utilities.tag import Tag
from ..workers import device_worker_forward
from ..workers import device_worker_startup
from ..utilities.address import Address
from ..utilities.status import Status
from ..utilities.data_indexes import SubprocessorIndex
_SubprocessorIndex = SubprocessorIndex()
logger = logging.Logger('utilities.process_device')
| 33.074074 | 80 | 0.663494 |
c556608e317003e7eff23a5318cc565b380cac29 | 174 | py | Python | TrainAndTest/Fbank/LSTMs/__init__.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 8 | 2020-08-26T13:32:56.000Z | 2022-01-18T21:05:46.000Z | TrainAndTest/Fbank/LSTMs/__init__.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 1 | 2020-07-24T17:06:16.000Z | 2020-07-24T17:06:16.000Z | TrainAndTest/Fbank/LSTMs/__init__.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 5 | 2020-12-11T03:31:15.000Z | 2021-11-23T15:57:55.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: __init__.py.py
@Time: 2020/3/27 10:43 AM
@Overview:
"""
| 14.5 | 26 | 0.689655 |
c556a7e2e7f0a44508e2fef82666c7378cbf88cf | 226 | py | Python | ninja/security/__init__.py | lsaavedr/django-ninja | caa182007368bb0fed85b184fb0583370e9589b4 | [
"MIT"
] | null | null | null | ninja/security/__init__.py | lsaavedr/django-ninja | caa182007368bb0fed85b184fb0583370e9589b4 | [
"MIT"
] | null | null | null | ninja/security/__init__.py | lsaavedr/django-ninja | caa182007368bb0fed85b184fb0583370e9589b4 | [
"MIT"
] | null | null | null | from ninja.security.apikey import APIKeyQuery, APIKeyCookie, APIKeyHeader
from ninja.security.http import HttpBearer, HttpBasicAuth
| 28.25 | 73 | 0.800885 |
c5596189b90b1ffb040eef3bc2ba25c968d94c71 | 1,160 | py | Python | migrations/versions/66d4be40bced_add_attribute_to_handle_multiline_.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 1 | 2018-01-01T20:35:43.000Z | 2018-01-01T20:35:43.000Z | migrations/versions/66d4be40bced_add_attribute_to_handle_multiline_.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 37 | 2017-02-24T17:07:25.000Z | 2021-09-02T14:49:19.000Z | migrations/versions/66d4be40bced_add_attribute_to_handle_multiline_.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 2 | 2019-11-05T13:45:45.000Z | 2020-11-13T22:02:37.000Z | """Add attribute to handle multiline information
Revision ID: 66d4be40bced
Revises: 6a809295d586
Create Date: 2018-05-16 12:13:32.023450
"""
import sqlalchemy as sa
from alembic import op
from limonero.migration_utils import is_sqlite
# revision identifiers, used by Alembic.
revision = '66d4be40bced'
down_revision = '6a809295d586'
branch_labels = None
depends_on = None
| 29.74359 | 108 | 0.668103 |
c559db70f1ddb6f54d717326e423cfde57c7f2af | 247 | py | Python | config.py | kxxoling/horus | a3c4b6c40a1064fffa595976f10358178dd65367 | [
"MIT"
] | null | null | null | config.py | kxxoling/horus | a3c4b6c40a1064fffa595976f10358178dd65367 | [
"MIT"
] | null | null | null | config.py | kxxoling/horus | a3c4b6c40a1064fffa595976f10358178dd65367 | [
"MIT"
] | null | null | null | import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
SQLITE = 'db.sqlite3'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, SQLITE) + '?check_same_thread=False'
| 22.454545 | 100 | 0.740891 |
c55a4b523b8ba2e29366bb76e2448cbced42c61f | 4,372 | py | Python | tests/test_lp_problem.py | LovisAnderson/flipy | bde898e46e34cdfba39cecb75586fa3f4d816520 | [
"Apache-2.0"
] | null | null | null | tests/test_lp_problem.py | LovisAnderson/flipy | bde898e46e34cdfba39cecb75586fa3f4d816520 | [
"Apache-2.0"
] | null | null | null | tests/test_lp_problem.py | LovisAnderson/flipy | bde898e46e34cdfba39cecb75586fa3f4d816520 | [
"Apache-2.0"
] | null | null | null | import pytest
from flipy.lp_problem import LpProblem
from flipy.lp_objective import LpObjective, Maximize
from flipy.lp_variable import LpVariable
from flipy.lp_expression import LpExpression
from flipy.lp_constraint import LpConstraint
from io import StringIO
| 41.638095 | 205 | 0.665599 |
c55a6c83c0c4deda47ef169a2a79ced739a7f4c8 | 106 | py | Python | src/invoice_medicine/apps.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | src/invoice_medicine/apps.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | src/invoice_medicine/apps.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | from django.apps import AppConfig
| 17.666667 | 39 | 0.792453 |
c55b87c8df2b77ae553d466bad5d103ac2336d62 | 5,893 | py | Python | tests/components/tectonics/test_listric_kinematic_extender.py | amanaster2/landlab | ea17f8314eb12e3fc76df66c9b6ff32078caa75c | [
"MIT"
] | 257 | 2015-01-13T16:01:21.000Z | 2022-03-29T22:37:43.000Z | tests/components/tectonics/test_listric_kinematic_extender.py | amanaster2/landlab | ea17f8314eb12e3fc76df66c9b6ff32078caa75c | [
"MIT"
] | 1,222 | 2015-02-05T21:36:53.000Z | 2022-03-31T17:53:49.000Z | tests/components/tectonics/test_listric_kinematic_extender.py | amanaster2/landlab | ea17f8314eb12e3fc76df66c9b6ff32078caa75c | [
"MIT"
] | 274 | 2015-02-11T19:56:08.000Z | 2022-03-28T23:31:07.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 08:42:24 2021
@author: gtucker
"""
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_raises
from landlab import HexModelGrid, RadialModelGrid, RasterModelGrid
from landlab.components import Flexure, ListricKinematicExtender
def test_hangingwall_nodes():
"""Test the correct identification of hangingwall nodes."""
grid = RasterModelGrid((3, 7), xy_spacing=2500.0)
grid.add_zeros("topographic__elevation", at="node")
extender = ListricKinematicExtender(grid, fault_location=2500.0)
assert_array_equal(
extender._hangwall, [2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20]
)
def test_subsidence_and_horiz_shift():
"""Test that elev subsides then shifts after 2 time steps."""
grid = RasterModelGrid((3, 7), xy_spacing=2500.0)
topo = grid.add_zeros("topographic__elevation", at="node")
extender = ListricKinematicExtender(
grid, extension_rate=0.01, fault_location=2500.0
)
# Run long enough to extend by half a grid cell
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[0.0, 0.0, -1404.156819, -910.66907, -590.616478, -383.045648, -248.425118],
)
# Now extend another half cell, so cumulative extension is one cell and
# elevations should get shifted by one cell
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[0.0, 0.0, -3514.477461, -2808.313638, -1821.338140, -1181.232956, -766.091296],
)
# Another step, and this time the hangingwall edge has moved by one cell,
# so the first 3 cells in this row should not further subside.
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[
0.0,
0.0,
-3514.477461,
-3718.982708,
-2411.954617,
-1564.278603,
-1014.516414,
],
)
def test_with_flexure():
"""Test integrating with flexure."""
crust_density = 2700.0 # density of crustal column, kg/m3
dx = 2500.0 # grid spacing, m
dt = 125000.0 # time step, y
upper_crust_base_depth = 10000.0 # m
grid = RasterModelGrid((3, 7), xy_spacing=dx)
topo = grid.add_zeros("topographic__elevation", at="node")
load = grid.add_zeros("lithosphere__overlying_pressure_increment", at="node")
thickness = grid.add_zeros("upper_crust_thickness", at="node")
upper_crust_base = grid.add_zeros("upper_crust_base__elevation", at="node")
extender = ListricKinematicExtender(
grid,
extension_rate=0.01,
fault_location=2500.0,
track_crustal_thickness=True,
)
flexer = Flexure(grid, eet=5000.0, method="flexure")
deflection = grid.at_node["lithosphere_surface__elevation_increment"]
topo[
grid.x_of_node <= 7500.0
] = 1000.0 # this will force thickness to be 1 km greater at left
upper_crust_base[:] = -upper_crust_base_depth
thickness[:] = topo - upper_crust_base
unit_wt = crust_density * flexer.gravity
load[:] = unit_wt * thickness # loading pressure
# Get the initial deflection, which we'll need to calculate total current
# deflection
flexer.update()
init_deflection = deflection.copy()
# Run extension for half a grid cell. Elevations change, but thickness
# doesn't, so deflection should not change. We should be able to recover
# elevation from:
#
# topo = thickness + crust base - (deflection + subsidence)
#
extender.run_one_step(dt=dt)
flexer.update()
net_deflection = deflection - init_deflection
assert_array_almost_equal(
net_deflection[7:14],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
)
test_topo = thickness + upper_crust_base - (net_deflection + extender._cum_subs)
assert_array_almost_equal(topo, test_topo)
# Now extend for another half cell, which should force a shift. The
# cumulative subsidence will be subtracted from the thickness field,
# representing thinning as the hangingwall slides to the "right". This
# will cause net upward isostatic deflection.
extender.run_one_step(dt=dt)
load[:] = unit_wt * thickness
flexer.update()
net_deflection = deflection - init_deflection
assert_array_almost_equal(
thickness[7:14],
[
11000.0,
11000.0,
8191.686362, # greatest subsidence: lost nearly 3 km
9178.66186,
9818.767044, # thicker because shifted (only lost <200 m)
9233.908704,
9503.149763,
],
)
assert_array_almost_equal(
net_deflection[7:14],
[
-59.497362,
-65.176276,
-69.222531,
-70.334462,
-68.608952,
-64.912352,
-59.743080,
],
)
| 33.293785 | 88 | 0.659426 |
c55c22b2cd2bfee50cd66031731dbde75ccb7354 | 22,485 | py | Python | unbalanced_dataset/under_sampling.py | designer357/IGBB | 89a60ec38fa9dab54175c24c347ee43232825504 | [
"MIT"
] | 1 | 2021-08-20T17:14:28.000Z | 2021-08-20T17:14:28.000Z | unbalanced_dataset/under_sampling.py | designer357/IGBB | 89a60ec38fa9dab54175c24c347ee43232825504 | [
"MIT"
] | null | null | null | unbalanced_dataset/under_sampling.py | designer357/IGBB | 89a60ec38fa9dab54175c24c347ee43232825504 | [
"MIT"
] | 1 | 2018-09-13T23:26:23.000Z | 2018-09-13T23:26:23.000Z | from __future__ import print_function
from __future__ import division
import numpy as np
from numpy import logical_not, ones
from numpy.random import seed, randint
from numpy import concatenate
from random import sample
from collections import Counter
from .unbalanced_dataset import UnbalancedDataset
| 34.806502 | 104 | 0.558417 |
c55c65db8051ee9fdf9ceac3a9490b6f81b381e7 | 931 | py | Python | wikipron/extract/cmn.py | Alireza-Sampour/wikipron | ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d | [
"Apache-2.0"
] | 1 | 2021-08-01T20:31:27.000Z | 2021-08-01T20:31:27.000Z | wikipron/extract/cmn.py | Alireza-Sampour/wikipron | ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d | [
"Apache-2.0"
] | null | null | null | wikipron/extract/cmn.py | Alireza-Sampour/wikipron | ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d | [
"Apache-2.0"
] | null | null | null | """Word and pron extraction for (Mandarin) Chinese."""
import itertools
import typing
import requests
from wikipron.extract.default import yield_pron, IPA_XPATH_SELECTOR
if typing.TYPE_CHECKING:
from wikipron.config import Config
from wikipron.typing import Iterator, Word, Pron, WordPronPair
# Select pron from within this li
_PRON_XPATH_TEMPLATE = """
//div[@class="vsHide"]
//ul
//li[(a[@title="w:Mandarin Chinese"])]
"""
| 25.162162 | 71 | 0.71536 |
c55ca719e407ecd982eeb52d8e27fa9690f85669 | 420 | py | Python | iis/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | iis/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | iis/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
from datadog_checks.dev.testing import requires_py3
from datadog_checks.iis import IIS
| 26.25 | 76 | 0.797619 |
c560c444067061f2f72e5a0dd18c1c1230d2f961 | 1,174 | py | Python | scripts/utils/param_grid_to_files.py | bagustris/emotion | 5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36 | [
"MIT"
] | 3 | 2020-11-03T14:54:22.000Z | 2021-04-12T12:23:10.000Z | scripts/utils/param_grid_to_files.py | bagustris/emotion | 5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36 | [
"MIT"
] | null | null | null | scripts/utils/param_grid_to_files.py | bagustris/emotion | 5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36 | [
"MIT"
] | 2 | 2020-12-03T06:21:59.000Z | 2021-01-16T04:47:12.000Z | from pathlib import Path
import click
import yaml
from sklearn.model_selection import ParameterGrid
from ertk.utils import PathlibPath, get_arg_mapping
if __name__ == "__main__":
main()
| 32.611111 | 76 | 0.67632 |
c5610fccc549a5c9d69f6c3b166e598fbe0653b9 | 6,172 | py | Python | mangabee_parsers.py | ta-dachi/mangaget | 4ef39df0a6cceb2817d3bd0ad4d8290b8f576341 | [
"MIT"
] | null | null | null | mangabee_parsers.py | ta-dachi/mangaget | 4ef39df0a6cceb2817d3bd0ad4d8290b8f576341 | [
"MIT"
] | null | null | null | mangabee_parsers.py | ta-dachi/mangaget | 4ef39df0a6cceb2817d3bd0ad4d8290b8f576341 | [
"MIT"
] | null | null | null | from html.parser import HTMLParser
| 35.883721 | 124 | 0.545366 |
c5630abd4f13c6d9b9fd911d42b444b3c07c02dd | 1,831 | py | Python | bme280/reader.py | budrom/dht2eleasticsearch | 286974c0f4096ae3fb2f1f700b761051b09c47cf | [
"MIT"
] | null | null | null | bme280/reader.py | budrom/dht2eleasticsearch | 286974c0f4096ae3fb2f1f700b761051b09c47cf | [
"MIT"
] | null | null | null | bme280/reader.py | budrom/dht2eleasticsearch | 286974c0f4096ae3fb2f1f700b761051b09c47cf | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
import threading
import sys
from Adafruit_BME280 import *
from datetime import datetime
from elasticsearch import Elasticsearch
def send2es(data):
""" Initiate connection to Elasticsearch and send data as a single document.
data - dictionary/JSON to be sent
"""
i = 'metrics_{}'.format(datetime.now().strftime('%m.%y'))
es.index(index=i, doc_type='measurement', body=data)
if __name__ == "__main__":
print("Script started")
try:
es_host = os.environ['ELASTICSEARCH_URL']
es_user = os.environ['ELASTICSEARCH_USER']
es_pass = os.environ['ELASTICSEARCH_PASSWORD']
es = Elasticsearch(es_host, http_auth=(es_user, es_pass))
except KeyError:
es_host = None
try:
t_compensation = float(os.environ['T_COMPENSATION'])
except KeyError:
t_compensation = 0
sensor = BME280(t_mode=BME280_OSAMPLE_2,
p_mode=BME280_OSAMPLE_8,
h_mode=BME280_OSAMPLE_1,
filter=BME280_FILTER_16,
address=0x76)
threading.Timer(60-float(datetime.utcnow().strftime('%S.%f')), readSensor).start()
print("Waiting for next minute to start loop...")
| 31.033898 | 125 | 0.688695 |
c565a1b2f5a20a17f2045f38225c4233abda30b9 | 456 | py | Python | tests/web/backend_pytest.py | brumar/eel-for-transcrypt | 28cf5e0aa55a3c885b63d79d1ffae1370be644d2 | [
"MIT"
] | 1 | 2019-12-31T13:53:05.000Z | 2019-12-31T13:53:05.000Z | tests/web/backend_pytest.py | brumar/eel-for-transcrypt | 28cf5e0aa55a3c885b63d79d1ffae1370be644d2 | [
"MIT"
] | 1 | 2021-11-15T17:48:03.000Z | 2021-11-15T17:48:03.000Z | tests/web/backend_pytest.py | brumar/eel-for-transcrypt | 28cf5e0aa55a3c885b63d79d1ffae1370be644d2 | [
"MIT"
] | null | null | null | import eel_for_transcrypt as eel
from web.common import InventoryItem
| 16.888889 | 48 | 0.756579 |
c565d028bd9c69f1d86bec597f86ad7c3dad14ce | 6,989 | py | Python | tests/test_polygon.py | tilezen/mapbox-vector-tile | 4e3a65a6f98c317048266260b8e7aac705e31e6f | [
"MIT"
] | 121 | 2016-07-14T00:44:54.000Z | 2022-03-19T00:49:14.000Z | tests/test_polygon.py | tilezen/mapbox-vector-tile | 4e3a65a6f98c317048266260b8e7aac705e31e6f | [
"MIT"
] | 53 | 2016-07-05T14:35:06.000Z | 2021-05-20T22:31:02.000Z | tests/test_polygon.py | tilezen/mapbox-vector-tile | 4e3a65a6f98c317048266260b8e7aac705e31e6f | [
"MIT"
] | 34 | 2016-07-27T23:45:05.000Z | 2022-01-02T20:37:58.000Z | # -*- coding: utf-8 -*-
"""
Tests for vector_tile/polygon.py
"""
import unittest
from mapbox_vector_tile.polygon import make_it_valid
from shapely import wkt
import os
| 34.945 | 78 | 0.529976 |
c567553f0cf12169873a1f6859559b2967a6ea7a | 275 | py | Python | snake_debug.py | xlrobotics/PPOC-balance-bot | 41dae4b2bbfce94ed04841fa9ba122eb57459e5a | [
"MIT"
] | 3 | 2020-11-10T01:45:35.000Z | 2021-09-27T11:39:06.000Z | snake_debug.py | xlrobotics/PPOC-balance-bot | 41dae4b2bbfce94ed04841fa9ba122eb57459e5a | [
"MIT"
] | null | null | null | snake_debug.py | xlrobotics/PPOC-balance-bot | 41dae4b2bbfce94ed04841fa9ba122eb57459e5a | [
"MIT"
] | 2 | 2020-01-25T17:26:33.000Z | 2021-02-16T16:39:38.000Z | import gym
# from stable_baselines import DQN as deepq
from stable_baselines import A2C as ac
from stable_baselines.common.policies import MlpLnLstmPolicy
import snake_bot
if __name__ == '__main__':
env = gym.make("snakebot-v0")
env.debug_mode()
exit(0) | 27.5 | 61 | 0.741818 |
c5675771c49be7e9f7d6d764c6141228f78fdc9d | 2,179 | py | Python | easy/572_subtree_of_another_tree.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | easy/572_subtree_of_another_tree.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | easy/572_subtree_of_another_tree.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | """
Given the roots of two binary trees root and subRoot, return true if there is a subtree of root with the same structure
and node values of subRoot and false otherwise.
A subtree of a binary tree tree is a tree that consists of a node in tree and all of this node's descendants.
The tree tree could also be considered as a subtree of itself.
Example 1:
3 (root)
/\
4 5 4 (subroot)
/ \ / \
1 2 1 2
Input: root = [3,4,5,1,2], subRoot = [4,1,2]
Output: true
Example 2:
3 (root)
/\
4 5 4 (subroot)
/ \ / \
1 2 1 2
/
0
Input: root = [3,4,5,1,2,null,null,null,null,0], subRoot = [4,1,2]
Output: false
Constraints:
The number of nodes in the root tree is in the range [1, 2000].
The number of nodes in the subRoot tree is in the range [1, 1000].
-104 <= root.val <= 104
-104 <= subRoot.val <= 104
"""
from tools.binary_tree import TreeNode
| 33.523077 | 119 | 0.592015 |
c567629ea21a15f16d30ea7895f7a40e8e344679 | 80,085 | py | Python | pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 7 | 2020-04-14T09:41:17.000Z | 2021-08-06T09:38:19.000Z | pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | null | null | null | pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 3 | 2020-04-30T12:44:48.000Z | 2020-12-15T08:40:26.000Z | import pyeccodes.accessors as _
| 66.7375 | 355 | 0.700893 |
c56762e2edaef44daca6ab74ffdc3c598a3d259d | 2,038 | py | Python | perspective_transform.py | shengchen-liu/CarND-Advanced_Lane_Finding | e23a3f5021e59f3acef4e8fec48537fffab0f1b3 | [
"MIT"
] | null | null | null | perspective_transform.py | shengchen-liu/CarND-Advanced_Lane_Finding | e23a3f5021e59f3acef4e8fec48537fffab0f1b3 | [
"MIT"
] | null | null | null | perspective_transform.py | shengchen-liu/CarND-Advanced_Lane_Finding | e23a3f5021e59f3acef4e8fec48537fffab0f1b3 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import matplotlib.pyplot as plt
from calibration_utils import calibrate_camera, undistort
import glob
import matplotlib.image as mpimg
import pickle
from threshold import binarize
def perspective_transform(img, verbose=False):
"""
Execute perspective transform
"""
img_size = (img.shape[1], img.shape[0])
# algorithm to automatically pick?
# https: // knowledge.udacity.com / questions / 22331
src = np.float32(
[[200, 720],
[1100, 720],
[595, 450],
[685, 450]])
dst = np.float32(
[[300, 720],
[980, 720],
[300, 0],
[980, 0]])
m = cv2.getPerspectiveTransform(src, dst)
m_inv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(img, m, img_size, flags=cv2.INTER_LINEAR)
unwarped = cv2.warpPerspective(warped, m_inv, (warped.shape[1], warped.shape[0]), flags=cv2.INTER_LINEAR) # DEBUG
if verbose:
f, axarray = plt.subplots(1, 2)
f.set_facecolor('white')
axarray[0].set_title('Before perspective transform')
axarray[0].imshow(img, cmap='gray')
for point in src:
axarray[0].plot(*point, '.')
axarray[1].set_title('After perspective transform')
axarray[1].imshow(warped, cmap='gray')
for point in dst:
axarray[1].plot(*point, '.')
for axis in axarray:
axis.set_axis_off()
plt.show()
return warped, m, m_inv
if __name__ == '__main__':
with open('calibrate_camera.p', 'rb') as f:
save_dict = pickle.load(f)
mtx = save_dict['mtx']
dist = save_dict['dist']
# show result on test images
for test_img in glob.glob('test_images/*.jpg'):
img = cv2.imread(test_img)
img_undistorted = undistort(img, mtx, dist, verbose=False)
img_binary = binarize(img_undistorted, verbose=False)
img_birdeye, M, Minv = perspective_transform(cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2RGB), verbose=True)
| 29.114286 | 118 | 0.628557 |
c5681f32ba0443d6943fe18106423ebafc204c78 | 12,733 | py | Python | epgrefresh/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | epgrefresh/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | epgrefresh/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | from __future__ import print_function
# for localized messages
from . import _, NOTIFICATIONDOMAIN
# Config
from Components.config import config, ConfigYesNo, ConfigNumber, ConfigSelection, \
ConfigSubsection, ConfigClock, ConfigYesNo, ConfigInteger, NoSave
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from Tools.BoundFunction import boundFunction
from boxbranding import getImageDistro
from Components.SystemInfo import SystemInfo
from Components.NimManager import nimmanager
# Error-print
from traceback import print_exc
from sys import stdout
# Calculate default begin/end
from time import time, localtime, mktime
now = localtime()
begin = mktime((
now.tm_year, now.tm_mon, now.tm_mday, 07, 30,
0, now.tm_wday, now.tm_yday, now.tm_isdst)
)
end = mktime((
now.tm_year, now.tm_mon, now.tm_mday, 20, 00,
0, now.tm_wday, now.tm_yday, now.tm_isdst)
)
#Configuration
config.plugins.epgrefresh = ConfigSubsection()
config.plugins.epgrefresh.enabled = ConfigYesNo(default=False)
config.plugins.epgrefresh.begin = ConfigClock(default=int(begin))
config.plugins.epgrefresh.end = ConfigClock(default=int(end))
config.plugins.epgrefresh.interval_seconds = ConfigNumber(default=120)
config.plugins.epgrefresh.delay_standby = ConfigNumber(default=10)
config.plugins.epgrefresh.inherit_autotimer = ConfigYesNo(default=False)
config.plugins.epgrefresh.afterevent = ConfigYesNo(default=False)
config.plugins.epgrefresh.force = ConfigYesNo(default=False)
config.plugins.epgrefresh.skipProtectedServices = ConfigSelection(choices=[
("bg_only", _("Background only")),
("always", _("Foreground also")),
], default="bg_only"
)
config.plugins.epgrefresh.enablemessage = ConfigYesNo(default=True)
config.plugins.epgrefresh.wakeup = ConfigYesNo(default=False)
config.plugins.epgrefresh.lastscan = ConfigNumber(default=0)
config.plugins.epgrefresh.parse_autotimer = ConfigSelection(choices=[
("always", _("Yes")),
("never", _("No")),
("bg_only", _("Background only")),
("ask_yes", _("Ask default Yes")),
("ask_no", _("Ask default No")),
], default="never"
)
config.plugins.epgrefresh.erase = ConfigYesNo(default=False)
adapter_choices = [("main", _("Main Picture"))]
if SystemInfo.get("NumVideoDecoders", 1) > 1:
adapter_choices.append(("pip", _("Picture in Picture")))
adapter_choices.append(("pip_hidden", _("Picture in Picture (hidden)")))
if len(nimmanager.nim_slots) > 1:
adapter_choices.append(("record", _("Fake recording")))
config.plugins.epgrefresh.adapter = ConfigSelection(choices=adapter_choices, default="main")
config.plugins.epgrefresh.show_in_extensionsmenu = ConfigYesNo(default=False)
config.plugins.epgrefresh.show_run_in_extensionsmenu = ConfigYesNo(default=True)
if getImageDistro() in ("openatv", "openvix",):
config.plugins.epgrefresh.show_in_plugins = ConfigYesNo(default=False)
else:
config.plugins.epgrefresh.show_in_plugins = ConfigYesNo(default=True)
config.plugins.epgrefresh.show_help = ConfigYesNo(default=True)
config.plugins.epgrefresh.wakeup_time = ConfigInteger(default=-1)
config.plugins.epgrefresh.showadvancedoptions = NoSave(ConfigYesNo(default=False))
# convert previous parameters
config.plugins.epgrefresh.background = ConfigYesNo(default=False)
if config.plugins.epgrefresh.background.value:
config.plugins.epgrefresh.adapter.value = "pip_hidden"
config.plugins.epgrefresh.background.value = False
config.plugins.epgrefresh.save()
config.plugins.epgrefresh.interval = ConfigNumber(default=2)
if config.plugins.epgrefresh.interval.value != 2:
config.plugins.epgrefresh.interval_seconds.value = config.plugins.epgrefresh.interval.value * 60
config.plugins.epgrefresh.interval.value = 2
config.plugins.epgrefresh.save()
#pragma mark - Help
try:
from Components.Language import language
from Plugins.SystemPlugins.MPHelp import registerHelp, XMLHelpReader
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, fileExists
lang = language.getLanguage()[:2]
HELPPATH = resolveFilename(SCOPE_PLUGINS, "Extensions/EPGRefresh")
if fileExists(HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"):
helpfile = HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"
else:
helpfile = HELPPATH + "/mphelp.xml"
reader = XMLHelpReader(helpfile)
epgrefreshHelp = registerHelp(*reader)
except Exception as e:
print("[EPGRefresh] Unable to initialize MPHelp:", e, "- Help not available!")
epgrefreshHelp = None
#pragma mark -
# Notification-Domain
# Q: Do we really need this or can we do this better?
from Tools import Notifications
try:
Notifications.notificationQueue.registerDomain(NOTIFICATIONDOMAIN, _("EPGREFRESH_NOTIFICATION_DOMAIN"), deferred_callable=True)
except Exception as e:
EPGRefreshNotificationKey = ""
#print("[EPGRefresh] Error registering Notification-Domain:", e)
# Plugin
from EPGRefresh import epgrefresh
from EPGRefreshService import EPGRefreshService
# Plugins
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
#pragma mark - Workaround for unset clock
from enigma import eDVBLocalTimeHandler
def timeCallback(isCallback=True):
"""Time Callback/Autostart management."""
thInstance = eDVBLocalTimeHandler.getInstance()
if isCallback:
# NOTE: this assumes the clock is actually ready when called back
# this may not be true, but we prefer silently dying to waiting forever
thInstance.m_timeUpdated.get().remove(timeCallback)
elif not thInstance.ready():
thInstance.m_timeUpdated.get().append(timeCallback)
return
epgrefresh.start()
# Autostart
# Mainfunction
# Eventinfo
# XXX: we need this helper function to identify the descriptor
# Extensions menu
extSetupDescriptor = PluginDescriptor(_("EPG-Refresh_SetUp"), description=_("Automatically refresh EPG"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=extensionsmenu, needsRestart=False)
extRunDescriptor = PluginDescriptor(_("EPG-Refresh_Refresh now"), description=_("Start EPGrefresh immediately"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=forceRefresh, needsRestart=False)
extStopDescriptor = PluginDescriptor(_("EPG-Refresh_Stop Refresh"), description=_("Stop Running EPG-refresh"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=stopRunningRefresh, needsRestart=False)
extPendingServDescriptor = PluginDescriptor(_("EPG-Refresh_Pending Services"), description=_("Show the pending Services for refresh"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=showPendingServices, needsRestart=False)
extPluginDescriptor = PluginDescriptor( name=_("EPGRefresh"), description=_("Automatically refresh EPG"), where=PluginDescriptor.WHERE_PLUGINMENU, fnc=main, icon="EPGRefresh.png", needsRestart=False)
config.plugins.epgrefresh.show_in_plugins.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
config.plugins.epgrefresh.show_in_extensionsmenu.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
config.plugins.epgrefresh.show_run_in_extensionsmenu.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
| 35.766854 | 224 | 0.783162 |
c56a7f8daf694ac42476f66c3d71841a3dd5c679 | 29,709 | py | Python | ongoing/prescriptors/bandit/bandit_prescriptor.py | bradyneal/covid-xprize-comp | d515f58b009a0a3e2421bc83e7ac893f3c3a1ece | [
"Apache-2.0"
] | null | null | null | ongoing/prescriptors/bandit/bandit_prescriptor.py | bradyneal/covid-xprize-comp | d515f58b009a0a3e2421bc83e7ac893f3c3a1ece | [
"Apache-2.0"
] | null | null | null | ongoing/prescriptors/bandit/bandit_prescriptor.py | bradyneal/covid-xprize-comp | d515f58b009a0a3e2421bc83e7ac893f3c3a1ece | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import os
from copy import deepcopy
import datetime
import pickle
import time
import copy
os.system('export PYTHONPATH="$(pwd):$PYTHONPATH"')
from ongoing.prescriptors.base import BasePrescriptor, PRED_CASES_COL, CASES_COL, NPI_COLUMNS, NPI_MAX_VALUES
import ongoing.prescriptors.base as base
from bandit import CCTSB
# np.warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
TMP_PRED_FILE_NAME = os.path.join(ROOT_DIR, 'tmp_predictions_for_prescriptions', 'preds.csv')
TMP_PRESCRIPTION_FILE = os.path.join(ROOT_DIR, 'tmp_prescription.csv')
MODEL_FILE = os.path.join(ROOT_DIR, 'bandits.pkl')
# Number of iterations of training for the bandit.
# Each iteration presents the bandit with a new context.
# Each iteration trains the bandit for the entire prediction window.
NB_ITERATIONS = 2
EXPLORE_ITERATIONS = 1
CHOICE = 'fixed'
# Number of days the prescriptors will look at in the past.
# Larger values here may make convergence slower, but give
# prescriptors more context. The number of inputs of each neat
# network will be NB_LOOKBACK_DAYS * (NPI_COLUMNS + 1) + NPI_COLUMNS.
# The '1' is for previous case data, and the final NPI_COLUMNS
# is for IP cost information.
# NB_LOOKBACK_DAYS = 14
# Number of countries to use for training. Again, lower numbers
# here will make training faster, since there will be fewer
# input variables, but could potentially miss out on useful info.
# NB_EVAL_COUNTRIES = 10
# Range of days the prescriptors will be evaluated on.
# To save time during training, this range may be significantly
# shorter than the maximum days a prescriptor can be evaluated on.
# EVAL_START_DATE = '2020-08-01'
# EVAL_END_DATE = '2020-08-02'
# Number of prescriptions to make per country.
# This can be set based on how many solutions in PRESCRIPTORS_FILE
# we want to run and on time constraints.
NB_PRESCRIPTIONS = 10
# OBJECTIVE_WEIGHTS = [0.01, 0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 0.99]
OBJECTIVE_WEIGHTS = [0.5, 1.0]
LOAD = True
if __name__ == '__main__':
prescriptor = Bandit(seed=42)
output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, os.pardir, 'prescriptions')
ofile_path = os.path.abspath(os.path.join(output_dir, 'bandit_evaluate.csv'))
print(ofile_path)
print()
prescriptor.evaluate(output_file_path=ofile_path)
| 46.492958 | 188 | 0.586321 |
c56aa8051395c03cfefdb6b4c31ba197b3b0d2c8 | 1,876 | py | Python | examples/server.py | zaibon/tcprouter | 7e9d2590e1b1d9d984ac742bd82fcbcf3d42b3ef | [
"BSD-3-Clause"
] | 5 | 2019-05-30T23:36:05.000Z | 2019-10-10T21:37:53.000Z | examples/server.py | zaibon/tcprouter | 7e9d2590e1b1d9d984ac742bd82fcbcf3d42b3ef | [
"BSD-3-Clause"
] | 7 | 2019-06-12T11:55:46.000Z | 2019-11-18T22:53:06.000Z | examples/server.py | xmonader/eltcprouter | b3435733d102c2435e9f62aa469d34c475cc31bd | [
"BSD-3-Clause"
] | 1 | 2021-01-05T20:09:51.000Z | 2021-01-05T20:09:51.000Z | from gevent import monkey; monkey.patch_all()
import logging
from gevent.server import StreamServer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def Handler(receiver_class):
""" A basic connection handler that applies a receiver object to each
connection.
"""
return handle
server = StreamServer(('0.0.0.0', 9092), Handler(EchoReceiver), keyfile='server.key', certfile='server.crt')
logger.info('Server running')
server.serve_forever()
| 25.351351 | 108 | 0.601812 |
c56c42d080d6ecfdd85da2bc93ed1de36bb3b713 | 9,289 | py | Python | starter.py | device42/DOQL_scripts_examples | 55cdf3868768cb4f609011575b1051d7a69c19c5 | [
"Apache-2.0"
] | 7 | 2017-10-25T13:54:18.000Z | 2022-01-25T16:16:53.000Z | starter.py | RomanNyschuk/DOQL_scripts_examples | 1ec20426dcbe586c9b93ec77002a048c6563dca6 | [
"Apache-2.0"
] | 2 | 2018-11-19T18:17:35.000Z | 2020-10-09T19:38:53.000Z | starter.py | RomanNyschuk/DOQL_scripts_examples | 1ec20426dcbe586c9b93ec77002a048c6563dca6 | [
"Apache-2.0"
] | 6 | 2018-10-18T14:39:08.000Z | 2021-04-15T19:06:01.000Z | # encoding: utf-8
import os
import ssl
import sys
import csv
import json
import time
import base64
from datetime import datetime
from datetime import timedelta
try:
import pyodbc
except ImportError:
pass
# PYTHON 2 FALLBACK #
try:
from urllib.request import urlopen, Request
from urllib.parse import urlencode
from io import StringIO
python = 3
except ImportError:
from urllib import urlencode
from urllib2 import urlopen, Request
from StringIO import StringIO
reload(sys)
sys.setdefaultencoding('utf8')
python = 2
# PYTHON 2 FALLBACK #
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
global _debug
_debug = True
if __name__ == "__main__":
if len(sys.argv) < 2:
print('Please use "python starter.py query.json".')
sys.exit()
main()
print('Done!')
sys.exit()
| 34.531599 | 262 | 0.518247 |
c56c7f75c3a3c13e0936e45885df9754bb813e14 | 5,001 | py | Python | docs/conf.py | donatelli01/donatelli_documentations | 6bf851014a96cd54c16d7d56b5677b081ca0d4e3 | [
"CC-BY-4.0"
] | null | null | null | docs/conf.py | donatelli01/donatelli_documentations | 6bf851014a96cd54c16d7d56b5677b081ca0d4e3 | [
"CC-BY-4.0"
] | null | null | null | docs/conf.py | donatelli01/donatelli_documentations | 6bf851014a96cd54c16d7d56b5677b081ca0d4e3 | [
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
import sys, os
sys.path.insert(0, os.path.abspath('extensions'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig',
'epub2', 'mobi', 'autoimage', 'code_example']
todo_include_todos = True
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = []
add_function_parentheses = True
#add_module_names = True
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
project = u'Music for Geeks and Nerds'
copyright = u'2012, Pedro Kroger'
version = ''
release = ''
# -- Options for HTML output ---------------------------------------------------
html_theme = 'book'
html_theme_path = ['themes']
html_title = "Music for Geeks and Nerds"
#html_short_title = None
#html_logo = None
#html_favicon = None
html_static_path = ['_static']
html_domain_indices = False
html_use_index = False
html_show_sphinx = False
htmlhelp_basename = 'MusicforGeeksandNerdsdoc'
html_show_sourcelink = False
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'papersize': '',
'fontpkg': '',
'fncychap': '',
'maketitle': '\\cover',
'pointsize': '',
'preamble': '',
'releasename': "",
'babel': '',
'printindex': '',
'fontenc': '',
'inputenc': '',
'classoptions': '',
'utf8extra': '',
}
latex_additional_files = ["mfgan-bw.sty", "mfgan.sty", "_static/cover.png"]
latex_documents = [
('index', 'music-for-geeks-and-nerds.tex', u'Music for Geeks and Nerds',
u'Pedro Kroger', 'manual'),
]
latex_show_pagerefs = False
latex_domain_indices = False
latex_use_modindex = False
#latex_logo = None
#latex_show_urls = False
# -- Options for Epub output ---------------------------------------------------
epub_title = u'Music for Geeks and Nerds'
epub_author = u'Pedro Kroger'
epub_publisher = u'Pedro Kroger'
epub_copyright = u'2012, Pedro Kroger'
epub_theme = 'epub2'
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ("_static/cover.png", "epub-cover.html")
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',
'_static/jquery.js', '_static/searchtools.js', '_static/underscore.js',
'_static/basic.css', 'search.html', '_static/websupport.js']
# The depth of the table of contents in toc.ncx.
epub_tocdepth = 2
# Allow duplicate toc entries.
epub_tocdup = False
# -- Options for Mobi output ---------------------------------------------------
mobi_theme = "mobi"
mobi_title = u'Music for Geeks and Nerds'
mobi_author = u'Pedro Kroger'
mobi_publisher = u'Pedro Kroger'
mobi_copyright = u'2012, Pedro Kroger'
# The scheme of the identifier. Typical schemes are ISBN or URL.
#mobi_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#mobi_identifier = ''
# A unique identification for the text.
#mobi_uid = ''
mobi_cover = "_static/cover.png"
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#mobi_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#mobi_post_files = []
# A list of files that should not be packed into the mobi file.
mobi_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',
'_static/jquery.js', '_static/searchtools.js', '_static/underscore.js',
'_static/basic.css', 'search.html', '_static/websupport.js']
# The depth of the table of contents in toc.ncx.
mobi_tocdepth = 2
# Allow duplicate toc entries.
mobi_tocdup = False
mobi_add_visible_links = False
# -- Options for Code Examples output ---------------------------------------------------
code_example_dir = "code-example"
code_add_python_path = ["../py"]
################################################################################
| 28.577143 | 89 | 0.642072 |
c56cd8896de2c6a2a8be34144a21660a056501d9 | 17,031 | py | Python | sapphire/simulation.py | alexanderzimmerman/sapphire | 1236000d201b8ff44296b0428ef31e5ff0e6078f | [
"MIT"
] | 10 | 2019-04-26T16:23:49.000Z | 2022-02-01T22:44:29.000Z | sapphire/simulation.py | alexanderzimmerman/sapphire | 1236000d201b8ff44296b0428ef31e5ff0e6078f | [
"MIT"
] | 35 | 2018-12-10T08:55:59.000Z | 2019-03-21T10:48:57.000Z | sapphire/simulation.py | alexanderzimmerman/sapphire | 1236000d201b8ff44296b0428ef31e5ff0e6078f | [
"MIT"
] | 4 | 2019-04-11T16:49:48.000Z | 2021-03-15T00:58:09.000Z | """Provides a class for constructing simulations based on Firedrake.
Simulations proceed forward in time by solving
a sequence of Initial Boundary Values Problems (IBVP's).
Using the Firedrake framework,
the PDE's are discretized in space with Finite Elements (FE).
The symbolic capabilities of Firedrake are used to
automatically implement backward difference formula (BDF) time
discretizations and to automatically linearize nonlinear problems
with Newton's method.
Nonlinear and linear solvers are provided by PETSc
and are accessed via the Firedrake interface.
This module imports `firedrake` as `fe` and its documentation writes
`fe` instead of `firedrake`.
"""
import typing
import pathlib
import ufl
import firedrake as fe
import sapphire.time_discretization
import sapphire.output
def unit_vectors(mesh) -> typing.Tuple[ufl.tensors.ListTensor]:
    """Return one spatial unit vector per geometric dimension of the mesh.

    Args:
        mesh (fe.Mesh): The mesh for the spatial discretization.
    """
    dimension = mesh.geometric_dimension()
    return tuple(fe.unit_vector(axis, dimension) for axis in range(dimension))
def time_discrete_terms(
        solutions: typing.List[fe.Function],
        timestep_size: fe.Constant) \
        -> typing.Union[
            ufl.core.operator.Operator,
            typing.List[ufl.core.operator.Operator]]:
    """Returns backward difference (BDF) time discretization terms.

    The backward difference formula's stencil size is determined by the
    number of solution snapshots provided, i.e. ``len(solutions)``.
    For example, if ``len(solutions) == 3``, the second-order BDF2
    method is used, because it involves solutions at three discrete times.

    NOTE(review): as written, this function always returns a *list* of
    time-discrete terms -- one per sub-element of the solution's (mixed)
    element, a one-item list for a non-mixed element -- even though the
    return annotation and the notes below suggest a bare term may be
    returned for non-mixed elements. Confirm which contract callers rely on.
    """
    """
    The return type design choice was made, rather than always
    returning a list (e.g. with only one item if not using a mixed
    element), so that it would be more intuitive when not using mixed
    elements.
    """
    """
    This implementation assumes constant time step size.
    Variable time step sizes change the BDF formula
    for all except first order.
    """
    # One BDF term per sub-function: gather component `i` of every stored
    # solution snapshot and hand the sequence to the BDF builder.
    time_discrete_terms = [
        sapphire.time_discretization.bdf(
            [fe.split(solutions[n])[i] for n in range(len(solutions))],
            timestep_size = timestep_size)
        for i in range(len(solutions[0].split()))]

    return time_discrete_terms
| 36.391026 | 86 | 0.575245 |
c56d0b93bb067141c9ac8d852c7ba2ad1f8b703b | 16,389 | py | Python | lookmlint/lookmlint.py | kingfink/lookmlint | 5fd76328b3ad6917e649a28abed05f64707422b6 | [
"Apache-2.0"
] | null | null | null | lookmlint/lookmlint.py | kingfink/lookmlint | 5fd76328b3ad6917e649a28abed05f64707422b6 | [
"Apache-2.0"
] | 1 | 2020-02-25T16:01:31.000Z | 2020-02-25T16:01:31.000Z | lookmlint/lookmlint.py | kingfink/lookmlint | 5fd76328b3ad6917e649a28abed05f64707422b6 | [
"Apache-2.0"
] | null | null | null | from collections import Counter
import json
import os
import re
import subprocess
import attr
import yaml
def read_lint_config(repo_path):
    """Read `.lintconfig.yml` from the root of the repo at `repo_path`.

    Args:
        repo_path: path to the repo; a leading `~` is expanded.

    Returns:
        dict with keys 'acronyms' and 'abbreviations', each a list (empty
        when the config file is absent or does not define the key).
    """
    full_path = os.path.expanduser(repo_path)
    config_filepath = os.path.join(full_path, '.lintconfig.yml')
    acronyms = []
    abbreviations = []
    if os.path.isfile(config_filepath):
        with open(config_filepath) as f:
            # safe_load instead of load: a lint config must never be able to
            # instantiate arbitrary Python objects via YAML tags. `or {}`
            # guards against an empty config file (safe_load returns None).
            config = yaml.safe_load(f) or {}
        acronyms = config.get('acronyms', acronyms)
        abbreviations = config.get('abbreviations', abbreviations)
    lint_config = {'acronyms': acronyms, 'abbreviations': abbreviations}
    return lint_config
def parse_repo(full_path):
    """Run `lookml-parser` over all .lkml files in `full_path`.

    Writes the parsed JSON to /tmp/lookmlint.json, which is then consumed
    by `lookml_from_repo_path`.
    """
    import shlex  # local import keeps this fix self-contained

    # Quote the path so directories containing spaces or shell
    # metacharacters cannot break (or inject into) the shell command.
    cmd = (
        f'cd {shlex.quote(full_path)} && '
        'lookml-parser --input="*.lkml" --whitespace=2 > /tmp/lookmlint.json'
    )
    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output, error = process.communicate()
def lookml_from_repo_path(repo_path):
    """Parse the LookML repo at `repo_path` and return a `LookML` object."""
    expanded = os.path.expanduser(repo_path)
    parse_repo(expanded)
    return LookML('/tmp/lookmlint.json')
def label_issues(label, acronyms=(), abbreviations=()):
    """Return the acronyms and abbreviations used badly in `label`.

    Args:
        label: display-label text to inspect.
        acronyms: iterable of acronyms; flagged ones are reported upper-cased.
        abbreviations: iterable of abbreviations; flagged ones title-cased.

    Returns:
        list of flagged acronyms followed by flagged abbreviations.
    """
    # Immutable tuple defaults replace the original mutable `[]` defaults
    # (shared-mutable-default pitfall). Callers passing lists are unaffected
    # since both arguments are only iterated.
    acronyms_used = [
        a.upper() for a in acronyms if _contains_bad_acronym_usage(label, a)
    ]
    abbreviations_used = [
        a.title() for a in abbreviations if _contains_bad_abbreviation_usage(label, a)
    ]
    return acronyms_used + abbreviations_used
def lint_labels(lkml, acronyms, abbreviations):
    """Aggregate acronym/abbreviation label issues across the project.

    Returns a dict with up to three keys -- 'explores', 'explore_views' and
    'fields' -- each present only when at least one issue was found there.
    """
    explore_issues = {}
    for model in lkml.models:
        found = model.explore_label_issues(acronyms, abbreviations)
        if found != {}:
            explore_issues[model.name] = found

    explore_view_issues = {}
    for model in lkml.models:
        for explore in model.explores:
            found = explore.view_label_issues(acronyms, abbreviations)
            if found != {}:
                explore_view_issues.setdefault(model.name, {})[explore.name] = found

    field_issues = {}
    for view in lkml.views:
        found = view.field_label_issues(acronyms, abbreviations)
        if found != {}:
            field_issues[view.name] = found

    # Assemble the report, omitting empty sections entirely.
    results = {}
    if explore_issues != {}:
        results['explores'] = explore_issues
    if explore_view_issues != {}:
        results['explore_views'] = explore_view_issues
    if field_issues != {}:
        results['fields'] = field_issues
    return results
def lint_duplicate_view_labels(lkml):
    """Find explores in which two or more views share the same label.

    Returns {model_name: {explore_name: duplicated-labels}}; models and
    explores without duplicates are omitted entirely.
    """
    results = {}
    for model in lkml.models:
        for explore in model.explores:
            duplicates = explore.duplicated_view_labels()
            if duplicates == {}:
                continue
            results.setdefault(model.name, {})[explore.name] = duplicates
    return results
def lint_sql_references(lkml):
    """Find explore-views whose join condition references raw SQL fields.

    Returns {model: {explore: {view: sql_on}}} for every view whose
    `contains_raw_sql_ref()` is truthy; empty levels are omitted.
    """
    results = {}
    for model in lkml.models:
        for explore in model.explores:
            for view in explore.views:
                if not view.contains_raw_sql_ref():
                    continue
                model_entry = results.setdefault(model.name, {})
                explore_entry = model_entry.setdefault(explore.name, {})
                explore_entry[view.name] = view.sql_on
    return results
def lint_view_primary_keys(lkml):
    """Return the names of views that do not declare a primary key."""
    return [view.name for view in lkml.views if not view.has_primary_key()]
def lint_missing_drill_fields(lkml):
    """Return sorted, de-duplicated (view, measure) pairs lacking drill fields."""
    missing = set()
    for view in lkml.views:
        for measure in view.measures:
            if not measure.has_drill_fields():
                missing.add((view.name, measure.name))
    return sorted(missing)
def lint_unused_includes(lkml):
    """Return {model_name: unused-include-list} for models with unused includes.

    Models whose `unused_includes()` list is empty are omitted. Calls
    `unused_includes()` once per model (the original dict comprehension
    called it twice, doing the underlying work twice per model).
    """
    issues = {}
    for model in lkml.models:
        unused = model.unused_includes()
        if unused != []:
            issues[model.name] = unused
    return issues
def lint_unused_view_files(lkml):
    """Return the project's unused view files (delegates to the LookML object)."""
    return lkml.unused_view_files()
def lint_missing_view_sql_definitions(lkml):
    """Find views that reference ${TABLE} in a field but define no SQL source.

    A view is flagged when it has no SQL definition, extends nothing, and at
    least one of its fields' SQL mentions the '${TABLE}' placeholder.
    """
    flagged = []
    for view in lkml.views:
        if view.has_sql_definition() or view.extends != []:
            continue
        if any(field.sql and '${TABLE}' in field.sql for field in view.fields):
            flagged.append(view.name)
    return flagged
def lint_semicolons_in_derived_table_sql(lkml):
    """Return names of views whose derived-table SQL contains a semicolon."""
    flagged = []
    for view in lkml.views:
        if view.derived_table_contains_semicolon():
            flagged.append(view.name)
    return flagged
def lint_select_star_in_derived_table_sql(lkml):
    """Return names of views whose derived-table SQL uses SELECT *."""
    flagged = []
    for view in lkml.views:
        if view.derived_table_contains_select_star():
            flagged.append(view.name)
    return flagged
def lint_mismatched_view_names(lkml):
    """Return view-file/view-name mismatches (delegates to the LookML object)."""
    return lkml.mismatched_view_names()
| 34.430672 | 175 | 0.626945 |
c56d395d346db6cdbf6e9c0543fb7e6ccd0a31e0 | 4,306 | py | Python | book-code/numpy-ml/numpy_ml/utils/testing.py | yangninghua/code_library | b769abecb4e0cbdbbb5762949c91847a0f0b3c5a | [
"MIT"
] | null | null | null | book-code/numpy-ml/numpy_ml/utils/testing.py | yangninghua/code_library | b769abecb4e0cbdbbb5762949c91847a0f0b3c5a | [
"MIT"
] | null | null | null | book-code/numpy-ml/numpy_ml/utils/testing.py | yangninghua/code_library | b769abecb4e0cbdbbb5762949c91847a0f0b3c5a | [
"MIT"
] | null | null | null | """Utilities for writing unit tests"""
import numbers
import numpy as np
#######################################################################
# Assertions #
#######################################################################
def is_symmetric(X):
    """Return True if the square array `X` equals its own transpose
    (to within `np.allclose` tolerance)."""
    return np.allclose(X.T, X)
def is_symmetric_positive_definite(X):
    """Check that `X` is symmetric and positive-definite.

    Symmetry is tested first; positive-definiteness is then probed via the
    Cholesky factorization, which exists exactly for symmetric/Hermitian
    positive-definite matrices.
    """
    if not np.allclose(X, X.T):
        return False
    try:
        np.linalg.cholesky(X)
        return True
    except np.linalg.LinAlgError:
        return False
def is_stochastic(X):
    """Assert that every entry of `X` is a probability and each row sums to 1.

    Raises AssertionError otherwise; returns True on success.
    """
    msg = "Array should be stochastic along the columns"
    assert not ((X < 0).any() or (X > 1).any()), msg
    assert np.allclose(X.sum(axis=1), np.ones(X.shape[0])), msg
    return True
def is_number(a):
    """Check whether `a` is numeric, i.e. an instance of `numbers.Number`."""
    return isinstance(a, numbers.Number)
def is_one_hot(x):
    """Assert every row of `x` is a one-hot vector (a single 1, rest 0s).

    Raises AssertionError otherwise; returns True on success.
    """
    msg = "Matrix should be one-hot binary"
    assert np.array_equal(x.astype(bool), x), msg
    row_sums = x.sum(axis=1)
    assert np.allclose(row_sums, np.ones(x.shape[0])), msg
    return True
def is_binary(x):
    """Assert that `x` consists only of 0/1 values; return True on success."""
    assert np.array_equal(x.astype(bool), x), "Matrix must be binary"
    return True
#######################################################################
# Data Generators #
#######################################################################
def random_one_hot_matrix(n_examples, n_classes):
    """Create a random one-hot matrix of shape (`n_examples`, `n_classes`)."""
    identity = np.eye(n_classes)
    rows = np.random.choice(n_classes, n_examples)
    return identity[rows]
def random_stochastic_matrix(n_examples, n_classes):
    """Draw a random row-stochastic matrix of shape (`n_examples`, `n_classes`)."""
    X = np.random.rand(n_examples, n_classes)
    return X / X.sum(axis=1, keepdims=True)
def random_tensor(shape, standardize=False):
    """Create a random real-valued tensor of shape `shape`.

    Each entry is a uniform [0, 1) draw plus a random integer offset in
    [-300, 300). If `standardize` is True, each column is shifted/scaled
    to mean 0 and (approximately) unit standard deviation.
    """
    X = np.random.randint(-300, 300, shape) + np.random.rand(*shape)
    if not standardize:
        return X
    eps = np.finfo(float).eps
    return (X - X.mean(axis=0)) / (X.std(axis=0) + eps)
def random_binary_tensor(shape, sparsity=0.5):
    """Create a random 0/1 tensor of shape `shape`.

    `sparsity` in [0, 1] controls the expected fraction of entries equal
    to 1 (0 gives all zeros, 1 gives all ones).
    """
    threshold = 1 - sparsity
    return (np.random.rand(*shape) >= threshold).astype(float)
def random_paragraph(n_words, vocab=None):
    """Sample `n_words` words uniformly at random, with replacement.

    Words are drawn from `vocab` when provided; otherwise from a fixed
    collection of 26 Latin filler words.
    """
    if vocab is None:
        vocab = [
            "at", "stet", "accusam", "aliquyam", "clita", "lorem", "ipsum",
            "dolor", "dolore", "dolores", "sit", "amet", "consetetur",
            "sadipscing", "elitr", "sed", "diam", "nonumy", "eirmod", "duo",
            "ea", "eos", "erat", "est", "et", "gubergren",
        ]
    return [np.random.choice(vocab) for _ in range(n_words)]
#######################################################################
# Custom Warnings #
#######################################################################
| 29.902778 | 80 | 0.510683 |
c56da321682df09ceea1c41371b833fb49044e9e | 1,373 | py | Python | test/test_googleoauth2.py | GallopLabs/libsaas | 80b2d51b81a769eacafc3847cc33700ac80e66fc | [
"MIT"
] | null | null | null | test/test_googleoauth2.py | GallopLabs/libsaas | 80b2d51b81a769eacafc3847cc33700ac80e66fc | [
"MIT"
] | null | null | null | test/test_googleoauth2.py | GallopLabs/libsaas | 80b2d51b81a769eacafc3847cc33700ac80e66fc | [
"MIT"
] | null | null | null | import unittest
from libsaas.executors import test_executor
from libsaas.services import googleoauth2
| 32.690476 | 66 | 0.584122 |
c56df3f7bc34ea2a6465e6d328eeae9b03525f21 | 5,654 | py | Python | post_office/migrations/0001_initial.py | carrerasrodrigo/django-post_office | 0257a39f9f2d20c1a42c58e8fd4dfaf591221132 | [
"MIT"
] | null | null | null | post_office/migrations/0001_initial.py | carrerasrodrigo/django-post_office | 0257a39f9f2d20c1a42c58e8fd4dfaf591221132 | [
"MIT"
] | null | null | null | post_office/migrations/0001_initial.py | carrerasrodrigo/django-post_office | 0257a39f9f2d20c1a42c58e8fd4dfaf591221132 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import post_office.fields
import post_office.validators
import post_office.models
| 49.596491 | 155 | 0.583658 |
c56e81e80b9caed3db5600ddbb8cc958f425902d | 3,890 | py | Python | ic_gan/data_utils/store_kmeans_indexes.py | ozcelikfu/IC-GAN_fMRI_Reconstruction | 31b0dc7659afbf8d12b1e460a38ab6d8d9a4296c | [
"MIT"
] | null | null | null | ic_gan/data_utils/store_kmeans_indexes.py | ozcelikfu/IC-GAN_fMRI_Reconstruction | 31b0dc7659afbf8d12b1e460a38ab6d8d9a4296c | [
"MIT"
] | null | null | null | ic_gan/data_utils/store_kmeans_indexes.py | ozcelikfu/IC-GAN_fMRI_Reconstruction | 31b0dc7659afbf8d12b1e460a38ab6d8d9a4296c | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Store dataset indexes of datapoints selected by k-means algorithm."""
from argparse import ArgumentParser
import numpy as np
import os
import h5py as h5
import faiss
if __name__ == "__main__":
parser = ArgumentParser(
description="Storing cluster indexes for k-means-based data subsampling"
)
parser.add_argument(
"--resolution",
type=int,
default=64,
help="Data resolution (default: %(default)s)",
)
parser.add_argument(
"--which_dataset", type=str, default="imagenet", help="Dataset choice."
)
parser.add_argument(
"--data_root",
type=str,
default="data",
help="Default location where data is stored (default: %(default)s)",
)
parser.add_argument(
"--feature_extractor",
type=str,
default="classification",
choices=["classification", "selfsupervised"],
help="Choice of feature extractor",
)
parser.add_argument(
"--backbone_feature_extractor",
type=str,
default="resnet50",
choices=["resnet50"],
help="Choice of feature extractor backbone",
)
parser.add_argument(
"--kmeans_subsampled",
type=int,
default=-1,
help="Number of k-means centers if using subsampled training instances"
" (default: %(default)s)",
)
parser.add_argument(
"--gpu",
action="store_true",
default=False,
help="Use faiss with GPUs (default: %(default)s)",
)
args = vars(parser.parse_args())
main(args)
| 29.029851 | 100 | 0.607198 |
c570fd6a05953760ae560c4fbed0f8ac9f2fd02d | 100 | py | Python | src/cattrs/errors.py | aha79/cattrs | 50ba769c8349f5891b157d2bb7f06602822ac0a3 | [
"MIT"
] | null | null | null | src/cattrs/errors.py | aha79/cattrs | 50ba769c8349f5891b157d2bb7f06602822ac0a3 | [
"MIT"
] | null | null | null | src/cattrs/errors.py | aha79/cattrs | 50ba769c8349f5891b157d2bb7f06602822ac0a3 | [
"MIT"
] | null | null | null | from cattr.errors import StructureHandlerNotFoundError
__all__ = ["StructureHandlerNotFoundError"]
| 25 | 54 | 0.86 |
c5744b17de40e44fcacba60862bc64a6577cf8bb | 4,873 | py | Python | plugins/funcs.py | prxpostern/URLtoTG003 | b41ef5e756193798d8f92ccaa55c0fd7ab5ef931 | [
"MIT"
] | null | null | null | plugins/funcs.py | prxpostern/URLtoTG003 | b41ef5e756193798d8f92ccaa55c0fd7ab5ef931 | [
"MIT"
] | null | null | null | plugins/funcs.py | prxpostern/URLtoTG003 | b41ef5e756193798d8f92ccaa55c0fd7ab5ef931 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from main import Config
from pyrogram import filters
from pyrogram import Client
#from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from urllib.parse import quote_plus, unquote
import math, os, time, datetime, aiohttp, asyncio, mimetypes, logging
from helpers.download_from_url import download_file, get_size
from helpers.file_handler import send_to_transfersh_async, progress
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from helpers.display_progress import progress_for_pyrogram, humanbytes
from helpers.tools import execute
from helpers.ffprobe import stream_creator
from helpers.thumbnail_video import thumb_creator
from helpers.url_uploader import leecher2
from helpers.video_renamer import rnv2
from helpers.audio_renamer import rna2
from helpers.file_renamer import rnf2
from helpers.vconverter import to_video2
from helpers.media_info import cinfo2
from helpers.link_info import linfo2
logger = logging.getLogger(__name__)
HELP_TXT = """
A Simple Telegram Bot to
Upload Files From **Direct** and **Google Drive** and **Youtube** Links,
Convert Document Media to Video,
and Rename Audio/Video/Document Files.
/upload : reply to your url .
`http://aaa.bbb.ccc/ddd.eee` | **fff.ggg**
or
`http://aaa.bbb.ccc/ddd.eee`
/c2v : reply to your document to convert it into streamable video.
/rnv : reply to your video. Example:
`/rnv | videoname`
/rna : reply to your audio. \"`-`\" : leave without change.
`/rna | audioname | title | artists`
`/rna | audioname`
`/rna | - | title`
`/rna | - | - | artists`
/rnf : reply to your document. Example:
`/rnf | filename.ext`
"""
| 36.916667 | 195 | 0.692592 |
c574ba0d5085fcc10f94dc14bafe60401b5587a7 | 2,304 | py | Python | git_code_debt/repo_parser.py | cclauss/git-code-debt | 6ced089857d3ccda4a00d274e85d7f26de0bdefd | [
"MIT"
] | null | null | null | git_code_debt/repo_parser.py | cclauss/git-code-debt | 6ced089857d3ccda4a00d274e85d7f26de0bdefd | [
"MIT"
] | null | null | null | git_code_debt/repo_parser.py | cclauss/git-code-debt | 6ced089857d3ccda4a00d274e85d7f26de0bdefd | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import contextlib
import shutil
import subprocess
import tempfile
from git_code_debt.util.iter import chunk_iter
from git_code_debt.util.subprocess import cmd_output
# A commit is identified by its 40-character SHA plus a commit timestamp.
Commit = collections.namedtuple('Commit', ('sha', 'date'))
# Sentinel "empty" commit: the all-zero sha (git's null object id) at time 0.
Commit.blank = Commit('0' * 40, 0)
# `git log` format string: commit hash, newline, committer date as unix time.
COMMIT_FORMAT = '--format=%H%n%ct'
| 26.790698 | 74 | 0.588542 |
c574d6290b0c40bcbc5696cd5ebb36152641b976 | 215 | py | Python | func_one.py | FoxProklya/Step-Python | 67514509655e552fc5adcc7963b971ef6f0bb46a | [
"MIT"
] | null | null | null | func_one.py | FoxProklya/Step-Python | 67514509655e552fc5adcc7963b971ef6f0bb46a | [
"MIT"
] | null | null | null | func_one.py | FoxProklya/Step-Python | 67514509655e552fc5adcc7963b971ef6f0bb46a | [
"MIT"
] | null | null | null |
x = int(input())
print(f(x))
| 14.333333 | 26 | 0.316279 |
c576551072f708a32f1945826e72ae5d21285cce | 2,605 | py | Python | scripts/multiprocess_tokenizer/worker.py | talolard/vampire | e2ae46112fda237b072453c9f1c5e89bd7b4135b | [
"Apache-2.0"
] | null | null | null | scripts/multiprocess_tokenizer/worker.py | talolard/vampire | e2ae46112fda237b072453c9f1c5e89bd7b4135b | [
"Apache-2.0"
] | null | null | null | scripts/multiprocess_tokenizer/worker.py | talolard/vampire | e2ae46112fda237b072453c9f1c5e89bd7b4135b | [
"Apache-2.0"
] | null | null | null | import typing
from typing import Any
import json
import os
from multiprocessing import Process, Queue
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from spacy.tokenizer import Tokenizer
import spacy
from tqdm.auto import tqdm
import time
nlp = spacy.load("en")
| 28.315217 | 97 | 0.6119 |
c578aaaded2e7b75110f4c69848a9fb001f45ff0 | 5,823 | py | Python | MC-Fisher.py | hosua/Minecraft-Fisher | 416c476cd6e5ef0c6bb978aacd9816aa9ba36f7e | [
"MIT"
] | null | null | null | MC-Fisher.py | hosua/Minecraft-Fisher | 416c476cd6e5ef0c6bb978aacd9816aa9ba36f7e | [
"MIT"
] | null | null | null | MC-Fisher.py | hosua/Minecraft-Fisher | 416c476cd6e5ef0c6bb978aacd9816aa9ba36f7e | [
"MIT"
] | null | null | null | # For larger scale projects, I really should learn to use classes... lol
from PIL import ImageGrab, ImageTk, Image
import keyboard
import pyautogui
import tkinter as tk
import os
import time, datetime
import text_redirect as TR
import sys
# GUI stuff
TITLE = "Minecraft-Fisher - Made by Hoswoo"
DARK_BLUE = '#0A3D62'
LIGHT_BLUE = "#7ddeff"
DARK_GREY = "#2C3335"
CONSOLE_BG = '#A1AAB5'
FONT_BIG = ('calibre', 12, 'bold')
FONT = ('calibre', 10, 'bold')
FONT_CONSOLE = ('Times', 10, 'normal')
SIZE = ("400x500")
root = tk.Tk()
root.configure(bg=DARK_BLUE)
root.title(TITLE)
root.geometry(SIZE)
root_dir = os.getcwd()
# GUI Console
console_frame = tk.Frame(root, bg=DARK_BLUE, height=250, width=200)
console_sub_frame = tk.Frame(console_frame, bg=DARK_BLUE)
console_text = tk.Text(root, height=12,
width=60, bg=CONSOLE_BG, fg=DARK_GREY, font=FONT_CONSOLE)
console_text.config(state="disabled")
console_text.see("end")
sys.stdout = TR.TextRedirector(console_text) # Send console output to textbox instead of actual console.
# sys.stderr = TR.TextRedirector(console_text) # Errors will output in console
print("PLEASE READ BEFORE USING:\n")
print("The bot works by detecting a specific shade of red on the bobber. With that being said...")
print("Before you use the bot, you should turn your brightness all the way up.")
print("You will also have to map your right-mouse-click to 'r'. (This was a workaround due to the mouse input causing issues)")
print("For best results, ensure you are in a very well lit area and that the fish bobber appears within your capture region!")
print("NOTE: If your health hearts are in the capture region, it will falsely detect the bobber.")
# Global constants
BOBBER_COLOR = (208, 41, 41, 255)
BOBBER_COLOR_NIGHT = (206, 40, 39, 255)
region_var = tk.StringVar()
region_var.set(300) # Default to 300, should work for most people.
BOX_SIZE = int(region_var.get()) # get box size from spinbox
FILENAME = "pic.png"
x = 0
y = 0
region_label = tk.Label(root, text="Region size",
bg=DARK_BLUE, fg=LIGHT_BLUE, font=FONT)
region_spinbox = tk.Spinbox(root, from_=25, to=1000,
increment=25, textvariable=region_var, width=6)
range_validation = root.register(validate)
region_spinbox.config(validate="key", validatecommand=(range_validation, '% P')) # Absolutely no idea how this works lol
pic_frame = tk.Frame(root, bg="#FFFFFF", height=BOX_SIZE, width=BOX_SIZE)
#img = ImageTk.PhotoImage(Image.open(FILENAME))
pic_frame_label = tk.Label(pic_frame)
pic_frame_label.pack()
pic_frame.pack()
running = False
times_not_detected = 0
start_btn = tk.Button(root, text="Start (~)", bg=DARK_GREY,
fg=LIGHT_BLUE, command=start_task, width=10)
stop_btn = tk.Button(root, text="Stop (F1)", bg=DARK_GREY,
fg=LIGHT_BLUE, command=stop_task, width=10)
region_label.pack()
region_spinbox.pack()
start_btn.pack()
stop_btn.pack()
console_frame.pack()
console_sub_frame.pack()
console_text.pack()
keyboard.add_hotkey('`', start_task)
keyboard.add_hotkey('F1', stop_task)
root.mainloop()
| 34.052632 | 127 | 0.665636 |
3d63dfe6fe9f0bef4a7c9bfd9c4a5ff955fbcafe | 1,248 | py | Python | ModelAnalysis/biomodel_iterator.py | BioModelTools/ModelAnalysis | 89d6426ec9fbbb6836897889266848793d109dcc | [
"MIT"
] | null | null | null | ModelAnalysis/biomodel_iterator.py | BioModelTools/ModelAnalysis | 89d6426ec9fbbb6836897889266848793d109dcc | [
"MIT"
] | 3 | 2017-09-04T20:06:45.000Z | 2017-09-07T01:57:45.000Z | ModelAnalysis/biomodel_iterator.py | BioModelTools/ModelAnalysis | 89d6426ec9fbbb6836897889266848793d109dcc | [
"MIT"
] | null | null | null | """
Iterates through a collection of BioModels
"""
from sbml_shim import SBMLShim
import sys
import os.path
################################################
# Classes that count pattern occurrences
################################################
if __name__ == '__main__':
main(sys.argv)
| 25.469388 | 80 | 0.584936 |
3d640bec431e81affc07c61301d5e5f1d49c75e8 | 411 | py | Python | app/domain/company/models.py | JBizarri/fast-api-crud | 3eb0391c1a1f2e054092de717b73898c7efed5cb | [
"MIT"
] | null | null | null | app/domain/company/models.py | JBizarri/fast-api-crud | 3eb0391c1a1f2e054092de717b73898c7efed5cb | [
"MIT"
] | null | null | null | app/domain/company/models.py | JBizarri/fast-api-crud | 3eb0391c1a1f2e054092de717b73898c7efed5cb | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import TYPE_CHECKING, List
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
from ...database import BaseModel
if TYPE_CHECKING:
from ..user.models import User
| 20.55 | 63 | 0.737226 |
3d66a81186dceebace0295ecba9cdcb9533d8966 | 1,913 | py | Python | tools/captcha_image_downloader.py | metormaon/signum-py | 7c6eaf11025f77c4cfbe6fb9aa77b5dadb485d8c | [
"MIT"
] | null | null | null | tools/captcha_image_downloader.py | metormaon/signum-py | 7c6eaf11025f77c4cfbe6fb9aa77b5dadb485d8c | [
"MIT"
] | 1 | 2020-08-01T23:28:38.000Z | 2020-08-01T23:28:38.000Z | tools/captcha_image_downloader.py | metormaon/signum-py | 7c6eaf11025f77c4cfbe6fb9aa77b5dadb485d8c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from os import path
from google_images_download import google_images_download
# Search terms used to populate the local CAPTCHA image library.
KEYWORDS = [
    "dog", "cat", "bird", "elephant", "fork", "knife", "spoon", "carrot", "orange", "turnip", "tomato", "potato",
    "water", "hair", "table", "chair", "house", "factory", "microwave", "cigarette", "ashtray", "brush", "battery",
    "comb", "box", "book", "bag", "calendar", "computer", "lipstick", "pencil", "perfume", "telephone", "television",
    "headset", "angry", "apple", "armour", "baby", "bag", "ball", "bank", "basket", "bath", "bear", "bean", "bell",
    "blue", "bottle", "bread", "bridge", "bus", "cake", "candle", "car", "card", "cheese", "chicken", "chocolate",
    "circle", "clock", "cloud", "coffee", "coat", "coin", "cook", "corn", "cup", "dance", "deer", "desk", "door",
    "dress", "duck", "happy", "smile", "yellow", "ear", "earth", "mars", "saturn", "jupiter", "egg", "eight", "one",
    "two", "three", "four", "five", "six", "seven", "nine", "ten", "electricity", "piano", "guitar", "flute", "drum",
    "exit", "dark", "excited", "surprise", "eye", "nose", "mouth", "leg", "hand", "face", "family", "farm", "fat",
    "fear", "finger", "fire", "flag", "flower", "fly", "food", "football", "forest", "fox", "friend", "garden", "game",
    "gate",
]

for keyword in KEYWORDS:
    # Skip keywords whose image directory already exists from a previous run.
    if path.exists("../captcha-images/" + keyword):
        print("Skipping " + keyword)
        continue
    downloader = google_images_download.googleimagesdownload()
    query = {
        "keywords": keyword,
        "limit": 15,
        "print_urls": True,
        "usage_rights": "labeled-for-reuse",
        "output_directory": "../captcha-images",
        "safe_search": True,
        "format": "jpg",
        "size": "medium",
    }
    paths = downloader.download(query)
    print(paths)
| 51.702703 | 119 | 0.526398 |
3d6b7691e8c5eed4e135eafd2eed629b0d7310de | 4,752 | py | Python | ott2butKAMA1/jessetkdata/dnafiles/BNB-USDT 2018-02-15 2021-01-01.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 38 | 2021-09-18T15:33:28.000Z | 2022-02-21T17:29:08.000Z | ott2butKAMA1/jessetkdata/dnafiles/BNB-USDT 2018-02-15 2021-01-01.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 4 | 2022-01-02T14:46:12.000Z | 2022-02-16T18:39:41.000Z | ott2butKAMA1/jessetkdata/dnafiles/BNB-USDT 2018-02-15 2021-01-01.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 11 | 2021-10-19T06:21:43.000Z | 2022-02-21T17:29:10.000Z | dnas = [
['jVXfX<', 37, 64, 24.67, 14, 7, -4.53, {'ott_len': 42, 'ott_percent': 508, 'stop_loss': 263, 'risk_reward': 65, 'chop_rsi_len': 31, 'chop_bandwidth': 83}],
['o:JK9p', 50, 62, 32.74, 37, 8, -0.2, {'ott_len': 45, 'ott_percent': 259, 'stop_loss': 201, 'risk_reward': 41, 'chop_rsi_len': 12, 'chop_bandwidth': 274}],
['tGVME/', 35, 74, 20.06, 20, 10, -4.75, {'ott_len': 48, 'ott_percent': 375, 'stop_loss': 254, 'risk_reward': 43, 'chop_rsi_len': 20, 'chop_bandwidth': 36}],
['a<<sMo', 59, 27, 25.74, 33, 6, -1.06, {'ott_len': 36, 'ott_percent': 277, 'stop_loss': 139, 'risk_reward': 76, 'chop_rsi_len': 24, 'chop_bandwidth': 271}],
['`Ol@gL', 29, 65, 9.47, 25, 8, -2.95, {'ott_len': 36, 'ott_percent': 446, 'stop_loss': 351, 'risk_reward': 31, 'chop_rsi_len': 40, 'chop_bandwidth': 142}],
['SWJi?Y', 36, 73, 32.8, 37, 8, -0.92, {'ott_len': 28, 'ott_percent': 516, 'stop_loss': 201, 'risk_reward': 68, 'chop_rsi_len': 16, 'chop_bandwidth': 190}],
['v@WLkU', 46, 47, 45.51, 20, 10, -4.43, {'ott_len': 49, 'ott_percent': 313, 'stop_loss': 258, 'risk_reward': 42, 'chop_rsi_len': 43, 'chop_bandwidth': 175}],
['lR\\iHN', 38, 62, 35.84, 28, 7, -4.01, {'ott_len': 43, 'ott_percent': 472, 'stop_loss': 280, 'risk_reward': 68, 'chop_rsi_len': 21, 'chop_bandwidth': 149}],
['l7\\gc^', 60, 35, 42.7, 25, 8, -1.2, {'ott_len': 43, 'ott_percent': 233, 'stop_loss': 280, 'risk_reward': 66, 'chop_rsi_len': 38, 'chop_bandwidth': 208}],
['wLXY\\1', 36, 71, 20.85, 14, 7, -4.76, {'ott_len': 50, 'ott_percent': 419, 'stop_loss': 263, 'risk_reward': 53, 'chop_rsi_len': 34, 'chop_bandwidth': 43}],
['i7nMgb', 54, 24, 28.38, 0, 4, -2.04, {'ott_len': 41, 'ott_percent': 233, 'stop_loss': 360, 'risk_reward': 43, 'chop_rsi_len': 40, 'chop_bandwidth': 223}],
['F/0eI[', 40, 154, 33.68, 42, 21, 2.91, {'ott_len': 20, 'ott_percent': 162, 'stop_loss': 85, 'risk_reward': 64, 'chop_rsi_len': 22, 'chop_bandwidth': 197}],
['\\ERgMp', 53, 28, 16.3, 33, 6, -2.59, {'ott_len': 33, 'ott_percent': 357, 'stop_loss': 236, 'risk_reward': 66, 'chop_rsi_len': 24, 'chop_bandwidth': 274}],
['_7@QqN', 44, 87, 28.24, 46, 15, 3.21, {'ott_len': 35, 'ott_percent': 233, 'stop_loss': 156, 'risk_reward': 46, 'chop_rsi_len': 46, 'chop_bandwidth': 149}],
['OEJO,F', 41, 105, 33.62, 20, 10, -4.61, {'ott_len': 25, 'ott_percent': 357, 'stop_loss': 201, 'risk_reward': 45, 'chop_rsi_len': 4, 'chop_bandwidth': 120}],
['5swn)a', 30, 86, 13.25, 8, 12, -6.03, {'ott_len': 9, 'ott_percent': 765, 'stop_loss': 400, 'risk_reward': 72, 'chop_rsi_len': 3, 'chop_bandwidth': 219}],
['4juD3[', 36, 95, 32.91, 14, 7, -3.13, {'ott_len': 8, 'ott_percent': 685, 'stop_loss': 391, 'risk_reward': 35, 'chop_rsi_len': 9, 'chop_bandwidth': 197}],
['91u6iJ', 33, 163, 31.1, 25, 27, -3.59, {'ott_len': 12, 'ott_percent': 180, 'stop_loss': 391, 'risk_reward': 22, 'chop_rsi_len': 41, 'chop_bandwidth': 135}],
['c3rg61', 39, 91, 11.05, 27, 11, -1.18, {'ott_len': 38, 'ott_percent': 197, 'stop_loss': 378, 'risk_reward': 66, 'chop_rsi_len': 11, 'chop_bandwidth': 43}],
['\\BAZGb', 40, 71, 22.33, 36, 11, -3.44, {'ott_len': 33, 'ott_percent': 330, 'stop_loss': 161, 'risk_reward': 54, 'chop_rsi_len': 21, 'chop_bandwidth': 223}],
['H<XF,l', 40, 98, 31.16, 16, 12, -5.22, {'ott_len': 21, 'ott_percent': 277, 'stop_loss': 263, 'risk_reward': 37, 'chop_rsi_len': 4, 'chop_bandwidth': 260}],
['5Bl/TL', 32, 153, 26.35, 28, 21, 0.03, {'ott_len': 9, 'ott_percent': 330, 'stop_loss': 351, 'risk_reward': 16, 'chop_rsi_len': 29, 'chop_bandwidth': 142}],
['DFRlX-', 38, 112, 21.16, 27, 11, -1.95, {'ott_len': 18, 'ott_percent': 366, 'stop_loss': 236, 'risk_reward': 70, 'chop_rsi_len': 31, 'chop_bandwidth': 28}],
['1EkquE', 33, 156, 45.58, 27, 18, -1.61, {'ott_len': 7, 'ott_percent': 357, 'stop_loss': 347, 'risk_reward': 75, 'chop_rsi_len': 49, 'chop_bandwidth': 116}],
['D9YmB.', 35, 139, 12.09, 42, 14, -1.17, {'ott_len': 18, 'ott_percent': 251, 'stop_loss': 267, 'risk_reward': 71, 'chop_rsi_len': 18, 'chop_bandwidth': 32}],
['_(KrZG', 40, 145, 18.09, 28, 21, -4.73, {'ott_len': 35, 'ott_percent': 100, 'stop_loss': 205, 'risk_reward': 76, 'chop_rsi_len': 32, 'chop_bandwidth': 124}],
['1CndgF', 34, 156, 49.82, 41, 17, 2.8, {'ott_len': 7, 'ott_percent': 339, 'stop_loss': 360, 'risk_reward': 63, 'chop_rsi_len': 40, 'chop_bandwidth': 120}],
['tutp,b', 50, 40, 52.45, 0, 5, -5.75, {'ott_len': 48, 'ott_percent': 782, 'stop_loss': 387, 'risk_reward': 74, 'chop_rsi_len': 4, 'chop_bandwidth': 223}],
['07t1iJ', 30, 199, 23.05, 26, 30, -1.64, {'ott_len': 6, 'ott_percent': 233, 'stop_loss': 387, 'risk_reward': 18, 'chop_rsi_len': 41, 'chop_bandwidth': 135}],
['75\\adC', 37, 200, 68.9, 21, 32, -4.78, {'ott_len': 10, 'ott_percent': 215, 'stop_loss': 280, 'risk_reward': 61, 'chop_rsi_len': 38, 'chop_bandwidth': 109}],
]
| 144 | 159 | 0.619529 |
3d6d1e7bb92fb8ada9eb142b244859a83f2f343d | 2,909 | py | Python | modules/winrm/isodate/__init__.py | frankyrumple/smc | 975945ddcff754dd95f2e1a8bd4bf6e43a0f91f6 | [
"MIT"
] | null | null | null | modules/winrm/isodate/__init__.py | frankyrumple/smc | 975945ddcff754dd95f2e1a8bd4bf6e43a0f91f6 | [
"MIT"
] | null | null | null | modules/winrm/isodate/__init__.py | frankyrumple/smc | 975945ddcff754dd95f2e1a8bd4bf6e43a0f91f6 | [
"MIT"
] | null | null | null | ##############################################################################
# Copyright 2009, Gerhard Weis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##############################################################################
'''
Import all essential functions and constants to re-export them here for easy
access.
This module also contains various pre-defined ISO 8601 format strings.
'''
from .isodates import parse_date, date_isoformat
from .isotime import parse_time, time_isoformat
from .isodatetime import parse_datetime, datetime_isoformat
from .isoduration import parse_duration, duration_isoformat, Duration
from .isoerror import ISO8601Error
from .isotzinfo import parse_tzinfo, tz_isoformat
from .tzinfo import UTC, FixedOffset, LOCAL
from .duration import Duration
from .isostrf import strftime
from .isostrf import DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE
from .isostrf import DATE_BAS_WEEK, DATE_BAS_WEEK_COMPLETE
from .isostrf import DATE_CENTURY, DATE_EXT_COMPLETE
from .isostrf import DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK
from .isostrf import DATE_EXT_WEEK_COMPLETE, DATE_MONTH, DATE_YEAR
from .isostrf import TIME_BAS_COMPLETE, TIME_BAS_MINUTE
from .isostrf import TIME_EXT_COMPLETE, TIME_EXT_MINUTE
from .isostrf import TIME_HOUR
from .isostrf import TZ_BAS, TZ_EXT, TZ_HOUR
from .isostrf import DT_BAS_COMPLETE, DT_EXT_COMPLETE
from .isostrf import DT_BAS_ORD_COMPLETE, DT_EXT_ORD_COMPLETE
from .isostrf import DT_BAS_WEEK_COMPLETE, DT_EXT_WEEK_COMPLETE
from .isostrf import D_DEFAULT, D_WEEK, D_ALT_EXT, D_ALT_BAS
from .isostrf import D_ALT_BAS_ORD, D_ALT_EXT_ORD
| 51.946429 | 78 | 0.77243 |
3d6fef82415cc33c1f679313aef262f6b3b670a9 | 17,848 | py | Python | sbvat/utils.py | thudzj/BVAT | 2c7073cb7967583035eece7f4819821b313d73e6 | [
"MIT"
] | 3 | 2019-08-04T03:05:51.000Z | 2021-04-24T02:35:05.000Z | sbvat/utils.py | thudzj/BVAT | 2c7073cb7967583035eece7f4819821b313d73e6 | [
"MIT"
] | null | null | null | sbvat/utils.py | thudzj/BVAT | 2c7073cb7967583035eece7f4819821b313d73e6 | [
"MIT"
] | 1 | 2019-12-29T13:49:22.000Z | 2019-12-29T13:49:22.000Z | import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
import tensorflow as tf
import os
import time
import json
from networkx.readwrite import json_graph
from sklearn.metrics import f1_score
import multiprocessing
def parse_index_file(filename):
    """Parse an index file containing one integer per line.

    Args:
        filename: path to a text file with one index per line
            (surrounding whitespace is ignored).

    Returns:
        List of ints, in file order.
    """
    # A context manager closes the handle deterministically; the original
    # `for line in open(...)` leaked the file object to the GC.
    with open(filename) as f:
        return [int(line.strip()) for line in f]
def sample_mask(idx, l):
    """Create a boolean mask of length ``l`` that is True at positions ``idx``.

    Args:
        idx: index or sequence of indices to set.
        l: total mask length.

    Returns:
        1-D numpy array of dtype bool.
    """
    # Build the mask as boolean from the start. The original round-tripped
    # through a float array and cast with `np.bool`, an alias that was
    # deprecated in NumPy 1.20 and removed in 1.24 (AttributeError today).
    mask = np.zeros(l, dtype=bool)
    mask[idx] = True
    return mask
def sparse_to_tuple(sparse_mx):
    """Convert sparse matrix to tuple representation.

    Accepts either a single scipy sparse matrix or a list of them; a list
    is converted element-wise and mutated in place.

    NOTE(review): delegates to a helper ``to_tuple`` that is not visible in
    this chunk -- presumably it returns a (coords, values, shape) tuple;
    confirm against the full file.
    """
    if isinstance(sparse_mx, list):
        # Convert every matrix in place; the mutated list is returned.
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)
    return sparse_mx
def preprocess_features(features, sparse=True):
    """Row-normalize a feature matrix.

    Each row is scaled so its entries sum to 1; all-zero rows are left
    unchanged (their inverse row sum is replaced by 0).

    Args:
        features: scipy sparse feature matrix.
        sparse: if True, return the tuple representation produced by
            ``sparse_to_tuple``; otherwise return a dense ndarray.
    """
    row_sums = np.array(features.sum(1))
    inv_sums = np.power(row_sums, -1).flatten()
    # Zero rows invert to inf; map those entries back to zero.
    inv_sums[np.isinf(inv_sums)] = 0.
    normalized = sp.diags(inv_sums).dot(features)
    return sparse_to_tuple(normalized) if sparse else normalized.toarray()
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 * A * D^-1/2.

    Args:
        adj: adjacency matrix (anything ``sp.coo_matrix`` accepts).

    Returns:
        The normalized matrix in COO format; zero-degree nodes stay zero.
    """
    adj_coo = sp.coo_matrix(adj)
    degrees = np.array(adj_coo.sum(1))
    inv_sqrt_deg = np.power(degrees, -0.5).flatten()
    # Isolated nodes have zero degree -> inf after the power; zero them out.
    inv_sqrt_deg[np.isinf(inv_sqrt_deg)] = 0.
    half_norm = sp.diags(inv_sqrt_deg)
    return adj_coo.dot(half_norm).transpose().dot(half_norm).tocoo()
def preprocess_adj(adj):
    """GCN preprocessing: normalize A + I symmetrically, return tuple form.

    Args:
        adj: square scipy sparse adjacency matrix.
    """
    # Add self-loops before the symmetric normalization.
    with_self_loops = adj + sp.eye(adj.shape[0])
    return sparse_to_tuple(normalize_adj(with_self_loops))
def construct_feed_dict(features, support, labels, labels_mask, placeholders, nbrs):
    """Build the feed dictionary for one training/evaluation step.

    Maps each placeholder to its value, including a freshly sampled
    adversarial node mask drawn from ``nbrs`` via ``sample_nodes``.
    """
    feed_dict = {
        placeholders['labels']: labels,
        placeholders['labels_mask']: labels_mask,
        placeholders['features']: features,
        placeholders['support']: support,
        # features[1] presumably holds the nonzero values of the sparse
        # tuple representation -- confirm against sparse_to_tuple.
        placeholders['num_features_nonzero']: features[1].shape,
        placeholders['adv_mask1']: sample_nodes(nbrs),
    }
    return feed_dict
def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))
    # Symmetric normalization D^-1/2 A D^-1/2 of the adjacency matrix.
    adj_normalized = normalize_adj(adj)
    # Normalized graph Laplacian L = I - D^-1/2 A D^-1/2.
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    # Largest-magnitude eigenvalue of L, used to rescale its spectrum.
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
    # T_0 = I and T_1 = scaled Laplacian seed the recurrence.
    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)
    # Higher orders via `chebyshev_recurrence`, which is not visible in this
    # chunk -- presumably T_k = 2 * L_hat * T_{k-1} - T_{k-2}; confirm.
    for i in range(2, k+1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
    return sparse_to_tuple(t_k)
3d71bcc45f53747aca6197878307201d4f4b2564 | 506 | py | Python | tags/models.py | yuyuyuhaoshi/Blog-BE | a485d5159076d619d4fd6019fe9b96ac04020d4d | [
"Apache-2.0"
] | null | null | null | tags/models.py | yuyuyuhaoshi/Blog-BE | a485d5159076d619d4fd6019fe9b96ac04020d4d | [
"Apache-2.0"
] | null | null | null | tags/models.py | yuyuyuhaoshi/Blog-BE | a485d5159076d619d4fd6019fe9b96ac04020d4d | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.utils.timezone import now
from django.contrib.auth.models import User
from utils.base_model import SoftDeletionModel
| 26.631579 | 88 | 0.717391 |