hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
54d9afa8624f72c8f6f8e3ffc3c4fcb52e42ad11
| 1,744
|
py
|
Python
|
iri-node/fabfile.py
|
jinnerbichler/home-automflashion
|
f93442712322ab819651f453437c11f685640e83
|
[
"Apache-2.0"
] | 8
|
2018-02-06T15:18:08.000Z
|
2020-07-12T20:16:22.000Z
|
iri-node/fabfile.py
|
jinnerbichler/home-autoflashion
|
f93442712322ab819651f453437c11f685640e83
|
[
"Apache-2.0"
] | 1
|
2018-09-02T17:10:57.000Z
|
2018-10-02T04:14:43.000Z
|
iri-node/fabfile.py
|
jinnerbichler/home-autoflashion
|
f93442712322ab819651f453437c11f685640e83
|
[
"Apache-2.0"
] | 1
|
2019-08-14T04:39:48.000Z
|
2019-08-14T04:39:48.000Z
|
import time
from fabric.api import run, env, task, put, cd, local, sudo
# Fabric deployment configuration: reuse the user's ~/.ssh/config so the
# host alias below resolves to whatever that file defines.
env.use_ssh_config = True
# All tasks in this fabfile target the single host alias "iota_node".
env.hosts = ['iota_node']
| 24.222222
| 113
| 0.639908
|
54da3dc2f38e9f403fcf4bc41db3259f59c8f372
| 1,763
|
py
|
Python
|
features.py
|
ptorresmanque/MachineLearning_v2.0
|
795e47b9cfc68f4e0fefb700d43af6c59e2f1d73
|
[
"MIT"
] | null | null | null |
features.py
|
ptorresmanque/MachineLearning_v2.0
|
795e47b9cfc68f4e0fefb700d43af6c59e2f1d73
|
[
"MIT"
] | null | null | null |
features.py
|
ptorresmanque/MachineLearning_v2.0
|
795e47b9cfc68f4e0fefb700d43af6c59e2f1d73
|
[
"MIT"
] | null | null | null |
import sqlite3
from random import randint, choice
import numpy as np

# Read feature-size statistics from the local SQLite database and write
# synthetic negative samples back into the same table.
conn = sqlite3.connect('ej.db')
c = conn.cursor()

# OBTAINING MAXIMUM, MINIMUM AND AVERAGE SIZES
c.execute('SELECT MAX(alto) FROM features')
resultado = c.fetchone()
if resultado:
    # NOTE(review): on an empty table fetchone() still returns (None,), so
    # altoMax would become None and the arithmetic below would fail --
    # presumably the table is assumed to be non-empty. TODO confirm.
    altoMax = resultado[0]
c.execute('SELECT MIN(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMin = resultado[0]
altoProm = abs((altoMax + altoMin) / 2)  # midpoint of the height range
#print altoMax , altoProm , altoMin
arrAlto = [altoMax , altoProm , altoMin]

c.execute('SELECT MAX(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMax = resultado[0]
c.execute('SELECT MIN(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMin = resultado[0]
anchoProm = abs((anchoMax + anchoMin) / 2)      # midpoint of the width range
anchoMaxProm = abs((anchoMax + anchoProm) / 2)  # between max and midpoint
anchoMinProm = abs((anchoMin + anchoProm) / 2)  # between min and midpoint
arrAncho = [anchoMax, anchoMaxProm, anchoProm, anchoMinProm, anchoMin]

#### CREATING NEGATIVE CLASSES
# For every (height, width) reference pair, insert 10 synthetic rows whose
# dimensions fall clearly outside the observed range (clase = 0).
for i in range(0,3):
    for j in range(0,5):
        for _ in range(10):
            negAncho = arrAncho[j]
            negAlto = arrAlto[i]
            rand_alto_max = int(negAlto * 1.5)
            rand_alto_min = int(negAlto * 0.5)
            r3 = rand_alto_max * 2
            rand_ancho_max = int(negAncho*1.5)
            rand_ancho_min = int(negAncho*0.5)
            r33 = rand_ancho_max * 2
            # Pick a height/width either well below or well above the
            # reference size.
            # NOTE(review): np.random.randint(low, high) raises when
            # low >= high, i.e. whenever rand_alto_min <= 1 -- presumably
            # feature sizes are always large enough. TODO confirm.
            f1 = choice([np.random.randint(1, rand_alto_min), np.random.randint(rand_alto_max, r3)])
            f2 = choice([np.random.randint(1, rand_ancho_min), np.random.randint(rand_ancho_max, r33)])
            c.execute("insert into features (ancho, alto, area, clase) values (?, ?, ?, ?)",
                      (f2, f1, f2*f1, 0))
conn.commit()
conn.close()
| 23.506667
| 103
| 0.640953
|
54da935d3d5cf04aac496677e269b59710d17100
| 5,503
|
py
|
Python
|
dev/ideas/cython/playing_around.py
|
achilleas-k/brian2
|
906563b6b1321585b082f79f74f1b4ab386347ec
|
[
"BSD-2-Clause"
] | null | null | null |
dev/ideas/cython/playing_around.py
|
achilleas-k/brian2
|
906563b6b1321585b082f79f74f1b4ab386347ec
|
[
"BSD-2-Clause"
] | null | null | null |
dev/ideas/cython/playing_around.py
|
achilleas-k/brian2
|
906563b6b1321585b082f79f74f1b4ab386347ec
|
[
"BSD-2-Clause"
] | null | null | null |
from pylab import *
import cython
import time, timeit
from brian2.codegen.runtime.cython_rt.modified_inline import modified_cython_inline
import numpy
from scipy import weave
import numexpr
import theano
from theano import tensor as tt

# NOTE(review): Python 2 file (print statements below). It benchmarks the
# same neuron state-update expression across several backends. The helpers
# get_theano_func / check_values / dotimeit and the timefunc_* functions are
# not visible in this excerpt -- presumably defined in the elided portion of
# the file.

# Model / benchmark parameters.
tau = 20 * 0.001
N = 1000000
b = 1.2 # constant current mean, the modulation varies
freq = 10.0
t = 0.0
dt = 0.0001
# Bind both the brian2-style names and short aliases to the same arrays.
_array_neurongroup_a = a = linspace(.05, 0.75, N)
_array_neurongroup_v = v = rand(N)
# Namespace handed to the inlined Cython code below.
ns = {'_array_neurongroup_a': a, '_array_neurongroup_v': v,
      '_N': N,
      'dt': dt, 't': t, 'tau': tau, 'b': b, 'freq': freq,# 'sin': numpy.sin,
      'pi': pi,
      }
# Cython source: an explicit loop over all neurons applying the integration
# step for v, using raw pointers instead of (commented-out) memoryviews.
code = '''
cdef int _idx
cdef int _vectorisation_idx
cdef int N = <int>_N
cdef double a, v, _v
#cdef double [:] _cy_array_neurongroup_a = _array_neurongroup_a
#cdef double [:] _cy_array_neurongroup_v = _array_neurongroup_v
cdef double* _cy_array_neurongroup_a = &(_array_neurongroup_a[0])
cdef double* _cy_array_neurongroup_v = &(_array_neurongroup_v[0])
for _idx in range(N):
    _vectorisation_idx = _idx
    a = _cy_array_neurongroup_a[_idx]
    v = _cy_array_neurongroup_v[_idx]
    _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
    #_v = a*b+0.0001*sin(v)
    #_v = a*b+0.0001*v
    v = _v
    _cy_array_neurongroup_v[_idx] = v
'''
f_mod, f_arg_list = modified_cython_inline(code, locals=ns, globals={})
# return theano.function([a, v],
#                        a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau))
theano.config.gcc.cxxflags = '-O3 -ffast-math'
theano_func = get_theano_func()
#print theano.pp(theano_func.maker.fgraph.outputs[0])
#print
#theano.printing.debugprint(theano_func.maker.fgraph.outputs[0])
#theano.printing.pydotprint(theano_func, 'func.png')
#exit()

if __name__=='__main__':
    # Run every benchmark twice: once verifying values, once timing.
    funcs = [#timefunc_cython_inline,
             timefunc_cython_modified_inline,
             timefunc_numpy,
             timefunc_numpy_smart,
             timefunc_numpy_blocked,
             timefunc_numexpr,
             timefunc_numexpr_smart,
             timefunc_weave_slow,
             timefunc_weave_fast,
             timefunc_theano,
             ]
    if 1:
        print 'Values'
        print '======'
        for f in funcs:
            check_values(f)
        print
    if 1:
        print 'Times'
        print '====='
        for f in funcs:
            dotimeit(f)
| 30.743017
| 125
| 0.589678
|
54db106024a4f46cf548821fe280245ccaf57da7
| 114
|
py
|
Python
|
azbankgateways/views/__init__.py
|
lordmahyar/az-iranian-bank-gateways
|
e9eb7101f2b91318847d63d783c22c4a8d430ba3
|
[
"MIT"
] | 196
|
2020-12-07T11:29:19.000Z
|
2022-03-23T09:32:56.000Z
|
azbankgateways/views/__init__.py
|
lordmahyar/az-iranian-bank-gateways
|
e9eb7101f2b91318847d63d783c22c4a8d430ba3
|
[
"MIT"
] | 25
|
2021-01-13T11:56:35.000Z
|
2022-03-14T19:41:51.000Z
|
azbankgateways/views/__init__.py
|
lordmahyar/az-iranian-bank-gateways
|
e9eb7101f2b91318847d63d783c22c4a8d430ba3
|
[
"MIT"
] | 44
|
2021-01-08T18:27:47.000Z
|
2022-03-22T03:36:04.000Z
|
from .banks import callback_view, go_to_bank_gateway
from .samples import sample_payment_view, sample_result_view
| 38
| 60
| 0.877193
|
54db89c835de6895b4c1b46df78297a288ccdb1f
| 3,254
|
py
|
Python
|
dev/unittest/update.py
|
PowerDNS/exabgp
|
bbf69f25853e10432fbe588b5bc2f8d9f1e5dda2
|
[
"BSD-3-Clause"
] | 8
|
2015-01-11T09:57:26.000Z
|
2019-07-05T05:57:02.000Z
|
dev/unittest/update.py
|
Acidburn0zzz/exabgp
|
bbf69f25853e10432fbe588b5bc2f8d9f1e5dda2
|
[
"BSD-3-Clause"
] | 1
|
2018-11-15T22:10:09.000Z
|
2018-11-15T22:20:31.000Z
|
dev/unittest/update.py
|
Acidburn0zzz/exabgp
|
bbf69f25853e10432fbe588b5bc2f8d9f1e5dda2
|
[
"BSD-3-Clause"
] | 6
|
2015-09-11T01:51:06.000Z
|
2020-03-10T19:16:18.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
update.py

Created by Thomas Mangin on 2009-09-06.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""

import unittest

from exabgp.configuration.environment import environment
# The environment is initialised before the message modules are imported --
# presumably those modules read configuration at import time, since the
# original file performs setup at exactly this point. TODO confirm.
env = environment.setup('')

from exabgp.bgp.message.update.update import *
from exabgp.bgp.message.update.attribute.community import to_Community
from exabgp.bgp.message.update.attribute.community import Community, Communities

# Commented-out remnant of an IPv4 parsing test, kept for reference.
# def test_2_ipv4_broken (self):
#     header = ''.join([chr(c) for c in h])
#     message = ''.join([chr(c) for c in m])
#     message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0xf, 0x40, 0x1, 0x1, 0x0, 0x40, 0x2, 0x4, 0x2, 0x1, 0xfd, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x18, 0xa, 0x0, 0x1]])
#     update = new_Update(message)

if __name__ == '__main__':
    # Discover and run the TestCase classes defined in the full module.
    unittest.main()
| 38.282353
| 313
| 0.667486
|
54dbf6330b24d0c6aff3e7ee1c31934c49d43385
| 12,082
|
py
|
Python
|
nuscenes/eval/detection/evaluate.py
|
WJ-Lai/NightFusion
|
1555692eceb6b85127d21cd43e6fc780b7f91ffd
|
[
"Apache-2.0"
] | null | null | null |
nuscenes/eval/detection/evaluate.py
|
WJ-Lai/NightFusion
|
1555692eceb6b85127d21cd43e6fc780b7f91ffd
|
[
"Apache-2.0"
] | 1
|
2019-04-24T12:14:59.000Z
|
2019-04-24T12:14:59.000Z
|
nuscenes/eval/detection/evaluate.py
|
WJ-Lai/NightFusion
|
1555692eceb6b85127d21cd43e6fc780b7f91ffd
|
[
"Apache-2.0"
] | null | null | null |
# nuScenes dev-kit.
# Code written by Holger Caesar & Oscar Beijbom, 2018.
# Licensed under the Creative Commons [see licence.txt]
import argparse
import json
import os
import random
import time
from typing import Tuple, Dict, Any
import numpy as np
from nuscenes import NuScenes
from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.constants import TP_METRICS
from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes
from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample
if __name__ == "__main__":

    # Settings: command-line interface for running the nuScenes detection
    # evaluation as a script.
    parser = argparse.ArgumentParser(description='Evaluate nuScenes result submission.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('result_path', type=str, help='The submission as a JSON file.')
    parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics',
                        help='Folder to store result metrics, graphs and example visualizations.')
    parser.add_argument('--eval_set', type=str, default='val',
                        help='Which dataset split to evaluate on, train, val or test.')
    parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
                        help='Default nuScenes data directory.')
    parser.add_argument('--version', type=str, default='v1.0-trainval',
                        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
    parser.add_argument('--config_name', type=str, default='cvpr_2019',
                        help='Name of the configuration to use for evaluation, e.g. cvpr_2019.')
    parser.add_argument('--plot_examples', type=int, default=10,
                        help='How many example visualizations to write to disk.')
    parser.add_argument('--render_curves', type=int, default=1,
                        help='Whether to render PR and TP curves to disk.')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Whether to print to stdout.')
    args = parser.parse_args()

    # Expand '~' in the user-supplied paths and copy arguments into locals.
    result_path_ = os.path.expanduser(args.result_path)
    output_dir_ = os.path.expanduser(args.output_dir)
    eval_set_ = args.eval_set
    dataroot_ = args.dataroot
    version_ = args.version
    config_name_ = args.config_name
    plot_examples_ = args.plot_examples
    # Integer CLI flags are interpreted as booleans.
    render_curves_ = bool(args.render_curves)
    verbose_ = bool(args.verbose)

    cfg_ = config_factory(config_name_)
    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
    # NOTE(review): NuScenesEval is not defined in this excerpt -- presumably
    # declared earlier in the full module. TODO confirm.
    nusc_eval = NuScenesEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
                             output_dir=output_dir_, verbose=verbose_)
    nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
| 45.421053
| 120
| 0.630525
|
54dcf21edb2556756e4c18e431858f02788f9d3a
| 9,520
|
py
|
Python
|
tests/get_problem_atcoder.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | null | null | null |
tests/get_problem_atcoder.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | null | null | null |
tests/get_problem_atcoder.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | null | null | null |
import unittest
from onlinejudge_api.main import main
| 36.615385
| 157
| 0.411029
|
54dcf64898b0684c67b6786b86aa9adc1e8b99c7
| 681
|
py
|
Python
|
odm/libexec/odm_tenant.py
|
UMCollab/ODM
|
95da49939dbcd54318a58a132aa76725fd9c0b5f
|
[
"MIT"
] | 2
|
2019-04-26T13:26:02.000Z
|
2019-10-18T10:36:52.000Z
|
odm/libexec/odm_tenant.py
|
flowerysong/ODM
|
95da49939dbcd54318a58a132aa76725fd9c0b5f
|
[
"MIT"
] | 1
|
2020-10-28T00:38:07.000Z
|
2020-10-28T00:38:07.000Z
|
odm/libexec/odm_tenant.py
|
flowerysong/ODM
|
95da49939dbcd54318a58a132aa76725fd9c0b5f
|
[
"MIT"
] | 1
|
2019-02-21T16:41:24.000Z
|
2019-02-21T16:41:24.000Z
|
#!/usr/bin/env python3

# This file is part of ODM and distributed under the terms of the
# MIT license. See COPYING.

import json
import sys

import odm.cli

if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt -- presumably
    # provided by the elided portion of the module. TODO confirm.
    main()
| 21.28125
| 79
| 0.638767
|
54dde115e15519f27b695b4a4ec6e5589e225fb7
| 17,182
|
py
|
Python
|
tests/test_tag_value_parser.py
|
quaresmajose/tools-python
|
53c917a1a2491a373efa23e4ef8570b5e863fabc
|
[
"Apache-2.0"
] | 74
|
2015-12-25T09:43:18.000Z
|
2022-03-30T00:23:30.000Z
|
tests/test_tag_value_parser.py
|
quaresmajose/tools-python
|
53c917a1a2491a373efa23e4ef8570b5e863fabc
|
[
"Apache-2.0"
] | 184
|
2016-11-23T15:57:16.000Z
|
2022-03-15T05:25:59.000Z
|
tests/test_tag_value_parser.py
|
quaresmajose/tools-python
|
53c917a1a2491a373efa23e4ef8570b5e863fabc
|
[
"Apache-2.0"
] | 98
|
2015-12-13T12:20:34.000Z
|
2022-03-18T15:28:35.000Z
|
# Copyright (c) 2014 Ahmed H. Ismail
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest import TestCase
import spdx
from spdx.parsers.tagvalue import Parser
from spdx.parsers.lexers.tagvalue import Lexer
from spdx.parsers.tagvaluebuilders import Builder
from spdx.parsers.loggers import StandardLogger
from spdx.version import Version
| 49.091429
| 178
| 0.650506
|
54df90a5374a87e257978dcb4c0e1caa9abfa7f7
| 2,024
|
py
|
Python
|
mount_drives.py
|
DT-was-an-ET/fanshim-python-pwm
|
dd3e6e29251000946e34d80704c040b5bcad7f8e
|
[
"MIT"
] | null | null | null |
mount_drives.py
|
DT-was-an-ET/fanshim-python-pwm
|
dd3e6e29251000946e34d80704c040b5bcad7f8e
|
[
"MIT"
] | null | null | null |
mount_drives.py
|
DT-was-an-ET/fanshim-python-pwm
|
dd3e6e29251000946e34d80704c040b5bcad7f8e
|
[
"MIT"
] | 3
|
2020-02-27T13:45:19.000Z
|
2020-03-26T13:38:17.000Z
|
# Standard library imports
from subprocess import call as subprocess_call
from utility import fileexists
from time import sleep as time_sleep
from datetime import datetime

# Retry "mount -a" until the three expected mount points appear (each marked
# by a drive_present.txt file), giving up after 30 attempts.
mount_try = 1
not_yet = True
done = False
start_time = datetime.now()

# Per-drive mount timestamps: 0 means "already present at startup",
# -1 means "not mounted yet".
if fileexists("/home/rpi4-sftp/usb/drive_present.txt"):
    when_usba = 0
else:
    when_usba = -1
if fileexists("/home/duck-sftp/usb/drive_present.txt"):
    when_usbb = 0
else:
    when_usbb = -1
if fileexists("/home/pi/mycloud/drive_present.txt"):
    when_mycloud = 0
else:
    when_mycloud = -1

while (mount_try < 30) and not_yet:
    try:
        usba_mounted = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
        usbb_mounted = fileexists("/home/duck-sftp/usb/drive_present.txt")
        mycloud_mounted = fileexists("/home/pi/mycloud/drive_present.txt")
        if not(usba_mounted and usbb_mounted and mycloud_mounted):
            print("Something Needs mounting this is try number: ", mount_try)
            subprocess_call(["sudo", "mount", "-a"])
            mount_try += 1
        # Record how long each drive took to appear, once per drive.
        usba_mounted_after = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
        usbb_mounted_after = fileexists("/home/duck-sftp/usb/drive_present.txt")
        mycloud_mounted_after = fileexists("/home/pi/mycloud/drive_present.txt")
        if not(usba_mounted) and usba_mounted_after:
            when_usba = round((datetime.now() - start_time).total_seconds(),2)
        if not(usbb_mounted) and usbb_mounted_after:
            when_usbb = round((datetime.now() - start_time).total_seconds(),2)
        if not(mycloud_mounted) and mycloud_mounted_after:
            when_mycloud = round((datetime.now() - start_time).total_seconds(),2)
        if usba_mounted_after and usbb_mounted_after and mycloud_mounted_after:
            print("Success at :",when_usba,when_usbb,when_mycloud, " secs from start")
            not_yet = False
            done = True
    except Exception as exc:
        # BUG FIX: the original handler printed the undefined name "count",
        # which raised a NameError inside the except block and killed the
        # retry loop. Report the attempt number and the caught error instead.
        print("Mount attempt ", mount_try, " error: ", exc)
    # Brief pause between attempts.
    time_sleep(1)

if done:
    print("Great!")
else:
    print("Failed to do all or drive_present.txt file not present; Times :",when_usba,when_usbb,when_mycloud)
    # Park forever so the supervising service does not restart the script.
    while True:
        time_sleep(20000)
| 36.142857
| 107
| 0.733202
|
54e0817402b9c2ce35c6af23684ce91b4042e10a
| 5,639
|
py
|
Python
|
home/views.py
|
Kshitij-Kumar-Singh-Chauhan/docon
|
bff0547e7bbd030e027217a2ca7800a8da529b56
|
[
"MIT"
] | null | null | null |
home/views.py
|
Kshitij-Kumar-Singh-Chauhan/docon
|
bff0547e7bbd030e027217a2ca7800a8da529b56
|
[
"MIT"
] | null | null | null |
home/views.py
|
Kshitij-Kumar-Singh-Chauhan/docon
|
bff0547e7bbd030e027217a2ca7800a8da529b56
|
[
"MIT"
] | 2
|
2021-06-17T05:35:07.000Z
|
2021-06-17T06:01:23.000Z
|
from django.http.response import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect, render
from cryptography.fernet import Fernet
from .models import Book, UserDetails
from .models import Contact
from .models import Book
from .models import Report
from .models import Diagnostic
from datetime import datetime
# Create your views here.
# def index(request):
# context={ 'alpha': 'This is sent'}
# if request.method=='POST':
# pass
# else: return render(request, 'index.html',context)
#HttpResponse('This is homepage')
# def appointment(request,email,name):
# if request.method == "POST":
# problem = request.POST.get('problem')
# book = Appoint(problem=problem, email=email, name=name)
# book.save()
# return render(request,"index.html")
| 33.565476
| 124
| 0.567477
|
54e0ed7eefaaeac2cfcbec8d464ffc806c518afa
| 9,892
|
py
|
Python
|
compressor/tests/templatetags.py
|
bigmlcom/django_compressor
|
66dfda503633018275fdb64ad46ef80dc9a3901d
|
[
"Apache-2.0"
] | null | null | null |
compressor/tests/templatetags.py
|
bigmlcom/django_compressor
|
66dfda503633018275fdb64ad46ef80dc9a3901d
|
[
"Apache-2.0"
] | null | null | null |
compressor/tests/templatetags.py
|
bigmlcom/django_compressor
|
66dfda503633018275fdb64ad46ef80dc9a3901d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import with_statement
import os
import sys
from mock import Mock
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
from compressor.conf import settings
from compressor.signals import post_compress
from compressor.tests.base import css_tag, test_dir
def render(template_string, context_dict=None):
    """
    Render *template_string* against *context_dict* and return the
    stripped output -- a shortcut for testing template output.
    """
    context = Context({} if context_dict is None else context_dict)
    template = Template(template_string)
    return template.render(context).strip()
def script(content="", src="", scripttype="text/javascript"):
    """
    Return a unicode HTML script element.

    >>> script('#this is a comment', scripttype="text/applescript")
    '<script type="text/applescript">#this is a comment</script>'
    """
    attributes = []
    if scripttype:
        attributes.append(u'type="%s"' % scripttype)
    if src:
        attributes.append(u'src="%s"' % src)
    opening = u' '.join([u'<script'] + attributes)
    return opening + u'>%s</script>' % content
| 41.563025
| 107
| 0.616761
|
54e0f7ad3e850fa6d21aab5200a2493a26332352
| 3,324
|
py
|
Python
|
cle/cle/backends/relocations/generic.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
cle/cle/backends/relocations/generic.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
cle/cle/backends/relocations/generic.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
from ...address_translator import AT
from ...errors import CLEOperationError
from . import Relocation

import struct
import logging

# Module-level logger shared by the relocation classes in this module.
l = logging.getLogger('cle.relocations.generic')
| 36.130435
| 117
| 0.666968
|
54e179a25d793c478f7e42c99a00025d13aed6d0
| 1,438
|
py
|
Python
|
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010-2014 openpyxl
import pytest
from openpyxl.styles.borders import Border, Side
from openpyxl.styles.fills import GradientFill
from openpyxl.styles.colors import Color
from openpyxl.writer.styles import StyleWriter
from openpyxl.tests.helper import get_xml, compare_xml
| 25.678571
| 78
| 0.684284
|
54e1fce9e0db363710daf71e66104aba025bc831
| 477
|
py
|
Python
|
ringapp/migrations/0009_auto_20150116_1759.py
|
rschwiebert/RingApp
|
35675b3dd81728d71b7dc70071be3185d7f99bf4
|
[
"MIT"
] | 10
|
2015-02-02T12:40:05.000Z
|
2022-01-29T14:11:03.000Z
|
ringapp/migrations/0009_auto_20150116_1759.py
|
rschwiebert/RingApp
|
35675b3dd81728d71b7dc70071be3185d7f99bf4
|
[
"MIT"
] | 22
|
2015-01-07T21:29:24.000Z
|
2022-03-19T01:15:13.000Z
|
ringapp/migrations/0009_auto_20150116_1759.py
|
rschwiebert/RingApp
|
35675b3dd81728d71b7dc70071be3185d7f99bf4
|
[
"MIT"
] | 1
|
2016-08-07T15:41:51.000Z
|
2016-08-07T15:41:51.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 20.73913
| 47
| 0.589099
|
54e218f734c2d85cbff6df8c45d35331a499ae96
| 654
|
py
|
Python
|
front-end/testsuite-python-lib/Python-3.1/Lib/json/tests/test_dump.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
Lib/json/tests/test_dump.py
|
orestis/python
|
870a82aac7788ffa105e2a3e4480b3715c93bff6
|
[
"PSF-2.0"
] | null | null | null |
Lib/json/tests/test_dump.py
|
orestis/python
|
870a82aac7788ffa105e2a3e4480b3715c93bff6
|
[
"PSF-2.0"
] | 2
|
2018-08-06T04:37:38.000Z
|
2022-02-27T18:07:12.000Z
|
from unittest import TestCase
from io import StringIO
import json
| 29.727273
| 69
| 0.547401
|
54e3b8446107d9bccd2d0bc314395d7a3117387b
| 7,069
|
py
|
Python
|
src/resources/clients/python_client/visitstate.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226
|
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/resources/clients/python_client/visitstate.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100
|
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/resources/clients/python_client/visitstate.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84
|
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
import sys
| 34.651961
| 54
| 0.660914
|
54e459da47af69f9dc842497504519a50554986e
| 774
|
py
|
Python
|
tests/__init__.py
|
zhangyiming07/QT4C
|
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
|
[
"BSD-3-Clause"
] | 53
|
2020-02-20T06:56:03.000Z
|
2022-03-03T03:09:25.000Z
|
tests/__init__.py
|
zhangyiming07/QT4C
|
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
|
[
"BSD-3-Clause"
] | 6
|
2020-03-03T03:15:53.000Z
|
2021-01-29T02:24:06.000Z
|
tests/__init__.py
|
zhangyiming07/QT4C
|
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
|
[
"BSD-3-Clause"
] | 17
|
2020-02-26T03:51:41.000Z
|
2022-03-24T02:23:51.000Z
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#
'''
'''
import unittest
import os
import sys

# Make the package under test importable by putting the repository root
# (the parent of this tests directory) at the front of sys.path.
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(test_dir))

if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt -- presumably a
    # runner defined in the elided part of the file. TODO confirm.
    main()
| 28.666667
| 103
| 0.719638
|
54e639174a97601933059aabae1c3acdb2b90d00
| 323
|
py
|
Python
|
brute/brute_build.py
|
sweetsbeats/starter-snake-python
|
e7cb56a3a623a324f4b5ef956020990e8c61f871
|
[
"MIT"
] | null | null | null |
brute/brute_build.py
|
sweetsbeats/starter-snake-python
|
e7cb56a3a623a324f4b5ef956020990e8c61f871
|
[
"MIT"
] | null | null | null |
brute/brute_build.py
|
sweetsbeats/starter-snake-python
|
e7cb56a3a623a324f4b5ef956020990e8c61f871
|
[
"MIT"
] | 2
|
2019-05-05T00:41:26.000Z
|
2019-05-05T00:46:45.000Z
|
from cffi import FFI

# Build a CFFI extension module "_pi_cffi" exposing the C function test()
# declared below and implemented in brute.c (with header brute.h).
ffibuilder = FFI()

# C declarations made callable from Python.
ffibuilder.cdef("""
int test(int t);
""")

ffibuilder.set_source("_pi_cffi",
"""
#include "brute.h"
""",
sources=['brute.c'])

if __name__ == "__main__":
    # Compile the extension in place when run as a script.
    ffibuilder.compile(verbose = True)
| 19
| 42
| 0.479876
|
54e64db782245fc204cf4d668f6d515f9131a03b
| 2,392
|
py
|
Python
|
src/board.py
|
JNotelddim/python-snake
|
da95339d3a982040a84422e5f7b95453095a4450
|
[
"MIT"
] | null | null | null |
src/board.py
|
JNotelddim/python-snake
|
da95339d3a982040a84422e5f7b95453095a4450
|
[
"MIT"
] | null | null | null |
src/board.py
|
JNotelddim/python-snake
|
da95339d3a982040a84422e5f7b95453095a4450
|
[
"MIT"
] | null | null | null |
"""Board Module"""
import copy
from typing import Tuple, List
from src.coordinate import Coordinate
from src.snake import Snake
def get_other_snakes(self, exclude_id) -> List[Snake]:
    """Return every snake on the board whose ID differs from *exclude_id*."""
    others = []
    for candidate in self.snakes:
        if candidate.id != exclude_id:
            others.append(candidate)
    return others
def advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
    """Return a new board with our snake advanced along given path.

    This board is left untouched; all mutation happens on a deep copy.
    """
    return copy.deepcopy(self).__help_advance_snake_along_path(snake_id, path)
def __help_advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
    """Do the actual advancement of the snake along the path.

    Mutates the matching snake's coordinate list in place and returns this
    board. Raises ValueError when no snake with *snake_id* exists.
    """
    me = next((snake for snake in self.snakes if snake.id == snake_id), None)
    if not me:
        raise ValueError("No snake for given id!")
    # Slide the body forward: append the path, then drop the same number of
    # segments from the front of the list.
    me.coordinates += path
    me.coordinates = me.coordinates[len(path):]
    me.coordinates.reverse()
    # Duplicate the final segment -- presumably to model the stacked tail
    # after advancing; TODO confirm against the game rules.
    me.coordinates.append(me.coordinates[-1])
    print("new coords:")
    for coord in me.coordinates:
        print(coord)
    return self
| 37.375
| 85
| 0.633361
|
54e781207e20bd9e8679af88a83847cfe7947287
| 2,349
|
py
|
Python
|
personalized_nlp/datasets/wiki/base.py
|
CLARIN-PL/personalized-nlp
|
340294300f93d12cabc59b055ff2548df8f4081a
|
[
"MIT"
] | null | null | null |
personalized_nlp/datasets/wiki/base.py
|
CLARIN-PL/personalized-nlp
|
340294300f93d12cabc59b055ff2548df8f4081a
|
[
"MIT"
] | 1
|
2022-03-15T23:48:51.000Z
|
2022-03-15T23:48:51.000Z
|
personalized_nlp/datasets/wiki/base.py
|
CLARIN-PL/personalized-nlp
|
340294300f93d12cabc59b055ff2548df8f4081a
|
[
"MIT"
] | null | null | null |
import os
import zipfile
from typing import List
import pandas as pd
import urllib
from personalized_nlp.settings import STORAGE_DIR
from personalized_nlp.utils.data_splitting import split_texts
from personalized_nlp.datasets.datamodule_base import BaseDataModule
| 32.178082
| 96
| 0.638995
|
54e789caffaeff5bc10488464b0b5f0c11ea3f0e
| 522
|
py
|
Python
|
App/migrations/0010_remove_user_percentage_preferences_user_preferences.py
|
dlanghorne0428/StudioMusicPlayer
|
54dabab896b96d90b68d6435edfd52fe6a866bc2
|
[
"MIT"
] | null | null | null |
App/migrations/0010_remove_user_percentage_preferences_user_preferences.py
|
dlanghorne0428/StudioMusicPlayer
|
54dabab896b96d90b68d6435edfd52fe6a866bc2
|
[
"MIT"
] | 44
|
2022-01-21T01:33:59.000Z
|
2022-03-26T23:35:25.000Z
|
App/migrations/0010_remove_user_percentage_preferences_user_preferences.py
|
dlanghorne0428/StudioMusicPlayer
|
54dabab896b96d90b68d6435edfd52fe6a866bc2
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0 on 2022-03-03 02:15
from django.db import migrations, models
| 22.695652
| 71
| 0.597701
|
54e901540b5f6fa6fc62f5e51511aa0c656882ca
| 3,653
|
py
|
Python
|
venv/Lib/site-packages/captcha/conf/settings.py
|
Rudeus3Greyrat/admin-management
|
7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23
|
[
"MIT"
] | 1
|
2020-05-21T06:48:34.000Z
|
2020-05-21T06:48:34.000Z
|
venv/Lib/site-packages/captcha/conf/settings.py
|
Rudeus3Greyrat/admin-management
|
7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23
|
[
"MIT"
] | 3
|
2021-03-19T03:07:36.000Z
|
2021-04-08T20:33:38.000Z
|
venv/Lib/site-packages/captcha/conf/settings.py
|
Rudeus3Greyrat/admin-management
|
7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23
|
[
"MIT"
] | 1
|
2020-05-21T06:48:36.000Z
|
2020-05-21T06:48:36.000Z
|
import os
import warnings

from django.conf import settings

# Every CAPTCHA_* constant below reads an override from Django settings and
# falls back to the default given as the third getattr argument.

# Image rendering: font, colours and distortion.
CAPTCHA_FONT_PATH = getattr(settings, 'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf')))
CAPTCHA_FONT_SIZE = getattr(settings, 'CAPTCHA_FONT_SIZE', 22)
CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35, 35))
CAPTCHA_BACKGROUND_COLOR = getattr(settings, 'CAPTCHA_BACKGROUND_COLOR', '#ffffff')
CAPTCHA_FOREGROUND_COLOR = getattr(settings, 'CAPTCHA_FOREGROUND_COLOR', '#001100')
# Challenge generation: dotted-path callables and word/character sources.
CAPTCHA_CHALLENGE_FUNCT = getattr(settings, 'CAPTCHA_CHALLENGE_FUNCT', 'captcha.helpers.random_char_challenge')
CAPTCHA_NOISE_FUNCTIONS = getattr(settings, 'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs', 'captcha.helpers.noise_dots',))
CAPTCHA_FILTER_FUNCTIONS = getattr(settings, 'CAPTCHA_FILTER_FUNCTIONS', ('captcha.helpers.post_smooth',))
CAPTCHA_WORDS_DICTIONARY = getattr(settings, 'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words')
CAPTCHA_PUNCTUATION = getattr(settings, 'CAPTCHA_PUNCTUATION', '''_"',.;:-''')
# Optional audio support (external binaries; None disables the feature).
CAPTCHA_FLITE_PATH = getattr(settings, 'CAPTCHA_FLITE_PATH', None)
CAPTCHA_SOX_PATH = getattr(settings, 'CAPTCHA_SOX_PATH', None)
CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5)  # Minutes
CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4))  # Chars
# CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings, 'CAPTCHA_IMAGE_BEFORE_FIELD', True)
CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MIN_LENGTH', 0)
CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MAX_LENGTH', 99)
CAPTCHA_IMAGE_SIZE = getattr(settings, 'CAPTCHA_IMAGE_SIZE', None)
# Templates used to render the widget parts.
CAPTCHA_IMAGE_TEMPLATE = getattr(settings, 'CAPTCHA_IMAGE_TEMPLATE', 'captcha/image.html')
CAPTCHA_HIDDEN_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_HIDDEN_FIELD_TEMPLATE', 'captcha/hidden_field.html')
CAPTCHA_TEXT_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_TEXT_FIELD_TEMPLATE', 'captcha/text_field.html')
if getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None):
msg = ("CAPTCHA_FIELD_TEMPLATE setting is deprecated in favor of widget's template_name.")
warnings.warn(msg, DeprecationWarning)
CAPTCHA_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None)
if getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None):
msg = ("CAPTCHA_OUTPUT_FORMAT setting is deprecated in favor of widget's template_name.")
warnings.warn(msg, DeprecationWarning)
CAPTCHA_OUTPUT_FORMAT = getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None)
CAPTCHA_MATH_CHALLENGE_OPERATOR = getattr(settings, 'CAPTCHA_MATH_CHALLENGE_OPERATOR', '*')
CAPTCHA_GET_FROM_POOL = getattr(settings, 'CAPTCHA_GET_FROM_POOL', False)
CAPTCHA_GET_FROM_POOL_TIMEOUT = getattr(settings, 'CAPTCHA_GET_FROM_POOL_TIMEOUT', 5)
CAPTCHA_TEST_MODE = getattr(settings, 'CAPTCHA_TEST_MODE', False)
# Failsafe
if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH:
CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH
| 52.942029
| 141
| 0.800712
|
54ea3d9d70532f8dc30f4d5946975cecc10f6326
| 11,009
|
py
|
Python
|
pilbox/test/app_test.py
|
joevandyk/pilbox
|
b84732a78e5bdb2d24bf7ef4177d45806ac03ea6
|
[
"Apache-2.0"
] | null | null | null |
pilbox/test/app_test.py
|
joevandyk/pilbox
|
b84732a78e5bdb2d24bf7ef4177d45806ac03ea6
|
[
"Apache-2.0"
] | null | null | null |
pilbox/test/app_test.py
|
joevandyk/pilbox
|
b84732a78e5bdb2d24bf7ef4177d45806ac03ea6
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, \
with_statement
import logging
import os.path
import time
import tornado.escape
import tornado.gen
import tornado.ioloop
from tornado.test.util import unittest
from tornado.testing import AsyncHTTPTestCase, gen_test
import tornado.web
from pilbox.app import PilboxApplication
from pilbox.errors import SignatureError, ClientError, HostError, \
BackgroundError, DimensionsError, FilterError, FormatError, ModeError, \
PositionError, QualityError, UrlError, ImageFormatError, FetchError
from pilbox.signature import sign
from pilbox.test import image_test
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import cv
except ImportError:
cv = None
logger = logging.getLogger("tornado.application")
| 39.887681
| 79
| 0.606504
|
54eaca929e4c45b157fe05142cabf897db4cf571
| 1,202
|
py
|
Python
|
hackathon/darkmattertemperaturedistribution/example.py
|
Neelraj21/phython
|
68a2cedccae694eb84880f3aa55cc01d458e055e
|
[
"WTFPL"
] | 6
|
2017-08-09T09:41:42.000Z
|
2021-04-22T05:10:17.000Z
|
hackathon/darkmattertemperaturedistribution/example.py
|
Neelraj21/phython
|
68a2cedccae694eb84880f3aa55cc01d458e055e
|
[
"WTFPL"
] | null | null | null |
hackathon/darkmattertemperaturedistribution/example.py
|
Neelraj21/phython
|
68a2cedccae694eb84880f3aa55cc01d458e055e
|
[
"WTFPL"
] | 5
|
2015-11-04T12:57:10.000Z
|
2020-10-18T17:32:25.000Z
|
#!/usr/bin/env python
from scipy import *
from pylab import *
#from pylab import imshow
#!
#! Some graphical explorations of the Julia sets with python and pyreport
#!#########################################################################
#$
#$ We start by defining a function J:
#$ \[ J_c : z \rightarrow z^2 + c \]
#$
[x,y] = ogrid[ -1:1:0.002, -1:1:0.002 ]
z = x + y *1j
#! If we study the divergence of function J under repeated iteration
#! depending on its inital conditions we get a very pretty graph
threshTime = zeros_like(z)
for i in range(40):
z = J(0.285)(z)
threshTime += z*conj(z) > 4
figure(0)
axes([0,0,1,1])
axis('off')
imshow(threshTime)
bone()
show()
#! We can also do that systematicaly for other values of c:
axes([0,0,1,1])
axis('off')
rcParams.update({'figure.figsize': [10.5,5]})
c_values = (0.285 + 0.013j, 0.45 - 0.1428j, -0.70176 -0.3842j,
-0.835-0.2321j, -0.939 +0.167j, -0.986+0.87j)
for i,c in enumerate(c_values):
threshTime = zeros_like(z)
z = x + y *1j
for n in range(40):
z = J(c)(z)
threshTime += z*conj(z) > 4
subplot(2,3,i+1)
imshow(threshTime)
axis('off')
show()
| 26.130435
| 75
| 0.584859
|
54ec18e7d2fb320aa765697469037a76c03cbf50
| 535
|
py
|
Python
|
resources/migrations/0126_add_field_disallow_overlapping_reservations_per_user.py
|
codepointtku/respa
|
bb9cd8459d5562569f976dbc609ec41ceecc8023
|
[
"MIT"
] | 1
|
2019-12-17T10:02:17.000Z
|
2019-12-17T10:02:17.000Z
|
resources/migrations/0126_add_field_disallow_overlapping_reservations_per_user.py
|
codepointtku/respa
|
bb9cd8459d5562569f976dbc609ec41ceecc8023
|
[
"MIT"
] | 38
|
2020-01-24T11:30:53.000Z
|
2022-01-28T12:42:47.000Z
|
resources/migrations/0126_add_field_disallow_overlapping_reservations_per_user.py
|
digipointtku/respa
|
a529e0df4d3f072df7801adb5bf97a5f4abd1243
|
[
"MIT"
] | 14
|
2020-02-26T08:17:34.000Z
|
2021-09-14T07:57:21.000Z
|
# Generated by Django 2.2.21 on 2021-06-23 12:43
from django.db import migrations, models
import django.db.models.deletion
| 26.75
| 126
| 0.676636
|
54eceeb38625ac7f7302479b3298ad5a3adabd40
| 1,307
|
py
|
Python
|
src/lora_multihop/module_config.py
|
marv1913/lora_multihop
|
ef07493c2f763d07161fa25d4b884ef79b94afa4
|
[
"MIT"
] | null | null | null |
src/lora_multihop/module_config.py
|
marv1913/lora_multihop
|
ef07493c2f763d07161fa25d4b884ef79b94afa4
|
[
"MIT"
] | 1
|
2022-02-20T13:18:13.000Z
|
2022-02-24T18:32:23.000Z
|
src/lora_multihop/module_config.py
|
marv1913/lora_multihop
|
ef07493c2f763d07161fa25d4b884ef79b94afa4
|
[
"MIT"
] | null | null | null |
import logging
from lora_multihop import serial_connection, variables
| 39.606061
| 101
| 0.746748
|
54ed860d4a6171f4dc1581a63c75ee95835b9b75
| 6,287
|
py
|
Python
|
eris/script/ferdian.py
|
ferdianap/Eris_test
|
c2a00d65f816ad6d48a65c14b4bea4f3d081b86b
|
[
"BSD-3-Clause"
] | 1
|
2015-06-12T04:38:09.000Z
|
2015-06-12T04:38:09.000Z
|
eris/script/ferdian.py
|
ferdianap/eris
|
c2a00d65f816ad6d48a65c14b4bea4f3d081b86b
|
[
"BSD-3-Clause"
] | null | null | null |
eris/script/ferdian.py
|
ferdianap/eris
|
c2a00d65f816ad6d48a65c14b4bea4f3d081b86b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
copied from
Baxter RSDK Joint Position Example: file playback
"""
from __future__ import print_function
import sys
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
import glob
from std_srvs.srv import Empty
def clean_line(line, names):
"""
Cleans a single line of recorded joint positions
@param line: the line described in a list to process
@param names: joint name keys
"""
#convert the line of strings to a float or None
line = [try_float(x) for x in line.rstrip().split(',')]
#zip the values with the joint names
combined = zip(names[1:], line[1:])
#take out any tuples that have a none value
cleaned = [x for x in combined if x[1] is not None]
#convert it to a dictionary with only valid commands
command = dict(cleaned)
left_command = dict((key, command[key]) for key in command.keys()
if key[:-2] == 'left_')
right_command = dict((key, command[key]) for key in command.keys()
if key[:-2] == 'right_')
return (command, left_command, right_command, line)
def map_file(filename, loops=1):
"""
Loops through csv file
@param filename: the file to play
@param loops: number of times to loop
values < 0 mean 'infinite'
Does not loop indefinitely, but only until the file is read
and processed. Reads each line, split up in columns and
formats each line into a controller command in the form of
name/value pairs. Names come from the column headers
first column is the time stamp
"""
left = baxter_interface.Limb('left')
right = baxter_interface.Limb('right')
grip_left = baxter_interface.Gripper('left', CHECK_VERSION)
grip_right = baxter_interface.Gripper('right', CHECK_VERSION)
rate = rospy.Rate(1000)
if grip_left.error():
grip_left.reset()
if grip_right.error():
grip_right.reset()
if (not grip_left.calibrated() and
grip_left.type() != 'custom'):
grip_left.calibrate()
if (not grip_right.calibrated() and
grip_right.type() != 'custom'):
grip_right.calibrate()
print("Playing back: %s" % (filename,))
with open(filename, 'r') as f:
lines = f.readlines()
keys = lines[0].rstrip().split(',')
l = 0
# If specified, repeat the file playback 'loops' number of times
while loops < 1 or l < loops:
i = 0
l += 1
print("Moving to start position...")
_cmd, lcmd_start, rcmd_start, _raw = clean_line(lines[1], keys)
left.move_to_joint_positions(lcmd_start)
right.move_to_joint_positions(rcmd_start)
start_time = rospy.get_time()
for values in lines[1:]:
i += 1
loopstr = str(loops) if loops > 0 else "forever"
sys.stdout.write("\r Record %d of %d, loop %d of %s" %
(i, len(lines) - 1, l, loopstr))
sys.stdout.flush()
cmd, lcmd, rcmd, values = clean_line(values, keys)
#command this set of commands until the next frame
while (rospy.get_time() - start_time) < values[0]:
if rospy.is_shutdown():
print("\n Aborting - ROS shutdown")
return False
if len(lcmd):
left.set_joint_positions(lcmd)
if len(rcmd):
right.set_joint_positions(rcmd)
if ('left_gripper' in cmd and
grip_left.type() != 'custom'):
grip_left.command_position(cmd['left_gripper'])
if ('right_gripper' in cmd and
grip_right.type() != 'custom'):
grip_right.command_position(cmd['right_gripper'])
rate.sleep()
print
return True
###
if __name__ == '__main__':
main()
| 34.543956
| 77
| 0.655798
|
54f048a7a0b7d058cdc56c1d7f2c7462bde0f3d6
| 4,461
|
py
|
Python
|
core/src/main/python/akdl/entry/base_entry.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 3,301
|
2018-10-01T16:30:44.000Z
|
2022-03-30T08:07:16.000Z
|
core/src/main/python/akdl/entry/base_entry.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 206
|
2019-11-27T14:04:42.000Z
|
2022-03-28T08:02:05.000Z
|
core/src/main/python/akdl/entry/base_entry.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 765
|
2018-10-09T02:02:19.000Z
|
2022-03-31T12:06:21.000Z
|
import abc
from typing import Dict, Callable
import tensorflow as tf
from flink_ml_framework.context import Context
from flink_ml_framework.java_file import *
from ..runner import tf_helper, io_helper
from ..runner.output_writer import DirectOutputWriter
try:
from flink_ml_tensorflow.tensorflow_context import TFContext
except:
from flink_ml_tensorflow2.tensorflow_context import TFContext
# noinspection PyUnresolvedReferences
from tensorflow_io.core.python.ops import core_ops
__all__ = ['TF1_TYPE', 'TF2_TYPE']
TF1_TYPE = 'tf1'
TF2_TYPE = 'tf2'
| 36.867769
| 110
| 0.647837
|
54f164400ecea40c3dfdfcd5317d3f9f381a79ff
| 12,450
|
py
|
Python
|
corm-tests/test_corm_api.py
|
jbcurtin/cassandra-orm
|
2c5540de36166c81832c1ccd0ee40c52e598e05c
|
[
"MIT"
] | 1
|
2021-03-25T01:21:19.000Z
|
2021-03-25T01:21:19.000Z
|
corm-tests/test_corm_api.py
|
jbcurtin/cassandra-orm
|
2c5540de36166c81832c1ccd0ee40c52e598e05c
|
[
"MIT"
] | null | null | null |
corm-tests/test_corm_api.py
|
jbcurtin/cassandra-orm
|
2c5540de36166c81832c1ccd0ee40c52e598e05c
|
[
"MIT"
] | null | null | null |
import pytest
ENCODING = 'utf-8'
| 29.294118
| 110
| 0.67245
|
54f3bbb19576152c565203e49a32298c3f423ec9
| 6,337
|
py
|
Python
|
src/utilities/getInfo.py
|
UCSB-dataScience-ProjectGroup/movie_rating_prediction
|
c0c29c0463dccc6ad286bd59e77993fdf0d05fb2
|
[
"RSA-MD"
] | 2
|
2017-12-15T23:10:11.000Z
|
2018-05-07T04:18:03.000Z
|
src/utilities/getInfo.py
|
UCSB-dataScience-ProjectGroup/movie_rating_prediction
|
c0c29c0463dccc6ad286bd59e77993fdf0d05fb2
|
[
"RSA-MD"
] | 1
|
2018-02-26T06:23:32.000Z
|
2018-02-27T03:34:01.000Z
|
src/utilities/getInfo.py
|
UCSB-dataScience-ProjectGroup/movie_rating_prediction
|
c0c29c0463dccc6ad286bd59e77993fdf0d05fb2
|
[
"RSA-MD"
] | 2
|
2017-10-19T21:50:24.000Z
|
2018-01-01T03:40:35.000Z
|
import json
import os
from utilities.SaveLoadJson import SaveLoadJson as SLJ
from utilities.LineCount import LineCount as LC
import subprocess
from geolite2 import geolite2
| 37.276471
| 97
| 0.447373
|
54f4e0fec59282b2d1c7f1cba1c1b99fa606ce17
| 70
|
py
|
Python
|
nemo/collections/nlp/losses/__init__.py
|
KalifiaBillal/NeMo
|
4fc670ad0c886be2623247921d4311ba30f486f8
|
[
"Apache-2.0"
] | 1
|
2021-01-26T21:54:36.000Z
|
2021-01-26T21:54:36.000Z
|
nemo/collections/nlp/losses/__init__.py
|
aiskumo/NeMo
|
b51a39f9834ad50db77c4246aeb6e2349695add5
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/losses/__init__.py
|
aiskumo/NeMo
|
b51a39f9834ad50db77c4246aeb6e2349695add5
|
[
"Apache-2.0"
] | 2
|
2021-02-04T14:45:50.000Z
|
2021-02-04T14:56:05.000Z
|
from nemo.collections.nlp.losses.sgd_loss import SGDDialogueStateLoss
| 35
| 69
| 0.885714
|
54f75a0784cdbed72bcde377b44202a6cfd58c51
| 382
|
py
|
Python
|
netrunner/test_settings.py
|
MrAGi/netrunner-cambridge
|
bae0603486c2aa5a980e8e19207452fb01ec2193
|
[
"MIT"
] | null | null | null |
netrunner/test_settings.py
|
MrAGi/netrunner-cambridge
|
bae0603486c2aa5a980e8e19207452fb01ec2193
|
[
"MIT"
] | null | null | null |
netrunner/test_settings.py
|
MrAGi/netrunner-cambridge
|
bae0603486c2aa5a980e8e19207452fb01ec2193
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['LOCAL_DB_NAME'],
'USER': os.environ['LOCAL_DB_USER'],
'PASSWORD': os.environ['LOCAL_DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| 21.222222
| 59
| 0.568063
|
54f79d31af30b3622247fe2c6abad64bc05814e8
| 231
|
py
|
Python
|
Python_Exercicios/calcula_terreno.py
|
thalles-dreissig20/Quebra_Cabeca
|
eeb9458dbabac72d9867e5ec5d7f1aa9b5993d79
|
[
"MIT"
] | null | null | null |
Python_Exercicios/calcula_terreno.py
|
thalles-dreissig20/Quebra_Cabeca
|
eeb9458dbabac72d9867e5ec5d7f1aa9b5993d79
|
[
"MIT"
] | 1
|
2021-11-29T18:37:14.000Z
|
2021-11-29T18:37:14.000Z
|
Python_Exercicios/calcula_terreno.py
|
thalles-dreissig20/Quebra_Cabeca
|
eeb9458dbabac72d9867e5ec5d7f1aa9b5993d79
|
[
"MIT"
] | null | null | null |
print('Controle de terrenos')
print('-' * 20)
l = float(input('qual a largura do terreno: '))
c = float(input('qual o comprimento do terreno: '))
area(l , c)
| 25.666667
| 51
| 0.627706
|
54f7f3b4bb05515aa800aef3ce44e23eb1933bf4
| 443
|
py
|
Python
|
Desafios/desafio_041.py
|
romulogoleniesky/Python_C_E_V
|
2dcf5fb3505a20443788a284c52114c6434118ce
|
[
"MIT"
] | null | null | null |
Desafios/desafio_041.py
|
romulogoleniesky/Python_C_E_V
|
2dcf5fb3505a20443788a284c52114c6434118ce
|
[
"MIT"
] | null | null | null |
Desafios/desafio_041.py
|
romulogoleniesky/Python_C_E_V
|
2dcf5fb3505a20443788a284c52114c6434118ce
|
[
"MIT"
] | null | null | null |
import datetime
ano = (datetime.datetime.now()).year
nasc = int(input("Digite o seu ano de nascimento: "))
categoria = 0
if (ano - nasc) <= 9:
categoria = str("MIRIM")
elif 9 < (ano - nasc) <= 14:
categoria = str("INFANTIL")
elif 14 < (ano - nasc) <= 19 :
categoria = str("JUNIOR")
elif 19 < (ano - nasc) <= 25:
categoria = str("SNIOR")
else:
categoria = str("MASTER")
print(f"A categoria do atleta {str(categoria)}.")
| 26.058824
| 53
| 0.616253
|
54f89b5cd05a9ee6ba8e82764ddc7f2a5b7aea7d
| 1,689
|
py
|
Python
|
eval/metrics.py
|
RecoHut-Stanzas/S168471
|
7e0ac621c36f839e1df6876ec517d0ad00672790
|
[
"BSD-3-Clause"
] | 37
|
2020-06-15T02:04:37.000Z
|
2022-02-09T06:26:42.000Z
|
eval/metrics.py
|
RecoHut-Stanzas/S168471
|
7e0ac621c36f839e1df6876ec517d0ad00672790
|
[
"BSD-3-Clause"
] | 5
|
2020-08-06T13:16:34.000Z
|
2022-02-04T07:29:29.000Z
|
eval/metrics.py
|
RecoHut-Stanzas/S168471
|
7e0ac621c36f839e1df6876ec517d0ad00672790
|
[
"BSD-3-Clause"
] | 11
|
2020-09-01T23:08:51.000Z
|
2022-02-09T06:26:44.000Z
|
import torch
def ndcg_binary_at_k_batch_torch(X_pred, heldout_batch, k=100, device='cpu'):
"""
Normalized Discounted Cumulative Gain@k for for predictions [B, I] and ground-truth [B, I], with binary relevance.
ASSUMPTIONS: all the 0's in heldout_batch indicate 0 relevance.
"""
batch_users = X_pred.shape[0] # batch_size
_, idx_topk = torch.topk(X_pred, k, dim=1, sorted=True)
tp = 1. / torch.log2(torch.arange(2, k + 2, device=device).float())
heldout_batch_nonzero = (heldout_batch > 0).float()
DCG = (heldout_batch_nonzero[torch.arange(batch_users, device=device).unsqueeze(1), idx_topk] * tp).sum(dim=1)
heldout_nonzero = (heldout_batch > 0).sum(dim=1) # num. of non-zero items per batch. [B]
IDCG = torch.tensor([(tp[:min(n, k)]).sum() for n in heldout_nonzero]).to(device)
return DCG / IDCG
def recall_at_k_batch_torch(X_pred, heldout_batch, k=100):
"""
Recall@k for predictions [B, I] and ground-truth [B, I].
"""
batch_users = X_pred.shape[0]
_, topk_indices = torch.topk(X_pred, k, dim=1, sorted=False) # [B, K]
X_pred_binary = torch.zeros_like(X_pred)
if torch.cuda.is_available():
X_pred_binary = X_pred_binary.cuda()
X_pred_binary[torch.arange(batch_users).unsqueeze(1), topk_indices] = 1
X_true_binary = (heldout_batch > 0).float() # .toarray() # [B, I]
k_tensor = torch.tensor([k], dtype=torch.float32)
if torch.cuda.is_available():
X_true_binary = X_true_binary.cuda()
k_tensor = k_tensor.cuda()
tmp = (X_true_binary * X_pred_binary).sum(dim=1).float()
recall = tmp / torch.min(k_tensor, X_true_binary.sum(dim=1).float())
return recall
| 44.447368
| 118
| 0.674956
|
54f8ec657caa5b90b66baca8ce435c82f8e1413e
| 5,029
|
py
|
Python
|
simba/run_dash_tkinter.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 172
|
2019-12-18T22:19:42.000Z
|
2022-03-29T01:58:25.000Z
|
simba/run_dash_tkinter.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 165
|
2020-01-10T19:05:16.000Z
|
2022-03-31T16:08:36.000Z
|
simba/run_dash_tkinter.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 80
|
2019-12-20T00:01:43.000Z
|
2022-03-29T16:20:10.000Z
|
# All credit to https://stackoverflow.com/questions/46571448/tkinter-and-a-html-file - thanks DELICA - https://stackoverflow.com/users/7027346/delica
from cefpython3 import cefpython as cef
import ctypes
try:
import tkinter as tk
from tkinter import messagebox
except ImportError:
import Tkinter as tk
import sys
import platform
import logging as _logging
# Fix for PyCharm hints warnings
WindowUtils = cef.WindowUtils()
# Platforms
WINDOWS = (platform.system() == "Windows")
LINUX = (platform.system() == "Linux")
MAC = (platform.system() == "Darwin")
# Globals
logger = _logging.getLogger("tkinter_.py")
url = "localhost:8050/"
# if __name__ == '__main__':
logger.setLevel(_logging.INFO)
stream_handler = _logging.StreamHandler()
formatter = _logging.Formatter("[%(filename)s] %(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("CEF Python {ver}".format(ver=cef.__version__))
logger.info("Python {ver} {arch}".format(
ver=platform.python_version(), arch=platform.architecture()[0]))
logger.info("Tk {ver}".format(ver=tk.Tcl().eval('info patchlevel')))
assert cef.__version__ >= "55.3", "CEF Python v55.3+ required to run this"
sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
root = tk.Tk()
app = MainFrame(root)
root.protocol("WM_DELETE_WINDOW", on_closing)
# Tk must be initialized before CEF otherwise fatal error (Issue #306)
cef.Initialize()
root.mainloop()
# app.mainloop()
cef.Shutdown()
| 30.478788
| 149
| 0.644064
|
54fb3d7c53a19a5375f0b43976b42347774b6cca
| 1,010
|
py
|
Python
|
domain_data/mujoco_worlds/make_xml.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
domain_data/mujoco_worlds/make_xml.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
domain_data/mujoco_worlds/make_xml.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
import re
from glob import glob
import os.path as osp
infiles = glob(osp.join(osp.dirname(__file__),"*.xml.in"))
for fname in infiles:
with open(fname,"r") as fh:
in_lines = fh.readlines()
out_lines = do_substitution(in_lines)
outfname = fname[:-3]
with open(outfname,"w") as fh:
fh.writelines(out_lines)
| 25.897436
| 68
| 0.581188
|
54fbc8636ea0532bcc0fa404a8de1597f6db3f5f
| 354
|
py
|
Python
|
myproject/apps/events/migrations/0002_alter_eventhero_options.py
|
cahyareza/django_admin_cookbook
|
6c82dbd3aebe455b68feb020d5cad7978b8191b7
|
[
"MIT"
] | null | null | null |
myproject/apps/events/migrations/0002_alter_eventhero_options.py
|
cahyareza/django_admin_cookbook
|
6c82dbd3aebe455b68feb020d5cad7978b8191b7
|
[
"MIT"
] | null | null | null |
myproject/apps/events/migrations/0002_alter_eventhero_options.py
|
cahyareza/django_admin_cookbook
|
6c82dbd3aebe455b68feb020d5cad7978b8191b7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.12 on 2022-03-28 11:57
from django.db import migrations
| 19.666667
| 60
| 0.601695
|
54fcf0226ece66aeec4bb6bba4646c87e745e2e5
| 799
|
py
|
Python
|
hilton_sign_in.py
|
bmintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2018-11-12T10:33:13.000Z
|
2019-02-24T05:01:40.000Z
|
hilton_sign_in.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | null | null | null |
hilton_sign_in.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2018-11-24T08:16:59.000Z
|
2019-02-24T04:41:30.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
import sys
import urllib.parse
import selenium.webdriver
driver = selenium.webdriver.Firefox()
# for some reason, detectportal.firefox.com and connectivitycheck.gstatic.com are not blocked
# therefore, they cannot be used to detect connectivity
# we instead visit another site that is known not to ever have TLS
driver.get('http://neverssl.com')
if 'neverssl.com' in urllib.parse.urlparse(driver.current_url).netloc:
exit()
driver.find_element_by_css_selector('label[for="promo_button"]').click()
driver.find_element_by_css_selector('input[alt="Next"]').click()
driver.find_element_by_css_selector('#PromotionCode').send_keys('lobby18')
driver.find_element_by_css_selector('input[alt="Connect"]').click()
exit()
| 30.730769
| 93
| 0.779725
|
54fd38f1410793bf1398c7ca975380689133f595
| 1,539
|
py
|
Python
|
src/figures/trends/leaf_response.py
|
rhyswhitley/savanna_iav
|
4eadf29a4e9c05d0b14d3b9c973eb8db3ea7edba
|
[
"CC0-1.0"
] | null | null | null |
src/figures/trends/leaf_response.py
|
rhyswhitley/savanna_iav
|
4eadf29a4e9c05d0b14d3b9c973eb8db3ea7edba
|
[
"CC0-1.0"
] | null | null | null |
src/figures/trends/leaf_response.py
|
rhyswhitley/savanna_iav
|
4eadf29a4e9c05d0b14d3b9c973eb8db3ea7edba
|
[
"CC0-1.0"
] | 1
|
2019-09-01T04:15:21.000Z
|
2019-09-01T04:15:21.000Z
|
#!/usr/bin/env python
import os
from collections import OrderedDict
import cPickle as pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.cm import get_cmap
from matplotlib import style
from scipy import stats
from scipy import integrate
if __name__ == "__main__":
FILEPATH = "~/Savanna/Data/HowardSprings_IAV/pickled/agg/mean_monthly_leaf.pkl"
PKLPATH = os.path.expanduser(FILEPATH)
main()
| 23.676923
| 83
| 0.684211
|
54fe1eee5bca5dc248b6bf225d479bd8fc671965
| 1,041
|
py
|
Python
|
app/index.py
|
vprnet/school-closings
|
04c63170ea36cabe0a3486f0e58830952e1fd0a8
|
[
"Apache-2.0"
] | null | null | null |
app/index.py
|
vprnet/school-closings
|
04c63170ea36cabe0a3486f0e58830952e1fd0a8
|
[
"Apache-2.0"
] | null | null | null |
app/index.py
|
vprnet/school-closings
|
04c63170ea36cabe0a3486f0e58830952e1fd0a8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python2.7
from flask import Flask
import sys
from flask_frozen import Freezer
from upload_s3 import set_metadata
from config import AWS_DIRECTORY
app = Flask(__name__)
app.config.from_object('config')
from views import *
# Serving from s3 leads to some complications in how static files are served
if len(sys.argv) > 1:
if sys.argv[1] == 'build':
PROJECT_ROOT = '/' + AWS_DIRECTORY
elif sys.argv[1] == 'test':
PROJECT_ROOT = '/www.vpr.net/' + AWS_DIRECTORY
else:
PROJECT_ROOT = '/'
app.wsgi_app = WebFactionMiddleware(app.wsgi_app)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
app.debug = True
freezer = Freezer(app)
freezer.freeze()
set_metadata()
else:
app.run(debug=True)
| 24.209302
| 76
| 0.668588
|
07010f1430c53be8c3d42e4a620d3fc295e28964
| 1,799
|
py
|
Python
|
proxyclient/linux.py
|
modwizcode/m1n1
|
96d133e854dfe878ea39f9c994545a2026a446a8
|
[
"MIT"
] | 1
|
2021-06-05T08:30:21.000Z
|
2021-06-05T08:30:21.000Z
|
proxyclient/linux.py
|
modwizcode/m1n1
|
96d133e854dfe878ea39f9c994545a2026a446a8
|
[
"MIT"
] | null | null | null |
proxyclient/linux.py
|
modwizcode/m1n1
|
96d133e854dfe878ea39f9c994545a2026a446a8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from setup import *
payload = open(sys.argv[1], "rb").read()
dtb = open(sys.argv[2], "rb").read()
if len(sys.argv) > 3:
initramfs = open(sys.argv[3], "rb").read()
initramfs_size = len(initramfs)
else:
initramfs = None
initramfs_size = 0
compressed_size = len(payload)
compressed_addr = u.malloc(compressed_size)
dtb_addr = u.malloc(len(dtb))
print("Loading %d bytes to 0x%x..0x%x..." % (compressed_size, compressed_addr, compressed_addr + compressed_size))
iface.writemem(compressed_addr, payload, True)
print("Loading DTB to 0x%x..." % dtb_addr)
iface.writemem(dtb_addr, dtb)
kernel_size = 32 * 1024 * 1024
kernel_base = u.memalign(2 * 1024 * 1024, kernel_size)
print("Kernel_base: 0x%x" % kernel_base)
assert not (kernel_base & 0xffff)
if initramfs is not None:
initramfs_base = u.memalign(65536, initramfs_size)
print("Loading %d initramfs bytes to 0x%x..." % (initramfs_size, initramfs_base))
iface.writemem(initramfs_base, initramfs, True)
p.kboot_set_initrd(initramfs_base, initramfs_size)
if p.kboot_prepare_dt(dtb_addr):
print("DT prepare failed")
sys.exit(1)
#kernel_size = p.xzdec(compressed_addr, compressed_size)
#if kernel_size < 0:
#raise Exception("Decompression header check error!",)
#print("Uncompressed kernel size: %d bytes" % kernel_size)
print("Uncompressing...")
iface.dev.timeout = 40
kernel_size = p.gzdec(compressed_addr, compressed_size, kernel_base, kernel_size)
print(kernel_size)
if kernel_size < 0:
raise Exception("Decompression error!")
print("Decompress OK...")
p.dc_cvau(kernel_base, kernel_size)
p.ic_ivau(kernel_base, kernel_size)
print("Ready to boot")
daif = u.mrs(DAIF)
daif |= 0x3c0
u.msr(DAIF, daif)
print("DAIF: %x" % daif)
p.kboot_boot(kernel_base)
iface.ttymode()
| 24.310811
| 114
| 0.721512
|
07027cec6982fe1f9197878d8796ee05b6d45b5e
| 1,313
|
py
|
Python
|
src/server.py
|
shizhongpwn/ancypwn
|
716146e4986c514754492c8503ab196eecb9466d
|
[
"MIT"
] | 1
|
2021-06-29T03:41:27.000Z
|
2021-06-29T03:41:27.000Z
|
src/server.py
|
shizhongpwn/ancypwn
|
716146e4986c514754492c8503ab196eecb9466d
|
[
"MIT"
] | null | null | null |
src/server.py
|
shizhongpwn/ancypwn
|
716146e4986c514754492c8503ab196eecb9466d
|
[
"MIT"
] | 1
|
2021-06-18T05:36:28.000Z
|
2021-06-18T05:36:28.000Z
|
import json
import os
import multiprocessing
import struct
import importlib
from socketserver import TCPServer, StreamRequestHandler
| 31.261905
| 79
| 0.657273
|
0704c30e12f5e2ffe2ea17cf59fe41a9fd37e4af
| 565
|
py
|
Python
|
speech_to_text/views.py
|
zace3d/video_analysis
|
9001486ae64160ca497f6b9a99df5d9a5c5422cc
|
[
"Apache-2.0"
] | null | null | null |
speech_to_text/views.py
|
zace3d/video_analysis
|
9001486ae64160ca497f6b9a99df5d9a5c5422cc
|
[
"Apache-2.0"
] | 9
|
2019-12-04T22:38:16.000Z
|
2021-06-10T17:51:32.000Z
|
speech_to_text/views.py
|
zace3d/video_analysis
|
9001486ae64160ca497f6b9a99df5d9a5c5422cc
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from . import helpers
# Create your views here.
| 25.681818
| 70
| 0.766372
|
070792428b154808490c0fc141036d69c221ccfb
| 2,981
|
py
|
Python
|
security_monkey/watchers/vpc/vpn.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 4,258
|
2015-01-04T22:06:10.000Z
|
2022-03-31T23:40:27.000Z
|
security_monkey/watchers/vpc/vpn.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 1,013
|
2015-01-12T02:31:03.000Z
|
2021-09-16T19:09:03.000Z
|
security_monkey/watchers/vpc/vpn.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 965
|
2015-01-11T21:06:07.000Z
|
2022-03-17T16:53:57.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.vpc.vpn
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Alex Cline <alex.cline@gmail.com> @alex.cline
"""
from cloudaux.aws.ec2 import describe_vpn_connections
from security_monkey.cloudaux_watcher import CloudAuxWatcher
from security_monkey.watcher import ChangeItem
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
| 37.2625
| 115
| 0.614223
|
0708030cc6b0ac486ef0bd568029e80e9873483c
| 2,332
|
py
|
Python
|
particle.py
|
coush001/Imperial-MSc-Group-Project-2
|
9309217895802d11c6fe9d2dca9b21f98fbc1c61
|
[
"MIT"
] | null | null | null |
particle.py
|
coush001/Imperial-MSc-Group-Project-2
|
9309217895802d11c6fe9d2dca9b21f98fbc1c61
|
[
"MIT"
] | null | null | null |
particle.py
|
coush001/Imperial-MSc-Group-Project-2
|
9309217895802d11c6fe9d2dca9b21f98fbc1c61
|
[
"MIT"
] | null | null | null |
from itertools import count
import numpy as np
| 30.285714
| 107
| 0.551887
|
0708b3e7b515fbe0913b6b5bb88c0fbd4c828abe
| 501
|
py
|
Python
|
app/main/form.py
|
hussein18149/PITCHBOARD
|
9aa515f8dd18464830bdf80488a317e8e791bd1b
|
[
"MIT"
] | null | null | null |
app/main/form.py
|
hussein18149/PITCHBOARD
|
9aa515f8dd18464830bdf80488a317e8e791bd1b
|
[
"MIT"
] | null | null | null |
app/main/form.py
|
hussein18149/PITCHBOARD
|
9aa515f8dd18464830bdf80488a317e8e791bd1b
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
| 31.3125
| 73
| 0.754491
|
07092a144b2a5c13ba5ef9b78acec4dd39f5a15b
| 4,840
|
py
|
Python
|
soar_instruments/sami/adclass.py
|
soar-telescope/dragons-soar
|
a1c600074f532c1af6bd59bc2cc662a1aecd39c4
|
[
"MIT"
] | 1
|
2017-10-31T21:02:59.000Z
|
2017-10-31T21:02:59.000Z
|
soar_instruments/sami/adclass.py
|
soar-telescope/dragons-soar
|
a1c600074f532c1af6bd59bc2cc662a1aecd39c4
|
[
"MIT"
] | null | null | null |
soar_instruments/sami/adclass.py
|
soar-telescope/dragons-soar
|
a1c600074f532c1af6bd59bc2cc662a1aecd39c4
|
[
"MIT"
] | null | null | null |
import re
import astrodata
from astrodata import (astro_data_tag, TagSet, astro_data_descriptor,
returns_list)
from astrodata.fits import FitsLoader, FitsProvider
from ..soar import AstroDataSOAR
| 33.846154
| 79
| 0.590083
|
0709b6cd82b1f84edf49917175e51ec7e1ae9747
| 264
|
py
|
Python
|
practice/src/design_pattern/TemplateMethod.py
|
t10471/python
|
75056454bfb49197eb44f6b4d6a1b0a0b4b408ec
|
[
"MIT"
] | null | null | null |
practice/src/design_pattern/TemplateMethod.py
|
t10471/python
|
75056454bfb49197eb44f6b4d6a1b0a0b4b408ec
|
[
"MIT"
] | null | null | null |
practice/src/design_pattern/TemplateMethod.py
|
t10471/python
|
75056454bfb49197eb44f6b4d6a1b0a0b4b408ec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
| 15.529412
| 30
| 0.556818
|
070a2f74e288d9e0f7d67adf9e2e415a8758caa2
| 1,957
|
py
|
Python
|
yoon/stage1_kernel.py
|
yoon28/realsr-noise-injection
|
402679490bf0972d09aaaadee3b5b9850c2a36e4
|
[
"Apache-2.0"
] | 17
|
2020-07-29T11:08:19.000Z
|
2021-01-07T11:23:33.000Z
|
yoon/stage1_kernel.py
|
yoon28/realsr-noise-injection
|
402679490bf0972d09aaaadee3b5b9850c2a36e4
|
[
"Apache-2.0"
] | 5
|
2020-08-04T02:51:39.000Z
|
2020-08-21T03:44:08.000Z
|
yoon/stage1_kernel.py
|
yoon28/realsr-noise-injection
|
402679490bf0972d09aaaadee3b5b9850c2a36e4
|
[
"Apache-2.0"
] | null | null | null |
import os, sys
import numpy as np
import cv2
import random
import torch
from configs import Config
from kernelGAN import KernelGAN
from data import DataGenerator
from learner import Learner
import tqdm
DATA_LOC = "/mnt/data/NTIRE2020/realSR/track2" # "/mnt/data/NTIRE2020/realSR/track1"
DATA_X = "DPEDiphone-tr-x" # "Corrupted-tr-x"
DATA_Y = "DPEDiphone-tr-y" # "Corrupted-tr-y"
DATA_VAL = "DPEDiphone-va" # "Corrupted-va-x"
if __name__ == "__main__":
seed_num = 0
torch.manual_seed(seed_num)
torch.cuda.manual_seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed_num)
random.seed(seed_num)
# exit(0)
data = {"X":[os.path.join(DATA_LOC, DATA_X, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_X)) if f[-4:] == ".png"],
"Y":[os.path.join(DATA_LOC, DATA_Y, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_Y)) if f[-4:] == ".png"],
"val":[os.path.join(DATA_LOC, DATA_VAL, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_VAL)) if f[-4:] == ".png"]}
Kernels = []
Noises = []
for f in data["X"]:
estimate_kernel(f)
print("fin.")
| 30.107692
| 130
| 0.654573
|
070a513dc67a15b46d7b419d4ba1b638e56fb11a
| 731
|
py
|
Python
|
test/rdfa/test_non_xhtml.py
|
RDFLib/PyRDFa
|
efc24d4940910ca1e65900c25b62047301bbdcc7
|
[
"BSD-3-Clause"
] | 8
|
2015-04-01T19:55:22.000Z
|
2020-04-25T08:50:05.000Z
|
test/rdfa/test_non_xhtml.py
|
DalavanCloud/PyRDFa
|
fd5c8826fb9e5f6f5a578564b1149fdae6c40aad
|
[
"BSD-3-Clause"
] | null | null | null |
test/rdfa/test_non_xhtml.py
|
DalavanCloud/PyRDFa
|
fd5c8826fb9e5f6f5a578564b1149fdae6c40aad
|
[
"BSD-3-Clause"
] | 1
|
2019-02-12T03:15:00.000Z
|
2019-02-12T03:15:00.000Z
|
from unittest import TestCase
from pyRdfa import pyRdfa
| 33.227273
| 100
| 0.682627
|
070a6926f75c6689b9bf183a8c81961b1ffe5bbd
| 1,150
|
py
|
Python
|
python/pyoai/setup.py
|
jr3cermak/robs-kitchensink
|
74b7eb1b1acd8b700d61c5a9ba0c69be3cc6763a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
python/pyoai/setup.py
|
jr3cermak/robs-kitchensink
|
74b7eb1b1acd8b700d61c5a9ba0c69be3cc6763a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
python/pyoai/setup.py
|
jr3cermak/robs-kitchensink
|
74b7eb1b1acd8b700d61c5a9ba0c69be3cc6763a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
from os.path import join, dirname
setup(
name='pyoai',
version='2.4.6.b',
author='Infrae',
author_email='rob.cermak@gmail.com',
url='https://github.com/jr3cermak/robs-kitchensink/tree/master/python/pyoai',
classifiers=["Development Status :: 4 - Beta",
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment"],
description="""\
The oaipmh module is a Python implementation of an "Open Archives
Initiative Protocol for Metadata Harvesting" (version 2) client and server.
The protocol is described here:
http://www.openarchives.org/OAI/openarchivesprotocol.html
""",
long_description=(open(join(dirname(__file__), 'README.rst')).read()+
'\n\n'+
open(join(dirname(__file__), 'HISTORY.txt')).read()),
packages=find_packages('src'),
package_dir = {'': 'src'},
zip_safe=False,
license='BSD',
keywords='OAI-PMH xml archive',
install_requires=['lxml'],
)
| 35.9375
| 81
| 0.650435
|
070b402dc83b92f4ca29c79684b3e9fb26a6238f
| 4,201
|
py
|
Python
|
utils/functions.py
|
Roozbeh-Bazargani/CPSC-533R-project
|
453f093b23d2363f09c61079d1d4fbd878abf3be
|
[
"MIT"
] | null | null | null |
utils/functions.py
|
Roozbeh-Bazargani/CPSC-533R-project
|
453f093b23d2363f09c61079d1d4fbd878abf3be
|
[
"MIT"
] | null | null | null |
utils/functions.py
|
Roozbeh-Bazargani/CPSC-533R-project
|
453f093b23d2363f09c61079d1d4fbd878abf3be
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
import math
#0 left hip
#1 left knee
#2 left foot
#3 right hip
#4 right knee
#5 right foot
#6 middle hip
#7 neck
#8 nose
#9 head
#10 left shoulder
#11 left elbow
#12 left wrist
#13 right shoulder
#14 right elbow
#15 right wrist
'''
def temporal_loss(J, K, J_R, K_R): # J is J3d at time t and K is J3d at time t+k. J_R means the reversed rotation of J
return torch.norm(J - K - J_R + K_R, dim=1)**2
'''
'''
def random_rotation(J3d):
# J = torch.transpose(J3d, 1, 2)
J = J3d
root = torch.zeros(J.shape[0:2])
for i in range(J.shape[0]):
theta = torch.rand(1).cuda() * 2*torch.tensor(math.pi).cuda() # random theta
root[i] = J[i,:,8] # joint 8 = nose is root
temp = rotation(J[i,:,:], theta, root[i].unsqueeze(1), False)
# print(temp.shape)
J[i,:,:] = temp
return J, theta, root # need these values in the code
def rotation(J, theta, root, is_reversed): # rotation over y axis by theta
D = root[2] # absolute depth of the root joint
v_t = torch.tensor([[0], [0], [D]]).cuda() # translation vector
if is_reversed:
root, v_t = v_t, root # swap
theta = -theta
# R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees
R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]).cuda() # rotation matrix over y by theta degrees
# R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees
J_R = torch.matmul(R, J.cuda() - root.cuda()) + v_t # rotation
return J_R
def reverse_rotation(J3d_R, theta, root):
# J = torch.transpose(J3d_R, 1, 2)
J = J3d_R
for i in range(J.shape[0]):
J[i,:,:] = rotation(J[i,:,:].cuda(), theta.cuda(), root[i].unsqueeze(1).cuda(), True)
return J
'''
| 42.434343
| 181
| 0.633183
|
070c8541550d5f85dceb7ec0adf8c900bec0c786
| 303
|
py
|
Python
|
Desafio Python/Aula 22 des109.py
|
ayresmajor/Curso-python
|
006229cec38ea365bf43b19e3ce93fbd32e1dca6
|
[
"MIT"
] | null | null | null |
Desafio Python/Aula 22 des109.py
|
ayresmajor/Curso-python
|
006229cec38ea365bf43b19e3ce93fbd32e1dca6
|
[
"MIT"
] | null | null | null |
Desafio Python/Aula 22 des109.py
|
ayresmajor/Curso-python
|
006229cec38ea365bf43b19e3ce93fbd32e1dca6
|
[
"MIT"
] | null | null | null |
from des109 import moeda
preco = float(input('Digite o preo pretendido: '))
print(f'''A metade do preo {(moeda.metade(preco))}
O dobro do preo {(moeda.dobra(preco))}
Aumentando o preo 10% temos {(moeda.aumentar(preco, 10))}
Diminuindo o preo 13% temos {(moeda.aumentar(preco, 13))}''')
| 37.875
| 64
| 0.693069
|
070d242ccbb22625007056e552b13c344fbecb38
| 474
|
py
|
Python
|
Chapter13_code/ch13_r05_using_the_rpc_api/xmlrpc.py
|
PacktPublishing/Odoo-Development-Cookbook
|
5553110c0bc352c4541f11904e236cad3c443b8b
|
[
"MIT"
] | 55
|
2016-05-23T16:05:50.000Z
|
2021-07-19T00:16:46.000Z
|
Chapter13_code/ch13_r05_using_the_rpc_api/xmlrpc.py
|
kogkog098/Odoo-Development-Cookbook
|
166c9b98efbc9108b30d719213689afb1f1c294d
|
[
"MIT"
] | 1
|
2016-12-09T02:14:21.000Z
|
2018-07-02T09:02:20.000Z
|
Chapter13_code/ch13_r05_using_the_rpc_api/xmlrpc.py
|
kogkog098/Odoo-Development-Cookbook
|
166c9b98efbc9108b30d719213689afb1f1c294d
|
[
"MIT"
] | 52
|
2016-06-01T20:03:59.000Z
|
2020-10-31T23:58:25.000Z
|
#!/usr/bin/env python2
import xmlrpclib
db = 'odoo9'
user = 'admin'
password = 'admin'
uid = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/2/common')\
.authenticate(db, user, password, {})
odoo = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/2/object')
installed_modules = odoo.execute_kw(
db, uid, password, 'ir.module.module', 'search_read',
[[('state', '=', 'installed')], ['name']], {})
for module in installed_modules:
print module['name']
| 31.6
| 69
| 0.681435
|
070dfc39dd180a0fc71b0110b529e2e8beee6cea
| 10,971
|
py
|
Python
|
python/zzz/v1-all_feat_cnn/components/features.py
|
emorynlp/character-identification-old
|
f6519166dd30bd8140f05aa3e43225ab27c2ea6d
|
[
"Apache-2.0"
] | 1
|
2019-09-03T13:38:08.000Z
|
2019-09-03T13:38:08.000Z
|
python/zzz/v1-all_feat_cnn/components/features.py
|
emorynlp/character-identification-old
|
f6519166dd30bd8140f05aa3e43225ab27c2ea6d
|
[
"Apache-2.0"
] | null | null | null |
python/zzz/v1-all_feat_cnn/components/features.py
|
emorynlp/character-identification-old
|
f6519166dd30bd8140f05aa3e43225ab27c2ea6d
|
[
"Apache-2.0"
] | null | null | null |
from abc import *
import numpy as np
###########################################################
###########################################################
###########################################################
| 44.417004
| 120
| 0.668854
|
070fb86171845062c7fc24a28acd90660006212e
| 521
|
py
|
Python
|
ufdl-core-app/src/ufdl/core_app/models/mixins/_UserRestrictedQuerySet.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | null | null | null |
ufdl-core-app/src/ufdl/core_app/models/mixins/_UserRestrictedQuerySet.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | 85
|
2020-07-24T00:04:28.000Z
|
2022-02-10T10:35:15.000Z
|
ufdl-core-app/src/ufdl/core_app/models/mixins/_UserRestrictedQuerySet.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
| 28.944444
| 79
| 0.658349
|
071028fc162506887f63334754f84e376a76520e
| 31,879
|
py
|
Python
|
sdk/python/pulumi_azure_native/eventgrid/partner_registration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/eventgrid/partner_registration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/eventgrid/partner_registration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = ['PartnerRegistrationArgs', 'PartnerRegistration']
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
customer_service_uri: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
long_description: Optional[pulumi.Input[str]] = None,
partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
partner_customer_service_number: Optional[pulumi.Input[str]] = None,
partner_name: Optional[pulumi.Input[str]] = None,
partner_registration_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_description: Optional[pulumi.Input[str]] = None,
partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
setup_uri: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PartnerRegistrationArgs.__new__(PartnerRegistrationArgs)
__props__.__dict__["authorized_azure_subscription_ids"] = authorized_azure_subscription_ids
__props__.__dict__["customer_service_uri"] = customer_service_uri
__props__.__dict__["location"] = location
__props__.__dict__["logo_uri"] = logo_uri
__props__.__dict__["long_description"] = long_description
__props__.__dict__["partner_customer_service_extension"] = partner_customer_service_extension
__props__.__dict__["partner_customer_service_number"] = partner_customer_service_number
__props__.__dict__["partner_name"] = partner_name
__props__.__dict__["partner_registration_name"] = partner_registration_name
__props__.__dict__["partner_resource_type_description"] = partner_resource_type_description
__props__.__dict__["partner_resource_type_display_name"] = partner_resource_type_display_name
__props__.__dict__["partner_resource_type_name"] = partner_resource_type_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["setup_uri"] = setup_uri
__props__.__dict__["tags"] = tags
__props__.__dict__["visibility_state"] = visibility_state
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventgrid:PartnerRegistration"), pulumi.Alias(type_="azure-native:eventgrid/v20200401preview:PartnerRegistration"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20200401preview:PartnerRegistration"), pulumi.Alias(type_="azure-native:eventgrid/v20201015preview:PartnerRegistration"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20201015preview:PartnerRegistration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PartnerRegistration, __self__).__init__(
'azure-native:eventgrid:PartnerRegistration',
resource_name,
__props__,
opts)
| 51.584142
| 454
| 0.685624
|
071099c9cb76fe44fe601d2109b5cad6021d0a3d
| 2,420
|
py
|
Python
|
_ar/masking_provement.py
|
TomKingsfordUoA/ResidualMaskingNetwork
|
6ce5ddf70f8ac8f1e6da2746b0bbeb9e457ceb7d
|
[
"MIT"
] | 242
|
2020-01-09T11:06:21.000Z
|
2022-03-26T14:51:48.000Z
|
_ar/masking_provement.py
|
huyhnueit68/ResidualMaskingNetwork
|
b77abb6e548b9a09b5c96b1592d71332b45d050e
|
[
"MIT"
] | 33
|
2020-01-09T08:42:10.000Z
|
2022-03-23T07:52:56.000Z
|
_ar/masking_provement.py
|
huyhnueit68/ResidualMaskingNetwork
|
b77abb6e548b9a09b5c96b1592d71332b45d050e
|
[
"MIT"
] | 61
|
2020-01-19T02:20:37.000Z
|
2022-03-25T13:08:48.000Z
|
import os
import glob
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from natsort import natsorted
from models import resmasking_dropout1
from utils.datasets.fer2013dataset import EMOTION_DICT
from barez import show
transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.ToTensor(),
]
)
model = resmasking_dropout1(3, 7)
# state = torch.load('./saved/checkpoints/resmasking_dropout1_rot30_2019Nov17_14.33')
state = torch.load("./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32")
model.load_state_dict(state["net"])
model.cuda()
model.eval()
for image_path in natsorted(
glob.glob("/home/z/research/bkemo/images/**/*.png", recursive=True)
):
image_name = os.path.basename(image_path)
print(image_name)
# image_path = '/home/z/research/bkemo/images/disgust/0.0_dc10a3_1976_0.png'
image = cv2.imread(image_path)
image = cv2.resize(image, (224, 224))
tensor = transform(image)
tensor = torch.unsqueeze(tensor, 0)
tensor = tensor.cuda()
# output = model(tensor)
x = model.conv1(tensor) # 112
x = model.bn1(x)
x = model.relu(x)
x = model.maxpool(x) # 56
x = model.layer1(x) # 56
m = model.mask1(x)
x = x * (1 + m)
x = model.layer2(x) # 28
m = model.mask2(x)
x = x * (1 + m)
x = model.layer3(x) # 14
heat_1 = activations_mask(x)
m = model.mask3(x)
x = x * (1 + m)
# heat_2 = activations_mask(m)
x = model.layer4(x) # 7
m = model.mask4(x)
x = x * (1 + m)
x = model.avgpool(x)
x = torch.flatten(x, 1)
output = model.fc(x)
# print(np.sum(heat_1 - heat_2))
# show(np.concatenate((image, heat_1, heat_2), axis=1))
cv2.imwrite(
"./masking_provements/{}".format(image_name),
np.concatenate((image, heat_1), axis=1),
)
# np.concatenate((image, heat_1, heat_2), axis=1))
# output = output.cpu().numpy()
# print(EMOTION_DICT[torch.argmax(output, 1).item()])
| 26.021505
| 85
| 0.647934
|
07112b5b2ca5ebda12c4c78461b67e41243aa4a8
| 1,727
|
py
|
Python
|
Python/Gerenciador de pagamentos.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | 1
|
2022-03-03T23:19:57.000Z
|
2022-03-03T23:19:57.000Z
|
Python/Gerenciador de pagamentos.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | null | null | null |
Python/Gerenciador de pagamentos.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | null | null | null |
import time
import colorama
while True:
consulta = gerenciador_de_pagamento()
consulta = str(input('Quer consultar novamente? '))
if consulta in ['sim', 'Sim', 'SIM']:
pass
elif consulta in ['no', 'nao','No', 'Nao', 'NAO','NO']:
break
else:
break
| 38.377778
| 105
| 0.59062
|
0711bae755946fd50e5034659184b298bbe243f6
| 1,786
|
py
|
Python
|
src/scs_core/osio/data/abstract_topic.py
|
seoss/scs_core
|
0d4323c5697a39eb44a887f179ba5dca3716c1d2
|
[
"MIT"
] | 3
|
2019-03-12T01:59:58.000Z
|
2020-09-12T07:27:42.000Z
|
src/scs_core/osio/data/abstract_topic.py
|
seoss/scs_core
|
0d4323c5697a39eb44a887f179ba5dca3716c1d2
|
[
"MIT"
] | 1
|
2018-04-20T07:58:38.000Z
|
2021-03-27T08:52:45.000Z
|
src/scs_core/osio/data/abstract_topic.py
|
seoss/scs_core
|
0d4323c5697a39eb44a887f179ba5dca3716c1d2
|
[
"MIT"
] | 4
|
2017-09-29T13:08:43.000Z
|
2019-10-09T09:13:58.000Z
|
"""
Created on 2 Apr 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from collections import OrderedDict
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
| 23.194805
| 118
| 0.403135
|
0711c47f68c0681b184df5cde182256dcc62322f
| 11,286
|
py
|
Python
|
sdk/python/pulumi_azure_native/notificationhubs/latest/get_namespace.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/notificationhubs/latest/get_namespace.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/notificationhubs/latest/get_namespace.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNamespaceResult',
'AwaitableGetNamespaceResult',
'get_namespace',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.""", DeprecationWarning)
def get_namespace(namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
"""
Description of a Namespace resource.
Latest API Version: 2017-04-01.
:param str namespace_name: The namespace name.
:param str resource_group_name: The name of the resource group.
"""
pulumi.log.warn("""get_namespace is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.""")
__args__ = dict()
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:notificationhubs/latest:getNamespace', __args__, opts=opts, typ=GetNamespaceResult).value
return AwaitableGetNamespaceResult(
created_at=__ret__.created_at,
critical=__ret__.critical,
data_center=__ret__.data_center,
enabled=__ret__.enabled,
id=__ret__.id,
location=__ret__.location,
metric_id=__ret__.metric_id,
name=__ret__.name,
namespace_type=__ret__.namespace_type,
provisioning_state=__ret__.provisioning_state,
region=__ret__.region,
scale_unit=__ret__.scale_unit,
service_bus_endpoint=__ret__.service_bus_endpoint,
sku=__ret__.sku,
status=__ret__.status,
subscription_id=__ret__.subscription_id,
tags=__ret__.tags,
type=__ret__.type,
updated_at=__ret__.updated_at)
| 37.003279
| 329
| 0.641148
|
0713544bf0325f76443f346f91d5551b3d2799f3
| 393
|
py
|
Python
|
chue/utils.py
|
naren-m/chue
|
6f77ad990c911353524c5c99bcf6e30155edaf97
|
[
"MIT"
] | null | null | null |
chue/utils.py
|
naren-m/chue
|
6f77ad990c911353524c5c99bcf6e30155edaf97
|
[
"MIT"
] | null | null | null |
chue/utils.py
|
naren-m/chue
|
6f77ad990c911353524c5c99bcf6e30155edaf97
|
[
"MIT"
] | null | null | null |
import json
from pygments import highlight
from pygments.lexers import JsonLexer
from pygments.formatters import TerminalFormatter
| 28.071429
| 64
| 0.793893
|
0713bf1d16fde855bda0ed021b030d08feadd022
| 3,486
|
py
|
Python
|
selfdrive/car/chrysler/radar_interface.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | 85
|
2019-06-14T17:51:31.000Z
|
2022-02-09T22:18:20.000Z
|
selfdrive/car/chrysler/radar_interface.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | 4
|
2020-04-12T21:34:03.000Z
|
2020-04-15T22:22:15.000Z
|
selfdrive/car/chrysler/radar_interface.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | 73
|
2018-12-03T19:34:42.000Z
|
2020-07-27T05:10:23.000Z
|
#!/usr/bin/env python3
import os
from opendbc.can.parser import CANParser
from cereal import car
from selfdrive.car.interfaces import RadarInterfaceBase
RADAR_MSGS_C = list(range(0x2c2, 0x2d4+2, 2)) # c_ messages 706,...,724
RADAR_MSGS_D = list(range(0x2a2, 0x2b4+2, 2)) # d_ messages
LAST_MSG = max(RADAR_MSGS_C + RADAR_MSGS_D)
NUMBER_MSGS = len(RADAR_MSGS_C) + len(RADAR_MSGS_D)
| 37.085106
| 98
| 0.645439
|
0714065ddc085782b982ec392f121b65f95bc048
| 911
|
py
|
Python
|
mod/tools/ccmake.py
|
mattiasljungstrom/fips
|
8775e299f710ae5b977d49dc0672b607f2a10378
|
[
"MIT"
] | 429
|
2015-01-06T18:44:20.000Z
|
2022-03-19T22:22:11.000Z
|
mod/tools/ccmake.py
|
mattiasljungstrom/fips
|
8775e299f710ae5b977d49dc0672b607f2a10378
|
[
"MIT"
] | 254
|
2015-01-01T18:11:57.000Z
|
2022-03-22T09:55:51.000Z
|
mod/tools/ccmake.py
|
mattiasljungstrom/fips
|
8775e299f710ae5b977d49dc0672b607f2a10378
|
[
"MIT"
] | 102
|
2015-01-17T11:41:16.000Z
|
2022-02-24T23:47:30.000Z
|
"""
wrapper for ccmake command line tool
"""
import subprocess
name = 'ccmake'
platforms = ['linux', 'osx']
optional = True
not_found = "required for 'fips config' functionality"
#-------------------------------------------------------------------------------
def check_exists(fips_dir) :
"""test if ccmake is in the path
:returns: True if ccmake is in the path
"""
try:
out = subprocess.check_output(['ccmake', '--version'])
return True
except (OSError, subprocess.CalledProcessError):
return False
#-------------------------------------------------------------------------------
def run(build_dir) :
"""run ccmake to configure cmake project
:param build_dir: directory where ccmake should run
:returns: True if ccmake returns successful
"""
res = subprocess.call('ccmake .', cwd=build_dir, shell=True)
return res == 0
| 26.794118
| 80
| 0.535675
|
071593280ef30a4532ccbb4b6f3c6b4f7d728fa5
| 4,251
|
py
|
Python
|
image_quality/handlers/data_generator.py
|
mbartoli/image-quality-assessment
|
b957c781ac8a11f8668f58345524f33503338b3b
|
[
"Apache-2.0"
] | 1
|
2021-03-27T15:09:30.000Z
|
2021-03-27T15:09:30.000Z
|
image_quality/handlers/data_generator.py
|
welcotravel/image-quality-assessment
|
b9e17de93578220e5ae142725d9153098759e7c8
|
[
"Apache-2.0"
] | null | null | null |
image_quality/handlers/data_generator.py
|
welcotravel/image-quality-assessment
|
b9e17de93578220e5ae142725d9153098759e7c8
|
[
"Apache-2.0"
] | 1
|
2020-10-05T03:20:53.000Z
|
2020-10-05T03:20:53.000Z
|
import os
import numpy as np
import tensorflow as tf
from image_quality.utils import utils
| 40.485714
| 106
| 0.713479
|
07167e515430a27837434e8e166dc173dffdcc37
| 1,914
|
py
|
Python
|
codewars/4 kyu/strip-comments.py
|
sirken/coding-practice
|
9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab
|
[
"MIT"
] | null | null | null |
codewars/4 kyu/strip-comments.py
|
sirken/coding-practice
|
9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab
|
[
"MIT"
] | null | null | null |
codewars/4 kyu/strip-comments.py
|
sirken/coding-practice
|
9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab
|
[
"MIT"
] | null | null | null |
from Test import Test, Test as test
'''
Complete the solution so that it strips all text that follows any of a set of comment markers passed in. Any whitespace at the end of the line should also be stripped out.
Example:
Given an input string of:
apples, pears # and bananas
grapes
bananas !apples
The output expected would be:
apples, pears
grapes
bananas
The code would be called like so:
result = solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# result should == "apples, pears\ngrapes\nbananas"
'''
# Split by rows, then find earliest marker and extract string before it
# Top solution, split list by \n, edit in place
# Top solution expanded
Test.assert_equals(solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"]), "apples, pears\ngrapes\nbananas")
Test.assert_equals(solution("a #b\nc\nd $e f g", ["#", "$"]), "a\nc\nd")
Test.assert_equals(solution('= - avocados oranges pears cherries\nlemons apples\n- watermelons strawberries', ['#', '?', '=', ',', '.', '-', '!']), '\nlemons apples\n')
| 31.9
| 171
| 0.640021
|
0718f25c782fcd74f5e9c8f0ae638c3321dd5b08
| 6,221
|
py
|
Python
|
qat/interop/qiskit/quantum_channels.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | 5
|
2020-09-09T09:44:31.000Z
|
2021-07-02T09:49:21.000Z
|
qat/interop/qiskit/quantum_channels.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | null | null | null |
qat/interop/qiskit/quantum_channels.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | 3
|
2020-07-10T17:51:47.000Z
|
2021-04-13T16:33:44.000Z
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp
import numpy as np
from qat.comm.quops.ttypes import QuantumChannel, RepresentationType
from qat.comm.datamodel.ttypes import Matrix, ComplexNumber
def array_to_matrix(array):
"""
Transform a two dimmentional numpy array to a myqlm Matrix.
Args:
array: (ndarray) a two dimmentional numpy array
Returns:
(Matrix): a myqlm Matrix
"""
assert len(array.shape) == 2, "The array must be two dimmentional"
data = []
for arr in array:
for elem in arr:
data.append(ComplexNumber(np.real(elem), np.imag(elem)))
matri = Matrix(array.shape[0], array.shape[1], data)
return matri
def qiskit_to_qchannel(representation):
    """
    Create a myqlm representation of quantum channel from a qiskit representation
    of a quantum channel.

    Args:
        representation: (Kraus|Choi|Chi|SuperOp|PTM) qiskit representation of a quantum channel.

    Returns:
        (QuantumChannel): myqlm representation of a quantum channel.
    """
    qiskit_data = representation.data
    arity = representation.num_qubits
    # Dispatch on the concrete qiskit class and build the matching
    # QuantumChannel, filling the appropriate field
    # (kraus_ops | basis | matrix).
    if isinstance(representation, Kraus):
        return QuantumChannel(
            representation=RepresentationType.KRAUS,
            arity=arity,
            kraus_ops=[array_to_matrix(arr) for arr in qiskit_data])
    if isinstance(representation, Chi):
        return QuantumChannel(
            representation=RepresentationType.CHI,
            arity=arity,
            basis=[array_to_matrix(qiskit_data)])
    if isinstance(representation, SuperOp):
        return QuantumChannel(
            representation=RepresentationType.SUPEROP,
            arity=arity,
            basis=[array_to_matrix(qiskit_data)])
    if isinstance(representation, PTM):
        return QuantumChannel(
            representation=RepresentationType.PTM,
            arity=arity,
            matrix=array_to_matrix(qiskit_data))
    if isinstance(representation, Choi):
        return QuantumChannel(
            representation=RepresentationType.CHOI,
            arity=arity,
            matrix=array_to_matrix(qiskit_data))
    # Unknown representation class: mirror the original behaviour.
    return None
def qchannel_to_qiskit(representation):
"""
Create a qiskit representation of quantum channel from a myqlm representation
of a quantum channel.
Args:
representation: (QuantumChannel) myqlm representation of a quantum channel.
Returns:
(Kraus|Choi|Chi|SuperOp|PTM): qiskit representation of a quantum channel.
"""
rep = representation.representation
# Find what representation it is.
# Then create the corresponding matrix and shape it like qiskit is expecting it.
# Finally, create the qiskit representation from that matrix.
if rep in (RepresentationType.PTM, RepresentationType.CHOI):
matri = representation.matrix
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
return PTM(data) if (rep == RepresentationType.PTM) else Choi(data)
if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):
final_data = []
for matri in representation.basis:
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
final_data.append(data)
if rep == RepresentationType.CHI:
return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])
return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])
if rep == RepresentationType.KRAUS:
final_data = []
for matri in representation.kraus_ops:
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
final_data.append(data)
return Kraus(final_data)
return None
| 37.70303
| 96
| 0.649735
|
0719b950e4a48282eaf1194cb80f0583e44f000f
| 2,061
|
py
|
Python
|
mne_nirs/simulation/_simulation.py
|
mshader/mne-nirs
|
d59a5436d162108226f31b33b194dfecada40d72
|
[
"BSD-3-Clause"
] | null | null | null |
mne_nirs/simulation/_simulation.py
|
mshader/mne-nirs
|
d59a5436d162108226f31b33b194dfecada40d72
|
[
"BSD-3-Clause"
] | null | null | null |
mne_nirs/simulation/_simulation.py
|
mshader/mne-nirs
|
d59a5436d162108226f31b33b194dfecada40d72
|
[
"BSD-3-Clause"
] | null | null | null |
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
import numpy as np
from mne import Annotations, create_info
from mne.io import RawArray
def simulate_nirs_raw(sfreq=3., amplitude=1.,
                      sig_dur=300., stim_dur=5.,
                      isi_min=15., isi_max=45.):
    """
    Create simulated data.

    .. warning:: Work in progress: I am trying to think on the best API.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number
        The amplitude of the signal to simulate in uM.
    sig_dur : Number
        The length of the signal to generate in seconds.
    stim_dur : Number
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    from nilearn.stats.first_level_model import make_first_level_design_matrix
    from pandas import DataFrame

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    # Draw random stimulus onsets; keep drawing while the previous onset
    # is still more than 60 s from the end of the recording.
    onset = 0.
    onsets = []
    while onset < sig_dur - 60:
        onset += np.random.uniform(isi_min, isi_max) + stim_dur
        onsets.append(onset)
    conditions = ["A"] * len(onsets)
    durations = [stim_dur] * len(onsets)

    events = DataFrame({'trial_type': conditions,
                        'onset': onsets,
                        'duration': durations})
    # Design matrix for the event train; drift model is a constant only
    # (polynomial of order 0).
    dm = make_first_level_design_matrix(frame_times, events,
                                        drift_model='polynomial',
                                        drift_order=0)

    ch_info = create_info(ch_names=['Simulated'], sfreq=sfreq, ch_types=['hbo'])
    raw = RawArray(dm[["A"]].to_numpy().T * amplitude * 1.e-6,
                   ch_info, verbose=False)
    raw.set_annotations(Annotations(onsets, durations, conditions))
    return raw
| 29.442857
| 78
| 0.606016
|
071a7e610b94fdc4f5c933fd228639d190c83b96
| 3,650
|
py
|
Python
|
build/lib/dataaccess/TransactionRepository.py
|
athanikos/cryptodataaccess
|
6189a44c65a9b03c02822a534e865740ab488809
|
[
"MIT"
] | null | null | null |
build/lib/dataaccess/TransactionRepository.py
|
athanikos/cryptodataaccess
|
6189a44c65a9b03c02822a534e865740ab488809
|
[
"MIT"
] | null | null | null |
build/lib/dataaccess/TransactionRepository.py
|
athanikos/cryptodataaccess
|
6189a44c65a9b03c02822a534e865740ab488809
|
[
"MIT"
] | null | null | null |
from cryptomodel.cryptostore import user_notification, user_channel, user_transaction, operation_type
from mongoengine import Q
from cryptodataaccess import helpers
from cryptodataaccess.helpers import if_none_raise, if_none_raise_with_id
| 42.44186
| 120
| 0.670137
|
071afc12457e1373ac1b61126e3c5e710f213fb9
| 1,536
|
py
|
Python
|
app/util/auth2.py
|
FSU-ACM/Contest-Server
|
00a71cdcee1a7e4d4e4d8e33b5d6decf27f02313
|
[
"MIT"
] | 8
|
2019-01-13T21:57:53.000Z
|
2021-11-29T12:32:48.000Z
|
app/util/auth2.py
|
FSU-ACM/Contest-Server
|
00a71cdcee1a7e4d4e4d8e33b5d6decf27f02313
|
[
"MIT"
] | 73
|
2018-02-13T00:58:39.000Z
|
2022-02-10T11:59:53.000Z
|
app/util/auth2.py
|
FSU-ACM/Contest-Server
|
00a71cdcee1a7e4d4e4d8e33b5d6decf27f02313
|
[
"MIT"
] | 4
|
2018-02-08T18:56:54.000Z
|
2019-02-13T19:01:53.000Z
|
""" util.auth2: Authentication tools
This module is based off of util.auth, except with the action
paradigm removed.
"""
from flask import session
from app.models import Account
from app.util import course as course_util
# Session keys
SESSION_EMAIL = 'email'
def create_account(email: str, password: str, first_name: str,
                   last_name: str, fsuid: str, course_list: list = None):
    """
    Creates an account for a single user.

    :email: Required, the email address of the user.
    :password: Required, user's chosen password.
    :first_name: Required, user's first name.
    :last_name: Required, user's last name.
    :fsuid: Optional, user's FSUID.
    :course_list: Optional, courses being taken by user.
    :return: Account object.
    """
    # Default changed from a mutable `[]` (shared across calls) to None;
    # omitting the argument behaves exactly as before.
    if course_list is None:
        course_list = []

    account = Account(
        email=email,
        first_name=first_name,
        last_name=last_name,
        fsuid=fsuid,
        is_admin=False
    )

    # Set user's extra credit courses
    course_util.set_courses(account, course_list)

    account.set_password(password)
    account.save()

    return account
def get_account(email: str = None):
    """
    Retrieves account via email (defaults to using session), otherwise
    returns None.

    :email: Optional email string, if not provided will use session['email']
    :return: Account if email is present in session, None otherwise.
    """
    try:
        # KeyError if no email given and none stored in the session;
        # get_or_404 aborts with an HTTP exception when no match exists.
        email = email or session['email']
        return Account.objects.get_or_404(email=email)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; expected failures still yield None.
        return None
| 26.033898
| 76
| 0.670573
|
071b7fe4a170335142cb957704dfc31f09df575c
| 1,125
|
py
|
Python
|
FeView/pstaticwidget.py
|
motiurce/FeView
|
8897b37062be88dd5ead2c8524f6b3b73451e25d
|
[
"MIT"
] | 10
|
2021-04-09T02:32:23.000Z
|
2022-03-12T15:21:41.000Z
|
FeView/pstaticwidget.py
|
ElsevierSoftwareX/SOFTX-D-21-00063
|
50eca2a003e6281dea3f1cf43fee221b61f53978
|
[
"MIT"
] | 2
|
2021-08-07T09:02:21.000Z
|
2022-02-25T09:30:22.000Z
|
FeView/pstaticwidget.py
|
motiurce/FeView
|
8897b37062be88dd5ead2c8524f6b3b73451e25d
|
[
"MIT"
] | 7
|
2021-04-09T02:32:25.000Z
|
2022-03-12T15:21:45.000Z
|
from PyQt5.QtWidgets import *
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
| 46.875
| 89
| 0.728889
|
071b9acd086c7ba6412ea5c6a8e8d3fc44d05f5c
| 1,719
|
py
|
Python
|
pyallocation/solvers/exhaustive.py
|
julesy89/pyallocation
|
af80a8e2367a006121dd0702b55efa7b954bb039
|
[
"Apache-2.0"
] | null | null | null |
pyallocation/solvers/exhaustive.py
|
julesy89/pyallocation
|
af80a8e2367a006121dd0702b55efa7b954bb039
|
[
"Apache-2.0"
] | null | null | null |
pyallocation/solvers/exhaustive.py
|
julesy89/pyallocation
|
af80a8e2367a006121dd0702b55efa7b954bb039
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from pymoo.core.algorithm import Algorithm
from pymoo.core.population import Population
from pymoo.util.termination.no_termination import NoTermination
from pyallocation.allocation import FastAllocation
from pyallocation.problem import AllocationProblem
| 26.859375
| 75
| 0.623618
|
071d3f55a7b2c99140b70a77b17ee7b9f4ba705d
| 602
|
py
|
Python
|
config.py
|
yasminbraga/ufopa-reports
|
6d8b213eb0dfce6775d0bb0fd277e8dc09da041c
|
[
"MIT"
] | null | null | null |
config.py
|
yasminbraga/ufopa-reports
|
6d8b213eb0dfce6775d0bb0fd277e8dc09da041c
|
[
"MIT"
] | null | null | null |
config.py
|
yasminbraga/ufopa-reports
|
6d8b213eb0dfce6775d0bb0fd277e8dc09da041c
|
[
"MIT"
] | 2
|
2019-11-24T13:30:35.000Z
|
2022-01-12T11:47:11.000Z
|
import os
| 27.363636
| 212
| 0.752492
|
071dbe42fd5b14449158462daf2a890df418a73d
| 2,651
|
py
|
Python
|
heat/api/openstack/v1/views/stacks_view.py
|
noironetworks/heat
|
7cdadf1155f4d94cf8f967635b98e4012a7acfb7
|
[
"Apache-2.0"
] | 265
|
2015-01-02T09:33:22.000Z
|
2022-03-26T23:19:54.000Z
|
heat/api/openstack/v1/views/stacks_view.py
|
noironetworks/heat
|
7cdadf1155f4d94cf8f967635b98e4012a7acfb7
|
[
"Apache-2.0"
] | 8
|
2015-09-01T15:43:19.000Z
|
2021-12-14T05:18:23.000Z
|
heat/api/openstack/v1/views/stacks_view.py
|
noironetworks/heat
|
7cdadf1155f4d94cf8f967635b98e4012a7acfb7
|
[
"Apache-2.0"
] | 295
|
2015-01-06T07:00:40.000Z
|
2021-09-06T08:05:06.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from heat.api.openstack.v1 import util
from heat.api.openstack.v1.views import views_common
from heat.rpc import api as rpc_api
# Collection name for stack resources -- presumably used when building
# links/paged URLs by the views_common helpers; TODO confirm at call sites.
_collection_name = 'stacks'

# Stack attributes (rpc_api keys) exposed in the summarized stack view;
# keys not listed here would only appear in detailed output -- TODO
# confirm against the formatting function that consumes this tuple.
basic_keys = (
    rpc_api.STACK_ID,
    rpc_api.STACK_NAME,
    rpc_api.STACK_DESCRIPTION,
    rpc_api.STACK_STATUS,
    rpc_api.STACK_STATUS_DATA,
    rpc_api.STACK_CREATION_TIME,
    rpc_api.STACK_DELETION_TIME,
    rpc_api.STACK_UPDATED_TIME,
    rpc_api.STACK_OWNER,
    rpc_api.STACK_PARENT,
    rpc_api.STACK_USER_PROJECT_ID,
    rpc_api.STACK_TAGS,
)
| 33.556962
| 78
| 0.659751
|
071ec6aa5cdf0ac5081a189dd02a7abf4954448d
| 3,571
|
py
|
Python
|
pykrev/formula/find_intersections.py
|
Kzra/pykrev
|
1a328fccded962f309e951c8509b87a82c3d3ae6
|
[
"MIT"
] | 4
|
2021-02-18T10:19:13.000Z
|
2021-10-04T16:17:30.000Z
|
pykrev/formula/find_intersections.py
|
erikafreeman/pykrev
|
1a328fccded962f309e951c8509b87a82c3d3ae6
|
[
"MIT"
] | null | null | null |
pykrev/formula/find_intersections.py
|
erikafreeman/pykrev
|
1a328fccded962f309e951c8509b87a82c3d3ae6
|
[
"MIT"
] | 1
|
2021-09-23T16:03:03.000Z
|
2021-09-23T16:03:03.000Z
|
import itertools
import numpy as np
import pandas as pd
def find_intersections(formula_lists, group_labels, exclusive=True):
    """
    Docstring for function pyKrev.find_intersections
    ====================
    This function compares n lists of molecular formula and outputs a dictionary containing the intersections between each list.

    Use
    ----
    find_intersections([list_1,..,list_n],['group_1',...,'group_n'])

    Returns a dictionary in which each key corresponds to a combination of group labels
    and the corresponding value is a set containing the intersections between the groups in that combination.

    Parameters
    ----------
    formula_lists: a list containing n lists of molecular formula. Each item in the sub list should be a formula string.
    group_labels: a list containing n strings of corresponding group labels.
    exclusive: True or False, depending on whether you want the intersections to contain only unique values.

    Raises
    ------
    ValueError: if formula_lists and group_labels differ in length.
    """
    if len(formula_lists) != len(group_labels):
        # The original raised `InputError`, which is undefined in this
        # module and produced a NameError at runtime.
        raise ValueError('formula_lists and group_labels must be of equal length')
    combinations = [seq for i in range(0, len(group_labels) + 1)
                    for seq in itertools.combinations(group_labels, i) if len(seq) > 0]
    combinations = sorted(combinations, key=lambda c: len(c), reverse=True)  # sort combinations by length
    if exclusive:
        assigned_formula = set()  # formula already claimed by a larger combination
    amb = pd.DataFrame(data=formula_lists).T
    amb.columns = group_labels
    intersections = dict()
    for combo in combinations:
        queries = []
        for c in combo:
            formula = list(filter(None, amb[c]))  # remove None entries introduced by the dataframe
            queries.append(set(formula))
        if len(queries) == 1:
            # Single group: keep only the formula unique to this group.
            q_set = frozenset(queries[0])  # frozen so it is not mutated by changes to queries[0]
            for f_list in formula_lists:
                set_f = frozenset(f_list)
                if set_f == q_set:
                    pass  # skip the set that corresponds to the query itself
                else:
                    queries[0] = queries[0] - set_f  # delete any repeated elements
            intersections[combo] = queries[0]
        elif len(queries) > 1:
            # Builtin set.intersection replaces the hand-rolled recursive
            # helper used previously.
            q_intersect = set.intersection(*queries)
            if exclusive:
                # Remove elements already claimed by a larger combination.
                intersections[combo] = q_intersect - assigned_formula
                assigned_formula.update(q_intersect)
            else:
                intersections[combo] = q_intersect
    return intersections
def intersect(samples, counter=0):
    """ This command finds the intersection between a variable number of sets given in samples.
    Where samples = [set_1,set_2,...,set_n] """
    if len(samples) == 1:
        return samples[0]
    # Fold the sets from `counter` onward together with repeated `&`.
    result = samples[counter]
    for other in samples[counter + 1:]:
        result = result & other
    return result
| 46.376623
| 143
| 0.633436
|
071ee3300e784ba72ea76c1cd34d240a111eb588
| 5,386
|
py
|
Python
|
Create Playlist.py
|
j4ck64/PlaylistDirectories
|
4a7caf0923620a84aea9bb91e643011e7ee118db
|
[
"MIT"
] | null | null | null |
Create Playlist.py
|
j4ck64/PlaylistDirectories
|
4a7caf0923620a84aea9bb91e643011e7ee118db
|
[
"MIT"
] | null | null | null |
Create Playlist.py
|
j4ck64/PlaylistDirectories
|
4a7caf0923620a84aea9bb91e643011e7ee118db
|
[
"MIT"
] | null | null | null |
import os
import glob
import shutil
from tinytag import TinyTag
# NOTE(review): this script scans a drive for .mp3 files, drops tracks
# with missing tags, tries to remove lower-bitrate duplicates, then
# attempts to rebuild an .m3u playlist with full file paths.  Several
# passages below look broken -- see the inline NOTE(review) comments.
""" root = 'C:/'
copy_to = '/copy to/folder'
tag = TinyTag.get('C:/Users/jchap/OneDrive/Pictures/(VERYRAREBOYZ) (feat. $ki Mask The Slump God and Drugz).mp3')
print(tag.artist)
print('song duration: '+str(tag.duration))
 """
f = []
f=glob.glob('C:/Users/jchap/OneDrive/*.mp3')
print(f)
musicDirectory=[]
musicFiles =[]
# tag = TinyTag.get(f[0])
# print(tag.artist)
# for root, dirs, files in os.walk("C:/Users/jchap/OneDrive/"):
# Walk the whole drive collecting every .mp3 path (slow on big drives).
for root, dirs, files in os.walk("C:/"):
    for file in files:
        if file.endswith(".mp3"):
            musicFiles.append(file)
            musicDirectory.append(os.path.join(root, file))
            #print(os.path.join(root, file))
print('files'+str(musicFiles))
tag = TinyTag.get(musicDirectory[0])
print('Artist',tag.artist)
print('Album Artist',tag.albumartist)
print('Title',tag.title)
print('Biterate',tag.bitrate)
print('music directory'+str(musicDirectory))
print(len(musicDirectory))
currentDirectory =os.path.dirname(__file__)
with open(currentDirectory+'/The_Krabby_Patty Formula_.m3u', "r") as f:
    content_list = [word.strip() for word in f]
""" my_file = open(currentDirectory+'/The_Krabby_Patty Formula_.m3u', "r")
content_list = my_file. readlines() """
# print('playlist contents')
# print(content_list)
# NOTE(review): bare expression below has no effect; leftover debris.
musicDirectory
musicWithoutDuplicates = []
duplicatesList = []
count =0
# check for tags equal to none
#musicDirectory =[x for x in musicDirectory j = TinyTag.get(x) if x != 'wdg']
#remove tracks without albumn artist or title
# NOTE(review): iterating in reverse while calling list.remove() works
# here only because removal shifts elements the loop has already passed.
for track in reversed(range(len(musicDirectory))):
    try:
        trackTag = TinyTag.get(musicDirectory[track])
        if str(trackTag.albumartist)== 'None' or str(trackTag.title)=='None':
            print('albumArtist = none',musicDirectory[track])
            print('removing track and adding to log file')
            musicDirectory.remove(musicDirectory[track])
    except IndexError:
        break
#check for duplicates
# NOTE(review): O(n^2) pass that re-reads tags for every pair; caching
# TinyTag.get results keyed by path would avoid repeated file reads.
for j in range(len(musicDirectory)):
    musicDtag = TinyTag.get(musicDirectory[j])
    duplicateL=[]
    duplicateLBiterate=[]
    for duplicate in range(len(musicDirectory)):
        duplicateTag = TinyTag.get(musicDirectory[duplicate])
        musicWithoutDuplicates.append(musicDirectory[j])
        if duplicateTag.albumartist == musicDtag.albumartist or duplicateTag.albumartist in musicDtag.albumartist:
            if duplicateTag.title == musicDtag.title or duplicateTag.title in musicDtag.title :
                #check if last iteration
                if duplicate>=len(musicDirectory)-1:
                    print("found a duplicate!",musicDirectory[duplicate],duplicateTag.albumartist,duplicateTag.title)
                    if len(duplicateLBiterate)==1:## did something here may need to change the conditional statement or add another
                        print('biterate')
                        #[x for x in duplicateL if TinyTag.get(musicDirectory[x]).bitrate > musicDirectory[x]]
                        print("Current duplicate Bite rate", duplicateLBiterate)
                        for x in range(len(duplicateL)):
                            if TinyTag.get(duplicateL[x]).bitrate == max(duplicateLBiterate):
                                #REMOVE ONE WITH THE BEST BITERATE
                                duplicateL.remove(duplicateL[x])
                                print('duplicate list',duplicateL)
                    #Add
                    duplicatesList = duplicatesList + duplicateL
                else:
                    print("found a duplicate!",musicDirectory[duplicate],duplicateTag.albumartist,duplicateTag.title)
                    duplicateL.append(musicDirectory[duplicate])
                    duplicateLBiterate.append(duplicateTag.bitrate)
print('dup ',duplicatesList)
#remove duplicates from list
# NOTE(review): removing from musicDirectory while indexing it by a
# precomputed range can skip elements or raise IndexError; building a
# filtered copy would be safe.
for u in range(len(duplicatesList)):
    for i in range(len(musicDirectory)):
        if duplicatesList[u]==musicDirectory[i]:
            musicDirectory.remove(musicDirectory[i])
print('music ',musicDirectory)
#create playlist
newPlaylist = open("Test.m3u", "w")
#add file path to the respective track in the new playlist
for content in enumerate(content_list):
    # split strings into artist and title
    trackNumber=content[0]
    trackArray =str(content[1]).split('-')
    albumArtist= trackArray[0].strip()
    title=trackArray[1].strip()
    print('title:',title)
    print('albumArtist:',albumArtist)
    for trackDirectory in range(len(musicDirectory)):
        trackTag = TinyTag.get(musicDirectory[trackDirectory])
        if trackTag.albumartist == albumArtist or trackTag.albumartist in albumArtist:
            if trackTag.title == title or trackTag.title in title:
                # NOTE(review): TypeError if reached -- trackDirectory is an
                # int index and content is an (index, line) tuple; this
                # presumably meant musicDirectory[trackDirectory] and
                # content[1].  Also closes the file after the first match,
                # so later iterations would fail to write.
                newPlaylist.write(trackDirectory + " " + content)
                newPlaylist.close()
                # NOTE(review): tuples have no .next(); this looks like a
                # leftover Python 2 iterator idiom and would raise
                # AttributeError if reached.
                try:
                    while True:
                        content.next()
                except StopIteration:
                    pass
                break
            else:
                print()
        else:
            print()
| 35.668874
| 133
| 0.604716
|
071fd543532fedf42da52e8b37bdf2f56e668e0e
| 1,636
|
py
|
Python
|
PyBank/main.py
|
Alexis-Kepano/python_challenge
|
2d86e0d891c549d5fba99bd48d612be80746e34b
|
[
"ADSL"
] | null | null | null |
PyBank/main.py
|
Alexis-Kepano/python_challenge
|
2d86e0d891c549d5fba99bd48d612be80746e34b
|
[
"ADSL"
] | null | null | null |
PyBank/main.py
|
Alexis-Kepano/python_challenge
|
2d86e0d891c549d5fba99bd48d612be80746e34b
|
[
"ADSL"
] | null | null | null |
# import modules
import os
import csv

# input: monthly records with columns (Date, Profit/Losses)
csvpath = os.path.join('Resources', 'budget_data.csv')
# output: text report of the analysis
outfile = os.path.join('Analysis', 'pybankstatements.txt')

# declare variables (one per line; the original crammed these onto a
# single semicolon-separated line)
months = []             # month labels, excluding the first data row
total_m = 1             # month count; starts at 1 to count the first row
net_total = 0           # running sum of the Profit/Losses column
total_change = 0        # running sum of month-over-month changes
monthly_changes = []    # individual month-over-month changes
greatest_inc = ['', 0]  # [month, amount] of the largest increase
greatest_dec = ['', 0]  # [month, amount] of the largest decrease

# open & read csv
with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    header = next(csvreader)
    # The first data row seeds the running total and the "previous"
    # value used for change computation.
    first_row = next(csvreader)
    previous_row = int(first_row[1])
    net_total = int(first_row[1])

    # loop over the remaining rows tracking totals and extremes
    for row in csvreader:
        net_total += int(row[1])
        total_m = total_m + 1
        current_value = int(row[1])
        change_value = int(current_value - previous_row)
        monthly_changes.append(change_value)
        months.append(row[0])
        previous_row = int(row[1])
        total_change = total_change + change_value

        # NOTE: initial sentinel of 0 means a dataset whose changes are
        # all negative (or all positive) leaves one extreme as ('', 0).
        if change_value > greatest_inc[1]:
            greatest_inc[0] = str(row[0])
            greatest_inc[1] = change_value

        if change_value < greatest_dec[1]:
            greatest_dec[0] = str(row[0])
            greatest_dec[1] = change_value

# average month-over-month change (first row has no change, so divide
# by the number of *changes*, i.e. len(months))
avg_change = total_change / len(months)

output = (
    f"\n Financial Analysis \n"
    f"------------------------------\n"
    f"Total Months: {total_m}\n"
    f"Total: ${net_total}\n"
    f"Average Change: ${avg_change:.2f}\n"
    f"Greatest Increase in Profits: {greatest_inc[0]} (${greatest_inc[1]})\n"
    f"Greatest Decrease in Profits: {greatest_dec[0]} (${greatest_dec[1]})\n")

# write the report (the original also ended with a dangling bare
# `outfile` expression, which was a no-op and has been removed)
with open(outfile, "w") as txt_file:
    txt_file.write(output)
| 28.206897
| 127
| 0.621027
|
072012e3a0677e91ae06d829a2d1c70bfa487fe4
| 1,502
|
py
|
Python
|
bot/constants/messages.py
|
aasw0ng/thornode-telegram-bot
|
5f73b882381548f45fc9e690c6e4845def9600b7
|
[
"MIT"
] | 15
|
2020-04-21T07:51:26.000Z
|
2021-11-02T05:45:48.000Z
|
bot/constants/messages.py
|
aasw0ng/thornode-telegram-bot
|
5f73b882381548f45fc9e690c6e4845def9600b7
|
[
"MIT"
] | 78
|
2020-04-13T23:01:16.000Z
|
2021-05-09T11:46:25.000Z
|
bot/constants/messages.py
|
aasw0ng/thornode-telegram-bot
|
5f73b882381548f45fc9e690c6e4845def9600b7
|
[
"MIT"
] | 5
|
2020-09-03T21:19:16.000Z
|
2021-11-20T00:17:56.000Z
|
from enum import Enum
from constants.globals import HEALTH_EMOJIS
# Message shown when an upstream API endpoint cannot be reached.
NETWORK_ERROR = ' There was an error while getting data \nAn API endpoint is down!'
# Markdown legend explaining the HEALTH_EMOJIS symbols (True/False/None
# map to healthy/unhealthy/unknown).
HEALTH_LEGEND = f'\n*Node health*:\n{HEALTH_EMOJIS[True]} - *healthy*\n{HEALTH_EMOJIS[False]} - *unhealthy*\n' \
                f'{HEALTH_EMOJIS[None]} - *unknown*\n'
# Message used once the network is reported healthy again.
NETWORK_HEALTHY_AGAIN = "The network is safe and efficient again! "
| 35.761905
| 112
| 0.631158
|
07201c5460a410eeac1f4cdd74f83fabb16f4ba2
| 3,993
|
py
|
Python
|
src/interactive_conditional_samples.py
|
RanHerOver/cometaai
|
02d459da5bbc58536112cfe6343f5ceef4ff2356
|
[
"MIT"
] | null | null | null |
src/interactive_conditional_samples.py
|
RanHerOver/cometaai
|
02d459da5bbc58536112cfe6343f5ceef4ff2356
|
[
"MIT"
] | null | null | null |
src/interactive_conditional_samples.py
|
RanHerOver/cometaai
|
02d459da5bbc58536112cfe6343f5ceef4ff2356
|
[
"MIT"
] | null | null | null |
import random
import fire
import json
import os
import numpy as np
import tensorflow as tf
import pytumblr
import mysql.connector
import datetime
from random import seed
import model, sample, encoder
if __name__ == '__main__':
fire.Fire(interact_model())
| 30.953488
| 143
| 0.599048
|
0720bde47f5a6d668b162186b490b208d369a3a2
| 233
|
py
|
Python
|
desktop/core/ext-py/pyasn1-0.1.8/pyasn1/compat/iterfunc.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 422
|
2015-01-08T14:08:08.000Z
|
2022-02-07T11:47:37.000Z
|
desktop/core/ext-py/pyasn1-0.1.8/pyasn1/compat/iterfunc.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 581
|
2015-01-01T08:07:16.000Z
|
2022-02-23T11:44:37.000Z
|
desktop/core/ext-py/pyasn1-0.1.8/pyasn1/compat/iterfunc.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 115
|
2015-01-08T14:41:00.000Z
|
2022-02-13T12:31:17.000Z
|
from sys import version_info
if version_info[0] <= 2 and version_info[1] <= 4:
else:
all = all
| 21.181818
| 49
| 0.579399
|
072173681d53ec2482387460364698d940573600
| 3,839
|
py
|
Python
|
src/cms/carousels/serializers.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 6
|
2021-01-26T17:22:53.000Z
|
2022-02-15T10:09:03.000Z
|
src/cms/carousels/serializers.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 5
|
2020-12-24T14:29:23.000Z
|
2021-08-10T10:32:18.000Z
|
src/cms/carousels/serializers.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 2
|
2020-12-24T14:13:39.000Z
|
2020-12-30T16:48:52.000Z
|
from rest_framework import serializers
from cms.api.serializers import UniCMSContentTypeClass, UniCMSCreateUpdateSerializer
from cms.medias.serializers import MediaSerializer
from . models import Carousel, CarouselItem, CarouselItemLink, CarouselItemLinkLocalization, CarouselItemLocalization
| 35.546296
| 117
| 0.657202
|
072216b7c95085e52120d7afc6bcf448dd8b5843
| 7,298
|
py
|
Python
|
demos/colorization_demo/python/colorization_demo.py
|
mzegla/open_model_zoo
|
092576b4c598c1e301ebc38ad74b323972e54f3e
|
[
"Apache-2.0"
] | null | null | null |
demos/colorization_demo/python/colorization_demo.py
|
mzegla/open_model_zoo
|
092576b4c598c1e301ebc38ad74b323972e54f3e
|
[
"Apache-2.0"
] | null | null | null |
demos/colorization_demo/python/colorization_demo.py
|
mzegla/open_model_zoo
|
092576b4c598c1e301ebc38ad74b323972e54f3e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from openvino.runtime import Core, get_version
import cv2 as cv
import numpy as np
import logging as log
from time import perf_counter
import sys
from argparse import ArgumentParser, SUPPRESS
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python'))
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python/openvino/model_zoo'))
import monitors
from images_capture import open_images_capture
from model_api.performance_metrics import PerformanceMetrics
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.DEBUG, stream=sys.stdout)
if __name__ == "__main__":
args = build_arg().parse_args()
sys.exit(main(args) or 0)
| 43.183432
| 109
| 0.639216
|
07223524f59210dbb5356506e6de9ffb41f47883
| 8,174
|
py
|
Python
|
swagger_client/models/transfer.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/models/transfer.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/models/transfer.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.amount import Amount # noqa: F401,E501
def to_str(self):
    """Return the pretty-printed string of the model's dict form."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint` -- delegates to to_str()."""
    return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are equal.

    Equality is attribute-dict comparison; note this returns False
    (rather than NotImplemented) for non-Transfer operands, so the
    other operand's __eq__ is never consulted.
    """
    if not isinstance(other, Transfer):
        return False

    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Returns true if both objects are not equal (negation of __eq__);
    written out explicitly in Python 2 style by the code generator."""
    return not self == other
| 28.186207
| 277
| 0.565207
|
07224ff81e97b5ee51932d0d9bca20ab01f96757
| 10,366
|
py
|
Python
|
external/trappy/tests/test_caching.py
|
vdonnefort/lisa
|
38e5f246e6c94201a60a8698e7f29277f11c425e
|
[
"Apache-2.0"
] | 1
|
2020-11-30T16:14:02.000Z
|
2020-11-30T16:14:02.000Z
|
external/trappy/tests/test_caching.py
|
vdonnefort/lisa
|
38e5f246e6c94201a60a8698e7f29277f11c425e
|
[
"Apache-2.0"
] | null | null | null |
external/trappy/tests/test_caching.py
|
vdonnefort/lisa
|
38e5f246e6c94201a60a8698e7f29277f11c425e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015-2017 ARM Limited, Google and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from builtins import chr
import os
import json
import shutil
import sys
import unittest
import utils_tests
import trappy
from trappy.ftrace import GenericFTrace
from trappy.systrace import SysTrace
| 38.535316
| 88
| 0.644125
|
07229c65c61816346ca75d9d08af09c5eb62b6ff
| 6,813
|
py
|
Python
|
src/mf_horizon_client/client/pipelines/blueprints.py
|
MF-HORIZON/mf-horizon-python-client
|
67a4a094767cb8e5f01956f20f5ca7726781614a
|
[
"MIT"
] | null | null | null |
src/mf_horizon_client/client/pipelines/blueprints.py
|
MF-HORIZON/mf-horizon-python-client
|
67a4a094767cb8e5f01956f20f5ca7726781614a
|
[
"MIT"
] | null | null | null |
src/mf_horizon_client/client/pipelines/blueprints.py
|
MF-HORIZON/mf-horizon-python-client
|
67a4a094767cb8e5f01956f20f5ca7726781614a
|
[
"MIT"
] | null | null | null |
from enum import Enum
| 39.842105
| 122
| 0.57405
|
0723f800260b47fe29201f275a3497c9e0250212
| 6,758
|
py
|
Python
|
pyChess/olaf/views.py
|
An-Alone-Cow/pyChess
|
2729a3a89e4d7d79659488ecb1b0bff9cac281a3
|
[
"MIT"
] | null | null | null |
pyChess/olaf/views.py
|
An-Alone-Cow/pyChess
|
2729a3a89e4d7d79659488ecb1b0bff9cac281a3
|
[
"MIT"
] | 18
|
2017-02-05T17:52:41.000Z
|
2017-02-16T09:04:39.000Z
|
pyChess/olaf/views.py
|
An-Alone-Cow/pyChess
|
2729a3a89e4d7d79659488ecb1b0bff9cac281a3
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.utils import timezone
from olaf.models import *
from olaf.forms import *
from olaf.utility import usertools
from olaf.chess.controller import proccess_move
# Dispatch table for the generic form-handling views in this module (the view
# functions themselves appear below this table). Each value appears to be a
# 6-tuple consumed positionally:
#   (handler callable, form class, template path, extra context dict,
#    redirect URL name on success, success context dict)
# TODO(review): confirm the positional meaning against the view code that
# unpacks these tuples.
form_operation_dict = {
    'login': (
        usertools.login_user,
        LoginForm,
        'olaf/login.html',
        {},
        'index',
        {'message': "You're logged in. :)"},
    ),
    'register': (
        usertools.register_user,
        RegisterForm,
        'olaf/register.html',
        {},
        'index',
        {'message': "An activation email has been sent to you"},
    ),
    'password_reset_request': (
        usertools.init_pass_reset_token,
        ForgotPasswordUsernameOrEmailForm,
        'olaf/password_reset_request.html',
        {},
        'index',
        {'message': "An email containing the password reset link will be sent to your email"},
    ),
    'reset_password': (
        usertools.reset_password_action,
        PasswordChangeForm,
        'olaf/reset_password.html',
        {},
        'olaf:login',
        {'message': "Password successfully changed, you can login now"},
    ),
    'resend_activation_email': (
        usertools.resend_activation_email,
        ResendActivationUsernameOrEmailForm,
        'olaf/resend_activation_email.html',
        {},
        'index',
        {'message': "Activation email successfully sent to your email"},
    ),
}
#view functions
| 31.877358
| 175
| 0.683042
|
072578f31e8482a3127fc3b417aa642b8388a425
| 2,343
|
py
|
Python
|
ce_vae_test/main_cetrainer.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | null | null | null |
ce_vae_test/main_cetrainer.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | 6
|
2021-02-02T23:00:02.000Z
|
2022-01-13T03:13:51.000Z
|
ce_vae_test/main_cetrainer.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import torch
import torch.utils.data
import matplotlib.pyplot as plt
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.utils.tensorboard import SummaryWriter
from ce_vae_test.networks.min_vae import MinVae
from ce_vae_test.trainer.ce_trainer import CeVaeTrainer
from ce_vae_test.sampler.dataset_sampler import SamplerDatasetWithReplacement
# Command-line options for this training script.
# NOTE(review): --epochs and --log-interval are parsed but never used below;
# the trainer hard-codes num_epochs=300. Confirm which is authoritative.
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
# Use CUDA only when available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Fix the RNG seed for reproducibility; pick the compute device.
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
writer = SummaryWriter()
# NOTE(review): kwargs (DataLoader worker/pinning options) is built but never
# passed to anything below — confirm whether the samplers should receive it.
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# Training sampler over MNIST (downloads to ../data on first run).
train_sampler = SamplerDatasetWithReplacement(
    dataset=datasets.MNIST('../data',
                           train=True,
                           download=True,
                           transform=transforms.ToTensor()),
    batch_size=args.batch_size
)
# Evaluation sampler; batches are 10x larger than the training batches.
test_sampler = SamplerDatasetWithReplacement(
    dataset=datasets.MNIST('../data',
                           train=False,
                           transform=transforms.ToTensor()),
    batch_size=args.batch_size * 10
)
# Minimal VAE: flattened 28x28 input, 10-way output, 2-D latent space,
# a single decoder hidden layer of width 5.
cevae = MinVae(
    input_size=28 * 28,
    output_size=10,
    latent_dim=2,
    hidden_sizes_dec=[5],
    device=device
).to(device)
# Trainer wiring; alpha/lamda are loss-weighting hyperparameters —
# presumably CE/VAE trade-off terms; confirm against CeVaeTrainer.
trainer = CeVaeTrainer(
    vae=cevae,
    num_epochs=300,
    train_loader=train_sampler,
    test_loader=test_sampler,
    writer=writer,
    device=device,
    alpha=0.90,
    lamda=0.22
)
trainer.run()
| 32.09589
| 83
| 0.681605
|
07257aac63bf6240cc82f0f082448d6a6953f3dc
| 1,567
|
py
|
Python
|
appr/commands/logout.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 31
|
2017-07-05T07:25:31.000Z
|
2021-01-18T22:21:57.000Z
|
appr/commands/logout.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 48
|
2017-06-27T15:48:29.000Z
|
2021-01-26T21:02:27.000Z
|
appr/commands/logout.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 17
|
2017-07-05T07:25:38.000Z
|
2021-01-20T14:52:29.000Z
|
from __future__ import absolute_import, division, print_function
from appr.auth import ApprAuth
from appr.commands.command_base import CommandBase, PackageSplit
| 36.44186
| 94
| 0.640077
|
072580ae43bbd8ecd21160183d85274cfcb19e54
| 87
|
py
|
Python
|
musica/apps.py
|
webnowone/albumMusical
|
b9532ff0ef47b610f0f2b565f0dd77e54d638772
|
[
"Apache-2.0"
] | 1
|
2021-02-02T03:58:48.000Z
|
2021-02-02T03:58:48.000Z
|
musica/apps.py
|
webnowone/albumMusical
|
b9532ff0ef47b610f0f2b565f0dd77e54d638772
|
[
"Apache-2.0"
] | 52
|
2020-02-25T09:56:54.000Z
|
2021-09-22T18:40:50.000Z
|
musica/apps.py
|
webnowone/albumMusical
|
b9532ff0ef47b610f0f2b565f0dd77e54d638772
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
| 14.5
| 33
| 0.747126
|
072775cafe9ec9921c429b5df6eb75f74e95605d
| 10,370
|
py
|
Python
|
tzwhere/tzwhere.py
|
tuxiqae/pytzwhere
|
32d2bef9ff2d784741471fddb35fbb6732f556d5
|
[
"MIT"
] | 115
|
2015-01-09T06:18:19.000Z
|
2021-12-28T07:07:45.000Z
|
tzwhere/tzwhere.py
|
tuxiqae/pytzwhere
|
32d2bef9ff2d784741471fddb35fbb6732f556d5
|
[
"MIT"
] | 47
|
2015-04-15T20:23:44.000Z
|
2022-03-22T11:25:01.000Z
|
tzwhere/tzwhere.py
|
tuxiqae/pytzwhere
|
32d2bef9ff2d784741471fddb35fbb6732f556d5
|
[
"MIT"
] | 46
|
2015-01-26T16:42:10.000Z
|
2022-01-04T15:26:57.000Z
|
#!/usr/bin/env python
'''tzwhere.py - time zone computation from latitude/longitude.
Ordinarily this is loaded as a module and instances of the tzwhere
class are instantiated and queried directly
'''
import collections
try:
import ujson as json # loads 2 seconds faster than normal json
except:
try:
import json
except ImportError:
import simplejson as json
import math
import gzip
import os
import shapely.geometry as geometry
import shapely.prepared as prepared
# We can save about 222MB of RAM by turning our polygon lists into
# numpy arrays rather than tuples, if numpy is installed.
try:
    import numpy
    WRAP = numpy.asarray
    COLLECTION_TYPE = numpy.ndarray
except ImportError:
    # numpy unavailable: fall back to plain tuples. Functionally equivalent,
    # just more memory-hungry.
    WRAP = tuple
    COLLECTION_TYPE = tuple
# for navigation and pulling values/files
this_dir, this_filename = os.path.split(__file__)
BASE_DIR = os.path.dirname(this_dir)  # parent directory of the package dir
def read_tzworld(path):
    """Load tzworld data from *path*; currently always parsed as gzipped JSON."""
    return read_json(path)
def read_json(path):
    """Read a gzip-compressed JSON file and return the parsed object.

    The file at *path* is decompressed, decoded as UTF-8 and passed to the
    module's ``json`` loader (ujson when available, stdlib otherwise).
    """
    with gzip.open(path, "rb") as handle:
        raw = handle.read().decode("utf-8")
    return json.loads(raw)
def feature_collection_polygons(featureCollection):
    """Iterate over the polygons of a GeoJSON-style feature collection.

    For every feature whose geometry type is ``'Polygon'``, yields a
    ``(tzname, (exterior, interiors))`` pair where ``tzname`` is the
    feature's ``TZID`` property, ``exterior`` is the first coordinate ring
    and ``interiors`` is the list of remaining (hole) rings. Features with
    any other geometry type are skipped.
    """
    for feature in featureCollection['features']:
        tzname = feature['properties']['TZID']
        geom = feature['geometry']
        if geom['type'] != 'Polygon':
            continue
        rings = geom['coordinates']
        yield (tzname, (rings[0], rings[1:]))
if __name__ == "__main__":
    # NOTE(review): prepareMap() is not defined in this module (nor imported
    # anywhere visible here) — running this file as a script raises NameError.
    # Confirm the intended entry point.
    prepareMap()
| 39.884615
| 130
| 0.610993
|
07283cf3af01e90d346a6f3a53d9608574682da0
| 706
|
py
|
Python
|
tests/home_assistant/custom_features.py
|
jre21/mindmeld
|
6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81
|
[
"Apache-2.0"
] | 1
|
2021-01-06T23:39:57.000Z
|
2021-01-06T23:39:57.000Z
|
tests/home_assistant/custom_features.py
|
jre21/mindmeld
|
6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81
|
[
"Apache-2.0"
] | 1
|
2021-02-02T22:53:01.000Z
|
2021-02-02T22:53:01.000Z
|
tests/home_assistant/custom_features.py
|
jre21/mindmeld
|
6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81
|
[
"Apache-2.0"
] | null | null | null |
from mindmeld.models.helpers import register_query_feature
| 35.3
| 89
| 0.729462
|
072aa22d56a355822d78b2d3df97e983fe4fb836
| 4,783
|
py
|
Python
|
source/statuscodes.py
|
woody2371/fishbowl-api
|
f34ff9267436b1278985870fbf19863febdb391b
|
[
"MIT"
] | 6
|
2016-04-26T01:24:21.000Z
|
2021-05-13T07:48:15.000Z
|
source/statuscodes.py
|
USDev01/fishbowl-api
|
4d47e20d3385d5ebc001feec44aad321467a6d92
|
[
"MIT"
] | 3
|
2015-10-29T21:34:39.000Z
|
2021-11-08T15:22:30.000Z
|
source/statuscodes.py
|
USDev01/fishbowl-api
|
4d47e20d3385d5ebc001feec44aad321467a6d92
|
[
"MIT"
] | 12
|
2015-02-20T08:21:05.000Z
|
2021-11-06T22:27:04.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
| 37.367188
| 177
| 0.572653
|
072b5dc94ce99e8f35df268838491f8bfa5d061f
| 239
|
py
|
Python
|
app/src/server/hoge/hoge_api.py
|
jacob327/docker-flask-nginx-uwsgi-mysql
|
4b0731f746d6fda7bfecd082ddef53a9c5ec8f75
|
[
"MIT"
] | null | null | null |
app/src/server/hoge/hoge_api.py
|
jacob327/docker-flask-nginx-uwsgi-mysql
|
4b0731f746d6fda7bfecd082ddef53a9c5ec8f75
|
[
"MIT"
] | null | null | null |
app/src/server/hoge/hoge_api.py
|
jacob327/docker-flask-nginx-uwsgi-mysql
|
4b0731f746d6fda7bfecd082ddef53a9c5ec8f75
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# [Import start]
from flask import Blueprint, jsonify
# [Import end]
# Flask blueprint for the "hoge" API; every route registered on this
# blueprint is served under the /hoge URL prefix.
# NOTE(review): jsonify is imported but unused in this snippet — presumably
# used by route handlers registered elsewhere; confirm before removing.
app = Blueprint(
    'hoge',
    __name__,
    url_prefix='/hoge'
)
| 13.277778
| 36
| 0.606695
|
072b648fd224e151f6b9509016ac18b01f0c89c9
| 2,383
|
py
|
Python
|
preinstall_setup/makedeb-11.0.1-1-stable/src/makedeb/utils/missing_apt_dependencies.py
|
chipbuster/Energy-Languages-Setup
|
5b6192e1cc73f701a2310ac72520ed540d86c1ae
|
[
"BSD-3-Clause"
] | null | null | null |
preinstall_setup/makedeb-11.0.1-1-stable/src/makedeb/utils/missing_apt_dependencies.py
|
chipbuster/Energy-Languages-Setup
|
5b6192e1cc73f701a2310ac72520ed540d86c1ae
|
[
"BSD-3-Clause"
] | null | null | null |
preinstall_setup/makedeb-11.0.1-1-stable/src/makedeb/utils/missing_apt_dependencies.py
|
chipbuster/Energy-Languages-Setup
|
5b6192e1cc73f701a2310ac72520ed540d86c1ae
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Print the apt dependency strings (from argv) that are not satisfied by
the currently installed packages, one per line on stdout."""
import apt_pkg
import sys

from apt_pkg import CURSTATE_INSTALLED, version_compare
from operator import lt, le, eq, ge, gt

# Map apt version-relationship operators to Python comparison functions.
# version_compare() returns <0 / 0 / >0, so each operator is applied as
# op(version_compare(installed, wanted), 0).
relation_operators = {"<<": lt, "<=": le, "=": eq, ">=": ge, ">>": gt}

# Set up APT cache.
apt_pkg.init()
cache = apt_pkg.Cache(None)

missing_packages = []

for dependency in sys.argv[1:]:
    # Find the version-relationship operator, if any. Two-character
    # operators are checked first so "<=" is not misread as "<".
    relationship_operator = None
    for candidate in ["<=", ">=", "<", ">", "="]:
        if candidate in dependency:
            relationship_operator = candidate
            break

    if relationship_operator is not None:
        if relationship_operator in ["<", ">"]:
            # apt spells strict comparisons with doubled characters
            # ("<<" / ">>"); normalize to that form.
            relationship_operator_formatted = relationship_operator * 2
        else:
            relationship_operator_formatted = relationship_operator

        package = dependency.split(relationship_operator)
        pkgname = package[0]
        pkgver = package[1]
        package_string = f"{pkgname} ({relationship_operator_formatted} {pkgver})"
    else:
        pkgname = dependency
        pkgver = None
        package_string = pkgname

    # Check if the package is in the cache at all.
    try:
        pkg = cache[pkgname]
    except KeyError:
        missing_packages += [package_string]
        continue

    # Collect the package itself plus any installed packages that provide it.
    installed_pkg_versions = []
    if pkg.current_state == CURSTATE_INSTALLED:
        installed_pkg_versions += [pkg]
    for provided in pkg.provides_list:
        parent_pkg = provided[2].parent_pkg
        if parent_pkg.current_state == CURSTATE_INSTALLED:
            installed_pkg_versions += [parent_pkg]

    # An installed match with no version constraint satisfies the dependency.
    if (len(installed_pkg_versions) != 0) and (relationship_operator is None):
        continue

    # Otherwise, check whether any installed version satisfies the
    # specified relationship.
    matched_pkg = False
    for installed in installed_pkg_versions:
        installed_version = installed.current_ver.ver_str
        version_result = version_compare(installed_version, pkgver)
        if relation_operators[relationship_operator_formatted](version_result, 0):
            matched_pkg = True

    if not matched_pkg:
        missing_packages += [package_string]

for package_string in missing_packages:
    print(package_string)

# Fixes over the original: the outer loop variable was `i` and was shadowed
# by two inner `for i in ...` loops in the same iteration; the operator
# formatting reused `j` after its loop ended instead of the named
# `relationship_operator`; `exit(0)` replaced with the explicit sys.exit().
sys.exit(0)
| 29.419753
| 118
| 0.661771
|
072bd117dea823ba3412148c4dbda51e774d2a1f
| 11,707
|
py
|
Python
|
cohorts_proj/datasets/migrations/0009_auto_20200824_0617.py
|
zferic/harmonization-website
|
f6a081481df3a3a62cb075fbb63ad0470b0d4e06
|
[
"MIT"
] | 1
|
2020-09-20T02:32:01.000Z
|
2020-09-20T02:32:01.000Z
|
cohorts_proj/datasets/migrations/0009_auto_20200824_0617.py
|
zferic/harmonization-website
|
f6a081481df3a3a62cb075fbb63ad0470b0d4e06
|
[
"MIT"
] | 20
|
2020-04-17T14:01:41.000Z
|
2022-03-12T00:30:23.000Z
|
cohorts_proj/datasets/migrations/0009_auto_20200824_0617.py
|
zferic/harmonization-website
|
f6a081481df3a3a62cb075fbb63ad0470b0d4e06
|
[
"MIT"
] | 3
|
2020-10-08T00:24:51.000Z
|
2021-06-02T20:07:30.000Z
|
# Generated by Django 3.0.7 on 2020-08-24 06:17
from django.db import migrations, models
| 33.353276
| 154
| 0.522337
|
072ca26b4d1e4960c6363441b38a038bbb510a99
| 107
|
py
|
Python
|
test_hello.py
|
skvel/pynet_testx
|
46566e059e076cb763f8a10ed7f6ff9eac5b63b1
|
[
"Apache-2.0"
] | null | null | null |
test_hello.py
|
skvel/pynet_testx
|
46566e059e076cb763f8a10ed7f6ff9eac5b63b1
|
[
"Apache-2.0"
] | null | null | null |
test_hello.py
|
skvel/pynet_testx
|
46566e059e076cb763f8a10ed7f6ff9eac5b63b1
|
[
"Apache-2.0"
] | null | null | null |
# Smoke-test script: print a few greetings, then the numbers 0-9.
# Fixed: the original used Python-2-only `print` statements, which are a
# SyntaxError on Python 3. print() calls produce identical output on both
# Python 2 and Python 3.
print("Hello World!")
print("Trying my hand at Git!")
print("Something else")
for i in range(10):
    print(i)
| 17.833333
| 30
| 0.691589
|
072cc767332977c77810de1909be8f9a35cce2f6
| 3,784
|
py
|
Python
|
tasks/views.py
|
TheDim0n/ProjectManager
|
50d36e7e3fc71655aa5a82bb19eacc07172ba5e4
|
[
"MIT"
] | null | null | null |
tasks/views.py
|
TheDim0n/ProjectManager
|
50d36e7e3fc71655aa5a82bb19eacc07172ba5e4
|
[
"MIT"
] | 1
|
2020-09-08T11:10:53.000Z
|
2020-09-08T11:10:53.000Z
|
tasks/views.py
|
TheDim0n/ProjectManager
|
50d36e7e3fc71655aa5a82bb19eacc07172ba5e4
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic import DetailView, ListView
from projects.models import Project
from status.models import Status
from .models import Task
from .forms import TaskForm, FilterForm
| 31.798319
| 79
| 0.636628
|
072d2f9675748ff1a2131801c4afa2c1d8506223
| 2,083
|
py
|
Python
|
smoke/noaa/get_smokeplume_counts.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
smoke/noaa/get_smokeplume_counts.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
smoke/noaa/get_smokeplume_counts.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
"""Scan NOAA smoke-plume shapefiles under source_shape_file_path, round each
plume's start/end timestamps to the nearest hour, and write the collected
times to noaa_times.csv."""
import os
import math
import time
import geohash
import geojson
from geojson import MultiLineString
from shapely import geometry
import shapefile
import numpy
import datetime as dt
import pandas as pd
import logging

logger = logging.getLogger(__name__)

# Root directory; expected layout is <root>/<name>/<name>.shp
source_shape_file_path = "C:/temp/2018/"
# Rounding granularity for epoch timestamps: one hour, in seconds.
threshold = 60*60
cols = ['start', 'end','start_epoch_round','end_epoch_round','start_epoch_round_dt','end_epoch_round_dt']
times = []
for root, dirs, files in os.walk(source_shape_file_path):
    for fname in files:
        # Only shapefiles are of interest; the original opened every file in
        # text mode without ever using the handle, which has been removed.
        if not fname.endswith(".shp"):
            continue
        try:
            filename = fname.replace(".shp", "")
            # NOTE(review): the path is built from the top-level source dir
            # plus the file's base name rather than from `root` — this assumes
            # the <root>/<name>/<name>.shp layout; confirm for nested dirs.
            shape = shapefile.Reader(source_shape_file_path + filename + "/" + fname)
            for r in shape.iterRecords():
                # Record fields 1 and 2 hold start/end as 'YYYYDDD HHMM'.
                start_time = dt.datetime.strptime(r[1], '%Y%j %H%M')
                end_time = dt.datetime.strptime(r[2], '%Y%j %H%M')
                epoch_s = dt.datetime.timestamp(start_time)
                epoch_e = dt.datetime.timestamp(end_time)
                # sometimes start is later than end time, we'll assume the
                # earlier time is start
                epoch_end_round = round(max(epoch_s, epoch_e) / threshold) * threshold
                epoch_start_round = round(min(epoch_s, epoch_e) / threshold) * threshold
                epoch_end_round_dt = dt.datetime.utcfromtimestamp(3600 * ((max(epoch_s, epoch_e) + 1800) // 3600))
                epoch_start_round_dt = dt.datetime.utcfromtimestamp(3600 * ((min(epoch_s, epoch_e) + 1800) // 3600))
                times.append([start_time, end_time, epoch_start_round, epoch_end_round, epoch_start_round_dt, epoch_end_round_dt])
                # NOTE(review): only the first record of each shapefile is
                # recorded — confirm this is intentional.
                break
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed, and logger.exception() records the
            # traceback for diagnosis.
            logger.exception('failed to parse file:' + source_shape_file_path + filename + "/")
            continue

df = pd.DataFrame(times, columns=cols)
df.to_csv('noaa_times.csv')
| 45.282609
| 133
| 0.610178
|