hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3188b47960735f6a2d30024aca9b8fc438dc3613 | 20,651 | py | Python | Lib/gds/burp/config.py | mwielgoszewski/jython-burp-api | 002383f7acc5fb237e3804fe5bd2aa2950a0240d | [
"0BSD"
] | 134 | 2015-01-21T14:22:42.000Z | 2021-09-02T10:52:43.000Z | Lib/gds/burp/config.py | d453d2/burp-jython-console | 3cec3200ede2da0f1cdbf935efc340f073c07ea2 | [
"0BSD"
] | 7 | 2015-01-19T16:54:45.000Z | 2018-10-10T15:10:13.000Z | Lib/gds/burp/config.py | d453d2/burp-jython-console | 3cec3200ede2da0f1cdbf935efc340f073c07ea2 | [
"0BSD"
] | 29 | 2015-02-13T14:08:23.000Z | 2021-12-17T03:17:40.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from ConfigParser import ConfigParser
from copy import deepcopy
from inspect import cleandoc
import os.path
from .core import ExtensionPoint
__all__ = ['Configuration', 'ConfigSection', 'Option', 'BoolOption',
'IntOption', 'FloatOption', 'ListOption',
'OrderedExtensionsOption']
_use_default = object()
def as_bool(value):
"""Convert the given value to a `bool`.
If `value` is a string, return `True` for any of "yes", "true", "enabled",
"on" or non-zero numbers, ignoring case. For non-string arguments, return
the argument converted to a `bool`, or `False` if the conversion fails.
"""
if isinstance(value, basestring):
try:
return bool(float(value))
except ValueError:
return value.strip().lower() in ('yes', 'true', 'enabled', 'on')
try:
return bool(value)
except (TypeError, ValueError):
return False
def to_unicode(text, charset=None):
"""Convert input to an `unicode` object.
For a `str` object, we'll first try to decode the bytes using the given
`charset` encoding (or UTF-8 if none is specified), then we fall back to
the latin1 encoding which might be correct or not, but at least preserves
the original byte sequence by mapping each byte to the corresponding
unicode code point in the range U+0000 to U+00FF.
For anything else, a simple `unicode()` conversion is attempted,
with special care taken with `Exception` objects.
"""
if isinstance(text, str):
try:
return unicode(text, charset or 'utf-8')
except UnicodeDecodeError:
return unicode(text, 'latin1')
elif isinstance(text, Exception):
# two possibilities for storing unicode strings in exception data:
try:
# custom __str__ method on the exception (e.g. PermissionError)
return unicode(text)
except UnicodeError:
# unicode arguments given to the exception (e.g. parse_date)
return ' '.join([to_unicode(arg) for arg in text.args])
return unicode(text)
def _get_registry(cls, compmgr=None):
"""Return the descriptor registry.
If `compmgr` is specified, only return descriptors for components that
are enabled in the given `ComponentManager`.
"""
if compmgr is None:
return cls.registry
from .core import ComponentMeta
components = {}
for comp in ComponentMeta._components:
for attr in comp.__dict__.itervalues():
if isinstance(attr, cls):
components[attr] = comp
return dict(each for each in cls.registry.iteritems()
if each[1] not in components
or compmgr.is_enabled(components[each[1]]))
| 35.666667 | 79 | 0.619486 |
318a952b81c7d9540e9926622426293ecbdc84ee | 1,572 | py | Python | src/Application/PythonScriptModule/pymodules_old/apitest/rotate.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | null | null | null | src/Application/PythonScriptModule/pymodules_old/apitest/rotate.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | null | null | null | src/Application/PythonScriptModule/pymodules_old/apitest/rotate.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2021-09-04T12:37:34.000Z | 2021-09-04T12:37:34.000Z | import circuits
from PythonQt.QtGui import QQuaternion as Quat
from PythonQt.QtGui import QVector3D as Vec
import naali
COMPNAME = "rotation"
| 34.933333 | 115 | 0.600509 |
318bb2ce68ce930a154f36901f32368d1debcea3 | 728 | py | Python | pyqubo/package_info.py | caja-matematica/pyqubo | 5e5c9d1a36c756ba8c05eac23bbefe2ac369bce5 | [
"Apache-2.0"
] | 1 | 2018-10-11T08:51:02.000Z | 2018-10-11T08:51:02.000Z | pyqubo/package_info.py | kotarotanahashi/pyqubo | d2983b5f2b9e4ebee495c345326a1b2fd98f1c4f | [
"Apache-2.0"
] | null | null | null | pyqubo/package_info.py | kotarotanahashi/pyqubo | d2983b5f2b9e4ebee495c345326a1b2fd98f1c4f | [
"Apache-2.0"
] | null | null | null | # (major, minor, patch, prerelease)
VERSION = (0, 0, 6, "")
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
__package_name__ = 'pyqubo'
__contact_names__ = 'Recruit Communications Co., Ltd.'
__contact_emails__ = 'rco_pyqubo@ml.cocorou.jp'
__homepage__ = 'https://pyqubo.readthedocs.io/en/latest/'
__repository_url__ = 'https://github.com/recruit-communications/pyqubo'
__download_url__ = 'https://github.com/recruit-communications/pyqubo'
__description__ = 'PyQUBO allows you to create QUBOs or Ising models from mathematical expressions.'
__license__ = 'Apache 2.0'
__keywords__ = 'QUBO, quantum annealing, annealing machine, ising model, optimization'
| 45.5 | 100 | 0.75 |
318c27aa8cc3118e9c4a2079f233ea65d902fc9c | 19,702 | py | Python | bin/DBImportOperation/etl_operations.py | karlam123/DBImport | ebaf3f909841276d289bfb2f6eec0ecafa8395cf | [
"Apache-2.0"
] | null | null | null | bin/DBImportOperation/etl_operations.py | karlam123/DBImport | ebaf3f909841276d289bfb2f6eec0ecafa8395cf | [
"Apache-2.0"
] | null | null | null | bin/DBImportOperation/etl_operations.py | karlam123/DBImport | ebaf3f909841276d289bfb2f6eec0ecafa8395cf | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import re
import logging
import subprocess
import errno, os, pty
import shlex
from subprocess import Popen, PIPE
from ConfigReader import configuration
import mysql.connector
from mysql.connector import errorcode
from common.Singleton import Singleton
from DBImportConfig import import_config
from DBImportOperation import common_operations
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import time
| 40.372951 | 535 | 0.661963 |
318f1a187d9d522a7bfc65eda71b220b6ecec2a9 | 2,394 | py | Python | grvx/nodes/ieeg/read.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
] | 1 | 2021-11-25T08:12:48.000Z | 2021-11-25T08:12:48.000Z | grvx/nodes/ieeg/read.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
] | null | null | null | grvx/nodes/ieeg/read.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
] | null | null | null | from logging import getLogger
from numpy import mean, std
from pickle import dump
from wonambi import Dataset
from wonambi.trans import math, concatenate
from bidso import Task, Electrodes
lg = getLogger(__name__)
| 32.351351 | 130 | 0.651211 |
318fbfd55bdcd7ac71d0dc2747eb31643026f551 | 3,021 | py | Python | bin/analysis/ipa/constraints/split.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 6 | 2015-09-19T18:22:33.000Z | 2020-11-29T15:21:17.000Z | bin/analysis/ipa/constraints/split.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 1 | 2015-08-04T08:03:46.000Z | 2015-08-04T08:03:46.000Z | bin/analysis/ipa/constraints/split.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 1 | 2019-12-09T08:27:09.000Z | 2019-12-09T08:27:09.000Z | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from language.python import ast
from . base import Constraint
from .. calling import cpa
# TODO prevent over splitting? All objects with the same qualifier should be grouped?
| 23.97619 | 86 | 0.716319 |
3190a8381415a99ad9460b76cf488e2f1d88f6df | 2,202 | py | Python | tests/it/test_docker_image_tag.py | gfi-centre-ouest/docker-devbox-ddb | 1597d85ef6e9e8322cce195a454de54186ce9ec7 | [
"MIT"
] | 4 | 2020-06-11T20:54:47.000Z | 2020-09-22T13:07:17.000Z | tests/it/test_docker_image_tag.py | gfi-centre-ouest/docker-devbox-ddb | 1597d85ef6e9e8322cce195a454de54186ce9ec7 | [
"MIT"
] | 113 | 2019-11-07T00:40:36.000Z | 2021-01-18T12:50:16.000Z | tests/it/test_docker_image_tag.py | inetum-orleans/docker-devbox-ddb | 20c713cf7bfcaf289226a17a9648c17d16003b4d | [
"MIT"
] | null | null | null | import os
import zipfile
import yaml
from dotty_dict import Dotty
from pytest_mock import MockerFixture
from ddb.__main__ import main
from ddb.config import Config
from ddb.feature.version import is_git_repository
| 33.363636 | 103 | 0.672116 |
3190b0b0bcaf0d8c57b427dc97d503276394c78a | 5,806 | py | Python | bin/zeisel.py | bendemeo/ample | 4ca2688ff2b0e5c8f7dcbc0a1f4ddc8927fac670 | [
"MIT"
] | null | null | null | bin/zeisel.py | bendemeo/ample | 4ca2688ff2b0e5c8f7dcbc0a1f4ddc8927fac670 | [
"MIT"
] | null | null | null | bin/zeisel.py | bendemeo/ample | 4ca2688ff2b0e5c8f7dcbc0a1f4ddc8927fac670 | [
"MIT"
] | null | null | null | import numpy as np
import os
from scanorama import *
from scipy.sparse import vstack
from process import load_names
from experiments import *
from utils import *
NAMESPACE = 'zeisel'
METHOD = 'svd'
DIMRED = 100
data_names = [
'data/mouse_brain/zeisel/amygdala',
'data/mouse_brain/zeisel/cerebellum',
'data/mouse_brain/zeisel/cortex1',
'data/mouse_brain/zeisel/cortex2',
'data/mouse_brain/zeisel/cortex3',
'data/mouse_brain/zeisel/enteric',
'data/mouse_brain/zeisel/hippocampus',
'data/mouse_brain/zeisel/hypothalamus',
'data/mouse_brain/zeisel/medulla',
'data/mouse_brain/zeisel/midbraindorsal',
'data/mouse_brain/zeisel/midbrainventral',
'data/mouse_brain/zeisel/olfactory',
'data/mouse_brain/zeisel/pons',
'data/mouse_brain/zeisel/spinalcord',
'data/mouse_brain/zeisel/striatumdorsal',
'data/mouse_brain/zeisel/striatumventral',
'data/mouse_brain/zeisel/sympathetic',
'data/mouse_brain/zeisel/thalamus',
]
if __name__ == '__main__':
datasets, genes_list, n_cells = load_names(data_names, norm=False)
datasets, genes = merge_datasets(datasets, genes_list)
X = vstack(datasets)
if not os.path.isfile('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE)):
log('Dimension reduction with {}...'.format(METHOD))
X_dimred = reduce_dimensionality(
normalize(X), method=METHOD, dimred=DIMRED
)
log('Dimensionality = {}'.format(X_dimred.shape[1]))
np.savetxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE), X_dimred)
else:
X_dimred = np.loadtxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE))
from ample import gs, uniform, srs
#samp_idx = gs(X_dimred, 20000, replace=False)
#samp_idx = uniform(X_dimred, 20000, replace=False)
samp_idx = srs(X_dimred, 20000, replace=False)
#from anndata import AnnData
#import scanpy.api as sc
#adata = AnnData(X=X_dimred[samp_idx, :])
#sc.pp.neighbors(adata, use_rep='X')
#sc.tl.louvain(adata, resolution=1.5, key_added='louvain')
#
#louv_labels = np.array(adata.obs['louvain'].tolist())
#le = LabelEncoder().fit(louv_labels)
#cell_labels = le.transform(louv_labels)
#
#np.savetxt('data/cell_labels/zeisel_louvain.txt', cell_labels)
labels = (
open('data/cell_labels/zeisel_cluster.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(labels)
cell_labels = le.transform(labels)
experiments(
X_dimred, NAMESPACE, n_seeds=2,
cell_labels=cell_labels,
kmeans_ami=True, louvain_ami=True,
rare=True,
rare_label=le.transform(['Ependymal'])[0],
)
exit()
embedding = visualize(
[ X_dimred[samp_idx, :] ], cell_labels[samp_idx],
NAMESPACE + '_srs{}'.format(len(samp_idx)),
[ str(ct) for ct in sorted(set(cell_labels)) ],
perplexity=100, n_iter=500, image_suffix='.png',
viz_cluster=True
)
exit()
cell_labels = (
open('data/cell_labels/zeisel_louvain.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(cell_labels)
cell_labels = le.transform(cell_labels)
astro = set([ 32, 38, 40, ])
oligo = set([ 2, 5, 12, 20, 23, 33, 37, ])
focus = set([ 15, 36, 41 ])
labels = []
aob_labels = []
for cl in cell_labels:
if cl in focus:
labels.append(0)
aob_labels.append('both')
elif cl in astro or cl in oligo:
labels.append(1)
if cl in astro:
aob_labels.append('astro')
else:
aob_labels.append('oligo')
else:
labels.append(2)
aob_labels.append('none')
labels = np.array(labels)
aob_labels = np.array(aob_labels)
X = np.log1p(normalize(X[samp_idx, :]))
from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'astro', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'oligo', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'both', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'astro', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'oligo', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'both', NAMESPACE)
astro_oligo_violin(X, genes, 'GJA1', aob_labels, NAMESPACE)
astro_oligo_violin(X, genes, 'MBP', aob_labels, NAMESPACE)
astro_oligo_violin(X, genes, 'PLP1', aob_labels, NAMESPACE)
viz_genes = [
#'GJA1', 'MBP', 'PLP1', 'TRF',
#'CST3', 'CPE', 'FTH1', 'APOE', 'MT1', 'NDRG2', 'TSPAN7',
#'PLP1', 'MAL', 'PTGDS', 'CLDN11', 'APOD', 'QDPR', 'MAG', 'ERMN',
#'PLP1', 'MAL', 'PTGDS', 'MAG', 'CLDN11', 'APOD', 'FTH1',
#'ERMN', 'MBP', 'ENPP2', 'QDPR', 'MOBP', 'TRF',
#'CST3', 'SPARCL1', 'PTN', 'CD81', 'APOE', 'ATP1A2', 'ITM2B'
]
cell_labels = (
open('data/cell_labels/zeisel_cluster.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(cell_labels)
cell_labels = le.transform(cell_labels)
embedding = visualize(
[ X_dimred[samp_idx, :] ], cell_labels[samp_idx],
NAMESPACE + '_astro{}'.format(len(samp_idx)),
[ str(ct) for ct in sorted(set(cell_labels)) ],
gene_names=viz_genes, gene_expr=X, genes=genes,
perplexity=100, n_iter=500, image_suffix='.png',
viz_cluster=True
)
#visualize_dropout(X, embedding, image_suffix='.png',
# viz_prefix=NAMESPACE + '_dropout')
from differential_entropies import differential_entropies
differential_entropies(X_dimred, labels)
| 35.187879 | 80 | 0.627799 |
3190dd313fa981931d847fb35c25f15a5cf9dce0 | 997 | py | Python | cogitare/monitor/workers/system_usage.py | cogitare-ai/cogitare | fa99b8ef30e2f74e16fb542f2992582d1bd3ac2c | [
"MIT"
] | 90 | 2017-11-25T13:54:48.000Z | 2021-09-04T04:19:52.000Z | cogitare/monitor/workers/system_usage.py | cogitare-ai/cogitare | fa99b8ef30e2f74e16fb542f2992582d1bd3ac2c | [
"MIT"
] | 43 | 2017-09-12T20:40:56.000Z | 2019-08-03T15:37:37.000Z | cogitare/monitor/workers/system_usage.py | cogitare-ai/cogitare | fa99b8ef30e2f74e16fb542f2992582d1bd3ac2c | [
"MIT"
] | 9 | 2018-02-01T22:37:15.000Z | 2018-11-05T13:30:58.000Z | from threading import Thread
import psutil
import time
| 30.212121 | 120 | 0.584754 |
3191404cbd9e515326e447e2206e0f73b067c5bc | 5,866 | py | Python | test/worker/net.py | ameserole/Naumachia | dc13c33c5fcf053c74dfce8351a696d28857fd9d | [
"MIT"
] | null | null | null | test/worker/net.py | ameserole/Naumachia | dc13c33c5fcf053c74dfce8351a696d28857fd9d | [
"MIT"
] | null | null | null | test/worker/net.py | ameserole/Naumachia | dc13c33c5fcf053c74dfce8351a696d28857fd9d | [
"MIT"
] | null | null | null | import fcntl
import os
import socket
import struct
import warnings
import subprocess
import logging
import base64
logger = logging.getLogger(__name__)
# Dummy socket used for fcntl functions
_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class Mac(Addr):
bytelen = 6
| 27.283721 | 123 | 0.574668 |
319239aac557dc3d968ccc908a828a9cd5002f12 | 2,161 | py | Python | kunrei.py | kosugi/alfred.romanizer | d2a3b4a9883f15101893e385f14e6dca115c1d7d | [
"BSD-2-Clause"
] | null | null | null | kunrei.py | kosugi/alfred.romanizer | d2a3b4a9883f15101893e385f14e6dca115c1d7d | [
"BSD-2-Clause"
] | null | null | null | kunrei.py | kosugi/alfred.romanizer | d2a3b4a9883f15101893e385f14e6dca115c1d7d | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
basic_table = dict(map(lambda s: s.split(u'\t'), u'''
a
i
u
e
o
ka
ki
ku
ke
ko
sa
si
su
se
so
ta
ti
tu
te
to
na
ni
nu
ne
no
ha
hi
hu
he
ho
ma
mi
mu
me
mo
ya
yu
yo
ra
ri
ru
re
ro
wa
wo
a
i
u
e
o
ga
gi
gu
ge
go
za
zi
zu
ze
zo
da
di
du
de
do
ba
bi
bu
be
bo
pa
pi
pu
pe
po
kya
kyu
kyo
sya
syu
syo
tya
tyu
tyo
nya
nyu
nyo
hya
hyu
hyo
mya
myu
myo
rya
ryu
ryo
gya
gyu
gyo
zya
zyu
zyo
dya
dyu
dyo
bya
byu
byo
pya
pyu
pyo
kwa
gwa
a
i
u
e
o
ka
ki
ku
ke
ko
sa
si
su
se
so
ta
ti
tu
te
to
na
ni
nu
ne
no
ha
hi
hu
he
ho
ma
mi
mu
me
mo
ya
yu
yo
ra
ri
ru
re
ro
wa
wo
a
i
u
e
o
ga
gi
gu
ge
go
za
zi
zu
ze
zo
da
di
du
de
do
ba
bi
bu
be
bo
pa
pi
pu
pe
po
kya
kyu
kyo
sya
syu
syo
tya
tyu
tyo
nya
nyu
nyo
hya
hyu
hyo
mya
myu
myo
rya
ryu
ryo
gya
gyu
gyo
zya
zyu
zyo
dya
dyu
dyo
bya
byu
byo
pya
pyu
pyo
kwa
gwa
'''.strip(u'\n').split(u'\n')))
long_sound_table = dict(u'a i u e o'.split())
long_sounds = u'aa ii uu ee oo ou'.split()
| 8.248092 | 64 | 0.553447 |
31947238fdb172e32519876dd493050e5588dc51 | 3,062 | py | Python | tests/test_utils.py | loganthomas/turkey-bowl | 8a02966c3fe06a4dbbcee3f31ed21c2374b77e11 | [
"MIT"
] | null | null | null | tests/test_utils.py | loganthomas/turkey-bowl | 8a02966c3fe06a4dbbcee3f31ed21c2374b77e11 | [
"MIT"
] | 74 | 2020-09-26T00:58:17.000Z | 2022-03-20T13:55:09.000Z | tests/test_utils.py | loganthomas/Thanksgiving_Football | 8a02966c3fe06a4dbbcee3f31ed21c2374b77e11 | [
"MIT"
] | 1 | 2020-09-26T01:09:38.000Z | 2020-09-26T01:09:38.000Z | """
Unit tests for utils.py
"""
# Standard libraries
import json
from pathlib import Path
# Third-party libraries
import pytest
# Local libraries
from turkey_bowl import utils
| 23.736434 | 82 | 0.653494 |
3194a2997150c6f647d46dc4f5cbb7a6cd7d252d | 559 | py | Python | regtests/webclgl/call_external_method.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 319 | 2015-01-02T11:34:16.000Z | 2022-03-25T00:43:33.000Z | regtests/webclgl/call_external_method.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 10 | 2015-02-03T02:33:09.000Z | 2021-11-09T21:41:00.000Z | regtests/webclgl/call_external_method.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 61 | 2015-01-02T12:01:56.000Z | 2021-12-08T07:16:16.000Z | """external method""" | 18.032258 | 61 | 0.631485 |
3196c5c7ef4586ecc4432d29b82edc3f69f92c25 | 1,695 | py | Python | pages/views.py | Total-Conversion/eco4coin | 5a155afe892ebd714547063adfd000a1437eb3a0 | [
"MIT"
] | null | null | null | pages/views.py | Total-Conversion/eco4coin | 5a155afe892ebd714547063adfd000a1437eb3a0 | [
"MIT"
] | null | null | null | pages/views.py | Total-Conversion/eco4coin | 5a155afe892ebd714547063adfd000a1437eb3a0 | [
"MIT"
] | null | null | null | from django.contrib.admin.views.decorators import staff_member_required
from django.views.generic import TemplateView, ListView
import csv
from django.http import HttpResponse
from backend.models import CustomUser
from django.contrib.auth.mixins import LoginRequiredMixin
| 30.818182 | 113 | 0.661357 |
3197d22a066fe34f613aab3ff51fd1a605e176ab | 2,895 | py | Python | 18.part2.py | elp2/advent_of_code_2018 | 0d359422dd04b0849481796005e97d05c30e9eb4 | [
"Apache-2.0"
] | 1 | 2021-12-02T15:19:36.000Z | 2021-12-02T15:19:36.000Z | 18.part2.py | elp2/advent_of_code_2018 | 0d359422dd04b0849481796005e97d05c30e9eb4 | [
"Apache-2.0"
] | null | null | null | 18.part2.py | elp2/advent_of_code_2018 | 0d359422dd04b0849481796005e97d05c30e9eb4 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
REAL=open("18.txt").readlines()
SAMPLE=open("18.sample").readlines()
OPEN="."
TREE="|"
LUMBERYARD="#"
import copy
sample = solve(SAMPLE, 10)
assert sample == 1147
print("*** SAMPLE PASSED ***")
# print(solve(REAL, 10000))
loop = """598 570 191420
599 571 189168
600 572 185082
601 573 185227
602 574 185320
603 575 185790
604 576 186120
605 577 189956
606 578 190068
607 579 191080
608 580 190405 # too low
609 581 193795
610 582 190950
611 583 193569
612 584 194350
613 585 196308
614 586 195364
615 587 197911
616 588 199755
617 589 201144
618 590 201607
619 591 203580
620 592 201260
621 593 201950
622 594 200675 # TOO HIGH
623 595 202208
624 596 200151
625 597 198948
626 570 191420
627 571 189168
628 572 185082
629 573 185227
630 574 185320
631 575 185790
632 576 186120
633 577 189956
634 578 190068
635 579 191080
636 580 190405
637 581 193795"""
num = 1000000000
nmod = 28
for num in range(570, 638):
print(num, (num - 570) % nmod + 570)
num = 1000000000 - 1
print(num, (num - 570) % nmod + 570 + nmod) | 21.444444 | 57 | 0.601382 |
3198fc009ad14ca016fe53373d72241bd818e6a1 | 231 | py | Python | PythonExercicios/ex031.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
] | null | null | null | PythonExercicios/ex031.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
] | null | null | null | PythonExercicios/ex031.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
] | null | null | null | dis = float(input('Digite a distncia da sua viagem em Km: '))
if dis <= 200:
print('O valor da sua passagem ser {:.2f} reais'.format(dis * 0.5))
else:
print('O valor da sua passagem ser {:.2f} reais'.format(dis * 0.45))
| 38.5 | 73 | 0.645022 |
31991e3c0b73748d0ef73d80f10a961d8b27dbaf | 4,516 | py | Python | admin/collection_providers/forms.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
] | null | null | null | admin/collection_providers/forms.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
] | 20 | 2020-03-24T16:48:03.000Z | 2022-03-08T22:38:38.000Z | admin/collection_providers/forms.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
] | null | null | null | import bleach
import json
from django import forms
from osf.models import CollectionProvider, CollectionSubmission
from admin.base.utils import get_nodelicense_choices, get_defaultlicense_choices
| 45.16 | 113 | 0.629318 |
319925dc3819c9097723899fe8aef60117e396cb | 817 | py | Python | src/validate_model.py | mareklinka/esk-form-scanner-model | 30af9e1c5d652b3310222bc55f92e964bc524f2e | [
"MIT"
] | null | null | null | src/validate_model.py | mareklinka/esk-form-scanner-model | 30af9e1c5d652b3310222bc55f92e964bc524f2e | [
"MIT"
] | null | null | null | src/validate_model.py | mareklinka/esk-form-scanner-model | 30af9e1c5d652b3310222bc55f92e964bc524f2e | [
"MIT"
] | null | null | null |
import data_providers as gen
import model_storage as storage
import numpy as np
import data_visualizer
import time
def evaluate(model_name):
"""
Evaluates the model stored in the specified file.
Parameters
----------
model_name : string
The name of the file to read the model from
"""
model = storage.load_model(model_name)
model.summary()
start = time.clock()
score = model.evaluate_generator(gen.finite_generator("data\\validation"), steps=30)
end = time.clock()
print("Time per image: {} ".format((end-start)/300))
print (model.metrics_names)
print (score)
predictions = model.predict_generator(gen.finite_generator("data\\validation"), steps=30)
data_visualizer.draw_bounding_boxes("data\\validation", predictions, "data\\results") | 24.757576 | 93 | 0.69645 |
31998f7e8bdabc90d6fe3933e2b885a9ef1b8e16 | 4,154 | py | Python | sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/v3_0_preview_1/models/_form_recognizer_client_enums.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | null | null | null | sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/v3_0_preview_1/models/_form_recognizer_client_enums.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | null | null | null | sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/v3_0_preview_1/models/_form_recognizer_client_enums.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
| 30.77037 | 94 | 0.667068 |
3199bda124122e7024becc95e2646f8bec2ec029 | 930 | py | Python | activity_log/migrations/0004_auto_20170309_0929.py | farezsaputra/BandwidthControllingSystem | 54032f21e6cb94156f57222e88a98a89be310ea9 | [
"MIT"
] | null | null | null | activity_log/migrations/0004_auto_20170309_0929.py | farezsaputra/BandwidthControllingSystem | 54032f21e6cb94156f57222e88a98a89be310ea9 | [
"MIT"
] | 11 | 2021-02-10T02:18:32.000Z | 2022-03-02T09:56:43.000Z | activity_log/migrations/0004_auto_20170309_0929.py | farezsaputra/BandwidthControllingSystem | 54032f21e6cb94156f57222e88a98a89be310ea9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
| 31 | 114 | 0.631183 |
319a6bb562632b817123a3b1b59712d0c048c830 | 1,559 | py | Python | bin/read_oogeso_data.py | oogeso/oogeso | 72c05fd02d62b29fc62f60daf4989370fd80cbe1 | [
"MIT"
] | 2 | 2021-05-19T13:16:20.000Z | 2021-11-05T11:47:11.000Z | bin/read_oogeso_data.py | oogeso/oogeso | 72c05fd02d62b29fc62f60daf4989370fd80cbe1 | [
"MIT"
] | 71 | 2021-06-01T11:03:56.000Z | 2022-03-01T09:38:37.000Z | bin/read_oogeso_data.py | oogeso/oogeso | 72c05fd02d62b29fc62f60daf4989370fd80cbe1 | [
"MIT"
] | null | null | null | import json
import oogeso.io.file_io
# Read in data, validate date et.c. with methods from io
test_data_file = "examples/test case2.yaml"
json_data = oogeso.io.file_io.read_data_from_yaml(test_data_file)
json_formatted_str = json.dumps(json_data, indent=2)
print("Type json formatted str=", type(json_formatted_str))
# deserialize json data to objects
# encoder = oogeso.dto.oogeso_input_data_objects.DataclassJSONEncoder
decoder = oogeso.dto.oogeso_input_data_objects.DataclassJSONDecoder
# decoder = json.JSONDecoder()
with open("examples/energysystem.json", "r") as jsonfile:
energy_system = json.load(jsonfile, cls=decoder)
es_str = oogeso.dto.oogeso_input_data_objects.serialize_oogeso_data(energy_system)
print("Type seriealised=", type(es_str))
mydecoder = decoder()
energy_system = mydecoder.decode(json_formatted_str)
print("Type energysystem=", type(energy_system))
# energy_system = json.loads(
# json_formatted_str, cls=encoder
# )
# energy_system: oogeso.dto.oogeso_input_data_objects.EnergySystem = (
# oogeso.dto.oogeso_input_data_objects.deserialize_oogeso_data(json_data)
# )
print("========================")
print("Energy system:")
# print("Energy system type=", type(energy_system))
# print("Nodes: ", energy_system.nodes)
# print("Node1: ", energy_system.nodes["node1"])
# print("Parameters: ", energy_system.parameters)
# print("Parameters type=", type(energy_system.parameters))
# print("planning horizon: ", energy_system.parameters.planning_horizon)
# print("Carriers: ", energy_system.carriers)
print(energy_system)
| 37.119048 | 82 | 0.77229 |
319aeea582f12d05f825c637fc9e26f9381d34d7 | 94 | py | Python | TWITOFF/__init__.py | DSPT3/Twitoff | ba4a0359942a9981c9d985fa0a30cd0b44dd1d98 | [
"MIT"
] | null | null | null | TWITOFF/__init__.py | DSPT3/Twitoff | ba4a0359942a9981c9d985fa0a30cd0b44dd1d98 | [
"MIT"
] | 4 | 2021-06-08T21:03:37.000Z | 2022-03-12T00:21:25.000Z | TWITOFF/__init__.py | DSPT3/Twitoff | ba4a0359942a9981c9d985fa0a30cd0b44dd1d98 | [
"MIT"
] | 2 | 2020-02-28T11:59:32.000Z | 2021-07-12T02:28:34.000Z | """ Entry Point for Our Twitoff Flask App """
from .app import create_app
APP = create_app() | 18.8 | 45 | 0.712766 |
319c9744bc015a0408de27695b17d2663e338344 | 2,295 | py | Python | fgivenx/test/test_mass.py | ejhigson/fgivenx | 91089d8c0ce54bae0f72b41eb1da5d6e8d75738d | [
"MIT"
] | 11 | 2017-10-13T11:04:53.000Z | 2021-03-26T15:54:12.000Z | fgivenx/test/test_mass.py | ejhigson/fgivenx | 91089d8c0ce54bae0f72b41eb1da5d6e8d75738d | [
"MIT"
] | 16 | 2018-08-01T09:25:08.000Z | 2022-03-04T12:29:52.000Z | fgivenx/test/test_mass.py | ejhigson/fgivenx | 91089d8c0ce54bae0f72b41eb1da5d6e8d75738d | [
"MIT"
] | 12 | 2018-02-04T20:34:01.000Z | 2021-12-10T10:58:20.000Z | import numpy
import pytest
import os
from shutil import rmtree
from numpy.testing import assert_allclose
import scipy.stats
import scipy.integrate
import scipy.special
from fgivenx.mass import PMF, compute_pmf
| 27 | 78 | 0.620915 |
319dcd031d072f86f2934fd6b6ad4796d0a0d399 | 5,141 | py | Python | homeassistant/components/vera/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 2 | 2020-01-03T17:06:33.000Z | 2020-01-13T18:57:32.000Z | homeassistant/components/vera/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 1,016 | 2019-06-18T21:27:47.000Z | 2020-03-06T11:09:58.000Z | homeassistant/components/vera/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | null | null | null | """Config flow for Vera."""
from __future__ import annotations
from collections.abc import Mapping
import logging
import re
from typing import Any
import pyvera as pv
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EXCLUDE, CONF_LIGHTS, CONF_SOURCE
from homeassistant.core import callback
from homeassistant.helpers import entity_registry as er
from .const import CONF_CONTROLLER, CONF_LEGACY_UNIQUE_ID, DOMAIN
LIST_REGEX = re.compile("[^0-9]+")
_LOGGER = logging.getLogger(__name__)
def fix_device_id_list(data: list[Any]) -> list[int]:
"""Fix the id list by converting it to a supported int list."""
return str_to_int_list(list_to_str(data))
def str_to_int_list(data: str) -> list[int]:
"""Convert a string to an int list."""
return [int(s) for s in LIST_REGEX.split(data) if len(s) > 0]
def list_to_str(data: list[Any]) -> str:
"""Convert an int list to a string."""
return " ".join([str(i) for i in data])
def new_options(lights: list[int], exclude: list[int]) -> dict:
"""Create a standard options object."""
return {CONF_LIGHTS: lights, CONF_EXCLUDE: exclude}
def options_schema(options: Mapping[str, Any] = None) -> dict:
"""Return options schema."""
options = options or {}
return {
vol.Optional(
CONF_LIGHTS,
default=list_to_str(options.get(CONF_LIGHTS, [])),
): str,
vol.Optional(
CONF_EXCLUDE,
default=list_to_str(options.get(CONF_EXCLUDE, [])),
): str,
}
def options_data(user_input: dict) -> dict:
"""Return options dict."""
return new_options(
str_to_int_list(user_input.get(CONF_LIGHTS, "")),
str_to_int_list(user_input.get(CONF_EXCLUDE, "")),
)
| 32.13125 | 88 | 0.62731 |
319dd84050e699e6ce05aef24f59d85fafabbb42 | 3,368 | py | Python | bin/setup_spectrum.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | [
"Apache-2.0"
] | 7 | 2020-10-28T13:46:08.000Z | 2021-05-27T06:41:56.000Z | bin/setup_spectrum.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | [
"Apache-2.0"
] | 2 | 2020-10-27T19:15:12.000Z | 2020-10-27T19:15:25.000Z | bin/setup_spectrum.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | [
"Apache-2.0"
] | 2 | 2021-04-15T05:54:30.000Z | 2022-02-08T00:10:10.000Z | import os
from shutil import copy2 as copy
#
from pysurf.logger import get_logger
from pysurf.sampling import Sampling
from pysurf.setup import SetupBase
from pysurf.utils import exists_and_isfile
from pysurf.spp import SurfacePointProvider
from colt import Colt
from sp_calc import SinglePointCalculation
if __name__=="__main__":
SetupSpectrum.from_commandline()
| 34.367347 | 138 | 0.645487 |
319ec31c5bec95f71fc86ec8dcab8ee33a9ec4c6 | 412 | py | Python | CeV - Gustavo Guanabara/exerc033.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | 1 | 2021-12-11T19:53:41.000Z | 2021-12-11T19:53:41.000Z | CeV - Gustavo Guanabara/exerc033.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | CeV - Gustavo Guanabara/exerc033.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | #033: ler tres numeros e dizer qual o maior e qual o menor:
print("Digite 3 numeros:")
maiorn = 0
n = int(input("Numero 1: "))
if n > maiorn:
maiorn = n
menorn = n
n = int(input("Numero 2: "))
if n > maiorn:
maiorn = n
if n < menorn:
menorn = n
n = int(input("Numero 3: "))
if n > maiorn:
maiorn = n
if n < menorn:
menorn = n
print(f"o maior numero foi {maiorn} e o menor foi {menorn}")
| 20.6 | 60 | 0.601942 |
31a27b0c36981ab92aff36160266dec12ad84cdb | 5,238 | py | Python | test/test_dot.py | croqaz/dot | b57f3c68dfa1ac5a7afb9f83af6035c34e342c83 | [
"MIT"
] | null | null | null | test/test_dot.py | croqaz/dot | b57f3c68dfa1ac5a7afb9f83af6035c34e342c83 | [
"MIT"
] | null | null | null | test/test_dot.py | croqaz/dot | b57f3c68dfa1ac5a7afb9f83af6035c34e342c83 | [
"MIT"
] | null | null | null | import pytest
from prop import strict_get
from prop import get as dot_get
| 27.714286 | 68 | 0.550592 |
31a2f51b82dfe59d7e0a0af9b9f3cdff2f955130 | 9,179 | py | Python | site_scons/site_tools/mplabx_nbproject/__init__.py | kbhomes/ps2plus | 63467133367082ec06c88e5c0fd623373709717e | [
"MIT"
] | null | null | null | site_scons/site_tools/mplabx_nbproject/__init__.py | kbhomes/ps2plus | 63467133367082ec06c88e5c0fd623373709717e | [
"MIT"
] | null | null | null | site_scons/site_tools/mplabx_nbproject/__init__.py | kbhomes/ps2plus | 63467133367082ec06c88e5c0fd623373709717e | [
"MIT"
] | null | null | null | from pprint import pprint
import SCons.Builder
from SCons.Script import *
import json
import os
import copy
import collections
import xml.etree.ElementTree as ET
from mplabx import MPLABXProperties
MAKEFILE_TEXT = '''
MKDIR=mkdir
CP=cp
CCADMIN=CCadmin
RANLIB=ranlib
build: .build-post
.build-pre:
.build-post: .build-impl
clean: .clean-post
.clean-pre:
.clean-post: .clean-impl
clobber: .clobber-post
.clobber-pre:
.clobber-post: .clobber-impl
all: .all-post
.all-pre:
.all-post: .all-impl
help: .help-post
.help-pre:
.help-post: .help-impl
include nbproject/Makefile-impl.mk
include nbproject/Makefile-variables.mk
'''
PROJECT_XML_TEXT = '''
<project>
<type>com.microchip.mplab.nbide.embedded.makeproject</type>
<configuration>
<data>
<name />
<sourceRootList />
<confList />
</data>
</configuration>
</project>
'''
CONFIGURATIONS_XML_TEXT = '''
<configurationDescriptor version="65">
<logicalFolder name="root" displayName="root" projectFiles="true" />
<sourceRootList />
<projectmakefile>Makefile</projectmakefile>
<confs />
</configurationDescriptor>
'''
CONFIGURATION_ELEMENT_TEXT = '''
<conf type="2">
<toolsSet>
<targetDevice />
<languageToolchain />
<languageToolchainVersion />
</toolsSet>
<HI-TECH-COMP />
<HI-TECH-LINK />
<XC8-config-global />
</conf>
'''
def build_mplabx_nbproject(target, source, env):
'''
target - (singleton list) - Directory node to the project folder
source - (list) - XML value nodes for each project configuration
'''
project_dir = target[0]
nbproject_dir = project_dir.Dir('nbproject')
configurations_xml_file = nbproject_dir.File('configurations.xml')
project_xml_file = nbproject_dir.File('project.xml')
makefile_file = project_dir.File('Makefile')
# Make the directories
env.Execute(Mkdir(project_dir))
env.Execute(Mkdir(nbproject_dir))
# Generate the XML files
confs = source
configurations_xml_root, project_xml_root = _build_xml_files(
project_name=os.path.basename(str(project_dir)),
project_dir=project_dir,
confs=confs,
source_files=env['source_files'])
with open(str(configurations_xml_file), 'w') as f:
ET.indent(configurations_xml_root, space=' ')
ET.ElementTree(configurations_xml_root).write(f, encoding='unicode')
with open(str(project_xml_file), 'w') as f:
ET.indent(project_xml_root, space=' ')
ET.ElementTree(project_xml_root).write(f, encoding='unicode')
with open(str(makefile_file), 'w') as f:
f.write(MAKEFILE_TEXT)
_mplabx_nbproject_builder = SCons.Builder.Builder(action=build_mplabx_nbproject) | 35.996078 | 130 | 0.706613 |
31a75a5de6817edf26be7b64cce143ae2a37bc84 | 2,101 | py | Python | lib/autoconnect/example/test_server.py | simotek/autoconnect | 7d956e5bef0bcfe22b7f06061f8024df62b004ab | [
"FTL"
] | null | null | null | lib/autoconnect/example/test_server.py | simotek/autoconnect | 7d956e5bef0bcfe22b7f06061f8024df62b004ab | [
"FTL"
] | null | null | null | lib/autoconnect/example/test_server.py | simotek/autoconnect | 7d956e5bef0bcfe22b7f06061f8024df62b004ab | [
"FTL"
] | null | null | null | #
# test_server.py
#
# Copyright (C) 2001-2007 Oisin Mulvihill.
# Email: oisin.mulvihill@gmail.com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library (see the file LICENSE.TXT); if not,
# write to the Free Software Foundation, Inc., 59 Temple Place,
# Suite 330, Boston, MA 02111-1307 USA.
#
# Date: 2001/12/06 15:54:30
#
import sys
import socket
import xmlrpclib
import autoconnect
from SimpleXMLRPCServer import SimpleXMLRPCServer
if __name__ == '__main__':
server = Server()
server.main()
| 30.449275 | 79 | 0.643027 |
31a8e14670c16c328e5688e740eae92822649a17 | 6,872 | py | Python | tests/test_loop_seer.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 6,132 | 2015-08-06T23:24:47.000Z | 2022-03-31T21:49:34.000Z | tests/test_loop_seer.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 2,272 | 2015-08-10T08:40:07.000Z | 2022-03-31T23:46:44.000Z | tests/test_loop_seer.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 1,155 | 2015-08-06T23:37:39.000Z | 2022-03-31T05:54:11.000Z | import os
import sys
import angr
import nose.tools
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
if __name__ == "__main__":
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
g = globals().copy()
for k, v in g.items():
if k.startswith("test_") and hasattr(v, '__call__'):
print(k)
v()
| 42.159509 | 125 | 0.709983 |
31aa23acdb0243f1a7dd745198a7dc1050b82ef5 | 1,726 | py | Python | shongololo/Imet_serial.py | swyngaard/shongololo | 0d11378fb0e61cae5da0e09c9eed10fd9195f20d | [
"Apache-2.0"
] | null | null | null | shongololo/Imet_serial.py | swyngaard/shongololo | 0d11378fb0e61cae5da0e09c9eed10fd9195f20d | [
"Apache-2.0"
] | null | null | null | shongololo/Imet_serial.py | swyngaard/shongololo | 0d11378fb0e61cae5da0e09c9eed10fd9195f20d | [
"Apache-2.0"
] | null | null | null | import serial , time , os
import serial.tools.list_ports as port
import logging
sho_logger = logging.getLogger("shongololo_logger")
def open_imets(devices):
"""Tries to open as many imet device serial ports as there are
:return:
a list of socket handles
"""
imet_sockets = []
for d in range(len(devices)): # Create list of imet open ports
port = str(devices["Imet" + str(d)])
try:
ser = serial.Serial(port, baudrate=57600, parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS,stopbits=serial.STOPBITS_ONE, timeout=3.0, xonxoff=False)
imet_sockets.append(ser)
sho_logger.info("\n Successfully opened Imet device on port {}".format(devices["Imet" + str(d)]))
except serial.SerialException as e:
sho_logger.error(e)
sho_logger.critical("\nFailed to open imet on port {}".format(devices["Imet" + str(d)]))
return imet_sockets
def find_imets():
"""
Finds available imet serial ports and determines which device is attached to which /dev/ path
:rtype: object
:return:
A dictionary of devices labled as" imet<number starting from 0>
"""
device_dict = {}
imets = 0
portlist = list(port.comports())
for p in portlist:
sp = str(p)
if "FT230" in sp:
path = sp.split('-')[0]
device_dict["Imet" + str(imets)] = path[:-1]
imets = imets + 1
sho_logger.info("Found an Imet device on port: %s",path)
status=0
else:
pass
if imets==0:
sho_logger.error("No Imet devices found.")
else:
sho_logger.info("Found {} Imet devices".format(imets))
return device_dict
| 31.381818 | 164 | 0.618192 |
31acd399985d83122352ad6b8e7282bfbeb0a214 | 1,754 | py | Python | libs/external_libs/docutils-0.4/test/test_transforms/test_peps.py | google-code-export/django-hotclub | d783a5bbcc06816289565f3eae6d99461188ca4a | [
"MIT"
] | 3 | 2015-12-25T14:45:36.000Z | 2016-11-28T09:58:03.000Z | libs/external_libs/docutils-0.4/test/test_transforms/test_peps.py | indro/t2c | 56482ad4aed150f29353e054db2c97b567243bf8 | [
"MIT"
] | null | null | null | libs/external_libs/docutils-0.4/test/test_transforms/test_peps.py | indro/t2c | 56482ad4aed150f29353e054db2c97b567243bf8 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 3915 $
# Date: $Date: 2005-10-02 03:06:42 +0200 (Sun, 02 Oct 2005) $
# Copyright: This module has been placed in the public domain.
"""
Tests for docutils.transforms.peps.
"""
from __init__ import DocutilsTestSupport
from docutils.transforms.peps import TargetNotes
from docutils.parsers.rst import Parser
totest = {}
totest['target_notes'] = ((TargetNotes,), [
["""\
No references or targets exist, therefore
no "References" section should be generated.
""",
"""\
<document source="test data">
<paragraph>
No references or targets exist, therefore
no "References" section should be generated.
"""],
["""\
A target exists, here's the reference_.
A "References" section should be generated.
.. _reference: http://www.example.org
""",
"""\
<document source="test data">
<paragraph>
A target exists, here's the \n\
<reference name="reference" refname="reference">
reference
\n\
<footnote_reference auto="1" ids="id3" refname="TARGET_NOTE: id2">
.
A "References" section should be generated.
<target ids="reference" names="reference" refuri="http://www.example.org">
<section ids="id1">
<title>
References
<footnote auto="1" ids="id2" names="TARGET_NOTE:\ id2">
<paragraph>
<reference refuri="http://www.example.org">
http://www.example.org
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| 25.42029 | 78 | 0.646522 |
31ad8e6cefd31380ff5fa1bdef5437fd290e10f2 | 380 | py | Python | rental_property/migrations/0011_alter_rentalunit_options.py | shumwe/rental-house-management-system | f97f22afa8bc2740ed08baa387c74b93e02fac0c | [
"MIT"
] | 1 | 2022-03-16T13:29:30.000Z | 2022-03-16T13:29:30.000Z | rental_property/migrations/0011_alter_rentalunit_options.py | shumwe/rental-house-management-system | f97f22afa8bc2740ed08baa387c74b93e02fac0c | [
"MIT"
] | null | null | null | rental_property/migrations/0011_alter_rentalunit_options.py | shumwe/rental-house-management-system | f97f22afa8bc2740ed08baa387c74b93e02fac0c | [
"MIT"
] | null | null | null | # Generated by Django 4.0.2 on 2022-03-15 22:43
from django.db import migrations
| 21.111111 | 61 | 0.628947 |
31ade7fa4d1318ceab82ad2826fc1a70514e9372 | 951 | py | Python | AxesFrame.py | Toyuri453/RSSP-Python-demo | 0adf92ad765b5a9334d7e2830611b98c8c4eb26d | [
"MIT"
] | 1 | 2021-05-22T18:06:49.000Z | 2021-05-22T18:06:49.000Z | AxesFrame.py | Toyuri453/RSSP-Python-demo | 0adf92ad765b5a9334d7e2830611b98c8c4eb26d | [
"MIT"
] | null | null | null | AxesFrame.py | Toyuri453/RSSP-Python-demo | 0adf92ad765b5a9334d7e2830611b98c8c4eb26d | [
"MIT"
] | null | null | null | import Terminal
| 50.052632 | 138 | 0.681388 |
31ae9dcd46623a64820f48a76a6115e399610104 | 1,583 | py | Python | api/generate.py | almeida-matheus/playlist-reader | c09393395a7a28d104a2bad28e3af4a8c23f8adf | [
"MIT"
] | null | null | null | api/generate.py | almeida-matheus/playlist-reader | c09393395a7a28d104a2bad28e3af4a8c23f8adf | [
"MIT"
] | null | null | null | api/generate.py | almeida-matheus/playlist-reader | c09393395a7a28d104a2bad28e3af4a8c23f8adf | [
"MIT"
] | null | null | null | import re
from bs4 import BeautifulSoup # beautifulsoup4
import requests # requests
HEADER = {
"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
}
def catch_info(base,pattern,str_add=''):
'''base text, pattern to search, string to increment if necessary'''
array = []
for match in pattern.finditer(base.prettify()):
array.append(str_add+match.group(1))
return list(dict.fromkeys(array)) # set(array_video)
# response = generate('PLMKi-ss_sEoOZw9TB4iCrevTK60uY8wg0')
# print(response) | 37.690476 | 141 | 0.651927 |
31ae9eb018cffff3e9fe4e96c1a466a559766558 | 1,118 | py | Python | homeassistant/components/deconz/const.py | hoverduck/core | 9fa23f6479e9cf8aed3fa6d2980ddc98dae06e7b | [
"Apache-2.0"
] | 1 | 2020-01-08T21:47:59.000Z | 2020-01-08T21:47:59.000Z | homeassistant/components/deconz/const.py | hoverduck/core | 9fa23f6479e9cf8aed3fa6d2980ddc98dae06e7b | [
"Apache-2.0"
] | 39 | 2020-08-31T14:55:47.000Z | 2022-03-31T06:02:16.000Z | homeassistant/components/deconz/const.py | stodev-com-br/home-assistant | 944d7b9d7e59ad878ae9f75b80f4cd418bad8296 | [
"Apache-2.0"
] | null | null | null | """Constants for the deCONZ component."""
import logging
LOGGER = logging.getLogger(__package__)
DOMAIN = "deconz"
CONF_BRIDGE_ID = "bridgeid"
CONF_GROUP_ID_BASE = "group_id_base"
DEFAULT_PORT = 80
DEFAULT_ALLOW_CLIP_SENSOR = False
DEFAULT_ALLOW_DECONZ_GROUPS = True
DEFAULT_ALLOW_NEW_DEVICES = True
CONF_ALLOW_CLIP_SENSOR = "allow_clip_sensor"
CONF_ALLOW_DECONZ_GROUPS = "allow_deconz_groups"
CONF_ALLOW_NEW_DEVICES = "allow_new_devices"
CONF_MASTER_GATEWAY = "master"
SUPPORTED_PLATFORMS = [
"binary_sensor",
"climate",
"cover",
"light",
"scene",
"sensor",
"switch",
]
NEW_GROUP = "groups"
NEW_LIGHT = "lights"
NEW_SCENE = "scenes"
NEW_SENSOR = "sensors"
ATTR_DARK = "dark"
ATTR_OFFSET = "offset"
ATTR_ON = "on"
ATTR_VALVE = "valve"
DAMPERS = ["Level controllable output"]
WINDOW_COVERS = ["Window covering device", "Window covering controller"]
COVER_TYPES = DAMPERS + WINDOW_COVERS
POWER_PLUGS = ["On/Off light", "On/Off plug-in unit", "Smart plug"]
SIRENS = ["Warning device"]
SWITCH_TYPES = POWER_PLUGS + SIRENS
CONF_ANGLE = "angle"
CONF_GESTURE = "gesture"
CONF_XY = "xy"
| 21.5 | 72 | 0.739714 |
31aed2d6bc8b935fd6033025428a672731040be9 | 1,898 | py | Python | course_app/api/views.py | maks-nurgazy/diploma-project | 66889488ffaa0269e1be2df6f6c76a3ca68a3cfb | [
"MIT"
] | null | null | null | course_app/api/views.py | maks-nurgazy/diploma-project | 66889488ffaa0269e1be2df6f6c76a3ca68a3cfb | [
"MIT"
] | null | null | null | course_app/api/views.py | maks-nurgazy/diploma-project | 66889488ffaa0269e1be2df6f6c76a3ca68a3cfb | [
"MIT"
] | null | null | null | import json
from rest_framework.generics import ListAPIView, get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from course_app.api.serializers import CourseSerializer
from course_app.models import Course, Enrolled
from users.api.serializers import StudentSerializer
from users.models import Student
| 28.757576 | 74 | 0.714436 |
31b0a81b7e41eaa16ffc9d2a726e4978e07e1575 | 9,005 | py | Python | service/repository/repository_controller.py | yutiansut/cilantro | 3fa579999e7d5a6d6041ccc7e309c667fc7eac90 | [
"Apache-2.0"
] | 3 | 2019-09-04T12:40:33.000Z | 2021-12-28T16:33:27.000Z | service/repository/repository_controller.py | yutiansut/cilantro | 3fa579999e7d5a6d6041ccc7e309c667fc7eac90 | [
"Apache-2.0"
] | 97 | 2018-05-29T13:27:04.000Z | 2021-11-02T11:03:33.000Z | service/repository/repository_controller.py | yutiansut/cilantro | 3fa579999e7d5a6d6041ccc7e309c667fc7eac90 | [
"Apache-2.0"
] | 16 | 2018-04-25T11:39:21.000Z | 2019-12-16T14:37:39.000Z | import os
import json
import logging
import yaml
from flask import Blueprint, jsonify, send_file, request, redirect
from service.errors import ApiError
from utils.repository import generate_repository_path, \
list_objects_in_repository
from utils.list_dir import list_dir
repository_controller = Blueprint('repository', __name__)
repository_dir = os.environ['REPOSITORY_DIR']
metadata_file = 'meta.json'
representation_dir = 'data'
sub_object_dir = 'parts'
viewers_config = os.path.join(os.environ['CONFIG_DIR'], "viewers.yml")
with open(viewers_config, 'r', encoding="utf-8") as viewers_file:
viewers = yaml.safe_load(viewers_file)
def handle_file_request(path):
if request.headers.get('Accept') == '*/*':
return send_file(path)
elif request.accept_mimetypes.accept_html:
ext = os.path.splitext(path)[1][1:]
if ext in viewers:
url = viewers[ext] + path[len(repository_dir):]
return redirect(url, code=303)
return send_file(path)
| 28.769968 | 79 | 0.608329 |
31b23312e6643e95278a2225ec84f190096c74fe | 69 | py | Python | src/python_import/C/cc.py | matiastang/matias-python | b7785217e5d386c01198305751ecd562259ea2b7 | [
"MIT"
] | null | null | null | src/python_import/C/cc.py | matiastang/matias-python | b7785217e5d386c01198305751ecd562259ea2b7 | [
"MIT"
] | null | null | null | src/python_import/C/cc.py | matiastang/matias-python | b7785217e5d386c01198305751ecd562259ea2b7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
#coding=utf-8 | 13.8 | 19 | 0.681159 |
31b24a17c488a7eb71036a8c8f97645c1213787a | 24,392 | py | Python | plugin.audio.podcasts/addon.py | stobb3s/kodi-addon-podcast | de834d10031e372996ec34b2cd989a6d105168a8 | [
"MIT"
] | null | null | null | plugin.audio.podcasts/addon.py | stobb3s/kodi-addon-podcast | de834d10031e372996ec34b2cd989a6d105168a8 | [
"MIT"
] | null | null | null | plugin.audio.podcasts/addon.py | stobb3s/kodi-addon-podcast | de834d10031e372996ec34b2cd989a6d105168a8 | [
"MIT"
] | null | null | null | from datetime import datetime
import base64
import os
import re
import requests
import sys
import urllib.parse
import xmltodict
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
__PLUGIN_ID__ = "plugin.audio.podcasts"
# see https://forum.kodi.tv/showthread.php?tid=112916
_MONTHS = ["Jan", "Feb", "Mar", "Apr", "May",
"Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
GPODDER_API = {
"login": "%s/api/2/auth/%s/login.json",
"subscriptions": "%s/subscriptions/%s.%s"
}
settings = xbmcaddon.Addon(id=__PLUGIN_ID__)
addon_dir = xbmcvfs.translatePath(settings.getAddonInfo('path'))
if __name__ == '__main__':
mediathek = Mediathek()
if sys.argv[1] == "import_gpodder_subscriptions":
mediathek.import_gpodder_subscriptions()
elif sys.argv[1] == "import_opml":
mediathek.import_opml()
elif sys.argv[1] == "download_gpodder_subscriptions":
mediathek.download_gpodder_subscriptions()
elif sys.argv[1] == "unassign_opml":
mediathek.unassign_opml()
else:
mediathek.handle(sys.argv)
| 32.17942 | 142 | 0.508158 |
31b296cff20ef1265ad6524a7cf4ad27623881bc | 2,832 | py | Python | .venv/Lib/site-packages/lemoncheesecake/reporting/savingstrategy.py | yadavdeepa365/HUDL_PYTHON | e1d5d264e3748f0add18258496f5a850e16b7ee6 | [
"MIT"
] | 34 | 2017-06-12T18:50:36.000Z | 2021-11-29T01:59:07.000Z | .venv/Lib/site-packages/lemoncheesecake/reporting/savingstrategy.py | yadavdeepa365/HUDL_PYTHON | e1d5d264e3748f0add18258496f5a850e16b7ee6 | [
"MIT"
] | 25 | 2017-12-07T13:35:29.000Z | 2022-03-10T01:27:58.000Z | .venv/Lib/site-packages/lemoncheesecake/reporting/savingstrategy.py | yadavdeepa365/HUDL_PYTHON | e1d5d264e3748f0add18258496f5a850e16b7ee6 | [
"MIT"
] | 4 | 2019-05-05T03:19:00.000Z | 2021-10-06T13:12:05.000Z | import re
import time
from lemoncheesecake.events import TestSessionSetupEndEvent, TestSessionTeardownEndEvent, \
TestEndEvent, SuiteSetupEndEvent, SuiteTeardownEndEvent, SuiteEndEvent, SteppedEvent
from lemoncheesecake.reporting.report import ReportLocation
DEFAULT_REPORT_SAVING_STRATEGY = "at_each_failed_test"
| 31.120879 | 112 | 0.715042 |
31b3246b48b5cc2ea21a0461162a64666ab485f1 | 4,676 | py | Python | genshin/models/genshin/chronicle/notes.py | thesadru/genshin.py | 806b8d0dd059a06605e66dead917fdf550a552bc | [
"MIT"
] | 63 | 2021-10-04T19:53:54.000Z | 2022-03-30T07:21:03.000Z | genshin/models/genshin/chronicle/notes.py | thesadru/genshin.py | 806b8d0dd059a06605e66dead917fdf550a552bc | [
"MIT"
] | 17 | 2021-11-16T20:42:52.000Z | 2022-03-31T10:11:52.000Z | genshin/models/genshin/chronicle/notes.py | thesadru/genshin.py | 806b8d0dd059a06605e66dead917fdf550a552bc | [
"MIT"
] | 10 | 2021-10-16T22:41:41.000Z | 2022-02-19T17:55:23.000Z | """Genshin chronicle notes."""
import datetime
import typing
import pydantic
from genshin.models.genshin import character
from genshin.models.model import Aliased, APIModel
__all__ = ["Expedition", "ExpeditionCharacter", "Notes"]
| 34.131387 | 114 | 0.700385 |
31b5fadfad64338920b6d7434a87ab13e7d9dd53 | 1,222 | py | Python | src/the_tale/the_tale/common/bbcode/renderer.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 85 | 2017-11-21T12:22:02.000Z | 2022-03-27T23:07:17.000Z | src/the_tale/the_tale/common/bbcode/renderer.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 545 | 2017-11-04T14:15:04.000Z | 2022-03-27T14:19:27.000Z | src/the_tale/the_tale/common/bbcode/renderer.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 45 | 2017-11-11T12:36:30.000Z | 2022-02-25T06:10:44.000Z |
import smart_imports
smart_imports.all()
| 31.333333 | 144 | 0.561375 |
31b8b818299558855ad3e395eb84d71adc230d9a | 6,860 | py | Python | tests/python/unittest/test_meta_schedule_custom_rule_winograd_cpu.py | psrivas2/relax | 4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a | [
"Apache-2.0"
] | 11 | 2021-11-02T00:49:16.000Z | 2021-11-19T02:17:00.000Z | tests/python/unittest/test_meta_schedule_custom_rule_winograd_cpu.py | psrivas2/relax | 4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a | [
"Apache-2.0"
] | 16 | 2021-11-02T00:17:12.000Z | 2021-11-21T20:47:52.000Z | tests/python/unittest/test_meta_schedule_custom_rule_winograd_cpu.py | psrivas2/relax | 4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a | [
"Apache-2.0"
] | 4 | 2021-11-05T18:17:23.000Z | 2021-11-11T06:22:00.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import tvm
from tvm import meta_schedule as ms
from tvm.ir import IRModule
from tvm.meta_schedule.testing.conv2d_winograd_cpu import conv2d_winograd_cpu
from tvm.target import Target
from tvm.tir.schedule import Schedule, Trace
if __name__ == "__main__":
test_conv2d_winograd_cpu()
| 32.358491 | 85 | 0.559329 |
31b9a252afcfedd5b4624ba9c3b0dea6a7505e81 | 10,253 | py | Python | model_zoo/official/cv/FCN8s/src/nets/FCN8s.py | LottieWang/mindspore | 1331c7e432fb691d1cfa625ab7cc7451dcfc7ce0 | [
"Apache-2.0"
] | null | null | null | model_zoo/official/cv/FCN8s/src/nets/FCN8s.py | LottieWang/mindspore | 1331c7e432fb691d1cfa625ab7cc7451dcfc7ce0 | [
"Apache-2.0"
] | null | null | null | model_zoo/official/cv/FCN8s/src/nets/FCN8s.py | LottieWang/mindspore | 1331c7e432fb691d1cfa625ab7cc7451dcfc7ce0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.nn as nn
from mindspore.ops import operations as P
| 48.592417 | 103 | 0.609968 |
31b9c954ad4a3b83232dea2da9aa347a1d45c611 | 1,208 | py | Python | scripts/seqrun_processing/sync_seqrun_data_from_remote.py | imperial-genomics-facility/data-management-python | 7b867d8d4562a49173d0b823bdc4bf374a3688f0 | [
"Apache-2.0"
] | 7 | 2018-05-08T07:28:08.000Z | 2022-02-21T14:56:49.000Z | scripts/seqrun_processing/sync_seqrun_data_from_remote.py | imperial-genomics-facility/data-management-python | 7b867d8d4562a49173d0b823bdc4bf374a3688f0 | [
"Apache-2.0"
] | 15 | 2021-08-19T12:32:20.000Z | 2022-02-09T19:52:51.000Z | scripts/seqrun_processing/sync_seqrun_data_from_remote.py | imperial-genomics-facility/data-management-python | 7b867d8d4562a49173d0b823bdc4bf374a3688f0 | [
"Apache-2.0"
] | 2 | 2017-05-12T15:20:10.000Z | 2020-05-07T16:25:11.000Z | #!/usr/bin/env python
import argparse
from igf_data.task_tracking.igf_slack import IGF_slack
from igf_data.process.data_transfer.sync_seqrun_data_on_remote import Sync_seqrun_data_from_remote
parser = argparse.ArgumentParser()
parser.add_argument('-r','--remote_server', required=True, help='Remote server address')
parser.add_argument('-p','--remote_base_path', required=True, help='Seqrun directory path in remote dir')
parser.add_argument('-d','--dbconfig', required=True, help='Database configuration file path')
parser.add_argument('-o','--output_dir', required=True, help='Local output directory path')
parser.add_argument('-n','--slack_config', required=True, help='Slack configuration file path')
args = parser.parse_args()
remote_server = args.remote_server
remote_base_path = args.remote_base_path
dbconfig = args.dbconfig
output_dir = args.output_dir
slack_config = args.slack_config
if __name__=='__main__':
try:
slack_obj=IGF_slack(slack_config=slack_config)
## FIX ME
except Exception as e:
message = 'Error while syncing sequencing run directory from remote server: {0}'.format(e)
slack_obj.post_message_to_channel(message,reaction='fail')
raise ValueError(message)
| 43.142857 | 105 | 0.78394 |
31ba54fbc1b1ed1f7e053b99d91ae0c4606e4d0f | 314 | py | Python | pydashlite/arrays/sum_by.py | glowlex/pydashlite | cbc96478fa610aeae95b5584b406aa0c35b89db1 | [
"MIT"
] | null | null | null | pydashlite/arrays/sum_by.py | glowlex/pydashlite | cbc96478fa610aeae95b5584b406aa0c35b89db1 | [
"MIT"
] | null | null | null | pydashlite/arrays/sum_by.py | glowlex/pydashlite | cbc96478fa610aeae95b5584b406aa0c35b89db1 | [
"MIT"
] | null | null | null | from typing import Callable, Iterable, TypeVar
T = TypeVar('T')
Num = TypeVar('Num', int, float)
| 28.545455 | 90 | 0.646497 |
31ba5edab7671efdaef9d530b3fadbb3b92a5249 | 344 | py | Python | Ten_Most_Common_Words.py | mcjohnchristopher/Python_Samples | 738f3b7d9baa7f4e396647f380118eba66ea645c | [
"CC0-1.0"
] | null | null | null | Ten_Most_Common_Words.py | mcjohnchristopher/Python_Samples | 738f3b7d9baa7f4e396647f380118eba66ea645c | [
"CC0-1.0"
] | null | null | null | Ten_Most_Common_Words.py | mcjohnchristopher/Python_Samples | 738f3b7d9baa7f4e396647f380118eba66ea645c | [
"CC0-1.0"
] | null | null | null | fhand = (romeo.txt)
counts = dict()
for line in fhand:
words = line.split()
for word in words():
count word = count.get(word, 0) + 1
st = list
for Key,Value in count.items():
st.append((val,key))
st.sort(reverse = true)
for val,key in st[:10]:
print key, val
#Using Sorted Function
sorted [(v,k) for k,v in c.items()]: | 20.235294 | 38 | 0.622093 |
31bc16b72d793c3793926c951b8b26eb5b85e70b | 43 | py | Python | python/testData/intentions/convertLambdaToFunction.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/intentions/convertLambdaToFunction.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/intentions/convertLambdaToFunction.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | newlist = lambda x<caret>, y: (x+y)/y
x = 1 | 21.5 | 37 | 0.581395 |
31bc57d9152ec85878460b40dfe42e1115dfd96e | 615 | py | Python | src/grpc_client.py | thealphadollar/py-grpcio-pg | aed6de047e4843f3bdf86184a0a2c5a1ecd6beb1 | [
"MIT"
] | null | null | null | src/grpc_client.py | thealphadollar/py-grpcio-pg | aed6de047e4843f3bdf86184a0a2c5a1ecd6beb1 | [
"MIT"
] | null | null | null | src/grpc_client.py | thealphadollar/py-grpcio-pg | aed6de047e4843f3bdf86184a0a2c5a1ecd6beb1 | [
"MIT"
] | null | null | null | import grpc
from consts import PORT, SERVER_CERT
from grpc_generated_files import api_pb2, api_pb2_grpc
if __name__ == "__main__":
with open(SERVER_CERT, 'rb') as f:
server_cert = f.read()
creds = grpc.ssl_channel_credentials(server_cert)
# the server IP should be in the common name of the certificate
channel = grpc.secure_channel(f'localhost:{PORT}', creds)
stub = api_pb2_grpc.ApiStub(channel)
main(stub)
| 25.625 | 67 | 0.692683 |
31bd00426914dc97a2be62873f494b2813748a77 | 1,432 | py | Python | 0188.Best Time to Buy and Sell Stock IV/solution.py | zhlinh/leetcode | 6dfa0a4df9ec07b2c746a13c8257780880ea04af | [
"Apache-2.0"
] | null | null | null | 0188.Best Time to Buy and Sell Stock IV/solution.py | zhlinh/leetcode | 6dfa0a4df9ec07b2c746a13c8257780880ea04af | [
"Apache-2.0"
] | null | null | null | 0188.Best Time to Buy and Sell Stock IV/solution.py | zhlinh/leetcode | 6dfa0a4df9ec07b2c746a13c8257780880ea04af | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-03-23
Last_modify: 2016-03-23
******************************************
'''
'''
Say you have an array for which the ith element is
the price of a given stock on day i.
Design an algorithm to find the maximum profit.
You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time
(ie, you must sell the stock before you buy again).
Credits:
Special thanks to @Freezen for adding this problem and creating all test cases.
'''
| 26.518519 | 79 | 0.515363 |
31bdffc8c81e843699509af2486f317c1a1c36b7 | 35,087 | py | Python | gips/gistmodel/post_processing.py | accsc/gips | 6b20b2b0fa76ee24b04237b1edd5c8a26738d460 | [
"MIT"
] | 1 | 2021-04-24T10:29:39.000Z | 2021-04-24T10:29:39.000Z | gips/gistmodel/post_processing.py | accsc/gips | 6b20b2b0fa76ee24b04237b1edd5c8a26738d460 | [
"MIT"
] | null | null | null | gips/gistmodel/post_processing.py | accsc/gips | 6b20b2b0fa76ee24b04237b1edd5c8a26738d460 | [
"MIT"
] | 2 | 2021-02-16T14:18:59.000Z | 2021-06-04T05:09:22.000Z | import numpy as np
import copy
from gips import FLOAT
from gips import DOUBLE | 38.899113 | 109 | 0.378545 |
31be5bcba5067c3d0f88dba211c9dc9337d0bf13 | 2,560 | py | Python | src/Cogs/InfoCog.py | kodyVS/Discord-Bot-Development | 389bf69871adbe289f162ddbeeaf681023ca1f02 | [
"MIT"
] | 5 | 2020-05-27T20:03:45.000Z | 2020-06-24T11:27:26.000Z | src/Cogs/InfoCog.py | kodyVS/Discord-Bot-Development | 389bf69871adbe289f162ddbeeaf681023ca1f02 | [
"MIT"
] | 11 | 2020-05-28T10:56:26.000Z | 2020-07-02T13:38:02.000Z | src/Cogs/InfoCog.py | kodyVS/Discord-Bot-Development | 389bf69871adbe289f162ddbeeaf681023ca1f02 | [
"MIT"
] | 3 | 2020-05-28T20:31:02.000Z | 2020-06-17T23:51:51.000Z | from discord.ext import commands
import discord
import requests
from bs4 import BeautifulSoup
# work in progress! more languages welcome!
| 49.230769 | 210 | 0.632031 |
31beb5620648da5af3d9f0847e1c1d7a84954a2a | 451 | py | Python | Python/List/37.drop.py | angelmpalomares/ModelAndLanguagesForBioInformatics | 0b981bfcdc2a58ad72da3513e783ef75e53c205c | [
"MIT"
] | null | null | null | Python/List/37.drop.py | angelmpalomares/ModelAndLanguagesForBioInformatics | 0b981bfcdc2a58ad72da3513e783ef75e53c205c | [
"MIT"
] | 1 | 2021-06-08T07:44:38.000Z | 2021-06-08T07:53:10.000Z | Python/List/37.drop.py | angelmpalomares/ModelAndLanguagesForBioInformatics | 0b981bfcdc2a58ad72da3513e783ef75e53c205c | [
"MIT"
] | 2 | 2021-04-11T10:13:57.000Z | 2021-06-07T23:20:31.000Z | def drop(i_list: list,n:int) -> list:
"""
Drop at multiple of n from the list
:param n: Drop from the list i_list every N element
:param i_list: The source list
:return: The returned list
"""
assert(n>0)
_shallow_list = []
k=1
for element in i_list:
if k % n != 0:
_shallow_list.append(element)
k+=1
return _shallow_list
if __name__ == "__main__":
print(drop([1,2,3,4,5],6)) | 25.055556 | 55 | 0.585366 |
31bebc7c278939b7860a6be8a6a8aa404c030728 | 7,929 | py | Python | cil/balanced_experience_replay.py | itaicaspi-intel/advanced-coach | 2f1d15a8c75d9e3c1742fc553daad5e52f4d5188 | [
"Apache-2.0"
] | 1 | 2019-02-18T05:57:42.000Z | 2019-02-18T05:57:42.000Z | cil/balanced_experience_replay.py | itaicaspi-intel/advanced-coach | 2f1d15a8c75d9e3c1742fc553daad5e52f4d5188 | [
"Apache-2.0"
] | null | null | null | cil/balanced_experience_replay.py | itaicaspi-intel/advanced-coach | 2f1d15a8c75d9e3c1742fc553daad5e52f4d5188 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
import random
from enum import Enum
from typing import List, Tuple, Any, Union
import numpy as np
from rl_coach.core_types import Transition
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters, ExperienceReplay
from rl_coach.schedules import Schedule, ConstantSchedule
"""
A replay buffer which allows sampling batches which are balanced in terms of the classes that are sampled
"""
| 46.098837 | 119 | 0.67436 |
31c2501833007cbf35b7052dce9d35c5762beec9 | 271 | py | Python | tools/schedprof/schedprof/mutex.py | ivochkin/dfk | 9d38cc657c18e065e2249865244eba9067b49b2a | [
"MIT"
] | 1 | 2016-05-23T16:18:41.000Z | 2016-05-23T16:18:41.000Z | tools/schedprof/schedprof/mutex.py | ivochkin/dfk | 9d38cc657c18e065e2249865244eba9067b49b2a | [
"MIT"
] | null | null | null | tools/schedprof/schedprof/mutex.py | ivochkin/dfk | 9d38cc657c18e065e2249865244eba9067b49b2a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from schedprof.enumerated_instance import EnumeratedInstance
| 22.583333 | 60 | 0.678967 |
31c2954ee75af37e6605445fd453d42700c71a8e | 1,606 | py | Python | openpype/hosts/houdini/plugins/publish/increment_current_file.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | 87 | 2021-05-07T08:40:46.000Z | 2022-03-19T00:36:25.000Z | openpype/hosts/houdini/plugins/publish/increment_current_file.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | 1,019 | 2021-04-26T06:22:56.000Z | 2022-03-31T16:30:43.000Z | openpype/hosts/houdini/plugins/publish/increment_current_file.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | 33 | 2021-04-29T12:35:54.000Z | 2022-03-25T14:48:42.000Z | import pyblish.api
import avalon.api
from openpype.api import version_up
from openpype.action import get_errored_plugins_from_data
| 30.884615 | 67 | 0.643213 |
31c40d9158227b21f8cb192d674e54997bf631fc | 2,586 | py | Python | 968 Binary Tree Cameras.py | krishna13052001/LeetCode | cd6ec626bea61f0bd9e8493622074f9e69a7a1c3 | [
"MIT"
] | 872 | 2015-06-15T12:02:41.000Z | 2022-03-30T08:44:35.000Z | 968 Binary Tree Cameras.py | nadeemshaikh-github/LeetCode | 3fb14aeea62a960442e47dfde9f964c7ffce32be | [
"MIT"
] | 8 | 2015-06-21T15:11:59.000Z | 2022-02-01T11:22:34.000Z | 968 Binary Tree Cameras.py | nadeemshaikh-github/LeetCode | 3fb14aeea62a960442e47dfde9f964c7ffce32be | [
"MIT"
] | 328 | 2015-06-28T03:10:35.000Z | 2022-03-29T11:05:28.000Z | #!/usr/bin/python3
"""
Given a binary tree, we install cameras on the nodes of the tree.
Each camera at a node can monitor its parent, itself, and its immediate children.
Calculate the minimum number of cameras needed to monitor all nodes of the tree.
Example 1:
Input: [0,0,null,0,0]
Output: 1
Explanation: One camera is enough to monitor all nodes if placed as shown.
Example 2:
Input: [0,0,null,0,null,0,null,null,0]
Output: 2
Explanation: At least two cameras are needed to monitor all nodes of the tree.
The above image shows one of the valid configurations of camera placement.
Note:
The number of nodes in the given tree will be in the range [1, 1000].
Every node has value 0.
"""
# Definition for a binary tree node.
| 23.944444 | 81 | 0.587394 |
31c5aa8dd90d83d5636e20aed612d574a8d8b309 | 308 | py | Python | examples/get_tiktoks_by_sound.py | twitter-79/TikTok-Api | 366fd2b157dfa66586aa6edc91c50ccf1cf00785 | [
"MIT"
] | 2,095 | 2019-05-26T17:07:02.000Z | 2022-03-31T11:42:32.000Z | examples/get_tiktoks_by_sound.py | twitter-79/TikTok-Api | 366fd2b157dfa66586aa6edc91c50ccf1cf00785 | [
"MIT"
] | 633 | 2019-06-11T06:42:03.000Z | 2022-03-31T11:43:42.000Z | examples/get_tiktoks_by_sound.py | twitter-79/TikTok-Api | 366fd2b157dfa66586aa6edc91c50ccf1cf00785 | [
"MIT"
] | 630 | 2019-09-30T18:11:58.000Z | 2022-03-30T16:23:01.000Z | from TikTokApi import TikTokApi
api = TikTokApi.get_instance()
count = 30
# You can find this from a tiktok getting method in another way or find songs from the discoverMusic method.
sound_id = "6601861313180207878"
tiktoks = api.by_sound(sound_id, count=count)
for tiktok in tiktoks:
print(tiktok)
| 22 | 108 | 0.772727 |
31c5c0e1d9a4cbb4ae462748e1bb8e68224eb9d2 | 4,464 | py | Python | code/SimPleAC_pof_paperplots.py | 1ozturkbe/robustSPpaper | d90f01945f96c5bd45f3518665e52d920820e4d7 | [
"MIT"
] | null | null | null | code/SimPleAC_pof_paperplots.py | 1ozturkbe/robustSPpaper | d90f01945f96c5bd45f3518665e52d920820e4d7 | [
"MIT"
] | 7 | 2018-06-25T14:51:29.000Z | 2019-06-26T18:20:12.000Z | code/SimPleAC_pof_paperplots.py | 1ozturkbe/robustSPpaper | d90f01945f96c5bd45f3518665e52d920820e4d7 | [
"MIT"
] | null | null | null | from builtins import str
from builtins import range
from robust.simulations.simulate import filter_gamma_result_dict
from SimPleAC_save import load_obj
import pickle as pickle
import numpy as np
import matplotlib.pyplot as plt
from SimPleAC_pof_simulate import pof_parameters
if __name__ == "__main__":
# Retrieving pof parameters
[model, methods, gammas, number_of_iterations,
min_num_of_linear_sections, max_num_of_linear_sections, verbosity, linearization_tolerance,
number_of_time_average_solves, uncertainty_sets, nominal_solution, directly_uncertain_vars_subs, parallel,
nominal_number_of_constraints, nominal_solve_time] = pof_parameters()
method = methods[0] # only care about Best Pairs
# Loading results
margin = {}
nGammas = nmargins = len(gammas)
margins = gammas
margin['solutions'] = {}
for i in range(nmargins):
margin['solutions'][margins[i]] = pickle.load(open("marginResults/" +
str(margins[i]), 'rb'))
margin['number_of_constraints'] = load_obj('marginnumber_of_constraints', 'marginResults')
margin['simulation_results'] = load_obj('marginsimulation_results', 'marginResults')
gamma = {}
gamma['solutions'] = {}
for i in range(nGammas):
for j in range(len(methods)):
for k in range((len(uncertainty_sets))):
gamma['solutions'][gammas[i], methods[j]['name'], uncertainty_sets[k]] = pickle.load(open(
"gammaResults\\" + str((gammas[i], methods[j]['name'], uncertainty_sets[k])), 'rb'))
gamma['solve_times'] = load_obj('gammasolve_times', 'gammaResults')
gamma['simulation_results'] = load_obj('gammasimulation_results', 'gammaResults')
gamma['number_of_constraints'] = load_obj('gammanumber_of_constraints', 'gammaResults')
# Plotting of cost and probability of failure
objective_name = 'Total fuel weight'
objective_units = 'N'
title = ''
filteredResults = [margin['solutions'],
filter_gamma_result_dict(gamma['solutions'], 1, method['name'], 2, 'box'),
filter_gamma_result_dict(gamma['solutions'], 1, method['name'], 2, 'ellipsoidal')]
filteredSimulations = [margin['simulation_results'],
filter_gamma_result_dict(gamma['simulation_results'], 1, method['name'], 2, 'box'),
filter_gamma_result_dict(gamma['simulation_results'], 1, method['name'], 2, 'ellipsoidal')]
objective_varkey = 'W_{f_m}'
legend_keys = ['margins', 'box', 'ellipsoidal']
edgecolors = ['#FFBF00', '#CC0000', '#008000']
facecolors = ['#FFE135','#FF2052', '#8DB600']
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
lines = []
mincost = 1e10
maxcost = 0
for i in range(len(legend_keys)):
sims = list(filteredSimulations[i].items())
pofs = []
objective_costs = []
objective_stddev = []
for j in sims:
pofs.append(j[1][0])
objective_costs.append(j[1][1])
objective_stddev.append(j[1][2])
mincost = np.min([mincost] + objective_costs)
maxcost = np.max([maxcost] + objective_costs)
lines.append(ax1.plot(gammas, objective_costs, color=edgecolors[i], label=legend_keys[i] + ', cost'))
inds = np.nonzero(np.ones(len(gammas)) - pofs)[0]
uppers = [objective_costs[ind] + objective_stddev[ind] for ind in inds]
lowers = [objective_costs[ind] - objective_stddev[ind] for ind in inds]
x = [gammas[ind] for ind in inds]
ax1.fill_between(x, lowers, uppers,
alpha=0.5, edgecolor = edgecolors[i], facecolor = facecolors[i])
lines.append(ax2.plot(gammas, pofs, color=edgecolors[i], label=legend_keys[i] + ', PoF'))
ax1.set_xlabel(r'Uncertainty Set Scaling Factor $\Gamma$', fontsize=12)
ax1.set_ylabel('Cost [' + objective_name + ' (' + objective_units.capitalize() + ')]', fontsize=12)
ax2.set_ylabel("Probability of Failure", fontsize=12)
ax1.set_ylim([mincost, maxcost])
ax2.set_ylim([0, 1])
plt.title(title, fontsize=12)
labs = [lines[l][0].get_label() for l in [1,3,5,0,2,4]]
ax1.legend(labs, loc="lower right", fontsize=9, numpoints=1)
# ax1.legend(loc="lower right", fontsize=10, numpoints=1)
# fig.legend(loc="lower right", fontsize=10, numpoints=1)
plt.show()
| 49.054945 | 124 | 0.641801 |
31c6e6ace01eea05877a86d1f6316d5a911da292 | 588 | py | Python | test/show-cifar10.py | tom01h/deep-learning-from-scratch | acb3c31976cd736b4abd21c3e8ab81c3bf0eb9bb | [
"MIT"
] | 3 | 2018-10-11T16:19:18.000Z | 2022-01-16T07:48:06.000Z | test/show-cifar10.py | tom01h/deep-learning-from-scratch | acb3c31976cd736b4abd21c3e8ab81c3bf0eb9bb | [
"MIT"
] | null | null | null | test/show-cifar10.py | tom01h/deep-learning-from-scratch | acb3c31976cd736b4abd21c3e8ab81c3bf0eb9bb | [
"MIT"
] | null | null | null | # coding: utf-8
import sys, os
sys.path.append(os.pardir) #
import numpy as np
from dataset.cifar10 import load_cifar10
from PIL import Image
np.set_printoptions(threshold=100)
(x_train, t_train), (x_test, t_test) = load_cifar10(flatten=False)
sample_image = x_test[0:100].reshape((10, 10, 3, 32, 32)).transpose((0, 3, 1, 4, 2)).reshape((320, 320, 3)) # 100
Image.fromarray(np.uint8(sample_image*255)).save('sample.png')
print(t_test[0:100].reshape(10,10))
#pil_img = Image.fromarray(np.uint8(sample_image*255))
#pil_img.show()
| 34.588235 | 128 | 0.727891 |
31c78d6966a9d84a523a15b22e795f490c2201f9 | 44 | py | Python | vertex-server/signals/__init__.py | aoswalt/greenlite-hardware | 056ed78829519f49adab60dbcf67878243fe764e | [
"MIT"
] | null | null | null | vertex-server/signals/__init__.py | aoswalt/greenlite-hardware | 056ed78829519f49adab60dbcf67878243fe764e | [
"MIT"
] | 1 | 2016-11-01T23:55:07.000Z | 2016-11-01T23:55:07.000Z | vertex-server/signals/__init__.py | aoswalt/greenlite-hardware | 056ed78829519f49adab60dbcf67878243fe764e | [
"MIT"
] | null | null | null | from . import lights
from . import schedule
| 14.666667 | 22 | 0.772727 |
31c7910d7253d24e22e70937e36be79e678386eb | 10,533 | py | Python | PWWS/fool.py | ForeverZyh/ASCC | 2d76d679889953501c469221a37d486e7ee42ded | [
"MIT"
] | 21 | 2021-03-22T07:14:29.000Z | 2022-03-24T02:05:25.000Z | PWWS/fool.py | ForeverZyh/ASCC | 2d76d679889953501c469221a37d486e7ee42ded | [
"MIT"
] | 2 | 2021-04-07T11:31:01.000Z | 2022-01-10T03:41:10.000Z | PWWS/fool.py | ForeverZyh/ASCC | 2d76d679889953501c469221a37d486e7ee42ded | [
"MIT"
] | 4 | 2021-05-05T18:44:13.000Z | 2021-07-29T03:09:50.000Z | # coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
import os
import numpy as np
from read_files import split_imdb_files, split_yahoo_files, split_agnews_files
from word_level_process import word_process, get_tokenizer
from char_level_process import char_process
from neural_networks import word_cnn, char_cnn, bd_lstm, lstm
from adversarial_tools import ForwardGradWrapper, adversarial_paraphrase
import tensorflow as tf
from keras import backend as K
import time
from unbuffered import Unbuffered
sys.stdout = Unbuffered(sys.stdout)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
parser = argparse.ArgumentParser(
description='Craft adversarial examples for a text classifier.')
parser.add_argument('--clean_samples_cap',
help='Amount of clean(test) samples to fool',
type=int, default=1000)
parser.add_argument('-m', '--model',
help='The model of text classifier',
choices=['word_cnn', 'char_cnn', 'word_lstm', 'word_bdlstm'],
default='word_cnn')
parser.add_argument('-d', '--dataset',
help='Data set',
choices=['imdb', 'agnews', 'yahoo'],
default='imdb')
parser.add_argument('-l', '--level',
help='The level of process dataset',
choices=['word', 'char'],
default='word')
if __name__ == '__main__':
args = parser.parse_args()
fool_text_classifier()
| 46.606195 | 122 | 0.619102 |
31c871b146933705ca94093543636c2b4a72c392 | 22,970 | py | Python | test_training_data.py | miermans/gym-2048 | 39f2cf375ef936284677a97b373aa2b97c8e45fc | [
"MIT"
] | null | null | null | test_training_data.py | miermans/gym-2048 | 39f2cf375ef936284677a97b373aa2b97c8e45fc | [
"MIT"
] | 2 | 2021-05-26T20:24:09.000Z | 2021-05-27T08:44:54.000Z | test_training_data.py | miermans/gym-2048 | 39f2cf375ef936284677a97b373aa2b97c8e45fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import
import numpy as np
import os
import pytest
import tempfile
import training_data
if __name__ == '__main__':
import pytest
pytest.main()
| 44.173077 | 101 | 0.428515 |
31c9193304e5d2e005f4e4e2afb188aa62cfb25b | 2,218 | py | Python | tests/test_db_exam_psd.py | awenhaowenchao/bee | cac7522f8994aa3067c6e0a1bb3613de5c577129 | [
"MIT"
] | 4 | 2019-11-12T05:01:42.000Z | 2022-02-23T01:52:11.000Z | tests/test_db_exam_psd.py | awenhaowenchao/bee | cac7522f8994aa3067c6e0a1bb3613de5c577129 | [
"MIT"
] | 6 | 2021-03-19T08:13:39.000Z | 2022-03-02T15:00:19.000Z | tests/test_db_exam_psd.py | awenhaowenchao/bee | cac7522f8994aa3067c6e0a1bb3613de5c577129 | [
"MIT"
] | null | null | null | from datetime import datetime
from bee import Psd, CX, On, T
from bee import Model, IntegerField, StringField, DateTimeField, Equal, W, C
db_exam = Psd.open("exam")
# 1) sing table count search, SELECT COUNT(*) AS COUNT FROM t_teacher
with db_exam.connection() as conn:
teacher_count = db_exam.Select(*CX("COUNT(*)", "COUNT")).From("t_teacher").int()
print("total techer count is %s" % teacher_count)
# 2) sing table search, SELECT * FROM t_teacher
with db_exam.connection() as conn:
teachers = db_exam.Select(*CX("*")).From("t_teacher").list()
print(teachers)
# 3) sing table search, SELECT * FROM t_teacher convert values to model of Teacher
with db_exam.connection() as conn:
teachers = db_exam.Select(*CX("*")).From("t_teacher").list(Teacher)
print(teachers)
# 4) sing table search, SELECT * FROM t_teacher WHERE id=? convert values to model of Teacher
with db_exam.connection() as conn:
teachers = db_exam.Select(*CX("*")).From("t_teacher").Where(W().equal("id", 1004)).list(Teacher)
print(teachers)
# 5) tow table Join search, SELECT DISTINCT id,cid,score FROM t_student JOIN t_sc ON id=sid WHERE id=?
with db_exam.connection() as conn:
result = db_exam.Query(C("id", "cid", "score"), True)\
.From("t_student")\
.Join("t_sc", On("id", "sid"))\
.Where(Equal("id", 1001))\
.list()
print(result)
#or use alias mode like 'SELECT DISTINCT s.id,sc.cid,sc.score FROM t_student AS s JOIN t_sc AS sc ON s.id=sc.sid WHERE s.id=?'
with db_exam.connection() as conn:
result = db_exam.Query(C("s.id", "sc.cid", "sc.score"), True)\
.From(T("t_student", "s"))\
.Join(T("t_sc", "sc"), On("s.id", "sc.sid"))\
.Where(Equal("s.id", 1001))\
.list()
print(result)
# 6) with transaction
with db_exam.transaction():
# insert sql
# update sql
# raise exception
# update Sql
pass
# 7) sing table search, SELECT * FROM t_student limit 0, 5
with db_exam.connection() as conn:
students = db_exam.Select(*CX("*")).From("t_student").limit(1, 5).list()
print(students)
| 31.239437 | 126 | 0.654193 |
31cb176f4032d56f3c4634406ddd887dbecb2fe6 | 19,781 | py | Python | slybot/slybot/plugins/scrapely_annotations/builder.py | coolkunal64/ht | b7c52d5604dd75ea4086a6ff92eaa2db85bb145c | [
"BSD-3-Clause"
] | null | null | null | slybot/slybot/plugins/scrapely_annotations/builder.py | coolkunal64/ht | b7c52d5604dd75ea4086a6ff92eaa2db85bb145c | [
"BSD-3-Clause"
] | null | null | null | slybot/slybot/plugins/scrapely_annotations/builder.py | coolkunal64/ht | b7c52d5604dd75ea4086a6ff92eaa2db85bb145c | [
"BSD-3-Clause"
] | null | null | null | import json
from scrapy import Selector
from scrapy.utils.spider import arg_to_iter
from scrapely.htmlpage import parse_html, HtmlTag, HtmlDataFragment
from collections import defaultdict
from itertools import tee, count, groupby
from operator import itemgetter
from slybot.utils import (serialize_tag, add_tagids, remove_tagids, TAGID,
OPEN_TAG, CLOSE_TAG, UNPAIRED_TAG, GENERATEDTAGID)
from .migration import _get_parent, short_guid
def _get_data_id(annotation):
"""Get id (a str) of an annotation."""
if isinstance(annotation, HtmlTag):
return annotation.attributes[TAGID]
| 41.210417 | 79 | 0.553258 |
31ce1f02b533697e41ca279b2476dd124fe63eb7 | 9,840 | py | Python | scripts/slave/recipes/mojo.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipes/mojo.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipes/mojo.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | 1 | 2020-07-22T09:16:32.000Z | 2020-07-22T09:16:32.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'adb',
'depot_tools/bot_update',
'depot_tools/gclient',
'goma',
'recipe_engine/context',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'recipe_engine/url',
'depot_tools/tryserver',
]
| 32.582781 | 80 | 0.613821 |
31d0500f716a9df50359013b1a61c1658f74b81f | 3,101 | py | Python | lib/navigation/AreaFighting.py | sadnecc/pb | 3142764e350f92034eb671cfc3d1dae42dea0f0a | [
"MIT"
] | null | null | null | lib/navigation/AreaFighting.py | sadnecc/pb | 3142764e350f92034eb671cfc3d1dae42dea0f0a | [
"MIT"
] | null | null | null | lib/navigation/AreaFighting.py | sadnecc/pb | 3142764e350f92034eb671cfc3d1dae42dea0f0a | [
"MIT"
] | 1 | 2021-05-30T11:15:02.000Z | 2021-05-30T11:15:02.000Z | # -*- coding:utf8 -*-
import random
import time
from lib.navigation.PathFinding import Pathfinding
from lib.control.Control import Control
from lib.unit.Player import Player
from lib.struct.CoordiPoint import CoordiPoint
#
| 43.676056 | 137 | 0.525637 |
31d09d41c952173a6ae2b73dccad4ea1fbc25f01 | 722 | py | Python | compress.py | willemwouters/PhotoboothPi | 7ef65d1411af15ea51e23ea8ddbd598affd2680d | [
"Beerware"
] | null | null | null | compress.py | willemwouters/PhotoboothPi | 7ef65d1411af15ea51e23ea8ddbd598affd2680d | [
"Beerware"
] | null | null | null | compress.py | willemwouters/PhotoboothPi | 7ef65d1411af15ea51e23ea8ddbd598affd2680d | [
"Beerware"
] | null | null | null | import os
import time
import sys
if(len(sys.argv) is 1):
path="/home/pi/storage/"
else:
path=sys.argv[1]
try:
arr=[]
for filename in os.listdir(path):
if("2018-09" in filename):
arr.append(filename)
for f in arr:
filen = os.path.splitext(f)[0]
if(("%s.h264" % filen) in arr) and (("%s.mp3" % filen) in arr and ("%s.mp4" % filen) not in arr):
if(("%s.h264" % filen) == f):
time.sleep(1)
os.system("ffmpeg -i %s -i %s -c:v copy -c:a aac -strict experimental %s" % (path + f, path + filen + ".mp3", path + filen + ".mp4"))
os.system("rm %s %s" % (path + filen + ".mp3", path + f))
except:
print "d" | 30.083333 | 149 | 0.50831 |
31d152e1371b0f6b06f2bd25172cc000079294dd | 71 | py | Python | html/en/reference/graphs/sage/graphs/graph_plot-2.py | sagemath/documentation | 65dcf569b6e95bfae7c76b40a46af3a9f77479f4 | [
"Apache-2.0"
] | 10 | 2015-05-17T10:52:08.000Z | 2022-03-28T12:15:09.000Z | html/en/reference/graphs/sage/graphs/graph_plot-2.py | sagemath/documentation | 65dcf569b6e95bfae7c76b40a46af3a9f77479f4 | [
"Apache-2.0"
] | 19 | 2015-05-15T17:06:31.000Z | 2021-08-25T09:13:17.000Z | html/en/reference/graphs/sage/graphs/graph_plot-2.py | sagemath/documentation | 65dcf569b6e95bfae7c76b40a46af3a9f77479f4 | [
"Apache-2.0"
] | 21 | 2015-12-15T21:19:29.000Z | 2022-01-03T14:24:20.000Z | petersen_spring = Graph(':I`ES@obGkqegW~')
sphinx_plot(petersen_spring) | 35.5 | 42 | 0.802817 |
31d15ba780a95a70da6a42fd922fdf3f8a69aedc | 77 | py | Python | module00/ex05/kata00.py | MedAymenF/42AI-Python-bootcamp | 41af2221b95b305ee08ee8f582e68700f1a8c32b | [
"Apache-2.0"
] | null | null | null | module00/ex05/kata00.py | MedAymenF/42AI-Python-bootcamp | 41af2221b95b305ee08ee8f582e68700f1a8c32b | [
"Apache-2.0"
] | null | null | null | module00/ex05/kata00.py | MedAymenF/42AI-Python-bootcamp | 41af2221b95b305ee08ee8f582e68700f1a8c32b | [
"Apache-2.0"
] | null | null | null | t = (19, 42, 21)
print(f"The {len(t)} numbers are: {t[0]}, {t[1]}, {t[2]}")
| 19.25 | 58 | 0.467532 |
31d16107e52098b68243258cade721f1a3c378e4 | 3,002 | py | Python | examples/cochrane-simplification/log_regression/bow_newsela_lm_tokens.py | AshOlogn/transformers | c4b8c360d4aa78642f4a815ddd2ba9c9fa304c8d | [
"Apache-2.0"
] | null | null | null | examples/cochrane-simplification/log_regression/bow_newsela_lm_tokens.py | AshOlogn/transformers | c4b8c360d4aa78642f4a815ddd2ba9c9fa304c8d | [
"Apache-2.0"
] | null | null | null | examples/cochrane-simplification/log_regression/bow_newsela_lm_tokens.py | AshOlogn/transformers | c4b8c360d4aa78642f4a815ddd2ba9c9fa304c8d | [
"Apache-2.0"
] | null | null | null | import json
import os
from os.path import join
from random import shuffle
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import MinMaxScaler, normalize
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split
from sklearn.metrics import accuracy_score
from transformers import BertTokenizer, BertConfig, BartTokenizer
print(simple_term_counts())
| 31.93617 | 110 | 0.672552 |
31d1dcdc84557e4ef3daa2e742b3df53f7c45b0e | 1,569 | py | Python | fdrtd_server/exceptions.py | UNakade/server | f659524242d01fe67f9801ab41fabf46640ad590 | [
"MIT"
] | null | null | null | fdrtd_server/exceptions.py | UNakade/server | f659524242d01fe67f9801ab41fabf46640ad590 | [
"MIT"
] | null | null | null | fdrtd_server/exceptions.py | UNakade/server | f659524242d01fe67f9801ab41fabf46640ad590 | [
"MIT"
] | null | null | null | import logging as _logging
| 22.414286 | 78 | 0.684512 |
31d2496126215c595f14a661c15b593c05970c11 | 3,081 | py | Python | TCU/usageexample/automationexample.py | p--q/TCU | 51e2569cdc7553344f2ce279107a39677c3b106e | [
"BSD-3-Clause"
] | null | null | null | TCU/usageexample/automationexample.py | p--q/TCU | 51e2569cdc7553344f2ce279107a39677c3b106e | [
"BSD-3-Clause"
] | null | null | null | TCU/usageexample/automationexample.py | p--q/TCU | 51e2569cdc7553344f2ce279107a39677c3b106e | [
"BSD-3-Clause"
] | null | null | null | #!/opt/libreoffice5.4/program/python
# -*- coding: utf-8 -*-
import unohelper # (uno)
g_exportedScripts = macro, #
if __name__ == "__main__": #
XSCRIPTCONTEXT = automation() # XSCRIPTCONTEXT
macro() #
| 50.508197 | 162 | 0.73937 |
31d28cfa4763d607d589139656b5abdc86e64785 | 77 | py | Python | last_char.py | AkhilaSaiBejjarapu/Python | 238cc7692cf2e93eb585a03967b8d688ee3760f2 | [
"MIT"
] | null | null | null | last_char.py | AkhilaSaiBejjarapu/Python | 238cc7692cf2e93eb585a03967b8d688ee3760f2 | [
"MIT"
] | null | null | null | last_char.py | AkhilaSaiBejjarapu/Python | 238cc7692cf2e93eb585a03967b8d688ee3760f2 | [
"MIT"
] | null | null | null | word=input()
last_letter=(len(word)-1)
result=word[last_letter]
print(result) | 19.25 | 25 | 0.779221 |
31d43ead09e1c7effc26eae228b072a20a8b0310 | 3,261 | py | Python | simple_retry/decorators.py | nicolasmota/retry_decorator | 65eab450e65fe8c08d07cd213628e655baa5ae55 | [
"MIT"
] | 11 | 2018-03-06T17:09:50.000Z | 2018-10-26T04:31:50.000Z | simple_retry/decorators.py | nicolasmota/retry_decorator | 65eab450e65fe8c08d07cd213628e655baa5ae55 | [
"MIT"
] | 9 | 2018-03-06T03:56:44.000Z | 2018-10-26T04:48:42.000Z | simple_retry/decorators.py | nicolasmota/retry_decorator | 65eab450e65fe8c08d07cd213628e655baa5ae55 | [
"MIT"
] | 2 | 2018-03-15T03:11:14.000Z | 2018-07-07T17:11:06.000Z | import time
from functools import wraps
import asyncio
from simple_retry.simple_retry.helpers import (
format_retry_message,
has_retries_to_go,
log_message
)
| 26.512195 | 77 | 0.462435 |
31d44c5f099da57a280d3e04440215f00f79e111 | 153 | py | Python | environment.py | bopopescu/cbrc-devteam-blog | eb4f7977d112b1ee692dad60ed46802d2ee243f4 | [
"Apache-2.0"
] | null | null | null | environment.py | bopopescu/cbrc-devteam-blog | eb4f7977d112b1ee692dad60ed46802d2ee243f4 | [
"Apache-2.0"
] | null | null | null | environment.py | bopopescu/cbrc-devteam-blog | eb4f7977d112b1ee692dad60ed46802d2ee243f4 | [
"Apache-2.0"
] | 1 | 2020-07-24T03:59:01.000Z | 2020-07-24T03:59:01.000Z | # application environment
import settings
import sys
sys.path.append(settings.app_home_dir)
sys.path.append(settings.app_settings["app_lib_dir"])
| 21.857143 | 54 | 0.797386 |
31d4b6913b04eb19080a816c2290d803b8ff2f23 | 8,618 | py | Python | cogeo_mosaic/backends/base.py | drnextgis/cogeo-mosaic | 034d0124a2da894c2bb432b1c0cebba7f716edbd | [
"MIT"
] | null | null | null | cogeo_mosaic/backends/base.py | drnextgis/cogeo-mosaic | 034d0124a2da894c2bb432b1c0cebba7f716edbd | [
"MIT"
] | null | null | null | cogeo_mosaic/backends/base.py | drnextgis/cogeo-mosaic | 034d0124a2da894c2bb432b1c0cebba7f716edbd | [
"MIT"
] | null | null | null | """cogeo_mosaic.backend.base: base Backend class."""
import abc
import itertools
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
import attr
import mercantile
from cachetools import TTLCache, cached
from cachetools.keys import hashkey
from morecantile import TileMatrixSet
from rio_tiler.constants import WEB_MERCATOR_TMS
from rio_tiler.errors import PointOutsideBounds
from rio_tiler.io import BaseReader, COGReader
from rio_tiler.models import ImageData
from rio_tiler.mosaic import mosaic_reader
from rio_tiler.tasks import multi_values
from cogeo_mosaic.backends.utils import find_quadkeys, get_hash
from cogeo_mosaic.cache import cache_config
from cogeo_mosaic.errors import NoAssetFoundError
from cogeo_mosaic.models import Info, Metadata
from cogeo_mosaic.mosaic import MosaicJSON
from cogeo_mosaic.utils import bbox_union
| 36.058577 | 137 | 0.636343 |
31d61f0a33b68e1cb755859a34a3948798308cb2 | 5,190 | py | Python | userge/core/methods/decorators/on_filters.py | wildyvpn-network/bot | 87459495000bd6004b8f62a9cb933c164da9ef29 | [
"MIT"
] | null | null | null | userge/core/methods/decorators/on_filters.py | wildyvpn-network/bot | 87459495000bd6004b8f62a9cb933c164da9ef29 | [
"MIT"
] | null | null | null | userge/core/methods/decorators/on_filters.py | wildyvpn-network/bot | 87459495000bd6004b8f62a9cb933c164da9ef29 | [
"MIT"
] | null | null | null | # pylint: disable=missing-module-docstring
#
# Copyright (C) 2020 by UsergeTeam@Github, < https://github.com/UsergeTeam >.
#
# This file is part of < https://github.com/UsergeTeam/Userge > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/uaudith/Userge/blob/master/LICENSE >
#
# All rights reserved.
__all__ = ['OnFilters']
from pyrogram.filters import Filter as RawFilter
from ... import types
from . import RawDecorator
| 43.613445 | 89 | 0.527746 |
31d79e6d0a59cc3302d9155c1c4c15215d0a9e1b | 1,387 | py | Python | pygromos/tests/test_submission/test_hpc_queuing_submission_scheduling.py | pultar/PyGromosTools | 3c104c560c2e654972a036e2060b120ade96f655 | [
"MIT"
] | 13 | 2021-03-17T09:29:37.000Z | 2022-01-14T20:42:16.000Z | pygromos/tests/test_submission/test_hpc_queuing_submission_scheduling.py | pultar/PyGromosTools | 3c104c560c2e654972a036e2060b120ade96f655 | [
"MIT"
] | 185 | 2021-03-03T14:24:55.000Z | 2022-03-31T18:39:29.000Z | pygromos/tests/test_submission/test_hpc_queuing_submission_scheduling.py | pultar/PyGromosTools | 3c104c560c2e654972a036e2060b120ade96f655 | [
"MIT"
] | 13 | 2021-03-03T14:18:06.000Z | 2022-02-17T09:48:55.000Z | import unittest, tempfile
from pygromos.simulations.hpc_queuing.job_scheduling.schedulers import simulation_scheduler
from pygromos.data.simulation_parameters_templates import template_md
from pygromos.data.topology_templates import blank_topo_template
from pygromos.simulations.hpc_queuing.submission_systems import DUMMY
from pygromos.files.gromos_system.gromos_system import Gromos_System
from pygromos.tests.in_testfiles import in_test_file_path
from pygromos.tests.test_files import out_test_root_dir
| 46.233333 | 114 | 0.746215 |
31d7a9d787341b47673ced552899077d803f3aa3 | 1,934 | py | Python | tutorials.py | Xython/pattern-matching | 17ccdb68189353f1c63032013f5ef6f1ca4c0902 | [
"MIT"
] | 20 | 2017-12-31T05:45:47.000Z | 2021-05-15T22:08:21.000Z | tutorials.py | Xython/Destruct.py | 17ccdb68189353f1c63032013f5ef6f1ca4c0902 | [
"MIT"
] | null | null | null | tutorials.py | Xython/Destruct.py | 17ccdb68189353f1c63032013f5ef6f1ca4c0902 | [
"MIT"
] | 1 | 2018-01-12T04:54:19.000Z | 2018-01-12T04:54:19.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 30 17:03:01 2017
@author: misakawa
"""
from pattern_matching import Match, when, var, T, t, _, overwrite
from numpy.random import randint
class Bound1:
pass
assert add(1, 1) == 2
assert add(Bound2()) == 2
assert add(Bound3()) == 3
assert add(1, Bound1(), 'last') == 'last'
m = Match(1, 2, (3, int))
[a, b, c] = m.case(var[int], var, *var[tuple]).get
assert a == 1 and b == 2 and c == ((3, int), )
[c2] = m.case((_, _, (_, var.when(is_type)))).get
assert c2 == int
assert summary(list(range(100))) == 4950
qsort(randint(0, 500, size=(1200, )))
assert trait_test(1) == 1
assert trait_test(Population()) == 1000
| 14.763359 | 74 | 0.588418 |
31d8a178060a23e17a236c26a55e351c521d366e | 3,823 | py | Python | RepositoryBootstrap/EnvironmentDiffs.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | null | null | null | RepositoryBootstrap/EnvironmentDiffs.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | 1 | 2018-06-08T06:45:16.000Z | 2018-06-08T06:45:16.000Z | RepositoryBootstrap/EnvironmentDiffs.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | 1 | 2018-06-08T04:15:17.000Z | 2018-06-08T04:15:17.000Z | # ----------------------------------------------------------------------
# |
# | EnvironmentDiffs.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-06-02 22:19:34
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Displays changes made by an environment during activation."""
import json
import os
import sys
import textwrap
import six
import CommonEnvironment
from CommonEnvironment import CommandLine
from CommonEnvironment.Shell.All import CurrentShell
from RepositoryBootstrap import Constants
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def GetOriginalEnvironment():
# Get the original environment
generated_dir = os.getenv(Constants.DE_REPO_GENERATED_NAME)
assert os.path.isdir(generated_dir), generated_dir
original_environment_filename = os.path.join(generated_dir, Constants.GENERATED_ACTIVATION_ORIGINAL_ENVIRONMENT_FILENAME)
assert os.path.isfile(original_environment_filename), original_environment_filename
with open(original_environment_filename) as f:
return json.load(f)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
try: sys.exit(CommandLine.Main())
except KeyboardInterrupt: pass
| 36.409524 | 126 | 0.437876 |
31da9f9cf7a9feda53f89aeafcbeadfbe26ac626 | 7,837 | py | Python | tests/model/test_ocrd_mets.py | wrznr/pyocrd | 25c4dd8c60285b7877803e2b627d72c8c0a4ab1e | [
"Apache-2.0"
] | null | null | null | tests/model/test_ocrd_mets.py | wrznr/pyocrd | 25c4dd8c60285b7877803e2b627d72c8c0a4ab1e | [
"Apache-2.0"
] | null | null | null | tests/model/test_ocrd_mets.py | wrznr/pyocrd | 25c4dd8c60285b7877803e2b627d72c8c0a4ab1e | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from os.path import join
from tests.base import TestCase, main, assets, copy_of_directory
from ocrd_utils import (
initLogging,
VERSION,
MIMETYPE_PAGE
)
from ocrd_models import OcrdMets
# pylint: disable=protected-access,deprecated-method,too-many-public-methods
if __name__ == '__main__':
main()
| 44.528409 | 139 | 0.643486 |
31db05913c960fafbf96871656aa566e21ebbd4d | 7,862 | py | Python | robo_gym/envs/ur/ur_avoidance_basic.py | psFournier/robo-gym | 0e67a36c0cbeac885c53b92de8f3f1f13e286c9a | [
"MIT"
] | 236 | 2020-04-15T10:50:45.000Z | 2022-03-31T14:28:52.000Z | robo_gym/envs/ur/ur_avoidance_basic.py | psFournier/robo-gym | 0e67a36c0cbeac885c53b92de8f3f1f13e286c9a | [
"MIT"
] | 36 | 2020-07-13T17:11:32.000Z | 2022-02-21T14:01:33.000Z | robo_gym/envs/ur/ur_avoidance_basic.py | psFournier/robo-gym | 0e67a36c0cbeac885c53b92de8f3f1f13e286c9a | [
"MIT"
] | 51 | 2020-04-24T08:58:31.000Z | 2022-03-18T17:14:23.000Z | """
Environment for basic obstacle avoidance controlling a robotic arm from UR.
In this environment the obstacle is only moving up and down in a vertical line in front of the robot.
The goal is for the robot to stay within a predefined minimum distance to the moving obstacle.
When feasible the robot should continue to the original configuration,
otherwise wait for the obstacle to move away before proceeding
"""
import numpy as np
from typing import Tuple
from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2
from robo_gym.envs.simulation_wrapper import Simulation
from robo_gym.envs.ur.ur_base_avoidance_env import URBaseAvoidanceEnv
# base, shoulder, elbow, wrist_1, wrist_2, wrist_3
JOINT_POSITIONS = [-1.57, -1.31, -1.31, -2.18, 1.57, 0.0]
DEBUG = True
MINIMUM_DISTANCE = 0.3 # the distance [cm] the robot should keep to the obstacle
# roslaunch ur_robot_server ur_robot_server.launch ur_model:=ur5 real_robot:=true rviz_gui:=true gui:=true reference_frame:=base max_velocity_scale_factor:=0.2 action_cycle_rate:=20 rs_mode:=moving | 47.077844 | 197 | 0.672602 |
31dbeeeb585ae91b3ec528faf0591108ed8cc73b | 848 | py | Python | hear_me_django_app/accounts/management/commands/initial_users.py | kamil1marczak/hear_me_django_app | 2a567c15acddbf6bf183c6c637a3785c2a9c9c5c | [
"MIT"
] | null | null | null | hear_me_django_app/accounts/management/commands/initial_users.py | kamil1marczak/hear_me_django_app | 2a567c15acddbf6bf183c6c637a3785c2a9c9c5c | [
"MIT"
] | null | null | null | hear_me_django_app/accounts/management/commands/initial_users.py | kamil1marczak/hear_me_django_app | 2a567c15acddbf6bf183c6c637a3785c2a9c9c5c | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password
from django.core.management.base import BaseCommand
from ._private import populate_user
User = get_user_model()
| 36.869565 | 118 | 0.714623 |
31dd0da78d51189eef9e478f249f06c8a43016ca | 1,789 | py | Python | config/constants.py | flopezag/fiware-tsc-dashboard | af80673707c9b2fb85c9f4aa12bce12a20ef4431 | [
"Apache-2.0"
] | null | null | null | config/constants.py | flopezag/fiware-tsc-dashboard | af80673707c9b2fb85c9f4aa12bce12a20ef4431 | [
"Apache-2.0"
] | 37 | 2017-02-23T09:08:58.000Z | 2019-08-13T09:34:40.000Z | config/constants.py | flopezag/fiware-tsc-dashboard | af80673707c9b2fb85c9f4aa12bce12a20ef4431 | [
"Apache-2.0"
] | 2 | 2017-12-19T15:06:33.000Z | 2019-05-02T17:24:45.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
##
# Copyright 2017 FIWARE Foundation, e.V.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##
__author__ = 'fla'
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'
APPLICATION_NAME = 'TSC Enablers Dashboard'
CREDENTIAL_DIR = '.credentials'
CREDENTIAL_FILE = 'sheets.googleapis.com.json'
DB_NAME = 'enablers-dashboard.db'
DB_FOLDER = 'dbase'
LOG_FILE = 'tsc-dashboard.log'
# We need to add 16 rows in the number of enablers list corresponding to:
# - Title
# - Report date
# - Data sources updated on
# - Source
# - Units
# - Enabler Impl
# - INCUBATED
# - DEVELOPMENT
# - SUPPORT
# - DEPRECATED
# - And 6 extra blank rows between them
FIXED_ROWS = 16
# We keep the firsts row without change in the sheet (sheet title)
INITIAL_ROW = 2
# The number of columns to delete corresponds to:
# Source, Catalogue, ReadTheDocs, Docker, GitHub, Coverall, Academy, HelpDesk, Backlog, GitHub_Open_Issues,
# GitHub_Closed_Issues, GitHub_Adopters, GitHub_Adopters_Open_Issues, GitHub_Adopters_Closed_Issues,
# GitHub_Comits, GitHub_Forks, GitHub_Watchers, GitHub_Stars, Jira_WorkItem_Not_Closed, Jira_WorkItem_Closed
# + Extra 2 = 22
FIXED_COLUMNS = 22
# We start to delete from the initial column
INITIAL_COLUMN = 1
| 31.946429 | 108 | 0.755729 |
31dd6e6741a804d90f5239811383ca0cdca9f19d | 12,218 | py | Python | tensornetwork/backends/backend_test.py | ashoknar/TensorNetwork | 82636b75a0c53b5447c84d9a4e85226fe0e6f43a | [
"Apache-2.0"
] | null | null | null | tensornetwork/backends/backend_test.py | ashoknar/TensorNetwork | 82636b75a0c53b5447c84d9a4e85226fe0e6f43a | [
"Apache-2.0"
] | null | null | null | tensornetwork/backends/backend_test.py | ashoknar/TensorNetwork | 82636b75a0c53b5447c84d9a4e85226fe0e6f43a | [
"Apache-2.0"
] | null | null | null | """Tests for graphmode_tensornetwork."""
import builtins
import sys
import pytest
import numpy as np
from tensornetwork import connect, contract, Node
from tensornetwork.backends.base_backend import BaseBackend
from tensornetwork.backends import backend_factory
def test_base_backend_name():
backend = BaseBackend()
assert backend.name == "base backend"
def test_base_backend_tensordot_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.tensordot(np.ones((2, 2)), np.ones((2, 2)), axes=[[0], [0]])
def test_base_backend_reshape_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.reshape(np.ones((2, 2)), (4, 1))
def test_base_backend_transpose_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.transpose(np.ones((2, 2)), [0, 1])
def test_base_backend_slice_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.slice(np.ones((2, 2)), (0, 1), (1, 1))
def test_base_backend_svd_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.svd_decomposition(np.ones((2, 2)), 0)
def test_base_backend_qr_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.qr_decomposition(np.ones((2, 2)), 0)
def test_base_backend_rq_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.rq_decomposition(np.ones((2, 2)), 0)
def test_base_backend_shape_concat_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_concat([np.ones((2, 2)), np.ones((2, 2))], 0)
def test_base_backend_shape_tensor_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_tensor(np.ones((2, 2)))
def test_base_backend_shape_tuple_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_tuple(np.ones((2, 2)))
def test_base_backend_shape_prod_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_prod(np.ones((2, 2)))
def test_base_backend_sqrt_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sqrt(np.ones((2, 2)))
def test_base_backend_diag_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.diag(np.ones((2, 2)))
def test_base_backend_convert_to_tensor_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.convert_to_tensor(np.ones((2, 2)))
def test_base_backend_trace_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.trace(np.ones((2, 2)))
def test_base_backend_outer_product_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.outer_product(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_einsul_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.einsum("ii", np.ones((2, 2)))
def test_base_backend_norm_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.norm(np.ones((2, 2)))
def test_base_backend_eye_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eye(2, dtype=np.float64)
def test_base_backend_ones_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.ones((2, 2), dtype=np.float64)
def test_base_backend_zeros_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.zeros((2, 2), dtype=np.float64)
def test_base_backend_randn_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.randn((2, 2))
def test_base_backend_random_uniforl_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.random_uniform((2, 2))
def test_base_backend_conj_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.conj(np.ones((2, 2)))
def test_base_backend_eigh_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigh(np.ones((2, 2)))
def test_base_backend_eigs_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigs(np.ones((2, 2)))
def test_base_backend_eigs_lanczos_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigsh_lanczos(lambda x: x, np.ones((2)))
def test_base_backend_addition_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.addition(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_subtraction_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.subtraction(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_multiply_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.multiply(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_divide_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.divide(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_index_update_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.index_update(np.ones((2, 2)), np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_inv_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.inv(np.ones((2, 2)))
def test_base_backend_sin_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sin(np.ones((2, 2)))
def test_base_backend_cos_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.cos(np.ones((2, 2)))
def test_base_backend_exp_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.exp(np.ones((2, 2)))
def test_base_backend_log_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.log(np.ones((2, 2)))
def test_base_backend_expm_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.expm(np.ones((2, 2)))
def test_base_backend_sparse_shape_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sparse_shape(np.ones((2, 2)))
def test_base_backend_broadcast_right_multiplication_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_right_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_broadcast_left_multiplication_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_left_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_backend_instantiation(backend):
backend1 = backend_factory.get_backend(backend)
backend2 = backend_factory.get_backend(backend)
assert backend1 is backend2
| 29.8 | 77 | 0.761581 |
31dd79c83f754d036eb084c170cefc01374db92c | 633 | py | Python | src/GUI/Plotter.py | sbooeshaghi/pegasus | 32ca075b38a72a7955209657a8326ac749f658a3 | [
"BSD-2-Clause"
] | 1 | 2021-08-31T13:30:25.000Z | 2021-08-31T13:30:25.000Z | src/GUI/Plotter.py | pachterlab/pegasus | 32ca075b38a72a7955209657a8326ac749f658a3 | [
"BSD-2-Clause"
] | 1 | 2020-10-27T16:42:55.000Z | 2020-10-27T16:42:55.000Z | src/GUI/Plotter.py | pachterlab/pegasus | 32ca075b38a72a7955209657a8326ac749f658a3 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pyqtgraph as pg
import numpy as np
if __name__ == '__main__':
w = CustomWidget()
w.show() | 31.65 | 77 | 0.63981 |
31de64f0189fb656e61e3cf8d36bbc5c08efed8c | 2,733 | py | Python | tests/test_collapsible.py | TehMillhouse/sphinxawesome-theme | 5130b8b4c2546ceaccf37353fa6a0bfb4526303c | [
"MIT"
] | 17 | 2020-07-10T12:05:07.000Z | 2022-03-08T03:40:49.000Z | tests/test_collapsible.py | TehMillhouse/sphinxawesome-theme | 5130b8b4c2546ceaccf37353fa6a0bfb4526303c | [
"MIT"
] | 475 | 2020-05-22T09:44:25.000Z | 2022-03-27T08:01:23.000Z | tests/test_collapsible.py | TehMillhouse/sphinxawesome-theme | 5130b8b4c2546ceaccf37353fa6a0bfb4526303c | [
"MIT"
] | 10 | 2020-12-23T11:14:57.000Z | 2022-02-13T08:51:02.000Z | """Tests for collapsible definition lists.
When the option ``html_collapsible_definitions``
is ``True``, some HTML classes should be added
to some definition lists but not all of them.
"""
from pathlib import Path
import pytest
from sphinx.application import Sphinx
from .util import parse_html
| 30.032967 | 80 | 0.642883 |
31df4d7e972bd1519fc475be70b05e383b709299 | 1,618 | py | Python | Iris Network/Conclusion/task.py | jetbrains-academy/Machine-Learning-101 | 7b583dbff1e90115296dcaeac78ca88363c158c9 | [
"MIT"
] | null | null | null | Iris Network/Conclusion/task.py | jetbrains-academy/Machine-Learning-101 | 7b583dbff1e90115296dcaeac78ca88363c158c9 | [
"MIT"
] | 10 | 2021-11-22T16:51:52.000Z | 2022-02-14T12:57:57.000Z | Iris Network/Conclusion/task.py | jetbrains-academy/Machine-Learning-101 | 7b583dbff1e90115296dcaeac78ca88363c158c9 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from network import NN
from evaluate import accuracy
if __name__ == '__main__':
X, y = read_data('iris.csv')
# comment the following line if you don't need the plot anymore
plot_data(X, y)
X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
nn = NN(len(X[0]), 5, 1)
output = nn.feedforward(X_train)
print(output)
print(f'w1 before backward propagation: \n{nn.w1} \nw2 before backward propagation:\n{nn.w2}')
nn.backward(X_train, y_train, output)
print(f'w1 after backward propagation: \n{nn.w1} \nw2 after backward propagation:\n{nn.w2}')
nn.train(X_train, y_train)
print("Accuracy:")
print(accuracy(nn, X_test, y_test))
| 33.708333 | 105 | 0.65513 |
31e177fb5a84a661f6f3ed3c32e0ead9540dfcd1 | 1,160 | py | Python | agentless/crypto.py | tinyauth/agentless | 50f30dbb11007fd58c057a38c61783bff282603f | [
"Apache-2.0"
] | null | null | null | agentless/crypto.py | tinyauth/agentless | 50f30dbb11007fd58c057a38c61783bff282603f | [
"Apache-2.0"
] | null | null | null | agentless/crypto.py | tinyauth/agentless | 50f30dbb11007fd58c057a38c61783bff282603f | [
"Apache-2.0"
] | null | null | null | from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
backend = default_backend()
| 25.777778 | 66 | 0.712931 |
31e18d81d721e6fc0a0c74da919f35393478b123 | 324 | py | Python | test/programytest/storage/entities/test_nodes.py | cdoebler1/AIML2 | ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a | [
"MIT"
] | 345 | 2016-11-23T22:37:04.000Z | 2022-03-30T20:44:44.000Z | test/programytest/storage/entities/test_nodes.py | MikeyBeez/program-y | 00d7a0c7d50062f18f0ab6f4a041068e119ef7f0 | [
"MIT"
] | 275 | 2016-12-07T10:30:28.000Z | 2022-02-08T21:28:33.000Z | test/programytest/storage/entities/test_nodes.py | VProgramMist/modified-program-y | f32efcafafd773683b3fe30054d5485fe9002b7d | [
"MIT"
] | 159 | 2016-11-28T18:59:30.000Z | 2022-03-20T18:02:44.000Z | import unittest
import unittest.mock
from programy.storage.entities.nodes import NodesStore
| 21.6 | 54 | 0.703704 |
31e1ce88e4424fa367dbbc4289f23529ddd13fe8 | 1,939 | py | Python | sphinx/source/tutorial/exercises/stocks.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | [
"BSD-3-Clause"
] | null | null | null | sphinx/source/tutorial/exercises/stocks.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | [
"BSD-3-Clause"
] | null | null | null | sphinx/source/tutorial/exercises/stocks.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | [
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Here is some code to read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000",
parse_dates=['Date'])
GOOG = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=GOOG&a=0&b=1&c=2000",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000",
parse_dates=['Date'])
output_file("stocks.html", title="stocks.py example")
# EXERCISE: turn on plot hold
# EXERCISE: finish this line plot, and add more for the other stocks. Each one should
# have a legend, and its own color.
line(
AAPL['Date'], # x coordinates
AAPL['Adj Close'], # y coordinates
color='#A6CEE3', # set a color for the line
legend='AAPL', # attach a legend label
x_axis_type = "datetime", # NOTE: only needed on first
tools="pan,wheel_zoom,box_zoom,reset,previewsave" # NOTE: only needed on first
)
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
# EXERCISE: start a new figure
# Here is some code to compute the 30-day moving average for AAPL
aapl = AAPL['Adj Close']
aapl_dates = AAPL['Date']
window_size = 30
window = np.ones(window_size)/float(window_size)
aapl_avg = np.convolve(aapl, window, 'same')
# EXERCISE: plot a scatter of circles for the individual AAPL prices with legend
# 'close'. Remember to set the x axis type and tools on the first renderer.
# EXERCISE: plot a line of the AAPL moving average data with the legeng 'avg'
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
show() # open a browser
| 34.017544 | 85 | 0.638473 |
31e29f3d6b52be28f77756b4ec61862d6adf938c | 1,828 | py | Python | nni/retiarii/converter/visualize.py | qfyin/nni | 59a1ccf8eba68b94974e84fc3834f38d851faf89 | [
"MIT"
] | 3 | 2021-02-23T14:01:43.000Z | 2021-03-29T16:19:32.000Z | nni/retiarii/converter/visualize.py | qfyin/nni | 59a1ccf8eba68b94974e84fc3834f38d851faf89 | [
"MIT"
] | 1 | 2021-01-17T08:53:56.000Z | 2021-01-17T08:53:56.000Z | nni/retiarii/converter/visualize.py | qfyin/nni | 59a1ccf8eba68b94974e84fc3834f38d851faf89 | [
"MIT"
] | 1 | 2020-12-21T11:15:54.000Z | 2020-12-21T11:15:54.000Z | import graphviz
| 43.52381 | 124 | 0.516958 |
31e44da249242c3967f376c2f079200c57cbe554 | 155 | py | Python | Python/Tests/TestData/TestDiscoverer/ConfigUnittest/Product/prefix_not_included.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 404 | 2019-05-07T02:21:57.000Z | 2022-03-31T17:03:04.000Z | Python/Tests/TestData/TestDiscoverer/ConfigUnittest/Product/prefix_not_included.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 1,672 | 2019-05-06T21:09:38.000Z | 2022-03-31T23:16:04.000Z | Python/Tests/TestData/TestDiscoverer/ConfigUnittest/Product/prefix_not_included.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 186 | 2019-05-13T03:17:37.000Z | 2022-03-31T16:24:05.000Z | import unittest
if __name__ == '__main__':
unittest.main()
| 17.222222 | 43 | 0.709677 |
31e59bd3f15670f0f52fb2ebf16c987e7332b1b1 | 885 | py | Python | customBackground.py | VisweshK/Jashmup | ca0cf639000734c5aea8583d9477af9a387f6d46 | [
"MIT"
] | null | null | null | customBackground.py | VisweshK/Jashmup | ca0cf639000734c5aea8583d9477af9a387f6d46 | [
"MIT"
] | null | null | null | customBackground.py | VisweshK/Jashmup | ca0cf639000734c5aea8583d9477af9a387f6d46 | [
"MIT"
] | null | null | null | '''
This is the class to create a scrolling background.
Because the background was so large, it was made to be a .jpg.
'''
import pygame, os
| 31.607143 | 96 | 0.638418 |
31e6ea9406db1015334a06a90ed69fe2df85ccfc | 1,705 | py | Python | src/python/squarepants/file_utils.py | ericzundel/mvn2pants | 59776864939515bc0cae28e1b89944ce55b98b21 | [
"Apache-2.0"
] | 8 | 2015-04-14T22:37:56.000Z | 2021-01-20T19:46:40.000Z | src/python/squarepants/file_utils.py | ericzundel/mvn2pants | 59776864939515bc0cae28e1b89944ce55b98b21 | [
"Apache-2.0"
] | 1 | 2016-01-13T23:19:14.000Z | 2016-01-22T22:47:48.000Z | src/python/squarepants/file_utils.py | ericzundel/mvn2pants | 59776864939515bc0cae28e1b89944ce55b98b21 | [
"Apache-2.0"
] | 3 | 2015-12-13T08:35:34.000Z | 2018-08-01T17:44:59.000Z | import os
import shutil
from contextlib import contextmanager
from tempfile import mkdtemp, mktemp
def file_pattern_exists_in_subdir(subdir, pattern):
"""Search for a file pattern recursively in a subdirectory
:param subdir: directory to search recursively
:param re.RegexObject pattern: compiled regular expression object from re.compile()
:return: True if a file with the named pattern exists in the subdirectory
:rtype: bool
"""
for (dirpath, dirnames, filenames) in os.walk(subdir):
for filename in filenames:
if pattern.match(filename):
return True
return False
def touch(fname, times=None, makedirs=False):
"""Creates the specified file at the named path (and optionally sets the time)."""
if makedirs:
directory = os.path.dirname(fname)
if not os.path.exists(directory):
os.makedirs(directory)
with open(fname, 'a'):
os.utime(fname, times)
| 26.230769 | 97 | 0.719062 |
31e76ef0dddf511d5e363ce2b9c0502413fbe8c1 | 1,366 | py | Python | docs/DSDC/miniprez/miniprez/continuous_integration.py | thoppe/Presentation_Topics | e9aba07e9ab087b44e6044c6082ba8e873a9b4fd | [
"MIT"
] | 2 | 2018-12-03T17:03:19.000Z | 2018-12-10T16:42:39.000Z | docs/DSDC/miniprez/miniprez/continuous_integration.py | thoppe/Presentation_Topics_in_NLP | e9aba07e9ab087b44e6044c6082ba8e873a9b4fd | [
"MIT"
] | 1 | 2019-02-19T15:12:19.000Z | 2019-02-19T15:12:19.000Z | docs/DSDC/miniprez/miniprez/continuous_integration.py | thoppe/Presentation_Topics_in_NLP | e9aba07e9ab087b44e6044c6082ba8e873a9b4fd | [
"MIT"
] | 1 | 2019-02-19T12:51:37.000Z | 2019-02-19T12:51:37.000Z | import asyncio
import os
from parser import miniprez_markdown, build_body
import logging
logger = logging.getLogger("miniprez")
def build_html(f_target):
"""
Build the html from the markdown.
"""
f_html_output = f_target.replace(".md", ".html")
logger.info(f"Building {f_target} to {f_html_output}")
with open(f_target) as FIN:
markdown = FIN.read()
html = miniprez_markdown(markdown)
soup = build_body(html)
with open(f_html_output, "w") as FOUT:
FOUT.write(soup.prettify())
| 24.836364 | 74 | 0.678624 |
31e9ecb8c0d331c1cbff40bfe30ea5db0aed7e97 | 3,943 | py | Python | rl_algorithms/dqn/linear.py | yonghangzhou/rl_algorithms | fe373bf77c9007e4e1d7134e1d610131125fa4b7 | [
"MIT"
] | 1 | 2020-11-12T07:48:49.000Z | 2020-11-12T07:48:49.000Z | rl_algorithms/dqn/linear.py | yonghangzhou/rl_algorithms | fe373bf77c9007e4e1d7134e1d610131125fa4b7 | [
"MIT"
] | null | null | null | rl_algorithms/dqn/linear.py | yonghangzhou/rl_algorithms | fe373bf77c9007e4e1d7134e1d610131125fa4b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Linear module for dqn algorithms
- Author: Kyunghwan Kim
- Contact: kh.kim@medipixel.io
"""
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from rl_algorithms.common.helper_functions import numpy2floattensor
# Run on the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
| 33.700855 | 87 | 0.673852 |
31ea1b716a1b8a3e2fc957132ac8497e9ccd0dcb | 10,826 | py | Python | 2015/day7/2015-day7-part2.py | matt-the-ogre/advent-of-code | 7188089d4db4a99fa09ef8366137fe28d1c28205 | [
"MIT"
] | 1 | 2021-12-03T18:17:54.000Z | 2021-12-03T18:17:54.000Z | 2015/day7/2015-day7-part2.py | matt-the-ogre/advent-of-code | 7188089d4db4a99fa09ef8366137fe28d1c28205 | [
"MIT"
] | null | null | null | 2015/day7/2015-day7-part2.py | matt-the-ogre/advent-of-code | 7188089d4db4a99fa09ef8366137fe28d1c28205 | [
"MIT"
] | null | null | null | # Advent of Code - 2015 - Day 7
# --- Day 7: Some Assembly Required ---
# This year, Santa brought little Bobby Tables a set of wires and bitwise logic gates! Unfortunately, little Bobby is a little under the recommended age range, and he needs help assembling the circuit.
# Each wire has an identifier (some lowercase letters) and can carry a 16-bit signal (a number from 0 to 65535). A signal is provided to each wire by a gate, another wire, or some specific value. Each wire can only get a signal from one source, but can provide its signal to multiple destinations. A gate provides no signal until all of its inputs have a signal.
# The included instructions booklet describes how to connect the parts together: x AND y -> z means to connect wires x and y to an AND gate, and then connect its output to wire z.
# For example:
# 123 -> x means that the signal 123 is provided to wire x.
# x AND y -> z means that the bitwise AND of wire x and wire y is provided to wire z.
# p LSHIFT 2 -> q means that the value from wire p is left-shifted by 2 and then provided to wire q.
# NOT e -> f means that the bitwise complement of the value from wire e is provided to wire f.
# Other possible gates include OR (bitwise OR) and RSHIFT (right-shift). If, for some reason, you'd like to emulate the circuit instead, almost all programming languages (for example, C, JavaScript, or Python) provide operators for these gates.
# For example, here is a simple circuit:
# 123 -> x
# 456 -> y
# x AND y -> d
# x OR y -> e
# x LSHIFT 2 -> f
# y RSHIFT 2 -> g
# NOT x -> h
# NOT y -> i
# After it is run, these are the signals on the wires:
# d: 72
# e: 507
# f: 492
# g: 114
# h: 65412
# i: 65079
# x: 123
# y: 456
# In little Bobby's kit's instructions booklet (provided as your puzzle input), what signal is ultimately provided to wire a?
import time, math  # NOTE(review): `math` appears unused in this chunk — confirm before removing

# Wall-clock start so the script can report its own runtime at the end.
startTime = time.perf_counter() # time in seconds (float)
debug = False
timing = True
unitTesting = False

# Design notes (author's working thoughts, kept for context):
# maybe a dictionary again?
# circuitStrings = {"a" : {"input" : 1, "output" : NaN}}
# parse the input text file to set up the circuitStrings inputs, then just roll
# through the dictionary to calculate the outputs
# how will I be sure that the output has been calculated to be the input for the
# next circuitStrings?
# can I assume the input file is "in order"? Probably not.
# does this mean some sort of recursion algorithm?
# maybe if I populate the outputs with 'NaN' (or Python equivalent) then check
# that it's not that before using its output
# I can make it recurse through the inputs, calculating any that have fully
# realized inputs?

# circuitStrings: raw instruction lines such as "x AND y -> d".
# circuitDict: wire name -> circuit description, populated by
# createCircuitDict() (defined elsewhere in this file).
circuitStrings = []
circuitDict = {}

# unit tests, kind of: when unitTesting is set, use the worked example
# from the puzzle description instead of the real puzzle input file.
if unitTesting:
    print("Unit Testing")
    circuitStrings = ["123 -> x","456 -> y", "x AND y -> d", "x OR y -> e", "x LSHIFT 2 -> f", "y RSHIFT 2 -> g", "NOT x -> h", "NOT y -> i"]
else:
    # read the input text file into a variable called circuitStrings
    with open("2015/day7/input-part2.txt","r") as inputString:
        circuitStrings = inputString.readlines()
    # remove trailing newlines so each entry is a bare instruction string
    for i in range(0, len(circuitStrings)):
        circuitStrings[i] = circuitStrings[i].rstrip()

# parse the input to create the dictionary, then evaluate the circuit.
# Both functions are defined elsewhere in this file.
createCircuitDict()
doConnection()

# show the circuits
if debug:
    for circuit in circuitDict:
        print(circuit,":",circuitDict[circuit])

if unitTesting:
    # Compare every computed wire value against the expected outputs from
    # the puzzle's worked example; stop at the first mismatch.
    testPass = False
    testPassOutput = {"d": {"output" : 72}, "e": {"output" : 507}, "f": {"output" : 492}, "g": {"output" : 114}, "h": {"output" : 65412}, "i": {"output" : 65079}, "x": {"output" : 123}, "y": {"output" : 456}}
    for wire in testPassOutput:
        testPassWire = testPassOutput[wire]["output"]
        circuitWire = circuitDict[wire]["output"]
        if debug:
            print("wire", wire, "test:", testPassWire, "calc:", circuitWire)
        testPass = testPassWire == circuitWire
        if testPass is False:
            break
    print("testPass:", testPass)
else:
    # The puzzle answer is the final signal on wire "a".
    print(circuitDict["a"]["output"])
    # this answer for my input is 46065 (part 1), 14134 (part 2)

endTime = time.perf_counter() # time in seconds (float)
if timing:
    print("Execution took ", endTime - startTime, " seconds.")
| 42.289063 | 362 | 0.608627 |