hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a29454fa75452cb28ef7fa8567271f51c49e623f | 3,132 | py | Python | tests/run_examples.py | theGreenJedi/neon | b85ba0fbbb0458d8a8599e5ead335959b10318c1 | [
"Apache-2.0"
] | null | null | null | tests/run_examples.py | theGreenJedi/neon | b85ba0fbbb0458d8a8599e5ead335959b10318c1 | [
"Apache-2.0"
] | 3 | 2021-06-08T23:56:39.000Z | 2022-03-12T00:56:34.000Z | tests/run_examples.py | theGreenJedi/neon | b85ba0fbbb0458d8a8599e5ead335959b10318c1 | [
"Apache-2.0"
#!/usr/bin/env python
# this script runs all examples and checks that they all
# run without throwing an exception
from __future__ import print_function
import os
import sys
from glob import glob
import subprocess as subp
from datetime import timedelta
from timeit import default_timer as timer

# Modify the following to suit your environment
NUM_EPOCHS = 2          # epochs passed to every example via -e
BACKEND = "gpu"         # neon backend passed via -b
SUBSET_PCT = 1          # dataset percentage for the large-dataset examples
TINY_SUBSET_PCT = .1    # even smaller subset for the slowest examples
ADDITIONAL_ARGS = ""
BASE_DATA_DIR = '~/nervana/data'

# skip - not a training example
FILES_TO_SKIP = ['examples/deep_dream.py']
# skip - need to download dataset
FILES_TO_SKIP += ['examples/imdb/train.py', 'examples/whale_calls.py', 'examples/music_genres.py']
# examples that need a dataset-specific macrobatch directory passed via -w
ADD_I1K_BATCH_DIR = ['examples/alexnet.py', 'examples/imagenet_allcnn.py',
                     'examples/vgg_bn.py', 'examples/i1k_msra.py']
ADD_CIFAR_BATCH_DIR = ['examples/cifar10_msra.py']
ADD_UCF101_BATCH_DIR = ['examples/video-c3d/train.py']
# examples that should be run on a reduced dataset subset
ADD_SUBSET_PCT = ADD_I1K_BATCH_DIR + ADD_UCF101_BATCH_DIR
ADD_TINY_SUBSET_PCT = ['examples/fast-rcnn/train.py', 'examples/vgg_bn.py']

# Jenkins environment setup
if os.getenv("EXECUTOR_NUMBER"):
    BASE_DATA_DIR = '/usr/local/data/jenkins'
    # pin each Jenkins executor to its own device id via -i
    ADDITIONAL_ARGS += "-i {}".format(os.getenv("EXECUTOR_NUMBER"))
I1K_BATCH_DIR = os.path.join(BASE_DATA_DIR, 'I1K/macrobatches')
CIFAR_BATCH_DIR = os.path.join(BASE_DATA_DIR, 'CIFAR10/macrobatches')
UCF101_BATCH_DIR = os.path.join(BASE_DATA_DIR, 'UCF-101/ucf-preprocessed')

if not os.path.isdir('examples'):
    raise IOError('Must run from root dir of none repo')
# check for venv activations
cmd = 'if [ -z "$VIRTUAL_ENV" ];then exit 1;else exit 0;fi'
if subp.call(cmd, shell=True) > 0:
    raise IOError('Need to activate the virtualenv')

examples = glob('examples/*.py') + glob('examples/*/train.py')
skipped = []
results = []  # one [script, return code, elapsed seconds] entry per example
for ex in sorted(examples):
    if ex in FILES_TO_SKIP:
        skipped.append(ex)
        continue
    # arguments common to every example run
    cmdargs = "-e {} -b {} --serialize 1 -v --no_progress_bar -s {} {}".format(
        NUM_EPOCHS, BACKEND, os.path.splitext(ex)[0] + '.prm',
        ADDITIONAL_ARGS)
    cmd = "python {} ".format(ex) + cmdargs
    # point each example at the data directory it expects
    if ex in ADD_I1K_BATCH_DIR:
        cmd += ' -w {}'.format(I1K_BATCH_DIR)
    elif ex in ADD_CIFAR_BATCH_DIR:
        cmd += ' -w {}'.format(CIFAR_BATCH_DIR)
    elif ex in ADD_UCF101_BATCH_DIR:
        cmd += ' -w {} -z 16'.format(UCF101_BATCH_DIR)
    else:
        cmd += ' -w {}'.format(BASE_DATA_DIR)
    if ex in ADD_TINY_SUBSET_PCT:
        cmd += ' --subset_pct {}'.format(TINY_SUBSET_PCT)
    elif ex in ADD_SUBSET_PCT:
        cmd += ' --subset_pct {}'.format(SUBSET_PCT)
    start = timer()
    rc = subp.call(cmd, shell=True)  # non-zero return code marks a failure
    end = timer()
    results.append([ex, rc, end - start])

# Summarize: successes with timing, then skips, then failures.
print('\nFound {} scripts:'.format(len(examples)))
for dat in results:
    if dat[1] == 0:
        print('SUCCESS on {} in {}'.format(dat[0], timedelta(seconds=int(dat[2]))))
for ex in skipped:
    print('SKIPPED {}'.format(ex))
errors = 0
for dat in results:
    if dat[1] != 0:
        print('FAILURE on {}'.format(dat[0]))
        errors += 1
# Exit status is the failure count so CI can gate on it.
print("\nExiting with %d errors" % errors)
sys.exit(errors)
| 31.959184 | 98 | 0.678799 |
a296a4a9d7aba1261750f949b22f9a0ca56bbbcf | 5,888 | py | Python | critical/rankorder.py | NECOTIS/CRITICAL | eba2dc9c90936f9cf51e04374081509be433ed10 | [
"BSD-3-Clause"
] | 1 | 2022-02-16T00:59:50.000Z | 2022-02-16T00:59:50.000Z | critical/rankorder.py | NECOTIS/CRITICAL | eba2dc9c90936f9cf51e04374081509be433ed10 | [
"BSD-3-Clause"
] | null | null | null | critical/rankorder.py | NECOTIS/CRITICAL | eba2dc9c90936f9cf51e04374081509be433ed10 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012-2018, NECOTIS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Authors: Simon Brodeur, Jean Rouat (advisor)
# Date: April 18th, 2019
# Organization: Groupe de recherche en Neurosciences Computationnelles et Traitement Intelligent des Signaux (NECOTIS),
# Universit de Sherbrooke, Canada
import logging
import numpy as np
import matplotlib.pyplot as plt
from brian2.units.stdunits import ms
from brian2.units.allunits import second
from matplotlib.lines import Line2D
logger = logging.getLogger(__name__)
| 34.432749 | 119 | 0.640625 |
a29750f0ca25c86bb147bca122dfcaad2818dc92 | 2,007 | py | Python | trac/wiki/tests/web_api.py | clubturbo/Trac-1.4.2 | 254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78 | [
"BSD-3-Clause"
] | null | null | null | trac/wiki/tests/web_api.py | clubturbo/Trac-1.4.2 | 254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78 | [
"BSD-3-Clause"
] | null | null | null | trac/wiki/tests/web_api.py | clubturbo/Trac-1.4.2 | 254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import textwrap
import unittest
from trac.mimeview.patch import PatchRenderer
from trac.test import EnvironmentStub, MockRequest
from trac.web.api import RequestDone
from trac.wiki.web_api import WikiRenderer
if __name__ == '__main__':
    # `test_suite` is defined elsewhere in this test module (not visible here).
    unittest.main(defaultTest='test_suite')
| 34.016949 | 77 | 0.620329 |
a2983711f38540e1e3b5409d4bc00bd0c00c0ae8 | 5,814 | py | Python | main.py | eyalnaor/DeepTemporalSR | 7d8c821431dec3a4c480550c61a6033fcac5e640 | [
"MIT"
] | 38 | 2020-09-04T10:53:50.000Z | 2021-08-29T13:10:41.000Z | main.py | eyalnaor/DeepTemporalSR | 7d8c821431dec3a4c480550c61a6033fcac5e640 | [
"MIT"
] | 1 | 2021-02-24T17:20:58.000Z | 2021-02-24T17:20:58.000Z | main.py | eyalnaor/DeepTemporalSR | 7d8c821431dec3a4c480550c61a6033fcac5e640 | [
"MIT"
] | 7 | 2020-12-03T12:11:49.000Z | 2021-08-16T14:43:28.000Z | import torch
import Network
import Network_res3d
from data_handler import *
import cProfile
import io
import pstats
parser = utils.create_parser()
args = parser.parse_args()
if __name__ == '__main__':
    # open comment to allow profiling
    # pr = cProfile.Profile()
    # pr.enable()
    # main()
    # pr.disable()
    # pr.print_stats(sort="cumtime")
    # s = io.StringIO()
    # ps = pstats.Stats(pr, stream=s).sort_stats('tottime')
    # ps.print_stats()
    # with open('profile.txt', 'w+') as f:
    #     f.write(s.getvalue())
    # NOTE(review): main() is defined elsewhere in this module (not shown here).
    main()
    print('done.')
| 46.887097 | 171 | 0.626935 |
a298626351e920d8afa27758b5249b92283fda64 | 308 | py | Python | package_name/__init__.py | netserf/template-python-repo | f6a2612b0e2dfd766c1287abb6e17f13fca44b93 | [
"MIT"
] | null | null | null | package_name/__init__.py | netserf/template-python-repo | f6a2612b0e2dfd766c1287abb6e17f13fca44b93 | [
"MIT"
] | null | null | null | package_name/__init__.py | netserf/template-python-repo | f6a2612b0e2dfd766c1287abb6e17f13fca44b93 | [
"MIT"
# `name` is the name of the package as used for `pip install package`
name = "package-name"
# `path` is the name of the package for `import package`
path = name.lower().replace("-", "_").replace(" ", "_")
version = "0.1.0"
author = "Author Name"
author_email = ""
description = ""  # summary
# NOTE: `license` shadows the built-in of the same name; kept as-is because
# setup.py reads these module-level attributes for package metadata.
license = "MIT"
| 30.8 | 69 | 0.655844 |
a29a495e3f7946f01ad82f159c0ca13bc042ba05 | 1,826 | py | Python | signaling_trajectories.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | signaling_trajectories.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | signaling_trajectories.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | """Transform signaling data to smoothed trajectories."""
import sys
import numpy
import pandas as pd
import geopandas as gpd
import shapely.geometry
import matplotlib.patches
import matplotlib.pyplot as plt
import mobilib.voronoi
SAMPLING = pd.Timedelta('00:01:00')
STD = pd.Timedelta('00:05:00')
if __name__ == '__main__':
    # argv[1]: signaling CSV, argv[2]: antennas CSV,
    # argv[3]: phone number to extract, argv[4]: output trajectory CSV.
    signals = pd.read_csv(sys.argv[1], sep=';')
    signals = signals[signals['phone_nr'] == int(sys.argv[3])]
    signals['pos_time'] = pd.to_datetime(signals['pos_time'])
    # Weight each record inversely to how many records share its timestamp.
    timeweights = (1 / signals.groupby('pos_time')['phone_nr'].count()).reset_index().rename(columns={'phone_nr' : 'weight'})
    signals = pd.merge(signals, timeweights, on='pos_time')
    antennas = pd.read_csv(sys.argv[2], sep=';')
    # Mean antenna centroid per timestamp approximates the phone position.
    siglocs = pd.merge(signals, antennas, on='cell_name').groupby('pos_time').agg({
        'xcent': 'mean',
        'ycent': 'mean',
    })
    # NOTE(review): trajectory() is defined elsewhere in this module (not
    # shown here); presumably resamples/smooths positions — confirm.
    xpos, ypos, tpos = trajectory(siglocs, 'xcent', 'ycent', sampling=SAMPLING, std=STD)
    plt.plot(xpos, ypos)
    plt.scatter(antennas.xcent, antennas.ycent, s=9, color='orange')
    plt.gca().set_aspect('equal')
    plt.show()
    pd.DataFrame({'x': xpos, 'y': ypos, 't': tpos}).to_csv(sys.argv[4], sep=';', index=False)
a29c18ce763ea0eb8b5497234efc7ee7fced0caa | 440 | py | Python | home/pi/_testing/logging-test.py | rc-bellergy/pxpi | e3d6d1d1a1f6d1fdf53341d314e7d549c8e84a68 | [
"MIT"
] | 26 | 2020-02-16T09:14:16.000Z | 2022-03-28T07:39:47.000Z | home/pi/_testing/logging-test.py | rc-bellergy/pxpi | e3d6d1d1a1f6d1fdf53341d314e7d549c8e84a68 | [
"MIT"
] | 1 | 2020-10-04T03:48:09.000Z | 2020-10-05T01:47:09.000Z | home/pi/_testing/logging-test.py | rc-bellergy/pxpi | e3d6d1d1a1f6d1fdf53341d314e7d549c8e84a68 | [
"MIT"
#!/usr/bin/env python
import logging
import logging.handlers
import os

# Logging to file
# Resolve the log path relative to this script so it works from any CWD;
# filemode='w' truncates the previous log on every run.
dir_path = os.path.dirname(os.path.realpath(__file__))
logging.basicConfig(filename=dir_path + "/test.log", format='%(asctime)s - %(message)s', level=logging.INFO, filemode='w')
# Logging messages to the console
console = logging.StreamHandler()
logger = logging.getLogger()
logger.addHandler(console)
# Logging test
logging.info("** Testing **")
a29c642613fdef33219868f8958a1851ae0b81aa | 1,556 | py | Python | test_client.py | ericjmartin/slackview | 28797ca06e13f5c9f97c1755e613c0e402ae0ea4 | [
"MIT"
] | null | null | null | test_client.py | ericjmartin/slackview | 28797ca06e13f5c9f97c1755e613c0e402ae0ea4 | [
"MIT"
] | null | null | null | test_client.py | ericjmartin/slackview | 28797ca06e13f5c9f97c1755e613c0e402ae0ea4 | [
"MIT"
] | null | null | null | import os
from slack_sdk.web import WebClient
from slack_sdk.socket_mode import SocketModeClient
# Initialize SocketModeClient with an app-level token + WebClient
client = SocketModeClient(
# This app-level token will be used only for establishing a connection
app_token=os.environ.get("SLACK_APP_TOKEN")
# You will be using this WebClient for performing Web API calls in listeners
web_client=WebClient(token=os.environ.get("SLACK_BOT_TOKEN")) # xoxb-111-222-xyz
)
from slack_sdk.socket_mode.response import SocketModeResponse
from slack_sdk.socket_mode.request import SocketModeRequest
# Add a new listener to receive messages from Slack
# You can add more listeners like this
client.socket_mode_request_listeners.append(process)
# Establish a WebSocket connection to the Socket Mode servers
client.connect()
# Just not to stop this process
from threading import Event
Event().wait() | 39.897436 | 85 | 0.720437 |
a29c710e6a5af2c146c941aed7a01353e7cc6f77 | 1,968 | py | Python | utils/misc.py | hengwei-chan/3D_SBDD | eda6d51aaf01ef25581a46920a25161678fab76d | [
"MIT"
] | 67 | 2021-12-02T05:53:44.000Z | 2022-03-31T07:21:26.000Z | utils/misc.py | hengwei-chan/3D_SBDD | eda6d51aaf01ef25581a46920a25161678fab76d | [
"MIT"
] | 13 | 2021-12-05T14:23:46.000Z | 2022-03-25T21:07:20.000Z | utils/misc.py | hengwei-chan/3D_SBDD | eda6d51aaf01ef25581a46920a25161678fab76d | [
"MIT"
] | 16 | 2022-01-11T11:48:24.000Z | 2022-03-27T19:20:58.000Z | import os
import time
import random
import logging
import torch
import numpy as np
import yaml
from easydict import EasyDict
from logging import Logger
from tqdm.auto import tqdm
| 24.911392 | 87 | 0.676829 |
a29d7acbfba5a243e1f4a49be6ce4cba089c4b1f | 2,928 | py | Python | tests/test_api.py | mattjm/iam-idbase | d96d1bada5adf4dbad9be212f1015e3d7399a63d | [
"Apache-2.0"
] | null | null | null | tests/test_api.py | mattjm/iam-idbase | d96d1bada5adf4dbad9be212f1015e3d7399a63d | [
"Apache-2.0"
] | null | null | null | tests/test_api.py | mattjm/iam-idbase | d96d1bada5adf4dbad9be212f1015e3d7399a63d | [
"Apache-2.0"
] | null | null | null | from idbase.api import RESTDispatch, LoginStatus
from idbase import exceptions
from django.http import HttpResponse
from mock import MagicMock
import pytest
import json
def test_rest_dispatch_run_get_basic(rest_dispatch, req):
    """A dict returned by GET is JSON-serialized with a 200 status.

    `rest_dispatch` and `req` are pytest fixtures defined elsewhere
    (presumably in conftest.py — not visible here).
    """
    response = rest_dispatch.run(req)
    assert response.status_code == 200
    assert response.content.decode() == json.dumps({'foo': 'bar'})
    assert (response._headers['content-type'] ==
            ('Content-Type', 'application/json'))
    rest_dispatch.GET.assert_called_once_with(req)
def test_rest_dispatch_run_http_response(rest_dispatch, req):
    """An HttpResponse returned by GET is passed through unchanged."""
    rest_dispatch.GET.side_effect = lambda x: HttpResponse(
        content='hello world', status=503)
    response = rest_dispatch.run(req)
    assert response.status_code == 503
    assert response.content.decode() == 'hello world'
def test_rest_dispatch_run_get_no_method(req):
    """A dispatcher without a GET handler responds 400 with an error body."""
    rd = RESTDispatch()
    response = rd.run(req)
    assert response.status_code == 400
    assert json.loads(response.content.decode()).get(
        'error_message', None) is not None
def test_rest_dispatch_run_invalid_session(rest_dispatch, req):
    """An InvalidSessionError raised by the handler maps to HTTP 401."""
    rest_dispatch.GET.side_effect = exceptions.InvalidSessionError()
    response = rest_dispatch.run(req)
    assert response.status_code == 401
def test_rest_dispatch_run_not_found(rest_dispatch, req):
    """A NotFoundError raised by the handler maps to HTTP 404."""
    rest_dispatch.GET.side_effect = exceptions.NotFoundError()
    response = rest_dispatch.run(req)
    assert response.status_code == 404
def test_rest_dispatch_run_exception(rest_dispatch, req):
    """Any unexpected exception from the handler maps to HTTP 500."""
    rest_dispatch.GET.side_effect = Exception()
    response = rest_dispatch.run(req)
    assert response.status_code == 500
def test_rest_dispatch_not_logged_in(rest_dispatch, req):
    """An unauthenticated user gets HTTP 401 before the handler runs."""
    req.user.is_authenticated.return_value = False
    response = rest_dispatch.run(req)
    assert response.status_code == 401
def test_rest_dispatch_no_login_necessary(req):
    """With login_required=False an unauthenticated request still succeeds."""
    req.user.is_authenticated.return_value = False
    rest_dispatch = RESTDispatch(login_required=False)
    rest_dispatch.GET = lambda x: {'foo': 'bar'}
    response = rest_dispatch.run(req)
    assert response.status_code == 200
    assert json.loads(response.content.decode()) == {'foo': 'bar'}
def test_login_status_get(req):
    """LoginStatus.GET reports the netid and full name of the current user."""
    req.user.netid = 'jo'
    req.user.get_full_name.return_value = 'Jo Blo'
    assert LoginStatus().GET(req) == {'netid': 'jo', 'name': 'Jo Blo'}
def test_login_status_no_auth(req):
    """LoginStatus.GET raises InvalidSessionError for anonymous users."""
    req.user.is_authenticated.return_value = False
    with pytest.raises(exceptions.InvalidSessionError):
        LoginStatus().GET(req)
| 30.185567 | 70 | 0.730874 |
a2a205807fc1a9002dcff612423d88ef56c86c00 | 5,885 | py | Python | volatility/volatility/plugins/mac/adiummsgs.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | [
"MIT"
] | 2 | 2018-07-16T13:30:40.000Z | 2018-07-17T12:02:05.000Z | volatility/volatility/plugins/mac/adiummsgs.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | [
"MIT"
] | null | null | null | volatility/volatility/plugins/mac/adiummsgs.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | [
"MIT"
] | null | null | null | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import os
import volatility.obj as obj
import volatility.plugins.mac.pstasks as pstasks
import volatility.plugins.mac.common as common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
| 35.884146 | 138 | 0.497026 |
a2a490d7174747d1795eadc9407c26effc4b112a | 14,165 | py | Python | mod_equations-master/mod_equations.py | userElaina/hg8 | 235dbeca3d58b94e1378ac4240ed8424791ae561 | [
"MIT"
] | null | null | null | mod_equations-master/mod_equations.py | userElaina/hg8 | 235dbeca3d58b94e1378ac4240ed8424791ae561 | [
"MIT"
] | null | null | null | mod_equations-master/mod_equations.py | userElaina/hg8 | 235dbeca3d58b94e1378ac4240ed8424791ae561 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Author @55-AA
19 July, 2016
'''
import copy
def gcd(a, b):
    """Return the greatest common denominator of integers a and b.

    Iterative Euclidean algorithm; equivalent to gmpy2.gcd(a, b).
    """
    while b:
        remainder = a % b
        a = b
        b = remainder
    return a
def egcd(a, b):
    """Extended Euclidean algorithm.

    ax + by = g, so when g == 1, x is the inverse of a modulo b.
    Return a 3-element tuple (g, x, y) with g = gcd(a, b).
    Equivalent to gmpy2.gcdext(a, b).
    """
    if a == 0:
        # Base case: gcd(0, b) = b = 0*0 + 1*b
        return (b, 0, 1)
    quotient, remainder = divmod(b, a)
    g, s, t = egcd(remainder, a)
    # remainder = b - quotient*a, hence
    # g = s*remainder + t*a = a*(t - quotient*s) + b*s
    return (g, t - quotient * s, s)
def mod_inv(a, m):
    """Return x such that a*x = 1 (mod m).

    Equivalent to gmpy2.invert(a, m).  Requires gcd(a, m) == 1;
    asserts otherwise, since no inverse exists.
    """
    g, inverse, _ = egcd(a, m)
    assert g == 1
    return inverse % m
def int2mem(x):
    """
    0x12233 => '\x33\x22\x01'

    Serialize an integer to a little-endian byte string.
    NOTE: Python 2 only — relies on str.decode('hex'); under Python 3
    this raises AttributeError (use bytes.fromhex / int.to_bytes there).
    """
    # Left-pad the hex string to an even number of digits so it decodes.
    pad_even = lambda x : ('', '0')[len(x)%2] + x
    x = list(pad_even(format(x, 'x')).decode('hex'))
    # Reverse the big-endian bytes to obtain little-endian order.
    x.reverse()
    return ''.join(x)
def mem2int(x):
    """
    '\x33\x22\x01' => 0x12233

    Deserialize a little-endian byte string to an integer (inverse of
    int2mem).  NOTE: Python 2 only — relies on str.encode('hex'); use
    int.from_bytes under Python 3.
    """
    x = list(x)
    # Reverse to big-endian before interpreting the hex digits.
    x.reverse()
    return int(''.join(x).encode('hex'), 16)
###########################################################
# class
###########################################################
###########################################################
# test
###########################################################
def print_array(x):
    """Pretty-print one matrix row as a bracketed, comma-separated line.

    Zeros are rendered as a right-aligned ' 0' so columns line up with
    the '%3d' formatting of non-zero entries.  (Python 2 print syntax.)
    """
    prn = "\t["
    for j in x:
        if j:
            prn += "%3d, " % j
        else:
            prn += "  0, "
    # Drop the trailing ', ' before closing the row.
    print prn[:-2]+"],"
def print_matrix(x):
    """Pretty-print a matrix (list of rows) via print_array.

    (Python 2 print syntax.)
    """
    print "["
    for i in x:
        print_array(i)
    print "]"
def random_test(times):
    """Run `times` random solver checks.

    For each round, pick a random modulus and matrix size, generate a
    known random solution vector, then build each equation row so that
    its last column is the row's dot product with the solution mod `mod`.
    NOTE(review): run_test() is defined elsewhere in this module (its
    definition is not visible here).  (Python 2: xrange/print/map-as-list.)
    """
    import random
    for i in xrange(times):
        print "\n============== random test %d ==============\n" % i
        mod = random.randint(5, 999)
        col = random.randint(2, 30)
        row = random.randint(2, 30)
        # Hidden solution the solver should recover.
        solution = map(lambda x : random.randint(0, mod - 1), [xc for xc in xrange(col)])
        matrix = []
        for y in xrange(row):
            array = map(lambda x : random.randint(0, mod), [xc for xc in xrange(col)])
            # Dot product of coefficients with the solution...
            t = 0
            for j in map(lambda x,y:0 if None == y else x*y, array, solution):
                t += j
            # ...appended (mod `mod`) as the augmented column.
            array.append(t % mod)
            matrix.append(array)
        run_test(mod, solution, matrix)
def DSA_comK():
    """Recover a DSA nonce k and private key x from two signatures.

    Both signatures share the same r (r1 == r2), which indicates the same
    per-message secret k was reused.  From the two signing equations
        s = k^(-1) * (HASH(m) + x*r)  (mod q)
    we get, for each message i:
        s_i * k - r_i * x = HASH(m_i)  (mod q)
    i.e. a system of two linear congruences in the unknowns (k, x),
    which is solved with Gaussian elimination mod q.
    DSA recap: p is a 64*t-bit prime (512..1024 bits), q a 160-bit prime
    dividing p-1, g = h^((p-1)/q) mod p, y = g^x mod p is the public key,
    and a signature is (r, s) with r = (g^k mod p) mod q.
    NOTE(review): GaussMatrix is defined elsewhere in this module (its
    definition is not visible here).  (Python 2: .decode('hex'), print.)
    """
    import hashlib
    # Public domain parameters (p, g are unused by the recovery itself).
    p = 0x8c286991e30fd5341b7832ce9fe869c0a73cf79303c2959ab677d980237abf7ecf853015c9a086c4330252043525a4fa60c64397421caa290225d6bc6ec6b122cd1da4bba1b13f51daca8b210156a28a0c3dbf17a7826f738fdfa87b22d7df990908c13dbd0a1709bbbab5f816ddba6c8166ef5696414538f6780fdce987552b
    g = 0x49874582cd9af51d6f554c8fae68588c383272c357878d7f4079c6edcda3bcbf1f2cbada3f7d541a5b1ae7f046199f8f51d72db60a2601bd3375a3b48d7a3c9a0c0e4e8a0680f7fb98a8610f042e10340d2453d3c811088e48c5d6dd834eaa5509daeb430bcd9de8aabc239d698a655004e3f0a2ee456ffe9331c5f32c66f90d
    q = 0x843437e860962d85d17d6ee4dd2c43bc4aec07a5
    # Two signed messages with identical r values (nonce reuse).
    m1 = 0x3132333435363738
    r1 = 0x4d91a491d95e4eef4196a583cd282ca0e625f36d
    s1 = 0x3639b47678abf7545397fc9a1af108537fd1dfac
    m2 = 0x49276c6c206265206261636b2e
    r2 = 0x4d91a491d95e4eef4196a583cd282ca0e625f36d
    s2 = 0x314c044409a94f4961340212b42ade005fb27b0a
    # M1 = mem2int(hashlib.sha1(int2mem(m1)).digest())
    M1 = int(hashlib.sha1('3132333435363738'.decode('hex')).hexdigest(), 16)
    # M2 = mem2int(hashlib.sha1(int2mem(m2)).digest())
    M2 = int(hashlib.sha1('49276c6c206265206261636b2e'.decode("hex")).hexdigest(), 16)
    # Augmented matrix for:  s_i*k - r_i*x = M_i  (mod q)
    matrix_c = [
        [0x3639b47678abf7545397fc9a1af108537fd1dfac, -0x4d91a491d95e4eef4196a583cd282ca0e625f36d, M1],
        [0x314c044409a94f4961340212b42ade005fb27b0a, -0x4d91a491d95e4eef4196a583cd282ca0e625f36d, M2]
    ]
    print "mod = %d" % (q)
    print "matrix ="
    print_matrix(matrix_c)
    Gauss = GaussMatrix(matrix_c, q)
    ret = Gauss.gauss()
    if not ret:
        print "error:"
        print_matrix(Gauss.d)
        print "error_str:", Gauss.error_str
    else:
        # First solution row holds (k, x).
        k = ret[0][0]
        x = ret[0][1]
        print "k: %x" % (k)
        print "x: %x" % (x)
        print Gauss.verify_solution(ret[0])
    exit(0)
# if __name__ == "__main__":
#     DSA_comK()
#     static_test()
#     static_test_ex()
#     random_test(1)
#     exit(0)

# Cells of a 12x12 grid that are excluded from consideration.
con=[26,28,38,39,40,50,52,79,80,81,91,103,105,115,116,117]
# Try every unordered pair (k, kk) of non-excluded cells: reload the
# equation system from n2.txt, drop the rows for both cells and their
# grid neighbours (up/down/left/right at distance 1 and 2, clamped to
# the 12-column layout), then hand the reduced system to the solver.
# NOTE(review): qwq() is defined elsewhere in this module (its
# definition is not visible here).  (Python 2: xrange.)
for kk in xrange(144):
    if kk in con:
        continue
    for k in xrange(kk):
        if k in con:
            continue
        # Re-read the full system each iteration (matrix rows get popped).
        matrix=[list(map(int,i.split())) for i in open('n2.txt','r').read().splitlines()]
        _p=list()
        # Neighbourhood of k: same column +-1/+-2 rows, same row +-1/+-2 cols.
        _p.append(int(k))
        if k//12>=1:
            _p.append(int(k-12))
        if k//12<=10:
            _p.append(int(k+12))
        if k//12>=2:
            _p.append(int(k-24))
        if k//12<=9:
            _p.append(int(k+24))
        if k%12>=1:
            _p.append(int(k-1))
        if k%12<=10:
            _p.append(int(k+1))
        if k%12>=2:
            _p.append(int(k-2))
        if k%12<=9:
            _p.append(int(k+2))
        # Neighbourhood of kk, same pattern.
        _p.append(int(kk))
        if kk//12>=1:
            _p.append(int(kk-12))
        if kk//12<=10:
            _p.append(int(kk+12))
        if kk//12>=2:
            _p.append(int(kk-24))
        if kk//12<=9:
            _p.append(int(kk+24))
        if kk%12>=1:
            _p.append(int(kk-1))
        if kk%12<=10:
            _p.append(int(kk+1))
        if kk%12>=2:
            _p.append(int(kk-2))
        if kk%12<=9:
            _p.append(int(kk+2))
        # Pop from the highest index down so earlier pops don't shift later ones.
        for i in sorted(set(_p))[::-1]:
            matrix.pop(i)
        qwq(matrix)
        # input()
| 27.34556 | 266 | 0.454712 |
a2a4ec18f82420451b7a78afd24b5244e8356daf | 451 | py | Python | main.py | attakei/lantis-web-radio | febf5fe156da4bd60ef9d1d09fe57a62c435a380 | [
"MIT"
] | null | null | null | main.py | attakei/lantis-web-radio | febf5fe156da4bd60ef9d1d09fe57a62c435a380 | [
"MIT"
] | null | null | null | main.py | attakei/lantis-web-radio | febf5fe156da4bd60ef9d1d09fe57a62c435a380 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf8 -*-
import sys
import argparse
from lantis.webradio.commands import bind_subparsers
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
bind_subparsers(subparsers)
if __name__ == '__main__':
    # main() is defined elsewhere in this module (not visible here); its
    # return value becomes the process exit status.
    ret = main()
    sys.exit(ret)
| 18.791667 | 52 | 0.687361 |
a2a5a42b6d09e31e44930e2b97a11e3ac6f3bf02 | 6,304 | py | Python | vnpy/app/portfolio_strategy/strategies/daily_amplitude_2_days_volitility_strategy.py | franklili3/vnpy | 4d710553302eb3587e4acb2ff8ce151660fb9c17 | [
"MIT"
] | null | null | null | vnpy/app/portfolio_strategy/strategies/daily_amplitude_2_days_volitility_strategy.py | franklili3/vnpy | 4d710553302eb3587e4acb2ff8ce151660fb9c17 | [
"MIT"
] | null | null | null | vnpy/app/portfolio_strategy/strategies/daily_amplitude_2_days_volitility_strategy.py | franklili3/vnpy | 4d710553302eb3587e4acb2ff8ce151660fb9c17 | [
"MIT"
] | null | null | null | from typing import List, Dict
from datetime import datetime
import numpy as np
from vnpy.app.portfolio_strategy import StrategyTemplate, StrategyEngine
from vnpy.trader.utility import BarGenerator, ArrayManager
from vnpy.trader.object import BarData
from vnpy.trader.constant import Interval
| 32 | 92 | 0.584391 |
a2a878b865e7dd158c1f4d9b527b4dc267ffa7f3 | 7,065 | py | Python | old_game/hotmaps.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 53 | 2015-07-03T21:25:36.000Z | 2022-02-18T23:08:38.000Z | old_game/hotmaps.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 5 | 2015-07-03T21:27:12.000Z | 2016-12-08T14:40:38.000Z | old_game/hotmaps.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 14 | 2016-02-02T06:49:51.000Z | 2022-02-24T13:24:35.000Z | # Pathfinding algorithm.
import pygame
import random
if __name__=='__main__':
    # Ad-hoc benchmark comparing two pathfinding implementations on a
    # randomly walled 100x100 scene.
    # NOTE(review): OldWay / NewWay are defined elsewhere in this module
    # (not visible here), and `from . import maps` requires running this
    # file as part of its package — confirm.
    import timeit
    from . import maps
    import random
    import pygame
    myscene = maps.Scene( 100 , 100 )
    # Fill roughly a third of the cells with walls.
    for x in range( 5, myscene.width ):
        for y in range( 5, myscene.height ):
            if random.randint(1,3) == 1:
                myscene.map[x][y].wall = maps.BASIC_WALL
    myset = set()
    myset.add( (23,23) )
    t1 = timeit.Timer( OldWay( myscene ) )
    t2 = timeit.Timer( NewWay( myscene ) )
    print(t1.timeit(100))
    print(t2.timeit(100))
| 34.802956 | 165 | 0.501062 |
a2a927903851fa866273d2e9c394ad0c65d802fb | 960 | py | Python | upload_menu.py | jaypee-f/webhook | 4fc8e47c6dd7fd3c90b4db076bfd075ffdd44054 | [
"MIT"
] | null | null | null | upload_menu.py | jaypee-f/webhook | 4fc8e47c6dd7fd3c90b4db076bfd075ffdd44054 | [
"MIT"
] | null | null | null | upload_menu.py | jaypee-f/webhook | 4fc8e47c6dd7fd3c90b4db076bfd075ffdd44054 | [
"MIT"
] | null | null | null | import json
import jsonpickle
from pprint import pprint
# Build a product-upload payload from two POS JSON exports and print it.
# NOTE(review): `Object` is defined elsewhere in this project (apparently a
# plain attribute-bag class) — confirm it is available in this module.
prods = Object()
prods.accountId = "5c76ae99c6489f0001bc6b0a"
prods.locationId = "5db938536d49b300017efcc3"
prods.products = []
prods.categories = []

# Products: one entry per model in pl.json.
with open('pl.json', 'r') as f:
    products_dict = json.load(f)
for item in products_dict["models"]:
    prod = Object()
    prod.productType = 1
    prod.plu = item["id"]
    prod.price = item["price"]
    prod.posProductId = item["id"]
    prod.name = item["name"]
    prod.posProductCategoryId = item["parentId"]
    prod.imageUrl = ""
    prod.description = item["description"]
    prod.deliveryTax = 20000
    prod.takeawayTax = 20000
    prods.products.append(prod)

# Categories: one entry per model in cat.json.
with open('cat.json', 'r') as f:
    category_dict = json.load(f)
for item in category_dict["models"]:
    cat = Object()
    cat.name = item["name"]
    cat.posCategoryId = item["id"]
    # Bug fix: was `cat.imageUrl:""` — a bare annotation expression that
    # never assigned the attribute; use a real assignment.
    cat.imageUrl = ""
    prods.categories.append(cat)

print(jsonpickle.dumps(prods))
| 20.869565 | 46 | 0.691667 |
a2a95220c05c2685607d88d70a06cedd80129fc1 | 2,489 | py | Python | CareerTinderServer/CareerTinder/migrations/0002_auto_20160918_0011.py | sarojaerabelli/HVGS | 86ec3d2de496540ca439c40f4a0c58c47aa181cf | [
"MIT"
] | 1 | 2016-09-18T16:40:27.000Z | 2016-09-18T16:40:27.000Z | CareerTinderServer/CareerTinder/migrations/0002_auto_20160918_0011.py | sarojaerabelli/HVGS | 86ec3d2de496540ca439c40f4a0c58c47aa181cf | [
"MIT"
] | null | null | null | CareerTinderServer/CareerTinder/migrations/0002_auto_20160918_0011.py | sarojaerabelli/HVGS | 86ec3d2de496540ca439c40f4a0c58c47aa181cf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-18 04:11
from __future__ import unicode_literals
import CareerTinder.listfield
from django.db import migrations, models
| 30.728395 | 143 | 0.546002 |
a2aa47ea240a66801a3fa533dadd5d9026710eb3 | 4,259 | py | Python | cadence/activity_loop.py | mfateev/cadence-python | f8e6e2eb3a010dcd1df76a2e4e59afbb8c11f1db | [
"MIT"
] | null | null | null | cadence/activity_loop.py | mfateev/cadence-python | f8e6e2eb3a010dcd1df76a2e4e59afbb8c11f1db | [
"MIT"
] | null | null | null | cadence/activity_loop.py | mfateev/cadence-python | f8e6e2eb3a010dcd1df76a2e4e59afbb8c11f1db | [
"MIT"
] | null | null | null | import datetime
import logging
import json
from cadence.activity import ActivityContext
from cadence.cadence_types import PollForActivityTaskRequest, TaskListMetadata, TaskList, PollForActivityTaskResponse, \
RespondActivityTaskCompletedRequest, RespondActivityTaskFailedRequest
from cadence.conversions import json_to_args
from cadence.workflowservice import WorkflowService
from cadence.worker import Worker
logger = logging.getLogger(__name__)
| 48.954023 | 120 | 0.630195 |
a2aacc4cece05e2f823b764750ce6c88673d5b7a | 3,666 | py | Python | support/fetch_validators_load.py | sonofmom/ton-zabbix-scripts | b43471d058873c5ba78a92fa79d334380df5f6fc | [
"MIT"
] | null | null | null | support/fetch_validators_load.py | sonofmom/ton-zabbix-scripts | b43471d058873c5ba78a92fa79d334380df5f6fc | [
"MIT"
] | null | null | null | support/fetch_validators_load.py | sonofmom/ton-zabbix-scripts | b43471d058873c5ba78a92fa79d334380df5f6fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import argparse
import datetime
import time
import requests
import Libraries.arguments as ar
import Classes.AppConfig as AppConfig
import Classes.LiteClient as LiteClient
import Classes.TonNetwork as TonNetwork
import json
if __name__ == '__main__':
    # run() is defined elsewhere in this module (not visible here).
    run()
| 39.847826 | 139 | 0.642117 |
a2aacc4feda333eaff912d30b183a58db7aa86b3 | 8,324 | py | Python | packnet_sfm/loggers/wandb_logger.py | asmith9455/packnet-sfm | 60a034ac42d2e72314d002b27efcdfc769dbc3fc | [
"MIT"
] | 982 | 2020-02-27T02:48:29.000Z | 2022-03-31T12:33:50.000Z | packnet_sfm/loggers/wandb_logger.py | asmith9455/packnet-sfm | 60a034ac42d2e72314d002b27efcdfc769dbc3fc | [
"MIT"
] | 205 | 2020-03-24T06:44:30.000Z | 2022-03-30T09:13:14.000Z | packnet_sfm/loggers/wandb_logger.py | asmith9455/packnet-sfm | 60a034ac42d2e72314d002b27efcdfc769dbc3fc | [
"MIT"
] | 253 | 2020-01-25T16:14:45.000Z | 2022-03-30T05:57:40.000Z | # Copyright 2020 Toyota Research Institute. All rights reserved.
# Adapted from Pytorch-Lightning
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/loggers/wandb.py
from argparse import Namespace
from collections import OrderedDict
import numpy as np
import torch.nn as nn
import wandb
from wandb.wandb_run import Run
from packnet_sfm.utils.depth import viz_inv_depth
from packnet_sfm.utils.logging import prepare_dataset_prefix
from packnet_sfm.utils.types import is_dict, is_tensor
def log_metrics(self, metrics):
"""Logs training metrics."""
self._metrics.update(metrics)
if 'global_step' in metrics:
self.experiment.log(self._metrics)
self._metrics.clear()
def log_images(self, func, mode, batch, output,
args, dataset, world_size, config):
"""
Adds images to metrics for later logging.
Parameters
----------
func : Function
Function used to process the image before logging
mode : str {"train", "val"}
Training stage where the images come from (serve as prefix for logging)
batch : dict
Data batch
output : dict
Model output
args : tuple
Step arguments
dataset : CfgNode
Dataset configuration
world_size : int
Number of GPUs, used to get logging samples at consistent intervals
config : CfgNode
Model configuration
"""
dataset_idx = 0 if len(args) == 1 else args[1]
prefix = prepare_dataset_prefix(config, dataset_idx)
interval = len(dataset[dataset_idx]) // world_size // config.num_logs
if args[0] % interval == 0:
prefix_idx = '{}-{}-{}'.format(mode, prefix, batch['idx'][0].item())
func(prefix_idx, batch, output)
# Log depth images
def log_depth(self, *args, **kwargs):
"""Helper function used to log images relevant for depth estimation"""
self.log_images(log, *args, **kwargs)
def log_rgb(key, prefix, batch, i=0):
"""
Converts an RGB image from a batch for logging
Parameters
----------
key : str
Key from data containing the image
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the image
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
rgb = batch[key] if is_dict(batch) else batch
return prep_image(prefix, key,
rgb[i])
def log_depth(key, prefix, batch, i=0):
"""
Converts a depth map from a batch for logging
Parameters
----------
key : str
Key from data containing the depth map
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the depth map
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
depth = batch[key] if is_dict(batch) else batch
inv_depth = 1. / depth[i]
inv_depth[depth[i] == 0] = 0
return prep_image(prefix, key,
viz_inv_depth(inv_depth, filter_zeros=True))
def log_inv_depth(key, prefix, batch, i=0):
"""
Converts an inverse depth map from a batch for logging
Parameters
----------
key : str
Key from data containing the inverse depth map
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the inverse depth map
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
inv_depth = batch[key] if is_dict(batch) else batch
return prep_image(prefix, key,
viz_inv_depth(inv_depth[i]))
def prep_image(prefix, key, image):
"""
Prepare image for wandb logging
Parameters
----------
prefix : str
Prefix added to the key for logging
key : str
Key from data containing the inverse depth map
image : torch.Tensor [3,H,W]
Image to be logged
Returns
-------
output : dict
Dictionary with key and value for logging
"""
if is_tensor(image):
image = image.detach().permute(1, 2, 0).cpu().numpy()
prefix_key = '{}-{}'.format(prefix, key)
return {prefix_key: wandb.Image(image, caption=key)}
| 30.379562 | 102 | 0.608602 |
a2ab5037304159997115ed0a2b381a23c81c1548 | 5,781 | py | Python | source_code/trans.py | shinyfe74/EN_KOR_translator | 910e6924b2b7b27a6e111ae35554cbff7e39ac20 | [
"MIT"
] | null | null | null | source_code/trans.py | shinyfe74/EN_KOR_translator | 910e6924b2b7b27a6e111ae35554cbff7e39ac20 | [
"MIT"
] | null | null | null | source_code/trans.py | shinyfe74/EN_KOR_translator | 910e6924b2b7b27a6e111ae35554cbff7e39ac20 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import ttk
import numpy as np
from PIL import ImageGrab
from PIL import Image
from pytesseract import *
import re
import cv2
from googletrans import Translator as google_translator
from pypapago import Translator as papago_translator
from kakaotrans import Translator as kakao_translator
pytesseract.tesseract_cmd = 'C:/Program Files/Tesseract-OCR/tesseract.exe'
form = Tk()
form.geometry("300x250")
form.title(" ")
#
Blue = (255, 0, 0)
Green = (0, 255, 0)
Red = (0, 0, 255)
White = (255, 255, 255)
Black = (0, 0, 0)
point1 = (0, 0)
point2 = (0, 0)
click1 = False
translator_combo_Label = Label(form, text="-------------------- ---------------------")
translator_combo_Label_Var = StringVar()
translator_combo = ttk.Combobox(form, width=10, textvariable=translator_combo_Label_Var)
translator_combo['values'] = ('', '','')
translator_combo.set("")
translator_combo.current(0)
btn_trans = Button(form, text=" ",
command=resultform, width=30)
btn_end = Button(form, text=" ", command=form.destroy, width=30)
btn_trans.grid(row=0, columnspan=3, padx=30, pady=20)
btn_end.grid(row=1, columnspan=3, padx=30, pady=5)
translator_combo_Label.grid(row=4, columnspan=5)
translator_combo.grid(row=5, column=1, padx=5)
Manual_Label = Label(
form, text="F2 / ESC or ")
Manual_Label.grid(row=6, columnspan=3, padx=30, pady=10)
Maker_Label = Label(
form, text="--------- : tobeptcoder------------")
Maker_Label.grid(row=7, columnspan=3, padx=30, pady=5)
Email_Label = Label(
form, text="------------tobeptcoder@gmail.com------------")
Email_Label.grid(row=8, columnspan=3, padx=30, pady=5)
form.mainloop() | 28.477833 | 95 | 0.59488 |
a2ab67ac5edaa66589f9eee8088e666122ba3bce | 7,869 | py | Python | src/data/data_processing.py | ChrisPedder/Medieval_Manuscripts | 40bfcf9c273385cfd8aa66e63b2fb80078fef33b | [
"MIT"
] | null | null | null | src/data/data_processing.py | ChrisPedder/Medieval_Manuscripts | 40bfcf9c273385cfd8aa66e63b2fb80078fef33b | [
"MIT"
] | 5 | 2020-12-28T15:28:35.000Z | 2022-02-10T03:26:44.000Z | src/data/data_processing.py | ChrisPedder/Medieval_Manuscripts | 40bfcf9c273385cfd8aa66e63b2fb80078fef33b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 3 17:20:06 2018
@author: chrispedder
A routine to crop sections from the images of different manuscripts in the two
datasets to the same size, and with the same magnification, so that the average
script size doesn't create a feature that the neural networks can learn.
Reading the data description of the CLaMM dataset, we find that the images
are 150mm*100mm, so we need to take similar-sized crops from our new target
data. Looking at the bar on the left, we find that 6000px =(341-47) = 294mm
So 1mm = 20.41px. We therefore need to crop 3062 * 2041px from the original
However, to not give away too much, we need to make this crop a little
random. Looking at the test images, 1) Their heights vary by around 100px
AFTER downsampling, so around 170px BEFORE downsampling. 2) Their widths
vary by proportionately less, around 65px AFTER, so 110px BEFORE.
We define a crop function below which achieves precisely this.
To run this routine, call something like `python -m src.data.data_processing
--thow_input_path data/raw/MS157/ --thow_output_path data/external/thow_out
--clamm_input_path data/raw/ICDAR2017_CLaMM_Training/
--clamm_output_path data/external/clamm_out`
The four command line args given here are all required.
"""
import numpy as np
import scipy.io
import random
import scipy.ndimage
import glob
import os
import argparse
from PIL import Image
from random import randint
from typing import List
# helper function to clean up file list for scraped THoW filenames
if __name__ == '__main__':
args = parse_args()
processor = ImageProcessor(args)
processor.process_all_files()
| 38.385366 | 80 | 0.648367 |
a2ac4d61989a683d4c9f7b828fb2128fcf9a33a2 | 7,934 | py | Python | ivy/container/gradients.py | Aarsh2001/ivy | 827164d7d31bd08c5287bbd1ac9ccce588b733bc | [
"Apache-2.0"
] | null | null | null | ivy/container/gradients.py | Aarsh2001/ivy | 827164d7d31bd08c5287bbd1ac9ccce588b733bc | [
"Apache-2.0"
] | null | null | null | ivy/container/gradients.py | Aarsh2001/ivy | 827164d7d31bd08c5287bbd1ac9ccce588b733bc | [
"Apache-2.0"
] | null | null | null | from typing import Optional, Union, List, Dict
# local
import ivy
from ivy.container.base import ContainerBase
# noinspection PyMissingConstructor
| 25.511254 | 70 | 0.515755 |
a2adbf90bc22cca044acdd78bea2c9355ce557e4 | 2,848 | py | Python | desktop/core/ext-py/Mako-1.0.7/test/test_cmd.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/Mako-1.0.7/test/test_cmd.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/Mako-1.0.7/test/test_cmd.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | from __future__ import with_statement
from contextlib import contextmanager
from test import TemplateTest, eq_, raises, template_base, mock
import os
from mako.cmd import cmdline
| 39.013699 | 78 | 0.581812 |
a2b000534f69d5e5c990ba8c2baa88de9b69fc99 | 1,920 | py | Python | corefacility/core/models/module.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | corefacility/core/models/module.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | corefacility/core/models/module.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | import uuid
from django.db import models
| 60 | 117 | 0.621354 |
a2b0828f0ce39bb552f2d2231688d2adacf5b85e | 1,986 | py | Python | sphinx-sources/Examples/Commands/LensFresnel_Convert.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 132 | 2017-03-15T15:28:46.000Z | 2022-03-09T00:28:25.000Z | sphinx-sources/Examples/Commands/LensFresnel_Convert.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 63 | 2017-01-26T15:46:55.000Z | 2022-01-25T04:50:59.000Z | sphinx-sources/Examples/Commands/LensFresnel_Convert.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 37 | 2017-02-17T16:11:38.000Z | 2022-01-25T18:03:47.000Z | from LightPipes import *
import matplotlib.pyplot as plt
TheExample(100) #100 x 100 grid
TheExample(1000) #1000 x 1000 grid
| 29.641791 | 95 | 0.606244 |
a2b0a81878fe2e8c89e23970f4f8db084dca00c6 | 598 | py | Python | delira/models/backends/chainer/__init__.py | gedoensmax/delira | 545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6 | [
"BSD-2-Clause"
] | 1 | 2019-10-03T21:00:20.000Z | 2019-10-03T21:00:20.000Z | delira/models/backends/chainer/__init__.py | gedoensmax/delira | 545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6 | [
"BSD-2-Clause"
] | null | null | null | delira/models/backends/chainer/__init__.py | gedoensmax/delira | 545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6 | [
"BSD-2-Clause"
] | null | null | null | from delira import get_backends as _get_backends
if "CHAINER" in _get_backends():
from delira.models.backends.chainer.abstract_network import \
AbstractChainerNetwork
from delira.models.backends.chainer.data_parallel import \
DataParallelChainerNetwork
from delira.models.backends.chainer.data_parallel import \
DataParallelChainerOptimizer
from delira.models.backends.chainer.data_parallel import \
ParallelOptimizerUpdateModelParameters
from delira.models.backends.chainer.data_parallel import \
ParallelOptimizerCumulateGradientsHook
| 42.714286 | 65 | 0.789298 |
a2b0f8e387546b569d1ef99efb66f95e406a0935 | 1,071 | py | Python | src/deepproblog/examples/Forth/Add/data/extract.py | vossenwout/gtadeepproblog | 65509b740518af422b96e84ef10716e0ac246e75 | [
"Apache-2.0"
] | 54 | 2021-06-23T08:03:23.000Z | 2022-03-10T01:02:43.000Z | src/deepproblog/examples/Forth/Add/data/extract.py | Damzwan/deepproblog | 56bcf5208e79c17510b5d288068fabc6cd64f3cf | [
"Apache-2.0"
] | 2 | 2021-06-30T23:48:25.000Z | 2022-03-18T10:45:05.000Z | src/deepproblog/examples/Forth/Add/data/extract.py | Damzwan/deepproblog | 56bcf5208e79c17510b5d288068fabc6cd64f3cf | [
"Apache-2.0"
] | 12 | 2021-06-30T10:47:52.000Z | 2022-03-09T23:51:48.000Z | import re
for train in [2, 4, 8]:
for test in [8, 64]:
for mode in ["train", "test", "dev"]:
with open("train{}_test{}/{}.txt".format(train, test, mode)) as f:
text = f.read()
matches = re.findall("\[([0-9 ]*)\]\t\[([0-9 ]*) (\d) (\d*)\]", text)
text = list()
for match in matches:
res = match[0].strip().split(" ")
digits = match[1].strip().split(" ")
carry = [match[2]]
length = int(match[3])
digit1 = list()
digit2 = list()
for i in range(0, len(digits), 2):
digit1.append(digits[i])
digit2.append(digits[i + 1])
text.append(
"add([{}],[{}],{},[{}]).".format(
*[",".join(l) for l in [digit1, digit2, carry, res]]
)
)
with open("train{}_test{}_{}.txt".format(train, test, mode), "w") as f:
f.write("\n".join(text))
| 38.25 | 83 | 0.388422 |
a2b26dec93877fc20d8f5328e080c0557abecb6c | 16,519 | py | Python | app/location/crawler.py | maro99/yapen | 0de7aa9d4b152aadd18511be6e536e89645452d9 | [
"MIT"
] | 1 | 2019-04-28T12:21:51.000Z | 2019-04-28T12:21:51.000Z | app/location/crawler.py | maro99/yapen | 0de7aa9d4b152aadd18511be6e536e89645452d9 | [
"MIT"
] | 5 | 2018-07-30T05:44:44.000Z | 2020-06-05T18:56:41.000Z | app/location/crawler.py | maro99/yapen | 0de7aa9d4b152aadd18511be6e536e89645452d9 | [
"MIT"
] | 5 | 2018-07-23T05:21:41.000Z | 2018-08-08T05:00:42.000Z | import re
import requests
from bs4 import BeautifulSoup
import time
from urllib import parse
from selenium import webdriver
from location.models import Pension, RoomImage, PensionImage, Room, Location, SubLocation
# string, 100,00 0 int
# 5 .
# location_name_list
| 36.225877 | 140 | 0.603971 |
a2b2ffb533dae5272cd3fbc1cefbb22e54b5762b | 1,181 | py | Python | 14-semparsing/ucca/scripts/find_constructions.py | ariasjose/nn4nlp-code | 7327ea3e93161afbc8c008e287b646daa802be4d | [
"Apache-2.0"
] | null | null | null | 14-semparsing/ucca/scripts/find_constructions.py | ariasjose/nn4nlp-code | 7327ea3e93161afbc8c008e287b646daa802be4d | [
"Apache-2.0"
] | null | null | null | 14-semparsing/ucca/scripts/find_constructions.py | ariasjose/nn4nlp-code | 7327ea3e93161afbc8c008e287b646daa802be4d | [
"Apache-2.0"
] | null | null | null | from argparse import ArgumentParser
from ucca import constructions
from ucca.ioutil import read_files_and_dirs
if __name__ == "__main__":
argparser = ArgumentParser(description="Extract linguistic constructions from UCCA corpus.")
argparser.add_argument("passages", nargs="+", help="the corpus, given as xml/pickle file names")
constructions.add_argument(argparser, False)
argparser.add_argument("-v", "--verbose", action="store_true", help="print tagged text for each passage")
args = argparser.parse_args()
for passage in read_files_and_dirs(args.passages):
if args.verbose:
print("%s:" % passage.ID)
extracted = constructions.extract_edges(passage, constructions=args.constructions, verbose=args.verbose)
if any(extracted.values()):
if not args.verbose:
print("%s:" % passage.ID)
for construction, edges in extracted.items():
if edges:
print(" %s:" % construction.description)
for edge in edges:
print(" %s [%s %s]" % (edge, edge.tag, edge.child))
print()
| 47.24 | 113 | 0.624047 |
a2b4cc002608cb98fc1f6000c06a7afefddd34dc | 3,870 | py | Python | multicache/__init__.py | bargulg/SimpleCache | 52f6fd18381e9ccb21194b83288d631d6e2cf28e | [
"MIT"
] | 1 | 2017-02-21T14:46:45.000Z | 2017-02-21T14:46:45.000Z | multicache/__init__.py | bargulg/multicache | 52f6fd18381e9ccb21194b83288d631d6e2cf28e | [
"MIT"
] | null | null | null | multicache/__init__.py | bargulg/multicache | 52f6fd18381e9ccb21194b83288d631d6e2cf28e | [
"MIT"
] | null | null | null | import hashlib
import os
import pickle
import tempfile
import zlib
from threading import Lock
from time import time
from multicache.base import BaseCache
try:
from multicache.redis import RedisCache
except ImportError:
pass
lock = Lock()
| 27.062937 | 74 | 0.509044 |
a2b509c564ad0b2601ed7a285ba7c94de901b242 | 754 | py | Python | 01_numbers.py | fernandobd42/Introduction_Python | 7a656df1341bda4e657baa146c28b98bef211fc6 | [
"OLDAP-2.5",
"Python-2.0",
"OLDAP-2.4",
"OLDAP-2.3"
] | 1 | 2016-10-02T00:51:43.000Z | 2016-10-02T00:51:43.000Z | 01_numbers.py | fernandobd42/Introduction_Python | 7a656df1341bda4e657baa146c28b98bef211fc6 | [
"OLDAP-2.5",
"Python-2.0",
"OLDAP-2.4",
"OLDAP-2.3"
] | null | null | null | 01_numbers.py | fernandobd42/Introduction_Python | 7a656df1341bda4e657baa146c28b98bef211fc6 | [
"OLDAP-2.5",
"Python-2.0",
"OLDAP-2.4",
"OLDAP-2.3"
] | null | null | null | x = 1 #x recebe 1
#print ir printar(mostrar) o valor desejado;
print(x) # resultado: 1
print(x + 4) # possvel somar uma varivel com um nmero, desde que a varivel tenha um valor definido - resultado: 5
print(2 * 2) #um asterisco usado para multiplicar - resultado: 4
print(3 ** 3) #dois asterisco usado para elevar a potncia - resultado: 27
print(5 / 2) #diviso com uma barra usado para retornar tipo flutuante - resultado = 2.5
print(5 // 2) #diviso com duas barras usado para retorna tipo inteiro - resultado = 2;
print(5 % 2) #modulo usado para retornar o resto da diviso - resultado = 1;
#OBS: o uso de '=' atribui um valor a uma varivel, j o uso de '==' compara os valores;
y = 1 #1 atribudo para y;
y == 1 #y igual a 1?
| 53.857143 | 119 | 0.706897 |
a2b518c79f5318969c38eb1d484323f66909f1f2 | 3,648 | py | Python | schematizer/models/redshift_sql_entities.py | Yelp/schematizer | 035845d27945a05db475f00eb76f59e8825dbaa4 | [
"Apache-2.0"
] | 86 | 2016-11-17T17:39:13.000Z | 2021-06-01T15:19:05.000Z | schematizer/models/redshift_sql_entities.py | tomzhang/schematizer | 035845d27945a05db475f00eb76f59e8825dbaa4 | [
"Apache-2.0"
] | 2 | 2016-12-01T20:57:43.000Z | 2021-09-28T09:26:25.000Z | schematizer/models/redshift_sql_entities.py | tomzhang/schematizer | 035845d27945a05db475f00eb76f59e8825dbaa4 | [
"Apache-2.0"
] | 26 | 2016-11-29T22:38:11.000Z | 2021-03-02T19:44:17.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains the internal data structure to hold the information
of redshift SQL schemas.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from schematizer.models.sql_entities import SQLColumn
from schematizer.models.sql_entities import SQLTable
| 33.46789 | 76 | 0.625 |
a2b5bf13bb08e8ae97991098e42fc0fd73145597 | 50,707 | py | Python | modules/templates/WACOP/config.py | mswdresden/AsylumEden | a68ee08f9f7031974ec12ec327d00c5d975a740a | [
"MIT"
] | 1 | 2017-07-22T18:49:34.000Z | 2017-07-22T18:49:34.000Z | modules/templates/WACOP/config.py | mswdresden/AsylumEden | a68ee08f9f7031974ec12ec327d00c5d975a740a | [
"MIT"
] | null | null | null | modules/templates/WACOP/config.py | mswdresden/AsylumEden | a68ee08f9f7031974ec12ec327d00c5d975a740a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template for WA-COP + CAD Cloud Integration
"""
T = current.T
# =========================================================================
# System Settings
#
settings.base.system_name = T("Sahana: Washington Common Operating Picture (WA-COP)")
settings.base.system_name_short = T("Sahana")
# Prepop default
settings.base.prepopulate += ("WACOP", "default/users", "WACOP/Demo")
# Theme (folder to use for views/layout.html)
settings.base.theme = "WACOP"
settings.ui.social_buttons = True
# -------------------------------------------------------------------------
# Self-Registration and User Profile
#
# Users can self-register
settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users need to be approved
settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -------------------------------------------------------------------------
# Security Policy
#
settings.security.policy = 7 # Apply Controller, Function and Table ACLs
settings.security.map = True
# -------------------------------------------------------------------------
# L10n (Localization) settings
#
settings.L10n.languages = OrderedDict([
("en", "English"),
("es", "Espaol"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "-0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%b %d %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
settings.msg.require_international_phone_numbers = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Has scalability issues, but should be OK with our number of records
settings.search.dates_auto_range = True
# -------------------------------------------------------------------------
# GIS settings
#
# Restrict the Location Selector to just certain countries
settings.gis.countries = ("US",)
# Levels for the LocationSelector
levels = ("L1", "L2", "L3")
# Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon
#settings.gis.geocode_imported_addresses = "google"
# Until we add support to S3LocationSelector to set dropdowns from LatLons
settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "mcop"
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to prevent showing LatLon in Location Represents
settings.gis.location_represent_address_only = "icon"
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = None
# -------------------------------------------------------------------------
# Modules
#
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
# ("errors", Storage(
# name_nice = "Ticket Viewer",
# #description = "Needed for Breadcrumbs",
# restricted = False,
# module_type = None # No Menu
# )),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
#("translate", Storage(
# name_nice = "Translation Functionality",
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 10
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = 10,
)),
("event", Storage(
name_nice = "Events",
restricted = True,
module_type = 2,
)),
("fire", Storage(
name_nice = "Fire",
restricted = True,
module_type = None,
)),
("police", Storage(
name_nice = "Police",
restricted = True,
module_type = None,
)),
("project", Storage(
name_nice = "Tasks",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
])
# -------------------------------------------------------------------------
# CMS Content Management
#
settings.cms.bookmarks = True
settings.cms.richtext = True
settings.cms.show_tags = True
# -------------------------------------------------------------------------
def cms_post_onaccept(form):
"""
Handle Tags in Create / Update forms
"""
post_id = form.vars.id
db = current.db
s3db = current.s3db
ttable = s3db.cms_tag
ltable = s3db.cms_tag_post
# Delete all existing tags for this post
db(ltable.post_id == post_id).delete()
# Add these tags
tags = current.request.post_vars.get("tags")
if not tags:
return
tags = tags.split(",")
tag_ids = db(ttable.name.belongs(tags)).select(ttable.id,
ttable.name).as_dict(key="name")
for tag in tags:
row = tag_ids.get("tag")
if row:
tag_id = row.get("id")
else:
tag_id = ttable.insert(name=tag)
ltable.insert(post_id = post_id,
tag_id = tag_id,
)
# -------------------------------------------------------------------------
settings.customise_cms_post_resource = customise_cms_post_resource
# -------------------------------------------------------------------------
# Event/Incident Management
#
settings.event.incident_teams_tab = "Units"
# Uncomment to preserve linked Incidents when an Event is deleted
settings.event.cascade_delete_incidents = False
# -------------------------------------------------------------------------
settings.customise_event_event_resource = customise_event_event_resource
# -------------------------------------------------------------------------
settings.customise_event_event_controller = customise_event_event_controller
# -------------------------------------------------------------------------
settings.customise_event_incident_resource = customise_event_incident_resource
# -------------------------------------------------------------------------
settings.customise_event_incident_controller = customise_event_incident_controller
# -------------------------------------------------------------------------
settings.customise_event_human_resource_resource = customise_event_human_resource_resource
# -------------------------------------------------------------------------
settings.customise_event_organisation_resource = customise_event_organisation_resource
# -------------------------------------------------------------------------
settings.customise_event_team_resource = customise_event_team_resource
# -------------------------------------------------------------------------
settings.customise_pr_group_resource = customise_pr_group_resource
# -------------------------------------------------------------------------
settings.customise_pr_person_controller = customise_pr_person_controller
# -------------------------------------------------------------------------
settings.customise_project_task_resource = customise_project_task_resource
# =============================================================================
def wacop_event_rheader(r, tabs=[]):
""" EVENT custom resource headers """
if r.representation != "html":
# Resource headers only used in interactive views
return None
from s3 import s3_rheader_resource, S3ResourceHeader
tablename, record = s3_rheader_resource(r)
if tablename != r.tablename:
resource = current.s3db.resource(tablename, id=record.id)
else:
resource = r.resource
rheader = None
rheader_fields = []
if record:
T = current.T
if tablename == "event_event":
if not tabs:
tabs = [(T("Event Details"), None),
(T("Incidents"), "incident"),
(T("Units"), "group"),
(T("Tasks"), "task"),
(T("Updates"), "post"),
]
rheader_fields = [["name"],
["start_date"],
["comments"],
]
elif tablename == "event_incident":
if not tabs:
tabs = [(T("Incident Details"), None),
(T("Units"), "group"),
(T("Tasks"), "task"),
(T("Updates"), "post"),
]
rheader_fields = [["name"],
["date"],
["comments"],
]
rheader = S3ResourceHeader(rheader_fields, tabs)(r,
table=resource.table,
record=record,
)
return rheader
# END =========================================================================
| 42.080498 | 142 | 0.428264 |
a2b8296c9037d221e852aad4ef00a8219c5bd0cc | 1,185 | py | Python | main.py | guysoft/kivy-external-storage-permission | a1eefedcabab2e82af948362271a21b4a8b89b56 | [
"MIT"
] | 1 | 2020-04-07T15:13:12.000Z | 2020-04-07T15:13:12.000Z | main.py | guysoft/kivy-external-storage-permission | a1eefedcabab2e82af948362271a21b4a8b89b56 | [
"MIT"
] | null | null | null | main.py | guysoft/kivy-external-storage-permission | a1eefedcabab2e82af948362271a21b4a8b89b56 | [
"MIT"
] | null | null | null | import kivy
from kivy.app import App
from kivy.uix.button import Button
import android
import os
import time
from android.permissions import Permission, request_permission, check_permission
from kivy.clock import Clock
if __name__ == '__main__':
MyApp().run()
| 25.76087 | 113 | 0.672574 |
a2bc3a31cc03d9c0efb26d0509eb49d178d88baf | 401 | py | Python | my_oop/oop05.py | xxwqlee/pylearn | 6eb9ad61bc68b3d0ca0b093f9876df3105efd98e | [
"Apache-2.0"
] | 1 | 2019-03-14T05:04:02.000Z | 2019-03-14T05:04:02.000Z | my_oop/oop05.py | xxwqlee/pylearn | 6eb9ad61bc68b3d0ca0b093f9876df3105efd98e | [
"Apache-2.0"
] | null | null | null | my_oop/oop05.py | xxwqlee/pylearn | 6eb9ad61bc68b3d0ca0b093f9876df3105efd98e | [
"Apache-2.0"
] | null | null | null | """
"""
a = A()
b = B()
a.a_say()
b.a_say()
print("*" * 50)
b.b_say() #
print("*" * 50)
B().b_say()
| 13.366667 | 44 | 0.506234 |
a2bc5af81309d6409de30031673fb2592880c8d4 | 8,852 | py | Python | python/tests/sbp/utils.py | zk20/libsbp | 041c5055f5db422258ebb3ce3f8e9f6e5d3e5fa9 | [
"MIT"
] | null | null | null | python/tests/sbp/utils.py | zk20/libsbp | 041c5055f5db422258ebb3ce3f8e9f6e5d3e5fa9 | [
"MIT"
] | null | null | null | python/tests/sbp/utils.py | zk20/libsbp | 041c5055f5db422258ebb3ce3f8e9f6e5d3e5fa9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: https://support.swiftnav.com
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
Utilities for running YAML-defined unit tests.
"""
import base64
import os.path
import json
import sys
import unittest
import yaml
from sbp.msg import SBP
from sbp.table import dispatch, _SBP_TABLE
HERE = os.path.dirname(__file__)
PYTHON_ROOT = os.path.join(HERE, "..", "..")
SPEC_ROOT = os.path.join(PYTHON_ROOT, "..", "spec", "yaml", "swiftnav", "sbp")
_SPECS = {}
def flatten_array(a):
    """Flatten a YAML-style array of single-key mappings into one dict.

    Each element of *a* is expected to be a mapping; only its first key
    (insertion order) is used, matching the original behaviour.  The
    original implementation extracted each key twice per item; this
    version does it once.
    """
    result = {}
    for item in a:
        key = next(iter(item))
        result[key] = item[key]
    return result
def load_msg_field_classes(msg):
    """Return a mapping of msg field names to custom classes.

    Looks up the YAML spec for the module that defines *msg*'s class and,
    for every field whose type is itself defined in that spec, maps the
    field name to the corresponding class object from the live module.
    Returns {} when the optional `any_case` dependency is missing.
    Specs are cached in the module-level _SPECS dict, keyed by module name.
    """
    # any_case is only available on Python 3.6+
    try:
        from any_case import to_snake_case
    except ImportError:
        return {}
    module_name = msg.__class__.__module__
    msg_name = msg.__class__.__name__
    if module_name not in _SPECS:
        # First access for this module: load its YAML spec from SPEC_ROOT
        # (e.g. "sbp.navigation" -> spec/yaml/swiftnav/sbp/navigation.yaml)
        sbp_module_name = module_name.rsplit(".", 1)[-1]
        module_filename = os.path.join(SPEC_ROOT, sbp_module_name + ".yaml")
        if not os.path.exists(module_filename):
            raise RuntimeError(module_filename, msg)
        with open(module_filename) as f:
            _SPECS[module_name] = yaml.load(f.read(), Loader=yaml.FullLoader)
    definitions = flatten_array(_SPECS[module_name]["definitions"])
    # Spec keys are SNAKE_CASE versions of the class name, e.g. MsgPosLLH -> MSG_POS_LLH
    msg_key = to_snake_case(msg_name).upper()
    obj_fields = flatten_array(definitions[msg_key]["fields"])
    field_classes = {}
    for field_name, field in obj_fields.items():
        if field["type"] in definitions:
            # Field type is a custom struct from the same module; resolve
            # the class object from the already-imported module.
            mod = sys.modules[module_name]
            cls = getattr(mod, field["type"])
            field_classes[field_name] = cls
    return field_classes
def _encoded_string(s):
"""Encode the string-like argument as bytes if suitable"""
return s.encode('ascii') if hasattr(s, 'encode') else s
def _assert_unsorted_equal(a, b):
    """
    Assert *a* and *b* contain the same elements, ignoring order,
    via unittest.TestCase.assertCountEqual.
    """
    # pytest has no equivalent helper, see
    # https://github.com/pytest-dev/pytest/issues/5548
    # The TestCase instance is created inside this function on purpose,
    # so pytest never collects it as a test class.
    UnitTestCase().assertCountEqual(a, b)
def _assert_sbp(sbp, test_case):
    """
    Assert that a proper SBP parsing from a raw package of data.

    Parameters
    ----------
    sbp : :class: `SBP`
        SBP message parsed from unit test's raw packet.
    test_case : dict
        Unit test case parsed from YAML.
    """
    # Numeric expectations are stored as strings (possibly hex), hence base 0.
    expected_crc = int(test_case['crc'], 0)
    expected_msg_type = int(test_case['msg_type'], 0)
    expected_sender = int(test_case['sender'], 0)
    assert sbp.crc == expected_crc, "Invalid crc."
    assert sbp.msg_type == expected_msg_type, "Invalid msg_type."
    assert sbp.sender == expected_sender, "Invalid sender."
    assert sbp.length == test_case['length'], "Invalid length."
    expected_payload = _encoded_string(test_case['payload'])
    assert base64.standard_b64encode(sbp.payload) == expected_payload, \
        "Invalid payload."
def deep_encode(e, encoding='ascii'):
    """
    Recursively encode every string contained in *e* using *encoding*
    (default ascii).  Dict keys are left untouched; anything that is not
    a str, dict or list is returned unchanged.
    """
    if isinstance(e, str):
        return e.encode(encoding)
    if isinstance(e, dict):
        return {key: deep_encode(value, encoding) for key, value in e.items()}
    if isinstance(e, list):
        return [deep_encode(item, encoding) for item in e]
    return e
def field_eq(p, e):
    """
    Compare parsed field contents against expected ground-truth values.

    Parameters
    ----------
    p : object with dict-like attributed access
        Parsed field contents.
    e : object with dict-like attributed access
        Expected field contents.

    Returns
    ----------
    True if fields are equal, else False.
    """
    if isinstance(e, dict):
        # Recurse over every expected key.
        for key, expected in e.items():
            if not field_eq(p[key], expected):
                return False
        return True
    if isinstance(e, list):
        # Recurse positionally over expected elements.
        for index, expected in enumerate(e):
            if not field_eq(p[index], expected):
                return False
        return True
    if isinstance(e, str) and isinstance(p, bytes) and p.endswith(b'\x00'):
        # NUL-padded C string from the wire vs. plain text expectation.
        e = e.encode('ascii')
    return p == e
def _assert_msg(msg, test_case):
    """
    Asserts that the parsed payload of an SBP message has the expected
    field values.

    Parameters
    ----------
    msg : Parsed SBP message.
        Parsed SBP message.
    test_case : dict
        Unit test case for this message.
    """
    actual_name = msg.__class__.__name__
    assert actual_name == test_case['name'], (
        "test case name {} loaded class name {}".format(test_case['name'], actual_name))
    expected_fields = test_case['fields']
    if not expected_fields:
        return
    for field_name, field_value in expected_fields.items():
        parsed_value = getattr(msg, field_name)
        assert field_eq(parsed_value, _encoded_string(field_value)), \
            "Unequal field values (name: %s): got %r, but expected %r!" \
            % (field_name, parsed_value, field_value)
def _assert_msg_roundtrip(msg, raw_packet):
    """
    Asserts that a msg gets serialized back into binary with the
    expected value.

    Parameters
    ----------
    msg : Parsed SBP message.
        Parsed SBP message.
    raw_packet : str or bytes
        Base64-encoded expected binary form.
    """
    actual = base64.standard_b64encode(msg.to_binary())
    expected = _encoded_string(raw_packet)
    assert actual == expected
def _assert_msg_roundtrip_json(msg, raw_json):
"""
Asserts that a msg gets serialized back into JSON with the
expected value, as well as gets serialized from JSON into
an expected object.
"""
to_json = json.loads(msg.to_json())
from_json = json.loads(raw_json)
assert sorted(to_json.items()) == sorted(from_json.items())
assert msg == msg.from_json(raw_json)
def _assert_materialization(msg, sbp, raw_json):
    """Asserts that a message materialized will get serialized into the
    right JSON object.

    Materializes the message three ways — from the parsed sbp container,
    from keyword fields alone, and from keyword fields whose struct-typed
    values are pre-instantiated as their custom classes — and checks each
    against the expected JSON.
    """
    fields = msg['fields'] or dict()
    # Materialize from the parsed SBP container plus keyword fields.
    live_msg = _SBP_TABLE[sbp.msg_type](sbp, **fields)
    assert isinstance(live_msg.to_json_dict(), dict)
    assert live_msg.to_json_dict() == json.loads(raw_json)
    # Materialize from keyword fields only (no sbp), with byte-encoded strings.
    fields = deep_encode(fields)
    live_msg = _SBP_TABLE[sbp.msg_type](sbp=None, **fields)
    assert isinstance(live_msg.to_json_dict(), dict)
    # NOTE(review): the next two checks compare to_json_dict() with itself,
    # so they only verify determinism/stability of repeated serialization,
    # not correctness against raw_json — confirm that is intentional.
    assert sorted(live_msg.to_json_dict().keys()) == sorted(live_msg.to_json_dict().keys())
    _assert_unsorted_equal(live_msg.to_json_dict(), live_msg.to_json_dict())
    assert msg['module']
    assert msg['name']
    # Locate the classes for any fields that use one from the same
    # module as the test case
    if not fields:
        return
    field_class_map = load_msg_field_classes(live_msg)
    if not field_class_map:
        return
    # Instantiate fields as classes and then instantiate msg using those objects
    member_fields = {}
    for name, value in fields.items():
        if name in field_class_map:
            # Struct-typed field: its YAML value must be a mapping of kwargs.
            assert isinstance(value, dict)
            member_fields[name] = field_class_map[name](sbp=None, **value)
        else:
            member_fields[name] = value
    live_msg = _SBP_TABLE[sbp.msg_type](sbp=None, **member_fields)
    _assert_unsorted_equal(live_msg.to_json_dict(), json.loads(raw_json))
def _assert_sane_package(pkg_name, pkg):
"""
Sanity check the package collection of tests before actually
running the tests.
Parameters
----------
pkg_name : str
Name of package to test
pkg : dict
Parsed contents of YAML file.
"""
assert len(pkg['tests']) > 0, "Package has no tests!"
def load_test_package(test_filename):
    """
    Runs unit tests for message bindings by reading a YAML unit test
    specification, parsing a raw packet for each test, and then
    asserting that SBP messages and parsed payloads have their intended
    values.

    Parameters
    ----------
    test_filename : str
        Filepath to unit test specifications

    Returns
    ----------
    The parsed, sanity-checked package dict.
    """
    pkg_name = os.path.basename(test_filename)
    with open(test_filename, 'r') as spec_file:
        pkg = yaml.load(spec_file.read(), Loader=yaml.FullLoader)
    # Wrap sanity failures so the offending file is named in the error.
    try:
        _assert_sane_package(pkg_name, pkg)
    except Exception as e:
        raise RuntimeError("Loading {} failed: {}".format(test_filename, e))
    return pkg
| 32.189091 | 95 | 0.703344 |
a2bd09228744e69177dc7286c70d7e20bc69a6fd | 2,453 | py | Python | train_arg_parser.py | DaringDane/Image-Classifier | 6e6a835bd72453c1ee9c5b57cf4959fc9011971b | [
"MIT"
] | null | null | null | train_arg_parser.py | DaringDane/Image-Classifier | 6e6a835bd72453c1ee9c5b57cf4959fc9011971b | [
"MIT"
] | null | null | null | train_arg_parser.py | DaringDane/Image-Classifier | 6e6a835bd72453c1ee9c5b57cf4959fc9011971b | [
"MIT"
] | null | null | null | import argparse
'''
Example commands for the command line:
- Select directory to save checkpoints in: python train.py data_directory --save_dir save_directory
- Select training architecture: python train.py data_directory --arch "densenet121"
- Set hyperparameters: python train.py data_directory --learning_rate 0.005 --hidden_units 2048 --epochs 8
- Use GPU for training: python train.py data_directory --gpu
'''
| 62.897436 | 185 | 0.721565 |
a2bd0fd34368e4604144c29b0f69a07f59c44be6 | 12,878 | py | Python | ckanext-hdx_org_group/ckanext/hdx_org_group/tests/test_controller/test_member_controller.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | ckanext-hdx_org_group/ckanext/hdx_org_group/tests/test_controller/test_member_controller.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | ckanext-hdx_org_group/ckanext/hdx_org_group/tests/test_controller/test_member_controller.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | '''
Created on Jun 23, 2015
@author: alexandru-m-g
'''
import logging
import mock
import ckan.model as model
import ckan.common as common
import ckan.lib.helpers as h
import ckan.lib.mailer as mailer
import ckanext.hdx_users.controllers.mailer as hdx_mailer
import ckanext.hdx_theme.tests.hdx_test_base as hdx_test_base
import ckanext.hdx_theme.tests.mock_helper as mock_helper
import ckanext.hdx_org_group.controllers.member_controller as member_controller
import ckanext.hdx_org_group.tests as org_group_base
c = common.c
log = logging.getLogger(__name__)
q = None
sort = None
c_dict = None
invited_user = None
# def test_members_invite(self):
#
# original_send_invite = mailer.send_invite
#
# def mock_send_invite(user):
# global invited_user
# invited_user = user
#
# mailer.send_invite = mock_send_invite
#
# context = {
# 'model': model, 'session': model.Session, 'user': 'testsysadmin'}
# url = h.url_for(
# controller='ckanext.hdx_org_group.controllers.member_controller:HDXOrgMemberController',
# action='member_new',
# id='hdx-test-org'
# )
# self.app.post(url, params={'email': 'hdxtestuser123@test.test', 'role': 'editor'},
# extra_environ={"REMOTE_USER": "testsysadmin"})
# org = self._get_action('organization_show')(context, {'id': 'hdx-test-org'})
#
# new_member = next((user for user in org['users'] if 'hdxtestuser123' in user['name']), None)
# assert new_member, 'Invited user needs to be a member of the org'
# assert new_member['capacity'] == 'editor', 'Invited user needs to be an editor'
#
# mailer.send_invite = original_send_invite
#
# @mock.patch('ckanext.hdx_theme.helpers.helpers.c')
# @mock.patch('ckanext.hdx_org_group.helpers.organization_helper.c')
# @mock.patch('ckanext.hdx_org_group.controllers.member_controller.c')
# def test_bulk_members_invite(self, member_c, org_helper_c, theme_c):
# test_username = 'testsysadmin'
# mock_helper.populate_mock_as_c(member_c, test_username)
# mock_helper.populate_mock_as_c(org_helper_c, test_username)
# mock_helper.populate_mock_as_c(theme_c, test_username)
# original_send_invite = mailer.send_invite
#
# def mock_send_invite(user):
# global invited_user
# invited_user = user
#
# mailer.send_invite = mock_send_invite
# context = {'model': model, 'session': model.Session, 'user': test_username}
#
# # removing one member from organization
# url = h.url_for(
# controller='ckanext.hdx_org_group.controllers.member_controller:HDXOrgMemberController',
# action='member_delete',
# id='hdx-test-org'
# )
# self.app.post(url, params={'user': 'johndoe1'}, extra_environ={"REMOTE_USER": test_username})
#
# org = self._get_action('organization_show')(context, {'id': 'hdx-test-org'})
# user_controller = MockedHDXOrgMemberController()
# user_controller.members('hdx-test-org')
# user_list = self._populate_member_names(c_dict['members'], org['users'])
# deleted_length = len(user_list)
# assert 'John Doe1' not in user_list
#
# # bulk adding members
# url = h.url_for(
# controller='ckanext.hdx_org_group.controllers.member_controller:HDXOrgMemberController',
# action='bulk_member_new',
# id='hdx-test-org'
# )
#
# self.app.post(url, params={'emails': 'janedoe3,johndoe1,dan@k.ro', 'role': 'editor'},
# extra_environ={"REMOTE_USER": test_username})
# org = self._get_action('organization_show')(context, {'id': 'hdx-test-org'})
#
# assert len(org['users']) == deleted_length + 2, 'Number of members should have increased by 2'
# new_member = next((user for user in org['users'] if 'johndoe1' in user['name']), None)
# assert new_member, 'Invited user needs to be a member of the org'
# assert new_member['capacity'] == 'editor', 'Invited user needs to be an editor'
#
# # making john doe1 a member back
# self.app.post(url, params={'emails': 'johndoe1', 'role': 'member'},
# extra_environ={"REMOTE_USER": test_username})
# org = self._get_action('organization_show')(context, {'id': 'hdx-test-org'})
# new_member = next((user for user in org['users'] if 'johndoe1' in user['name']), None)
# assert new_member, 'Invited user needs to be a member of the org'
# assert new_member['capacity'] == 'member', 'Invited user needs to be an member'
#
# mailer.send_invite = original_send_invite
| 43.802721 | 115 | 0.656701 |
a2bd228991060d0a29b89ddd1eb606ca0ff8fed6 | 1,044 | py | Python | bulletin/factories.py | ralphqq/ph-earthquake-dashboard | b9a599e92844b13fd1f7e3f54e087ec0ab6bc53a | [
"MIT"
] | null | null | null | bulletin/factories.py | ralphqq/ph-earthquake-dashboard | b9a599e92844b13fd1f7e3f54e087ec0ab6bc53a | [
"MIT"
] | 7 | 2020-06-05T20:14:42.000Z | 2022-03-02T15:00:30.000Z | bulletin/factories.py | ralphqq/ph-earthquake-dashboard | b9a599e92844b13fd1f7e3f54e087ec0ab6bc53a | [
"MIT"
] | null | null | null | from datetime import timedelta
import random
from django.utils import timezone
import factory
| 23.2 | 70 | 0.573755 |
a2beade77d575c19dad94b1f0e0efaa28bdb3efa | 792 | py | Python | figure2/Initialization.py | QianLab/Soft_MOCU | 516ab0c9fffcde0542576c5c9b20132880ea2dc1 | [
"MIT"
] | 1 | 2021-02-24T19:33:32.000Z | 2021-02-24T19:33:32.000Z | figure2/Initialization.py | QianLab/Soft_MOCU | 516ab0c9fffcde0542576c5c9b20132880ea2dc1 | [
"MIT"
] | null | null | null | figure2/Initialization.py | QianLab/Soft_MOCU | 516ab0c9fffcde0542576c5c9b20132880ea2dc1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
py_eq_z = None
classnum = 2
multiclass = False | 26.4 | 87 | 0.597222 |
a2bef8254bccb013d26eb0c1c08e9ae8163682c2 | 14,153 | py | Python | plex_export_watched_history.py | chazlarson/plex-watched-tools | ef3e34e733ec9555353d695ced582395bdc73480 | [
"MIT"
] | null | null | null | plex_export_watched_history.py | chazlarson/plex-watched-tools | ef3e34e733ec9555353d695ced582395bdc73480 | [
"MIT"
] | null | null | null | plex_export_watched_history.py | chazlarson/plex-watched-tools | ef3e34e733ec9555353d695ced582395bdc73480 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python3 -m pip install --force -U --user PlexAPI
"""
Metadata to be handled:
* Audiobooks
* Playlists -- https://github.com/pkkid/python-plexapi/issues/551
"""
import copy
import json
import time
import logging
import collections
from urllib.parse import urlparse
import plexapi
import plexapi.video
import plexapi.myplex
import plexapi.server
import plexapi.library
import plexapi.exceptions
PLEX_URL = ""
PLEX_TOKEN = ""
WATCHED_HISTORY = ""
LOG_FILE = ""
BATCH_SIZE = 10000
PLEX_REQUESTS_SLEEP = 0
CHECK_USERS = [
]
LOG_FORMAT = \
"[%(name)s][%(process)05d][%(asctime)s][%(levelname)-8s][%(funcName)-15s]" \
" %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG_LEVEL = logging.INFO
plexapi.server.TIMEOUT = 3600
plexapi.server.X_PLEX_CONTAINER_SIZE = 2500
_SHOW_RATING_KEY_GUID_MAPPING = {}
_MOVIE_RATING_KEY_GUID_MAPPING = {}
_EPISODE_RATING_KEY_GUID_MAPPING = {}
logger = logging.getLogger("PlexWatchedHistoryExporter")
SHOW_HISTORY = {
'guid': "",
'title': "",
'watched': False,
'userRating': "",
'episodes': collections.defaultdict(lambda: copy.deepcopy(EPISODE_HISTORY))
}
MOVIE_HISTORY = {
'guid': "",
'title': "",
'watched': False,
'viewCount': 0,
'viewOffset': 0,
'userRating': ""
}
EPISODE_HISTORY = {
'guid': "",
'title': "",
'watched': False,
'viewCount': 0,
'viewOffset': 0,
'userRating': ""
}
def _get_movie_section_watched_history(section, movie_history):
    """Fold the watch state of every movie in *section* into *movie_history*.

    *movie_history* is a defaultdict keyed by Plex GUID; non-plex GUIDs are
    skipped.  A fully watched record always overwrites the stored entry,
    while a partially watched record is kept only when no fully watched
    entry exists for that GUID yet.
    """
    for movie in _batch_get(section, BATCH_SIZE):
        movie_guid = _get_guid(_MOVIE_RATING_KEY_GUID_MAPPING, movie)
        # TODO: Check if reload is necessary
        # movie.reload(checkFiles=False)
        if urlparse(movie_guid).scheme != 'plex':
            continue
        if movie.isWatched:
            logger.debug(f"Fully Watched Movie: {movie.title} [{movie_guid}]")
        else:
            logger.debug(f"Partially Watched Movie: {movie.title} [{movie_guid}]")
            # Prefer fully watched over partially watched entries
            # TODO: Check for userRating & viewOffset too, however this shouldn't ever be
            # different since Plex tracks the item via the GUID across libraries/sections
            if movie_history[movie_guid]['watched']:
                continue
        movie_history[movie_guid].update({
            'guid': _cast(str, movie_guid),
            'title': _cast(str, movie.title),
            'watched': _cast(bool, movie.isWatched),
            'viewCount': _cast(int, movie.viewCount),
            'viewOffset': _cast(int, movie.viewOffset),
            'userRating': _cast(str, movie.userRating)
        })
if __name__ == "__main__":
main()
| 33.458629 | 106 | 0.637603 |
a2c10a54ceee9affb03ce15e17008ad6f880f4e9 | 414 | py | Python | src/models/product.py | superxuu/fastapi_pony_2 | 297ef01cc009a40af891593018565fe5b06b4ee8 | [
"MIT"
] | 2 | 2020-06-17T09:53:13.000Z | 2020-10-23T18:20:13.000Z | src/models/product.py | superxuu/fastapi_pony_2 | 297ef01cc009a40af891593018565fe5b06b4ee8 | [
"MIT"
] | null | null | null | src/models/product.py | superxuu/fastapi_pony_2 | 297ef01cc009a40af891593018565fe5b06b4ee8 | [
"MIT"
] | null | null | null | from datetime import datetime
from decimal import Decimal
from src.models import db, Required, Optional
| 25.875 | 71 | 0.729469 |
a2c13ed5fd70470e8dadf6ddcecfc8a4c03d41b3 | 32,397 | py | Python | tgthemer/themer.py | eskilop/TgThemer-py | 3ebb7d1c3c78c32754cee82aa92a6c97ac18f27f | [
"MIT"
] | 1 | 2020-05-12T21:33:56.000Z | 2020-05-12T21:33:56.000Z | tgthemer/themer.py | eskilop/TgThemer-py | 3ebb7d1c3c78c32754cee82aa92a6c97ac18f27f | [
"MIT"
] | null | null | null | tgthemer/themer.py | eskilop/TgThemer-py | 3ebb7d1c3c78c32754cee82aa92a6c97ac18f27f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from .color import Color
import shutil
import os
std_none = "#FF000000"
| 52.763844 | 96 | 0.619131 |
a2c331cfd9f663070b5e40ecc3ae373845f2e7c4 | 662 | py | Python | plotly/validators/layout/xaxis/_constraintoward.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/layout/xaxis/_constraintoward.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/layout/xaxis/_constraintoward.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
| 26.48 | 70 | 0.560423 |
a2c3c1fae06adac5e17ba36e3e5bcfafc2b96e97 | 4,057 | py | Python | openmdao/core/test/test_group_derivatives.py | jcchin/project_clippy | ed38e11a96848a81c024c5a0e5821bc5db04fdc7 | [
"Apache-2.0"
] | null | null | null | openmdao/core/test/test_group_derivatives.py | jcchin/project_clippy | ed38e11a96848a81c024c5a0e5821bc5db04fdc7 | [
"Apache-2.0"
] | null | null | null | openmdao/core/test/test_group_derivatives.py | jcchin/project_clippy | ed38e11a96848a81c024c5a0e5821bc5db04fdc7 | [
"Apache-2.0"
] | null | null | null | """ Testing group-level finite difference. """
import unittest
import numpy as np
from openmdao.components.param_comp import ParamComp
from openmdao.core.component import Component
from openmdao.core.group import Group
from openmdao.core.problem import Problem
from openmdao.test.converge_diverge import ConvergeDivergeGroups
from openmdao.test.simple_comps import SimpleCompDerivMatVec
from openmdao.test.util import assert_rel_error
if __name__ == "__main__":
unittest.main()
| 32.456 | 90 | 0.562731 |
a2c3dab8b5c5b5fea5c21366ad80b3c046f70f38 | 2,235 | py | Python | rsbroker/core/user.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | rsbroker/core/user.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | rsbroker/core/user.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import ujson
from rsbroker.core.upstream import RTCWebSocketClient
| 25.988372 | 82 | 0.521253 |
a2c3e3fd647b669204ee60f34d14ceb1b5e30f77 | 12,756 | py | Python | src/constraint_solver.py | khairulislam/phys | fc702520fcd3b23022b9253e7d94f878978b4500 | [
"MIT"
] | null | null | null | src/constraint_solver.py | khairulislam/phys | fc702520fcd3b23022b9253e7d94f878978b4500 | [
"MIT"
] | null | null | null | src/constraint_solver.py | khairulislam/phys | fc702520fcd3b23022b9253e7d94f878978b4500 | [
"MIT"
] | null | null | null | from pgm.pgmplayer import PGMPlayer
import cps_constraints as con
from operator import itemgetter
import uuid
import os
| 43.835052 | 93 | 0.409768 |
a2c43ed1aafc32a3c7c2532f7e7717a9aecd874b | 1,901 | py | Python | cpu_ver/funkyyak/tests/test_util.py | bigaidream-projects/drmad | a4bb6010595d956f29c5a42a095bab76a60b29eb | [
"MIT"
] | 119 | 2016-02-24T17:20:50.000Z | 2021-05-28T21:35:16.000Z | cpu_ver/funkyyak/tests/test_util.py | LinZichuan/drmad | a4bb6010595d956f29c5a42a095bab76a60b29eb | [
"MIT"
] | 8 | 2016-02-25T03:13:38.000Z | 2017-09-15T00:54:52.000Z | cpu_ver/funkyyak/tests/test_util.py | LinZichuan/drmad | a4bb6010595d956f29c5a42a095bab76a60b29eb | [
"MIT"
] | 31 | 2016-03-10T04:57:11.000Z | 2021-05-02T01:00:04.000Z | import numpy as np
import itertools as it
from funkyyak import grad
from copy import copy
| 33.350877 | 88 | 0.602315 |
a2c46503127480012c6964e3561a31835e11fb15 | 2,772 | py | Python | game_main.py | smarTHh2019/melody_path_finder | 79cf0108afa12dd18be099d2c8c6291be992ff0d | [
"MIT"
] | null | null | null | game_main.py | smarTHh2019/melody_path_finder | 79cf0108afa12dd18be099d2c8c6291be992ff0d | [
"MIT"
] | null | null | null | game_main.py | smarTHh2019/melody_path_finder | 79cf0108afa12dd18be099d2c8c6291be992ff0d | [
"MIT"
] | null | null | null | import time
import random
import pygame
import pygame.midi
import numpy as np
from typing import Tuple
__author__ = "Thomas Heller"
AV_SIZE = 20
WIN_X = 30 * AV_SIZE
WIN_Y = 30 * AV_SIZE
DIFF_MAX = np.sqrt(WIN_X**2 + WIN_Y**2)
if __name__=="__main__":
main() | 31.862069 | 121 | 0.636364 |
a2c538a523c20cc3cd74501dba0bcc96fa5757c1 | 4,806 | py | Python | lib/preproc.py | ayshrv/paperpile-notion | 0fe73aee1e6bfcf3105b9a417182736a285ec797 | [
"Apache-2.0"
] | null | null | null | lib/preproc.py | ayshrv/paperpile-notion | 0fe73aee1e6bfcf3105b9a417182736a285ec797 | [
"Apache-2.0"
] | null | null | null | lib/preproc.py | ayshrv/paperpile-notion | 0fe73aee1e6bfcf3105b9a417182736a285ec797 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, List
def format_entry(entry: Dict[str, str], journals: List[Dict[str, str]], conferences: List[Dict[str, str]]) -> Dict[str, Dict[str, str]]:
""" Produces a dictionary in format column_name: {type: x, value: y} for each value in the entry"""
########## VENUE ################################
conference_tuple = [ [c['short'], c['name']] for c in conferences]
# Select the conference shortname based on proceedings
if entry['Item type'] == 'Journal Article':
if 'Full journal' in entry.keys() and entry['Full journal']:
venue = [j['short'] for j in journals if j['name'] == entry['Full journal'].strip()]
else:
venue = [j['short'] for j in journals if j['name'] == entry['Journal'].strip()]
if not venue:
venue = [entry['Journal'].strip()[:100]]
elif entry['Item type'] == 'Conference Paper':
venue = [
c['short'] for c in conferences if c['name'] == match(
entry['Proceedings title'].strip().replace('{','').replace('}',''), conference_tuple
)[1]]
if not venue:
venue = [entry['Proceedings title'].strip()[:100]]
elif entry['Item type'] == 'Preprint Manuscript':
if "openreview" in entry['URLs'].strip().split(';')[0]:
venue = ["OpenReview"]
else:
venue = [entry['Archive prefix'].strip()]
elif entry['Item type'] == 'Book Chapter':
venue = [entry['Book title'].strip()]
else:
venue = []
# Arxiv links are privileged
links = [x for x in entry['URLs'].strip().split(';')]
arxiv_links = [x for x in links if 'arxiv' in x]
if len(arxiv_links) > 0:
selected_link = arxiv_links[0]
venue.append('arXiv')
else:
selected_link = links[0]
# Multichoice don't accept commas and maybe other punctuation, too
for i, v in enumerate(venue):
exclude = set([','])
venue[i] = ''.join(ch for ch in v if ch not in exclude)
###################################################
############## DATE #################################
date = ''
if 'Date published' in entry.keys():
if entry['Date published'].strip() != '':
date = entry['Date published'].strip()
if date == '':
if 'Publication year' in entry.keys():
if entry['Publication year'].strip() != '':
date = entry['Publication year'].strip() + '-01-01'
if len(date) > 10: # YYYY-MM-DD....
date = date[:10]
if len(date) == 4: # YYYY
date = entry['Publication year'].strip() + '-01-01'
if len(date) == 7: # YYYY-MM
date = date + '-01'
if date == '':
date = '2000-01-01'
######################################################
all_labels = [x.strip() for x in entry['Labels filed in'].strip().split(';')]
all_folders = [x.strip() for x in entry['Folders filed in'].strip().split(';')]
if len(all_labels) == 1 and len(all_labels[0]) == 0:
all_labels = []
if len(all_folders) == 1 and len(all_folders[0]) == 0:
all_folders = []
# categories = [x for x in all_labels if ' - ' not in x]
# methods = [x.split(' - ')[1] for x in all_labels if ' - ' in x]
formatted_entry = {
'Item type': {'type': 'select', 'value': entry['Item type'].strip()},
'Authors': {'type': 'multi_select', 'value': entry['Authors'].strip().split(',')},
'Title': {'type': 'title', 'value': entry['Title'].strip().replace('{','').replace('}','')},
'Venues': {'type': 'multi_select', 'value': venue},
'Date': {'type': 'date', 'value': date},
'Link': {'type': 'url', 'value': selected_link},
'Labels': {'type': 'multi_select', 'value': all_labels}, #, 'color': [COLOR_MAP[cat]['color'] for cat in categories]}
'Folders': {'type': 'multi_select', 'value': all_folders}
}
# if "reading-list" in all_labels:
status_value = 'to-be-read'
formatted_entry['Status'] = {'type': 'select', 'value': status_value}
filtered_formatted_entry = formatted_entry.copy()
keys_delete = []
for key, value in filtered_formatted_entry.items():
if value["value"] in ['', "", [], [''], [""]]:
keys_delete.append(key)
for key in keys_delete:
del filtered_formatted_entry[key]
return formatted_entry, filtered_formatted_entry
| 38.448 | 136 | 0.52913 |
a2c5873d31b6625bca48f32ced06274ab2625243 | 12,181 | py | Python | twitter/Update.py | bhargavz/py-twitter-sentiment-analysis | fc611df592ed607e58c2600bd20fceffa309108c | [
"MIT"
] | null | null | null | twitter/Update.py | bhargavz/py-twitter-sentiment-analysis | fc611df592ed607e58c2600bd20fceffa309108c | [
"MIT"
] | null | null | null | twitter/Update.py | bhargavz/py-twitter-sentiment-analysis | fc611df592ed607e58c2600bd20fceffa309108c | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: Update.py
#
# This class provides mechanisms to update, reply to, retweet status
# messages and send direct messages
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
import sys, time, json, logging
from sochi.twitter.Login import Login
from sochi.twitter.TwitterBase import TwitterBase
from sochi.twitter.auth_settings import *
if __name__ == '__main__':
main(sys.argv)
| 33.190736 | 153 | 0.531566 |
a2c66a4215e8e48df86487ea705d0d4b4b919ca2 | 654 | py | Python | library/utils/time.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | library/utils/time.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | library/utils/time.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals, absolute_import
from datetime import datetime
from pytz import UTC
from dateutil.parser import parse
fmt = '%Y-%m-%d %H:%M:%S'
utc_fmt = "%Y-%m-%dT%H:%M:%SZ"
def isotime(at=None):
    """Stringify time in ISO 8601 format (UTC, e.g. 2020-01-01T12:00:00Z).

    Parameters
    ----------
    at : datetime, optional
        Time to format; defaults to the current UTC time.  Naive values
        are assumed to already be UTC; aware values are converted.

    Bug fixed: the original called ``at.replace(tzinfo=UTC)`` and discarded
    the result (datetime.replace returns a *new* object), leaving the naive
    datetime untouched; the result is now bound before formatting.
    """
    if at is None:
        at = datetime.utcnow()
    if not at.tzinfo:  # naive: stamp it as UTC
        at_utc = at.replace(tzinfo=UTC)
    else:  # aware: convert to UTC
        at_utc = at.astimezone(UTC)
    return at_utc.strftime(utc_fmt)
| 21.8 | 56 | 0.652905 |
a2ce4992ffbad38fcb41d65444677ff2a942a09e | 5,612 | py | Python | aguas_altas/build_gdb/build_gdb_table.py | PEM-Humboldt/caracterizacion_paisajes_sonoros_ppii | 2b99a69faeb5cc094e582a2b6929ef18bd4a3c4e | [
"MIT"
] | null | null | null | aguas_altas/build_gdb/build_gdb_table.py | PEM-Humboldt/caracterizacion_paisajes_sonoros_ppii | 2b99a69faeb5cc094e582a2b6929ef18bd4a3c4e | [
"MIT"
] | null | null | null | aguas_altas/build_gdb/build_gdb_table.py | PEM-Humboldt/caracterizacion_paisajes_sonoros_ppii | 2b99a69faeb5cc094e582a2b6929ef18bd4a3c4e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Build Geo Database for PaisajesSonorosTB
----------------------------------------
This script compiles output from multiple scripts that should be executed previously:
- audio_metadata
- acoustic_indices
- graphical_soundscapes
- soundscape_composition
"""
import numpy as np
import pandas as pd
import glob
import os
#%% Load acoustic indices, graphical soundscapes, and manual annotations to build the GDB
# Audio metadata
df_metadata = pd.read_csv('../audio_metadata/audio_metadata_lluvias.csv')
# Acoustic indices
df_indices = pd.read_csv('../acoustic_community_characterization/acoustic_indices/dataframes/allsites_acoustic_indices_env.csv')
# Graphical soundscapes
flist = glob.glob('../acoustic_community_characterization/graphical_soundscapes/dataframes/*.csv')
df_graph = pd.DataFrame()
# One row per site: each per-site CSV is flattened (minus its 'hour' column)
# into a single vector, named by the 4-character site code from the file name.
# NOTE(review): DataFrame.append was removed in pandas 2.0; this script needs
# pandas < 2.0 (or a migration to pd.concat).
for fname in flist:
    aux = pd.read_csv(fname)
    aux.drop(columns='hour', inplace=True)
    aux = pd.Series(aux.values.ravel(), name=os.path.basename(fname)[0:4])
    df_graph = df_graph.append(aux)
# Soundscape components using manual annotations
df_comp = pd.read_csv('../soundscape_composition/dataframes/presence_absence_global_components.csv')
# Environmental data ANH_to_GXX data
df_eventID = pd.read_csv('../env_data/ANH_to_GXX.csv')[['sensor_name', 'eventID']]
#%% Process dataframes to meet GDB criteria
# Compute metadata per site
# Aggregates recording metadata per site: sample rate, bit depth, recorder
# model, first/last recording date and time, and number of recordings.
df_site_metadata = pd.DataFrame()
for site_idx, site in df_metadata.groupby('site'):
    site_metadata = pd.Series({'sensor_name': site_idx,
                               'TASA_MUEST': site['sample.rate'].unique()[0].astype(int),
                               'RES_BITS': site['bits'].unique()[0].astype(int),
                               'MICROFONO': 'Audiomoth v1.20',
                               'REF_GRAB': 'Audiomoth v1.20',
                               'FECHA_INI': site.date.sort_values().iloc[0][0:10],
                               'FECHA_FIN': site.date.sort_values().iloc[-1][0:10],
                               'HORA_INI': site.date.sort_values().iloc[0][11:],
                               'HORA_FIN': site.date.sort_values().iloc[-1][11:],
                               'NUM_GRAB': len(site),
                               'TASA_GRAB': '60 segundos cada 1800 segundos',
                               'ESTACIONAL': 'Hmedo',
                               'ALTURA': 1.5
                               })
    df_site_metadata = df_site_metadata.append(site_metadata, ignore_index=True)
# Compute proportion of components per site
# Percentage of annotated recordings per site that contain geophony /
# anthropophony / biophony (GEO / ANT / BIO look like presence-absence
# flags, per the source file name -- confirm).
df_comp['sensor_name'] = df_comp['fname'].str[0:4]
df_site_comp = pd.DataFrame()
for site_idx, site in df_comp.groupby('sensor_name'):
    site_comp = pd.Series({'sensor_name': site_idx,
                           'GEOFONIA': (site['GEO'].sum()/len(site) * 100).round(3),
                           'ANTROPOFON': (site['ANT'].sum()/len(site) * 100).round(3),
                           'BIOFONIA': (site['BIO'].sum()/len(site) * 100).round(3)
                           })
    df_site_comp = df_site_comp.append(site_comp, ignore_index=True)
# Acoustic indices per site
# Quartile summaries (Q25/Q50/Q75) of each acoustic index per site, plus
# ASU: the fraction of occupied (>0) cells in the site's graphical
# soundscape vector.
# NOTE(review): the AEI_* columns are computed from site.H, while HF/HT use
# site.Hf/site.Ht -- confirm that H is the intended source for AEI.
df_site_indices = pd.DataFrame()
for site_idx, site in df_indices.groupby('sensor_name'):
    site_indices = pd.Series({'sensor_name': site_idx,
                              'ACI_Q25': site.ACI.quantile(q=0.25),
                              'ACI_Q50': site.ACI.quantile(q=0.5),
                              'ACI_Q75': site.ACI.quantile(q=0.75),
                              'ADI_Q25': site.ADI.quantile(q=0.25),
                              'ADI_Q50': site.ADI.quantile(q=0.5),
                              'ADI_Q75': site.ADI.quantile(q=0.75),
                              'NDSI_Q25': site.NDSI.quantile(q=0.25),
                              'NDSI_Q50': site.NDSI.quantile(q=0.5),
                              'NDSI_Q75': site.NDSI.quantile(q=0.75),
                              'BIO_Q25': site.BI.quantile(q=0.25),
                              'BIO_Q50': site.BI.quantile(q=0.5),
                              'BIO_Q75': site.BI.quantile(q=0.75),
                              'AEI_Q25': site.H.quantile(q=0.25),
                              'AEI_Q50': site.H.quantile(q=0.5),
                              'AEI_Q75': site.H.quantile(q=0.75),
                              'NP_Q25': site.NP.quantile(q=0.25),
                              'NP_Q50': site.NP.quantile(q=0.5),
                              'NP_Q75': site.NP.quantile(q=0.75),
                              'SC_Q25': site.SC.quantile(q=0.25),
                              'SC_Q50': site.SC.quantile(q=0.5),
                              'SC_Q75': site.SC.quantile(q=0.75),
                              'HF_Q25': site.Hf.quantile(q=0.25),
                              'HF_Q50': site.Hf.quantile(q=0.5),
                              'HF_Q75': site.Hf.quantile(q=0.75),
                              'HT_Q25': site.Ht.quantile(q=0.25),
                              'HT_Q50': site.Ht.quantile(q=0.5),
                              'HT_Q75': site.Ht.quantile(q=0.75),
                              'ASU': (df_graph.loc[site_idx,:]>0).sum()/df_graph.shape[1]
                              })
    df_site_indices = df_site_indices.append(site_indices, ignore_index=True)
# Rename to the GDB column name for the sampling-event identifier.
df_eventID.rename(columns={'eventID': 'ID_MUEST_PT'}, inplace=True)
#%% Build GDB
# Join all per-site tables on 'sensor_name' and export the final table.
df_gdb = df_eventID.merge(df_site_metadata, on='sensor_name')
df_gdb = df_gdb.merge(df_site_comp, on='sensor_name')
df_gdb = df_gdb.merge(df_site_indices, on='sensor_name')
df_gdb.to_csv('./dataframes/gdb_site.csv', index=False)
a2cef5581d6639f72a0f834dc67419807bab8ec4 | 759 | py | Python | dear_petition/petition/migrations/0008_auto_20200208_0222.py | robert-w-gries/dear-petition | 35244afc8e967b41ae5265ae31fd13b26e4e835a | [
"MIT"
] | 4 | 2020-04-01T14:42:45.000Z | 2021-12-12T21:11:11.000Z | dear_petition/petition/migrations/0008_auto_20200208_0222.py | robert-w-gries/dear-petition | 35244afc8e967b41ae5265ae31fd13b26e4e835a | [
"MIT"
] | 142 | 2019-08-12T19:08:34.000Z | 2022-03-29T23:05:35.000Z | dear_petition/petition/migrations/0008_auto_20200208_0222.py | robert-w-gries/dear-petition | 35244afc8e967b41ae5265ae31fd13b26e4e835a | [
"MIT"
] | 8 | 2020-02-04T20:37:00.000Z | 2021-03-28T13:28:32.000Z | # Generated by Django 2.2.4 on 2020-02-08 02:22
from django.db import migrations
| 29.192308 | 76 | 0.671937 |
a2cf483b7a318378a4b51126b7de177267f4c55e | 23 | py | Python | auto_ml/_version.py | amlanbanerjee/auto_ml | db8e1d2cfa93f13a21e55739acfc8d99837e91b0 | [
"MIT"
] | 1,671 | 2016-08-09T04:44:48.000Z | 2022-03-27T01:29:23.000Z | auto_ml/_version.py | amlanbanerjee/auto_ml | db8e1d2cfa93f13a21e55739acfc8d99837e91b0 | [
"MIT"
] | 428 | 2016-08-08T00:13:04.000Z | 2022-01-19T10:09:05.000Z | auto_ml/_version.py | amlanbanerjee/auto_ml | db8e1d2cfa93f13a21e55739acfc8d99837e91b0 | [
"MIT"
] | 334 | 2016-08-29T12:34:18.000Z | 2022-01-31T09:14:30.000Z | __version__ = "2.9.10"
| 11.5 | 22 | 0.652174 |
a2d07750f771787adbd733681780afac8dc73bc5 | 3,442 | py | Python | maya/libs/sceneutils.py | bhsingleton/dcc | 9ad59f1cb8282df938062e15c020688dd268a722 | [
"MIT"
] | 1 | 2021-08-06T16:04:24.000Z | 2021-08-06T16:04:24.000Z | maya/libs/sceneutils.py | bhsingleton/dcc | 9ad59f1cb8282df938062e15c020688dd268a722 | [
"MIT"
] | null | null | null | maya/libs/sceneutils.py | bhsingleton/dcc | 9ad59f1cb8282df938062e15c020688dd268a722 | [
"MIT"
] | 1 | 2021-08-06T16:04:31.000Z | 2021-08-06T16:04:31.000Z | import maya.cmds as mc
import os
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def isNewScene():
    """
    Check whether the open scene is an untitled (never saved) file.

    :rtype: bool
    """
    scene_path = mc.file(query=True, sceneName=True)
    return len(scene_path) == 0
def isSaveRequired():
    """
    Check whether the open scene has modifications that have not been saved.

    :rtype: bool
    """
    has_unsaved_changes = mc.file(query=True, modified=True)
    return has_unsaved_changes
def currentFilePath():
    """
    Retrieve the normalized path of the open scene file.

    Returns an empty string for an untitled scene.

    :rtype: str
    """
    if isNewScene():
        return ''
    scene_path = mc.file(query=True, sceneName=True)
    return os.path.normpath(scene_path)
def currentFilename():
    """
    Retrieve just the file name component of the open scene file's path.

    :rtype: str
    """
    # os.path.basename(p) is defined as os.path.split(p)[1]
    return os.path.basename(currentFilePath())
def currentDirectory():
    """
    Retrieve the directory component of the open scene file's path.

    :rtype: str
    """
    # os.path.dirname(p) is defined as os.path.split(p)[0]
    return os.path.dirname(currentFilePath())
def removeUserAttributes():
    """
    Strip all user-defined attributes from the currently selected nodes
    (e.g. attributes that carried over through an FBX round trip).

    :rtype: None
    """
    for nodeName in mc.ls(sl=True):
        # Nodes without user-defined attributes return None here.
        userAttrs = mc.listAttr(nodeName, userDefined=True)
        if not userAttrs:
            continue
        for attrName in userAttrs:
            log.info('Removing "%s.%s" attribute.' % (nodeName, attrName))
            mc.deleteAttr('%s.%s' % (nodeName, attrName))
def unloadTurtlePlugin():
    """
    Convenience method used to unload the turtle plugin from the open scene file.
    :rtype: None
    """
    # Check if turtle is loaded; nothing to do otherwise
    #
    isLoaded = mc.pluginInfo('Turtle', query=True, loaded=True)
    if not isLoaded:
        log.info('Could not locate "Turtle" in the open scene file.')
        return
    # Remove all node types associated with turtle
    # (presumably required before the plugin itself can be unloaded)
    #
    nodeTypes = mc.pluginInfo('Turtle', query=True, dependNode=True)
    for nodeType in nodeTypes:
        # List all nodes by type
        #
        nodeNames = mc.ls(type=nodeType)
        numNodeNames = len(nodeNames)
        if numNodeNames == 0:
            continue
        # Unlock and remove nodes
        # (unlock first -- the delete is issued on potentially locked nodes)
        #
        mc.lockNode(nodeNames, lock=False)
        mc.delete(nodeNames)
    # Flush undo queue
    # (presumably so the deleted nodes are not retained by undo history --
    # confirm against Maya's plugin-unload requirements)
    #
    mc.flushUndo()
    # Remove shelf from tab bar
    #
    if mc.shelfLayout('TURTLE', query=True, exists=True):
        log.info('Removing "TURTLE" from the shelf tab!')
        mc.deleteUI('TURTLE', layout=True)
    # Finally unload the plugin itself
    #
    mc.unloadPlugin('Turtle')
def resetWindowPositions():
    """
    Move every active Maya window back to the top left corner of the screen.

    :rtype: None
    """
    for window_name in mc.lsUI(windows=True):
        log.info('Resetting "%s" window...' % window_name)
        mc.window(window_name, edit=True, topLeftCorner=[0, 0])
def resetStartupCameras():
    """
    Restore the three orthographic startup cameras to their home positions.

    :rtype: None
    """
    for view_name in ('top', 'front', 'side'):
        mc.viewSet(view_name, home=True)
| 20.011628 | 92 | 0.621732 |
a2d10542879056ad7800cdebe98204d350251551 | 346 | py | Python | diffir/__init__.py | capreolus-ir/diffir | 90906ce4b7d5f23d6190eea26020f9e4096cb0cd | [
"Apache-2.0"
] | 12 | 2021-03-10T17:04:05.000Z | 2022-01-13T15:44:34.000Z | diffir/__init__.py | capreolus-ir/diffir | 90906ce4b7d5f23d6190eea26020f9e4096cb0cd | [
"Apache-2.0"
] | 7 | 2021-05-19T21:28:52.000Z | 2021-12-16T16:01:40.000Z | diffir/__init__.py | capreolus-ir/diffir | 90906ce4b7d5f23d6190eea26020f9e4096cb0cd | [
"Apache-2.0"
] | null | null | null | __version__ = "0.2.0"
from diffir.weight import Weight
from diffir.weight.custom import CustomWeight
from diffir.weight.unsupervised import ExactMatchWeight
from diffir.measure import Measure
from diffir.measure.qrels import QrelMeasure
from diffir.measure.unsupervised import TopkMeasure
from diffir.weight.weights_builder import WeightBuilder
| 34.6 | 55 | 0.858382 |
a2d1763a00e0070a7178e1445d0a7e1fdef3a6a9 | 34,160 | py | Python | tool/pylib/generator/output/PartBuilder.py | mever/qooxdoo | 2bb08cb6c4ddfaf2425e6efff07deb17e960a050 | [
"MIT"
] | 1 | 2021-02-05T23:00:25.000Z | 2021-02-05T23:00:25.000Z | tool/pylib/generator/output/PartBuilder.py | mever/qooxdoo | 2bb08cb6c4ddfaf2425e6efff07deb17e960a050 | [
"MIT"
] | 3 | 2019-02-18T04:22:52.000Z | 2021-02-21T15:02:54.000Z | tool/pylib/generator/output/PartBuilder.py | mever/qooxdoo | 2bb08cb6c4ddfaf2425e6efff07deb17e960a050 | [
"MIT"
] | 1 | 2021-06-03T23:08:44.000Z | 2021-06-03T23:08:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# MIT: https://opensource.org/licenses/MIT
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Sebastian Werner (wpbasti)
# * Thomas Herchenroeder (thron7)
# * Richard Sternagel (rsternagel)
#
################################################################################
##
# PartBuilder -- create packages and associates parts to packages, from parts configuration and class list
#
# Interface:
# - PartBuilder.getPackages()
##
from misc.Collections import OrderedDict
from misc.Collections import DefaultOrderedDict
from generator.output.Part import Part
from generator.output.Package import Package
from generator.code.Class import CompileOptions
from generator.config.Config import ConfigurationError
| 42.593516 | 157 | 0.600907 |
a2d2d2628caff1c2156c6ad988dc74d14a5fd8cd | 6,486 | py | Python | factorizer/datasets/wmh.py | pashtari/factorizer | 730f295b403a90c1c691f99b529d5d32b635d0c6 | [
"Apache-2.0"
] | 7 | 2022-03-05T00:43:29.000Z | 2022-03-07T01:23:08.000Z | factorizer/datasets/wmh.py | pashtari/factorizer | 730f295b403a90c1c691f99b529d5d32b635d0c6 | [
"Apache-2.0"
] | null | null | null | factorizer/datasets/wmh.py | pashtari/factorizer | 730f295b403a90c1c691f99b529d5d32b635d0c6 | [
"Apache-2.0"
] | 1 | 2022-03-21T05:28:23.000Z | 2022-03-21T05:28:23.000Z | import sys
import numpy as np
import torch
from monai import transforms, data
from ..data import DataModule, ReadImaged, Renamed, Inferer
###################################
# Transform
###################################
###################################
# Data module
###################################
# alias
WMH = WMHDataModule
###################################
# Inference
###################################
| 31.033493 | 79 | 0.561671 |
a2d721ef72b39de52022137d721dac292cbddcad | 890 | py | Python | Python/Topics/Sending-Email/05-pdf-attachment.py | shihab4t/Software-Development | 0843881f2ba04d9fca34e44443b5f12f509f671e | [
"Unlicense"
] | null | null | null | Python/Topics/Sending-Email/05-pdf-attachment.py | shihab4t/Software-Development | 0843881f2ba04d9fca34e44443b5f12f509f671e | [
"Unlicense"
] | null | null | null | Python/Topics/Sending-Email/05-pdf-attachment.py | shihab4t/Software-Development | 0843881f2ba04d9fca34e44443b5f12f509f671e | [
"Unlicense"
] | null | null | null | import imghdr
import smtplib
import os
from email.message import EmailMessage
# Credentials come from the environment; either may be None if unset.
EMAIL_ADDRESS = os.environ.get("GMAIL_ADDRESS")
EMAIL_PASSWORD = os.environ.get("GMAIL_APP_PASS")
# Absolute paths of the PDF files to attach.
pdfs = ["/home/shihab4t/Downloads/Profile.pdf"]
# Connect to Gmail over implicit TLS (port 465) and send one message with
# every PDF attached.
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
    smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
    reciver = "shihab4tdev@gmail.com"
    msg = EmailMessage()
    msg["Subject"] = "Grab dinner this weekend? 2"
    msg["From"] = EMAIL_ADDRESS
    msg["To"] = reciver
    msg.set_content("How about dinner at 6pm this Saturday")
    for pdf in pdfs:
        # NOTE(review): the loop variable 'pdf' (a path string) is shadowed
        # by the open file object of the same name; pdf.name then yields the
        # path again. Works, but renaming one of them would be clearer.
        with open(pdf, "rb") as pdf:
            pdf_data = pdf.read()
            pdf_name = pdf.name
        msg.add_attachment(pdf_data, maintype="application",
                           subtype="octet-stream", filename=pdf_name)
    smtp.send_message(msg)
    print(f"Email was sented to {reciver}")
| 26.969697 | 69 | 0.665169 |
a2d7927bd74ff2bc70037658a7110cb4dffa918c | 43 | py | Python | rcds/project/__init__.py | jordanbertasso/rcds | d3d655a59a350042d65476793db84e761de04829 | [
"BSD-3-Clause"
] | 5 | 2020-07-13T12:40:02.000Z | 2021-08-21T11:18:28.000Z | rcds/project/__init__.py | jordanbertasso/rcds | d3d655a59a350042d65476793db84e761de04829 | [
"BSD-3-Clause"
] | 144 | 2020-07-06T11:26:49.000Z | 2022-02-01T14:33:28.000Z | rcds/project/__init__.py | jordanbertasso/rcds | d3d655a59a350042d65476793db84e761de04829 | [
"BSD-3-Clause"
] | 7 | 2020-07-22T12:38:32.000Z | 2021-12-21T14:27:54.000Z | from .project import Project # noqa: F401
| 21.5 | 42 | 0.744186 |
a2d972366674ffee05dbeed1f54a9dc88de6bb40 | 163 | py | Python | MyEircode.py | MrBrianMonaghan/mapping | 1b525eaaad3b22709a53167b46c901ece365ecab | [
"Apache-2.0"
] | null | null | null | MyEircode.py | MrBrianMonaghan/mapping | 1b525eaaad3b22709a53167b46c901ece365ecab | [
"Apache-2.0"
] | null | null | null | MyEircode.py | MrBrianMonaghan/mapping | 1b525eaaad3b22709a53167b46c901ece365ecab | [
"Apache-2.0"
] | null | null | null | import selenium
from selenium import webdriver
try:
browser = webdriver.Firefox()
browser.get('mikekus.com')
except KeyboardInterrupt:
browser.quit()
| 18.111111 | 33 | 0.742331 |
a2d986e45466635f24a005d6cc044f9cdfb62b88 | 740 | py | Python | tests/test_rotor/rotor_test.py | axevalley/enigma | fdfa5a85dbd4675f195e00e4b7e22d976a3d9015 | [
"MIT"
] | null | null | null | tests/test_rotor/rotor_test.py | axevalley/enigma | fdfa5a85dbd4675f195e00e4b7e22d976a3d9015 | [
"MIT"
] | 28 | 2019-07-30T16:15:52.000Z | 2022-03-14T19:14:25.000Z | tests/test_rotor/rotor_test.py | lukeshiner/enigma | 917066c8f33f67b43f092800ba46220d107f622b | [
"MIT"
] | null | null | null | """Base class for rotor tests."""
import unittest
from enigma.rotor.reflector import Reflector
from enigma.rotor.rotor import Rotor
| 24.666667 | 65 | 0.631081 |
a2deabeee99e67fa9e9a47d417ca86a406f16c31 | 2,186 | py | Python | kyu_8/check_the_exam/test_check_exam.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 1 | 2022-02-12T05:56:04.000Z | 2022-02-12T05:56:04.000Z | kyu_8/check_the_exam/test_check_exam.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 182 | 2020-04-30T00:51:36.000Z | 2021-09-07T04:15:05.000Z | kyu_8/check_the_exam/test_check_exam.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 4 | 2020-04-29T22:04:20.000Z | 2021-07-13T20:04:14.000Z | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS ARRAYS NUMBERS BASIC LANGUAGE FEATURES
import unittest
import allure
from utils.log_func import print_log
from kyu_8.check_the_exam.check_exam import check_exam
| 34.15625 | 94 | 0.531565 |
a2dec2415ed78800e66aae16391df2b37d8f56eb | 1,193 | py | Python | pysoup/venv/__init__.py | illBeRoy/pysoup | 742fd6630e1be27c275cb8dc6ee94412472cb20b | [
"MIT"
] | 4 | 2016-02-21T12:40:44.000Z | 2019-06-13T13:23:19.000Z | pysoup/venv/__init__.py | illBeRoy/pysoup | 742fd6630e1be27c275cb8dc6ee94412472cb20b | [
"MIT"
] | null | null | null | pysoup/venv/__init__.py | illBeRoy/pysoup | 742fd6630e1be27c275cb8dc6ee94412472cb20b | [
"MIT"
] | 1 | 2020-07-16T12:22:12.000Z | 2020-07-16T12:22:12.000Z | import os.path
from twisted.internet import defer
import pysoup.utils
| 29.097561 | 134 | 0.668064 |
a2df2293ad90461c1622171c3d5669f2f6f7fd84 | 2,791 | py | Python | yggdrasil/metaschema/datatypes/InstanceMetaschemaType.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | yggdrasil/metaschema/datatypes/InstanceMetaschemaType.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | yggdrasil/metaschema/datatypes/InstanceMetaschemaType.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | from yggdrasil.metaschema.datatypes import MetaschemaTypeError
from yggdrasil.metaschema.datatypes.MetaschemaType import MetaschemaType
from yggdrasil.metaschema.datatypes.JSONObjectMetaschemaType import (
JSONObjectMetaschemaType)
from yggdrasil.metaschema.properties.ArgsMetaschemaProperty import (
ArgsMetaschemaProperty)
| 34.036585 | 78 | 0.636331 |
a2df9c5cd443a1cdbe81e54c4e448271480f6781 | 368 | py | Python | battleships/migrations/0004_auto_20181202_1852.py | ArturAdamczyk/Battleships | 748e4fa87ed0c17c57abbdf5a0a2bca3c91dff24 | [
"MIT"
] | null | null | null | battleships/migrations/0004_auto_20181202_1852.py | ArturAdamczyk/Battleships | 748e4fa87ed0c17c57abbdf5a0a2bca3c91dff24 | [
"MIT"
] | null | null | null | battleships/migrations/0004_auto_20181202_1852.py | ArturAdamczyk/Battleships | 748e4fa87ed0c17c57abbdf5a0a2bca3c91dff24 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.3 on 2018-12-02 17:52
from django.db import migrations
| 19.368421 | 51 | 0.589674 |
a2e147bc50d8522b84f76610398b1cf2e73d60bb | 11,168 | py | Python | jme/stagecache/text_metadata.py | jmeppley/stagecache | a44f93b7936e1c6ea40dec0a31ad9c19d2415f3a | [
"MIT"
] | null | null | null | jme/stagecache/text_metadata.py | jmeppley/stagecache | a44f93b7936e1c6ea40dec0a31ad9c19d2415f3a | [
"MIT"
] | null | null | null | jme/stagecache/text_metadata.py | jmeppley/stagecache | a44f93b7936e1c6ea40dec0a31ad9c19d2415f3a | [
"MIT"
] | null | null | null | """
Functions for storing and retrieving cache metadata from text files.
Each Cache asset is a path: /path/filename
There are four metadata files in the cache for each:
/path/.stagecache.filename/size The size of the asset in bytes
/path/.stagecache.filename/cache_lock The requested end time of the cache
/path/.stagecache.filename/log A record of past requests
/path/.stagecache.filename/write_lock
Exists if cache being updated
There are also global metadata files in cache_root:
.stagecache.global/asset_list list of assets in this cache
.stagecache.global/write_lock
Usage:
Initialize TargetMetadata() class with cache_root and target paths.
Initialize CacheMetadata() class with cache_root path
TargetMetadata Functions:
get_cached_target_size(): returns size and date from file
set_cached_target_size(size): writes size to file
get_last_lock_date(): returns the most recent lock end date
set_cache_lock_date(date): writes new date to lock file
get_write_lock():
mark file as in progress (wait for existing lock)
release_write_lock(): remove in_progress mark
CacheMetadata Functions:
get_write_lock()
iter_cached_files(locked=None):
return list of assets with sizes and lock dates
remove_cached_file(path): remove record of asset
add_cached_file(path): add record of asset
All functions take cache=cache_root as a kwarg
All get_ functions throw FileNotFound exception if asset not yet in cache
"""
import logging
import os
import time
import stat
from contextlib import contextmanager
LOGGER = logging.getLogger(name='metadata')
| 36.980132 | 86 | 0.576916 |
a2e1fed84d2ed3d71ec400a1f6a513cfa6d50f07 | 3,858 | py | Python | lib/roi_data/minibatch.py | BarneyQiao/pcl.pytorch | 4e0280e5e1470f705e620eda26f881d627c5016c | [
"MIT"
] | 233 | 2019-05-10T07:17:42.000Z | 2022-03-30T09:24:16.000Z | lib/roi_data/minibatch.py | Michael-Steven/Crack_Image_WSOD | 4e8591a7c0768cee9eb7240bb9debd54824f5b33 | [
"MIT"
] | 78 | 2019-05-10T21:10:47.000Z | 2022-03-29T13:57:32.000Z | lib/roi_data/minibatch.py | Michael-Steven/Crack_Image_WSOD | 4e8591a7c0768cee9eb7240bb9debd54824f5b33 | [
"MIT"
] | 57 | 2019-05-10T07:17:37.000Z | 2022-03-24T04:43:24.000Z | import numpy as np
import numpy.random as npr
import cv2
from core.config import cfg
import utils.blob as blob_utils
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader.

    ``is_training`` is accepted for interface compatibility but does not
    affect the result: the same three blob names are always returned.
    """
    # data blob: holds a batch of N images, each with 3 channels
    return ['data', 'rois', 'labels']
def get_minibatch(roidb, num_classes):
    """Given a roidb, construct a minibatch sampled from it.

    :param roidb: list holding exactly one image's roidb entry (asserted).
    :param num_classes: width of the per-image labels vector.
    :return: (blobs, True) where blobs maps 'data' -> image blob,
        'rois' -> float32 array with rows [batch_index, 4 box coords],
        'labels' -> float32 array with one num_classes-wide row per image.
    """
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    blobs = {k: [] for k in get_minibatch_blob_names()}
    # Get the input image blob
    im_blob, im_scales = _get_image_blob(roidb)
    assert len(im_scales) == 1, "Single batch only"
    assert len(roidb) == 1, "Single batch only"
    blobs['data'] = im_blob
    rois_blob = np.zeros((0, 5), dtype=np.float32)
    labels_blob = np.zeros((0, num_classes), dtype=np.float32)
    num_images = len(roidb)
    for im_i in range(num_images):
        labels, im_rois = _sample_rois(roidb[im_i], num_classes)
        # Add to RoIs blob: scale boxes into the resized image and prepend
        # the batch index column.
        rois = _project_im_rois(im_rois, im_scales[im_i])
        batch_ind = im_i * np.ones((rois.shape[0], 1))
        rois_blob_this_image = np.hstack((batch_ind, rois))
        if cfg.DEDUP_BOXES > 0:
            # Deduplicate boxes: quantize coordinates by DEDUP_BOXES, hash
            # each row via a dot product with powers of 1e3, and keep only
            # the first box per unique hash.
            v = np.array([1, 1e3, 1e6, 1e9, 1e12])
            hashes = np.round(rois_blob_this_image * cfg.DEDUP_BOXES).dot(v)
            _, index, inv_index = np.unique(hashes, return_index=True,
                                            return_inverse=True)
            rois_blob_this_image = rois_blob_this_image[index, :]
        rois_blob = np.vstack((rois_blob, rois_blob_this_image))
        # Add to labels blob
        labels_blob = np.vstack((labels_blob, labels))
    blobs['rois'] = rois_blob
    blobs['labels'] = labels_blob
    # Second element is always True here -- presumably a "valid minibatch"
    # flag expected by callers; TODO confirm against the data loader.
    return blobs, True
def _sample_rois(roidb, num_classes):
    """Generate a random sample of RoIs.

    Returns the image-level labels reshaped to a single row, together with
    at most ``cfg.TRAIN.BATCH_SIZE_PER_IM`` randomly selected boxes.
    ``num_classes`` is unused here but kept for interface parity.
    """
    labels = roidb['gt_classes']
    rois = roidb['boxes']
    limit = cfg.TRAIN.BATCH_SIZE_PER_IM if cfg.TRAIN.BATCH_SIZE_PER_IM > 0 else np.inf
    if limit < rois.shape[0]:
        keep = npr.permutation(rois.shape[0])[:limit]
        rois = rois[keep, :]
    return labels.reshape(1, -1), rois
def _get_image_blob(roidb):
    """Builds an input blob from the images in the roidb at the specified
    scales.

    :param roidb: list of roidb entries; each entry needs 'image' (file
        path) and 'flipped' keys.
    :return: (blob, im_scales) -- the stacked image blob and one scale
        factor per image.
    """
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    scale_inds = np.random.randint(
        0, high=len(cfg.TRAIN.SCALES), size=num_images)
    processed_ims = []
    im_scales = []
    for i in range(num_images):
        # cv2.imread reports a missing/unreadable file by returning None.
        im = cv2.imread(roidb[i]['image'])
        assert im is not None, \
            'Failed to read image \'{}\''.format(roidb[i]['image'])
        # If NOT using opencv to read in images, uncomment following lines
        # if len(im.shape) == 2:
        #     im = im[:, :, np.newaxis]
        #     im = np.concatenate((im, im, im), axis=2)
        # # flip the channel, since the original one using cv2
        # # rgb -> bgr
        # im = im[:, :, ::-1]
        if roidb[i]['flipped']:
            # Horizontal flip (axis 1 is width) for flipped roidb entries.
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = blob_utils.prep_im_for_blob(
            im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale[0])
        processed_ims.append(im[0])
    # Create a blob to hold the input images [n, c, h, w]
    blob = blob_utils.im_list_to_blob(processed_ims)
    return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
| 33.842105 | 80 | 0.630897 |
a2e200b1e2fac4ccc713c3e1526076efebc09cea | 1,288 | py | Python | src/PrimaryInputs.py | elastacloud/input-output-tables | 82f932c8627071bc245e178f5b47a7c1104c4e4c | [
"Apache-2.0"
] | null | null | null | src/PrimaryInputs.py | elastacloud/input-output-tables | 82f932c8627071bc245e178f5b47a7c1104c4e4c | [
"Apache-2.0"
] | null | null | null | src/PrimaryInputs.py | elastacloud/input-output-tables | 82f932c8627071bc245e178f5b47a7c1104c4e4c | [
"Apache-2.0"
] | null | null | null | import abc
import os
import pandas as pd
import numpy as np
from EoraReader import EoraReader
| 35.777778 | 96 | 0.615683 |
a2e589c4ee6ca6ac8b468da944e0f2d14d31872f | 695 | py | Python | toto/methods/client_error.py | VNUELIVE/Toto | 6940b4114fc6b680e0d40ae248b7d2599c954f81 | [
"MIT"
] | null | null | null | toto/methods/client_error.py | VNUELIVE/Toto | 6940b4114fc6b680e0d40ae248b7d2599c954f81 | [
"MIT"
] | null | null | null | toto/methods/client_error.py | VNUELIVE/Toto | 6940b4114fc6b680e0d40ae248b7d2599c954f81 | [
"MIT"
] | null | null | null | import logging
from toto.invocation import *
| 36.578947 | 91 | 0.723741 |
a2e5b6c37644bb0cda6e0ffc3d078b3332260604 | 1,945 | py | Python | parallelformers/policies/gptj.py | Oaklight/parallelformers | 57fc36f81734c29aaf814e092ce13681d3c28ede | [
"Apache-2.0"
] | 454 | 2021-07-18T02:51:23.000Z | 2022-03-31T04:00:53.000Z | parallelformers/policies/gptj.py | Oaklight/parallelformers | 57fc36f81734c29aaf814e092ce13681d3c28ede | [
"Apache-2.0"
] | 16 | 2021-07-18T10:47:21.000Z | 2022-03-22T18:49:57.000Z | parallelformers/policies/gptj.py | Oaklight/parallelformers | 57fc36f81734c29aaf814e092ce13681d3c28ede | [
"Apache-2.0"
] | 33 | 2021-07-18T04:48:28.000Z | 2022-03-14T22:16:36.000Z | # Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.models.gptj.modeling_gptj import GPTJBlock
from parallelformers.policies.base import Layer, Policy
from parallelformers.utils import AllReduceLinear
| 28.188406 | 74 | 0.607712 |
a2e61afbf4f6a03e376d0464c7acf87dc5bb080e | 503 | py | Python | app/modules/checkerbox.py | hboueix/PyCheckers | c1339a004f30f76a33461b52f9633bbbd1204bb0 | [
"MIT"
] | null | null | null | app/modules/checkerbox.py | hboueix/PyCheckers | c1339a004f30f76a33461b52f9633bbbd1204bb0 | [
"MIT"
] | null | null | null | app/modules/checkerbox.py | hboueix/PyCheckers | c1339a004f30f76a33461b52f9633bbbd1204bb0 | [
"MIT"
] | null | null | null | import pygame
| 23.952381 | 65 | 0.606362 |
a2e6d1a1d562ff46afccc16626cb0e1d9bd964d4 | 1,319 | py | Python | tests/python/test_talos_walk_sl1m_topt.py | daeunSong/multicontact-locomotion-planning | 0aeabe6a7a8d49e54d6996a6126740cc90aa0050 | [
"BSD-2-Clause"
] | 31 | 2019-11-08T14:46:03.000Z | 2022-03-25T08:09:16.000Z | tests/python/test_talos_walk_sl1m_topt.py | pFernbach/multicontact-locomotion-planning | 86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2 | [
"BSD-2-Clause"
] | 21 | 2019-04-12T13:13:31.000Z | 2021-04-02T14:28:15.000Z | tests/python/test_talos_walk_sl1m_topt.py | pFernbach/multicontact-locomotion-planning | 86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2 | [
"BSD-2-Clause"
] | 11 | 2019-04-12T13:03:55.000Z | 2021-11-22T08:19:06.000Z | # Copyright (c) 2020, CNRS
# Authors: Pierre Fernbach <pfernbac@laas.fr>
import unittest
import subprocess
import time
from mlp import LocoPlanner, Config
from utils import check_motion
from hpp.corbaserver.rbprm.utils import ServerManager
if __name__ == '__main__':
    # Run the test cases defined above when executed as a script.
    unittest.main()
| 36.638889 | 125 | 0.718726 |
a2e7779c3e2b321cf059e7d364c94dc2593aa13c | 212 | py | Python | definitions.py | elpeix/kaa | b840613cb5eba876d937faf32031651332e5b5f6 | [
"MIT"
] | null | null | null | definitions.py | elpeix/kaa | b840613cb5eba876d937faf32031651332e5b5f6 | [
"MIT"
] | null | null | null | definitions.py | elpeix/kaa | b840613cb5eba876d937faf32031651332e5b5f6 | [
"MIT"
] | null | null | null | import os
import logging
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
LOG = logging.getLogger()
NAME = 'Sample Server'
VERSION = 'v1.0'
SERVER = 'example.SampleServer'
ENABLE_CORS = True
| 16.307692 | 53 | 0.740566 |
a2e9b6f6bd695b4f20c44aff1b1aeaa6c236f680 | 9,567 | py | Python | uncertainty_baselines/datasets/smcalflow.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | uncertainty_baselines/datasets/smcalflow.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | uncertainty_baselines/datasets/smcalflow.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SMCalflow dataset builder.
The SMCalFlow dataset is from the following paper:
Task-Oriented Dialogue as Dataflow Synthesis (Andreas et al., 2020)
The MultiWoz 2.1 dataset is the released version from the following paper:
Task-Oriented Dialogue as Dataflow Synthesis (Andreas et al., 2020)
The dataset is originally published at:
MultiWOZ 2.1: A Consolidated Multi-Domain Dialogue Dataset with State
Corrections and State Tracking Baselines (Eric et al., 2019)
The released version is processed by:
Transferable Multi-Domain State Generator for Task-Oriented Dialogue Systems
(Wu et al., 2019)
Processed following the directions in:
https://github.com/microsoft/task_oriented_dialogue_as_dataflow_synthesis
"""
import os.path
from typing import Any, Dict, Optional, Type
import seqio
import t5.data
import tensorflow as tf
import tensorflow_datasets as tfds
from uncertainty_baselines.datasets import base
_NUM_TRAIN_SMCALFLOW = 121200
_NUM_VAL_SMCALFLOW = 13499
_NUM_TRAIN_MULTIWOZ = 56668
_NUM_VAL_MULTIWOZ = 7374
_NUM_TEST_MULTIWOZ = 7368
_FEATURES = [
'encoder_input_tokens', 'decoder_target_tokens', 'decoder_input_tokens',
'encoder_segment_ids', 'decoder_segment_ids'
]
def _get_num_examples(name: str) -> Dict[str, int]:
  """Retrieves the number of examples and filenames according to task name."""
  if name == 'smcalflow':
    return {
        tfds.Split.TRAIN: _NUM_TRAIN_SMCALFLOW,
        tfds.Split.VALIDATION: _NUM_VAL_SMCALFLOW,
    }
  if name == 'multiwoz':
    return {
        tfds.Split.TRAIN: _NUM_TRAIN_MULTIWOZ,
        tfds.Split.VALIDATION: _NUM_VAL_MULTIWOZ,
        tfds.Split.TEST: _NUM_TEST_MULTIWOZ,
    }
  raise ValueError('"name" can only be one of "smcalflow" or "multiwoz". '
                   'Got "{}".'.format(name))
| 35.831461 | 93 | 0.691126 |
a2eb8907fa9fa5c982005554035cbb22b3ce7287 | 1,098 | py | Python | 1-FrequencyDivisionMultiplexing.py | mahnooranjum/Demo_CommunicationSystems | 6c3be46f9ad4a38bfe553b9a01855156713e49d9 | [
"MIT"
] | null | null | null | 1-FrequencyDivisionMultiplexing.py | mahnooranjum/Demo_CommunicationSystems | 6c3be46f9ad4a38bfe553b9a01855156713e49d9 | [
"MIT"
] | null | null | null | 1-FrequencyDivisionMultiplexing.py | mahnooranjum/Demo_CommunicationSystems | 6c3be46f9ad4a38bfe553b9a01855156713e49d9 | [
"MIT"
] | null | null | null | '''
==============================================================================
Author:
Mahnoor Anjum
Description:
Digital Multiplexing Techniques:
1- Frequency Division Multiplexing
Contact:
manomaq@gmail.com
==============================================================================
'''
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 10, 0.1);
m1 = np.sin(x)*1000
m2 = np.array(x*x)*10
m3 = np.array(80*x)
plt.plot(x, m1)
plt.plot(x, m2)
plt.plot(x, m3)
plt.title('Sine wave')
plt.xlabel('Time')
plt.ylabel('Messages')
plt.axhline(y=0, color='k')
plt.show()
'''
We will send all the signals at the same time through the channel
but at different frequencies.
Here we show frequency bands by the numbers on rcv1, rcv2, rcv3
'''
rcv1 = []
rcv2 = []
rcv3 = []
for i in range(x.size):
rcv1.append(m1[i])
rcv2.append(m2[i])
rcv3.append(m3[i])
plt.plot(x, rcv1)
plt.plot(x, rcv2)
plt.plot(x, rcv3)
plt.title('FDM')
plt.xlabel('Time')
plt.ylabel('Received')
plt.axhline(y=0, color='k')
plt.show()
| 21.115385 | 78 | 0.547359 |
a2ebe5b887b32f0561c68f37282697177b6753ec | 3,880 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/GL/ATI/text_fragment_shader.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GL/ATI/text_fragment_shader.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GL/ATI/text_fragment_shader.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''OpenGL extension ATI.text_fragment_shader
This module customises the behaviour of the
OpenGL.raw.GL.ATI.text_fragment_shader to provide a more
Python-friendly API
Overview (from the spec)
The ATI_fragment_shader extension exposes a powerful fragment
processing model that provides a very general means of expressing
fragment color blending and dependent texture address modification.
The processing is termed a fragment shader or fragment program and
is specifed using a register-based model in which there are fixed
numbers of instructions, texture lookups, read/write registers, and
constants.
ATI_fragment_shader provides a unified instruction set
for operating on address or color data and eliminates the
distinction between the two. That extension provides all the
interfaces necessary to fully expose this programmable fragment
processor in GL.
ATI_text_fragment_shader is a redefinition of the
ATI_fragment_shader functionality, using a slightly different
interface. The intent of creating ATI_text_fragment_shader is to
take a step towards treating fragment programs similar to other
programmable parts of the GL rendering pipeline, specifically
vertex programs. This new interface is intended to appear
similar to the ARB_vertex_program API, within the limits of the
feature set exposed by the original ATI_fragment_shader extension.
The most significant differences between the two extensions are:
(1) ATI_fragment_shader provides a procedural function call
interface to specify the fragment program, whereas
ATI_text_fragment_shader uses a textual string to specify
the program. The fundamental syntax and constructs of the
program "language" remain the same.
(2) The program object managment portions of the interface,
namely the routines used to create, bind, and delete program
objects and set program constants are managed
using the framework defined by ARB_vertex_program.
(3) ATI_fragment_shader refers to the description of the
programmable fragment processing as a "fragment shader".
In keeping with the desire to treat all programmable parts
of the pipeline consistently, ATI_text_fragment_shader refers
to these as "fragment programs". The name of the extension is
left as ATI_text_fragment_shader instead of
ATI_text_fragment_program in order to indicate the underlying
similarity between the API's of the two extensions, and to
differentiate it from any other potential extensions that
may be able to move even further in the direction of treating
fragment programs as just another programmable area of the
GL pipeline.
Although ATI_fragment_shader was originally conceived as a
device-independent extension that would expose the capabilities of
future generations of hardware, changing trends in programmable
hardware have affected the lifespan of this extension. For this
reason you will now find a fixed set of features and resources
exposed, and the queries to determine this set have been deprecated
in ATI_fragment_shader. Further, in ATI_text_fragment_shader,
most of these resource limits are fixed by the text grammar and
the queries have been removed altogether.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/text_fragment_shader.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ATI.text_fragment_shader import *
from OpenGL.raw.GL.ATI.text_fragment_shader import _EXTENSION_NAME
def glInitTextFragmentShaderATI():
    """Check whether the ATI_text_fragment_shader extension is available.

    Returns a boolean. The import is local so the module can be loaded
    without immediately touching the extensions registry.
    """
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION | 46.190476 | 71 | 0.802835 |
a2ed46d6b33e4e8573f56ac8afc0ade0ec58667b | 7,311 | py | Python | vhog3d.py | parthsuresh/3dvhog | 9a439687a0ce30b86b7730a61733b3f3845d27c5 | [
"MIT"
] | 3 | 2021-05-18T07:48:39.000Z | 2021-12-23T06:35:41.000Z | vhog3d.py | parthsuresh/3dvhog | 9a439687a0ce30b86b7730a61733b3f3845d27c5 | [
"MIT"
] | null | null | null | vhog3d.py | parthsuresh/3dvhog | 9a439687a0ce30b86b7730a61733b3f3845d27c5 | [
"MIT"
] | null | null | null | import numpy as np
import math
from scipy.ndimage import convolve
from tqdm import tqdm
def hog3d(vox_volume, cell_size, block_size, theta_histogram_bins, phi_histogram_bins, step_size=None):
    """Compute a 3D HOG (histogram of oriented gradients) descriptor of a voxel volume.

    Inputs
        vox_volume : a [x x y x z] numpy array defining voxels with values in the range 0-1
        cell_size : edge length of a cubic cell, in voxels (int)
        block_size : edge length of a cubic block, defined in cells (int)
        theta_histogram_bins : number of bins for the polar angle (0..pi, xy plane spec)
        phi_histogram_bins : number of bins for the azimuth angle (0..2*pi)
        step_size : OPTIONAL number of cells the blocks should overlap by.
            NOTE(review): only defaulted below and never used — the block layout
            always strides by `block_size` cells; confirm intended behaviour.

    Returns
        numpy array of shape
        (n_blocks, block_size**3, theta_histogram_bins * phi_histogram_bins)
        with one L2-normalised orientation histogram per cell of every block
        that is neither fully empty nor fully occupied. Shape (0,) if no such
        block exists.
    """
    if step_size is None:
        step_size = block_size
    c = cell_size
    b = block_size
    sx, sy, sz = vox_volume.shape

    num_x_cells = sx // c
    num_y_cells = sy // c
    num_z_cells = sz // c

    # Voxel index of the first voxel of every cell, per axis.
    x_cell_positions = np.arange(num_x_cells) * c
    y_cell_positions = np.arange(num_y_cells) * c
    z_cell_positions = np.arange(num_z_cells) * c

    # Blocks start every `block_size` cells (non-overlapping layout).
    x_block_positions = x_cell_positions[0:num_x_cells:b]
    y_block_positions = y_cell_positions[0:num_y_cells:b]
    z_block_positions = z_cell_positions[0:num_z_cells:b]

    # Discard the last block of an axis if it would extend past the volume.
    # BUGFIX: the original sliced with [:-2], discarding TWO blocks instead of
    # the single incomplete one.
    if x_block_positions[-1] > ((sx + 1) - (c * b)):
        x_block_positions = x_block_positions[:-1]
    if y_block_positions[-1] > ((sy + 1) - (c * b)):
        y_block_positions = y_block_positions[:-1]
    if z_block_positions[-1] > ((sz + 1) - (c * b)):
        z_block_positions = z_block_positions[:-1]

    # Central-difference gradient filters along each axis (same kernels as the
    # original implementation).
    x_filter = np.zeros((3, 3, 3))
    x_filter[0, 1, 1], x_filter[2, 1, 1] = 1, -1
    x_vector = convolve(vox_volume, x_filter, mode='constant', cval=0)

    y_filter = np.zeros((3, 3, 3))
    y_filter[1, 0, 0], y_filter[1, 2, 0] = 1, -1
    y_vector = convolve(vox_volume, y_filter, mode='constant', cval=0)

    z_filter = np.zeros((3, 3, 3))
    z_filter[1, 1, 0], z_filter[1, 1, 2] = 1, -1
    z_vector = convolve(vox_volume, z_filter, mode='constant', cval=0)

    # Per-voxel gradient magnitude. Vectorised: identical arithmetic to the
    # original triple Python loop.
    magnitudes = np.sqrt(x_vector ** 2 + y_vector ** 2 + z_vector ** 2)

    # Per-voxel weight: mean occupancy of the 3x3x3 neighbourhood, shifted by 1
    # so fully empty neighbourhoods still weight contributions with 1.
    kernel_size = 3
    voxel_filter = np.full((kernel_size, kernel_size, kernel_size),
                           1 / (kernel_size * kernel_size * kernel_size))
    weights = convolve(vox_volume, voxel_filter, mode='constant', cval=0) + 1

    # Gradient orientation angles. For voxel values in [0, 1] each filter
    # response lies in [-1, 1], so arccos of the z component is well defined.
    theta = np.arccos(z_vector)                      # polar angle in [0, pi]
    phi = np.arctan2(y_vector, x_vector) + math.pi   # azimuth shifted to [0, 2*pi]

    # Binning setup.
    b_size_voxels = int(c * b)
    t_hist_bins = math.pi / theta_histogram_bins
    p_hist_bins = (2 * math.pi) / phi_histogram_bins

    # Volume-space origin of every block, z-major to match the original order.
    block_inds = [
        (x0, y0, z0)
        for z0 in z_block_positions
        for y0 in y_block_positions
        for x0 in x_block_positions
    ]

    error_count = 0  # voxels whose angle fell into a rejected bin (see below)
    features = []
    for (bx, by, bz) in block_inds:
        xs = slice(int(bx), int(bx) + b_size_voxels)
        ys = slice(int(by), int(by) + b_size_voxels)
        zs = slice(int(bz), int(bz) + b_size_voxels)

        full_empty = vox_volume[xs, ys, zs]
        # Skip blocks that are entirely empty or entirely full: they carry no
        # gradient information.
        if np.sum(full_empty) == 0 or np.sum(full_empty) == full_empty.size:
            continue

        feature = np.zeros((b, b, b, theta_histogram_bins, phi_histogram_bins))
        t_weights = weights[xs, ys, zs]
        t_magnitudes = magnitudes[xs, ys, zs]
        t_theta = theta[xs, ys, zs]
        t_phi = phi[xs, ys, zs]

        for l in range(b_size_voxels):
            for m in range(b_size_voxels):
                for n in range(b_size_voxels):
                    # BUGFIX: the original used math.ceil(l / c) - 1, which
                    # maps the first voxel of each block (index 0) to cell -1
                    # and silently accumulated it into the *last* cell via
                    # negative indexing. Floor division gives the correct cell.
                    cell_pos_x = l // c
                    cell_pos_y = m // c
                    cell_pos_z = n // c
                    hist_pos_theta = math.ceil(t_theta[l, m, n] / t_hist_bins) - 1
                    hist_pos_phi = math.ceil(t_phi[l, m, n] / p_hist_bins) - 1
                    # NOTE(review): angles of exactly 0 yield bin index -1 and
                    # are rejected here, as in the original; rejected voxels
                    # are tallied in error_count.
                    if phi_histogram_bins >= hist_pos_phi > 0 and theta_histogram_bins >= hist_pos_theta > 0:
                        feature[cell_pos_x, cell_pos_y, cell_pos_z, hist_pos_theta, hist_pos_phi] += (
                            t_magnitudes[l, m, n] * t_weights[l, m, n])
                    else:
                        error_count += 1

        # L2-normalise the whole block descriptor, then flatten the two angular
        # axes into one histogram axis per cell.
        feature = np.reshape(feature, ((b * b * b), theta_histogram_bins, phi_histogram_bins))
        l2 = np.linalg.norm(feature)
        if l2 != 0:
            feature = feature / l2
        feature = np.reshape(feature, ((b * b * b), (theta_histogram_bins * phi_histogram_bins)))
        features.append(feature)

    return np.array(features)
| 43.778443 | 117 | 0.591164 |
a2ee6d19098aed822e580f589bbcc0c4df0bf0c1 | 320 | py | Python | tests/urls.py | skioo/django-datatrans | c2159b08935cd0c70355ca6e8ff92bbe86d372cd | [
"MIT"
] | 9 | 2017-09-12T12:45:30.000Z | 2022-03-30T13:53:57.000Z | tests/urls.py | skioo/django-datatrans | c2159b08935cd0c70355ca6e8ff92bbe86d372cd | [
"MIT"
] | null | null | null | tests/urls.py | skioo/django-datatrans | c2159b08935cd0c70355ca6e8ff92bbe86d372cd | [
"MIT"
] | 1 | 2021-11-08T10:21:01.000Z | 2021-11-08T10:21:01.000Z | from django.urls import include, path
from datatrans.views import example
urlpatterns = [
path(r'^datatrans/', include('datatrans.urls')),
path(r'^example/register-credit-card$', example.register_credit_card, name='example_register_credit_card'),
path(r'^example/pay$', example.pay, name='example_pay'),
]
| 32 | 111 | 0.7375 |
a2f252e2b9ab4a63f342c14ab8d8666d4956f841 | 11,160 | py | Python | gibbs/minimization.py | volpatto/gibbs | 776acff6166dd4fd3039d55074542d995ac91754 | [
"MIT"
] | 28 | 2019-05-25T14:50:00.000Z | 2022-01-18T00:54:22.000Z | gibbs/minimization.py | volpatto/gibbs | 776acff6166dd4fd3039d55074542d995ac91754 | [
"MIT"
] | 10 | 2019-06-15T06:07:14.000Z | 2021-09-01T04:32:50.000Z | gibbs/minimization.py | volpatto/gibbs | 776acff6166dd4fd3039d55074542d995ac91754 | [
"MIT"
] | 5 | 2019-08-04T05:37:34.000Z | 2022-01-18T10:10:40.000Z | import attr
import types
from typing import Union
from enum import Enum
import numpy as np
from scipy.optimize import differential_evolution
import pygmo as pg
| 38.088737 | 133 | 0.664606 |
a2f4994690266aa4a640429912d46124db104724 | 1,461 | py | Python | tests/unittests/types/test_array.py | TrigonDev/apgorm | 5b593bfb5a200708869e079248c25786608055d6 | [
"MIT"
] | 8 | 2022-01-21T23:07:29.000Z | 2022-03-26T12:03:57.000Z | tests/unittests/types/test_array.py | TrigonDev/apgorm | 5b593bfb5a200708869e079248c25786608055d6 | [
"MIT"
] | 22 | 2021-12-23T00:43:41.000Z | 2022-03-23T13:17:32.000Z | tests/unittests/types/test_array.py | TrigonDev/apgorm | 5b593bfb5a200708869e079248c25786608055d6 | [
"MIT"
] | 3 | 2022-01-15T20:58:33.000Z | 2022-01-26T21:36:13.000Z | # MIT License
#
# Copyright (c) 2021 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from apgorm.types import Array, Int # for subtypes
| 38.447368 | 79 | 0.750171 |
a2f56add77b1581d6619a3c899c2460cc7dc3102 | 137 | py | Python | cisco_support/__version__.py | rothdennis/cisco_support | c20b955794400eb565fa5c178749c2ee6ef7dc0f | [
"MIT"
] | 4 | 2021-09-09T07:24:13.000Z | 2022-03-04T19:51:01.000Z | cisco_support/__version__.py | rothdennis/cisco_support | c20b955794400eb565fa5c178749c2ee6ef7dc0f | [
"MIT"
] | null | null | null | cisco_support/__version__.py | rothdennis/cisco_support | c20b955794400eb565fa5c178749c2ee6ef7dc0f | [
"MIT"
] | null | null | null | __title__ = 'cisco_support'
__description__ = 'Cisco Support APIs'
__version__ = '0.1.0'
__author__ = 'Dennis Roth'
__license__ = 'MIT'
| 19.571429 | 38 | 0.737226 |
a2f8a7986f7bf085148eeaed0a44176810f81182 | 747 | py | Python | code/searchers.py | trunc8/mespp | 8348bdd0ba8f584ef7196c0064b8e5bafa38a0fb | [
"MIT"
] | 2 | 2021-07-07T17:01:17.000Z | 2022-03-30T05:28:44.000Z | code/searchers.py | trunc8/mespp | 8348bdd0ba8f584ef7196c0064b8e5bafa38a0fb | [
"MIT"
] | null | null | null | code/searchers.py | trunc8/mespp | 8348bdd0ba8f584ef7196c0064b8e5bafa38a0fb | [
"MIT"
] | 1 | 2021-07-07T17:00:54.000Z | 2021-07-07T17:00:54.000Z | #!/usr/bin/env python3
# trunc8 did this
import numpy as np | 25.758621 | 58 | 0.649264 |
a2fa1506f35030e5726f14dab7372d11ea530f9d | 1,015 | py | Python | vogue/api/api_v1/api.py | mayabrandi/vogue | 463e6417a9168eadb0d11dea2d0f97919494bcd3 | [
"MIT"
] | 1 | 2021-12-16T19:29:17.000Z | 2021-12-16T19:29:17.000Z | vogue/api/api_v1/api.py | mayabrandi/vogue | 463e6417a9168eadb0d11dea2d0f97919494bcd3 | [
"MIT"
] | 188 | 2018-10-25T06:13:17.000Z | 2022-02-25T19:47:06.000Z | vogue/api/api_v1/api.py | mayabrandi/vogue | 463e6417a9168eadb0d11dea2d0f97919494bcd3 | [
"MIT"
] | null | null | null | from fastapi import FastAPI
from vogue.api.api_v1.endpoints import (
insert_documents,
home,
common_trends,
sequencing,
genootype,
reagent_labels,
prepps,
bioinfo_covid,
bioinfo_micro,
bioinfo_mip,
update,
)
from vogue.settings import static_files
app = FastAPI()
app.mount(
"/static",
static_files,
name="static",
)
app.include_router(home.router, tags=["home"])
app.include_router(common_trends.router, tags=["common_trends"])
app.include_router(sequencing.router, tags=["sequencing"])
app.include_router(genootype.router, tags=["genotype"])
app.include_router(reagent_labels.router, tags=["index"])
app.include_router(prepps.router, tags=["preps"])
app.include_router(bioinfo_micro.router, tags=["bioinfo_micro"])
app.include_router(bioinfo_covid.router, tags=["bioinfo_covid"])
app.include_router(bioinfo_mip.router, tags=["bioinfo_mip"])
app.include_router(update.router, tags=["update"])
app.include_router(insert_documents.router, tags=["sample"])
| 27.432432 | 64 | 0.747783 |
a2fa916053116744cb58a54f835b741f35144a4f | 1,090 | py | Python | models/dgcnn.py | veronicatozzo/SimpleView | 70dbde727b25db8fdd9dc486ac1f74ff31a89821 | [
"BSD-3-Clause"
] | 95 | 2021-06-09T09:44:14.000Z | 2022-03-13T12:10:50.000Z | models/dgcnn.py | veronicatozzo/SimpleView | 70dbde727b25db8fdd9dc486ac1f74ff31a89821 | [
"BSD-3-Clause"
] | 7 | 2021-06-23T04:44:25.000Z | 2022-01-14T15:45:27.000Z | models/dgcnn.py | veronicatozzo/SimpleView | 70dbde727b25db8fdd9dc486ac1f74ff31a89821 | [
"BSD-3-Clause"
] | 13 | 2021-07-01T23:55:15.000Z | 2022-01-04T12:29:02.000Z |
import torch.nn as nn
import torch.nn.functional as F
from dgcnn.pytorch.model import DGCNN as DGCNN_original
from all_utils import DATASET_NUM_CLASS
| 27.25 | 74 | 0.534862 |
a2fcc2dcdf1e51df954863eb81bc306011453b3d | 283 | py | Python | atcoder/arc/a036.py | tomato-300yen/coding | db6f440a96d8c83f486005c650461a69f27e3926 | [
"MIT"
] | null | null | null | atcoder/arc/a036.py | tomato-300yen/coding | db6f440a96d8c83f486005c650461a69f27e3926 | [
"MIT"
] | null | null | null | atcoder/arc/a036.py | tomato-300yen/coding | db6f440a96d8c83f486005c650461a69f27e3926 | [
"MIT"
] | null | null | null | from collections import deque
N, K = map(int, input().split())
T = [int(input()) for _ in range(N)]
ans_dq = deque([0, 0, 0])
for i, t in enumerate(T):
ans_dq.append(t)
ans_dq.popleft()
if sum(ans_dq) < K and i > 1:
print(i + 1)
break
else:
print(-1)
| 21.769231 | 36 | 0.568905 |
a2fcecf1decf4817a91d5d880a0ea9320b043380 | 238 | py | Python | Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex029.py | Jhonattan-rocha/Meus-primeiros-programas | f5971b66c0afd049b5d0493e8b7a116b391d058e | [
"MIT"
] | null | null | null | Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex029.py | Jhonattan-rocha/Meus-primeiros-programas | f5971b66c0afd049b5d0493e8b7a116b391d058e | [
"MIT"
] | null | null | null | Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex029.py | Jhonattan-rocha/Meus-primeiros-programas | f5971b66c0afd049b5d0493e8b7a116b391d058e | [
"MIT"
] | null | null | null | velocidade = float(input("Digite a sua velocidade em Km/h: "))
if velocidade > 80:
amais = velocidade - 80
amais = amais*7
print("Voc foi multado, devera pagar uma multa de: R${:.2f}".format(amais))
print("FIM, no se mate")
| 34 | 80 | 0.663866 |
a2fdf1816d77bc5926536585a5ffc8b6a4ac1f23 | 3,746 | py | Python | research/radar-communication/dqn_agent.py | hieunq95/keras-rl | d965ea951220b5ede5ea1e11fab7d7eb45a8c2c5 | [
"MIT"
] | null | null | null | research/radar-communication/dqn_agent.py | hieunq95/keras-rl | d965ea951220b5ede5ea1e11fab7d7eb45a8c2c5 | [
"MIT"
] | null | null | null | research/radar-communication/dqn_agent.py | hieunq95/keras-rl | d965ea951220b5ede5ea1e11fab7d7eb45a8c2c5 | [
"MIT"
] | null | null | null | import numpy as np
import gym
import argparse
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Convolution2D
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
from rl.callbacks import FileLogger, ModelIntervalCheckpoint
from environment import AV_Environment
from config import test_parameters, transition_probability, unexpected_ev_prob, state_space_size, action_space_size
from logger import Logger
from AV_Processor import AVProcessor
TEST_ID = test_parameters['test_id']
NB_STEPS = test_parameters['nb_steps']
EPSILON_LINEAR_STEPS = test_parameters['nb_epsilon_linear']
TARGET_MODEL_UPDATE = test_parameters['target_model_update']
GAMMA = test_parameters['gamma']
# ALPHA = test_parameters['alpha']
ALPHA = 0.001
DOUBLE_DQN = False
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--env-name', type=str, default='AV_Radar-v1')
parser.add_argument('--weights', type=str, default=None)
args = parser.parse_args()
env = AV_Environment()
nb_actions = env.action_space.n
# policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05,
# nb_steps=EPSILON_LINEAR_STEPS)
policy = EpsGreedyQPolicy(eps=.1)
processor = AVProcessor(env)
memory = SequentialMemory(limit=50000, window_length=1)
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.nvec.shape))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(nb_actions, activation='linear'))
print(model.summary())
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
target_model_update=TARGET_MODEL_UPDATE, policy=policy, processor=processor,
enable_double_dqn=DOUBLE_DQN, gamma=GAMMA)
dqn.compile(Adam(lr=ALPHA), metrics=['mae'])
processor.add_agent(dqn)
print('********************* Start {}DQN - test-id: {} ***********************'.
format('DOUBLE-' if DOUBLE_DQN else '', TEST_ID))
print('************************************************************************** \n '
'**************************** Simulation parameters*********************** \n'
'{} \n {} \n {} \n {} \n {} \n'.format(transition_probability, unexpected_ev_prob, state_space_size,
action_space_size, test_parameters)
+ '*************************************************************************** \n')
if args.mode == 'train':
weights_filename = './logs/dqn_{}_weights_{}.h5f'.format(args.env_name, TEST_ID)
checkpoint_weights_filename = './logs/dqn_' + args.env_name + '_weights_{step}.h5f'
log_filename = './logs/{}dqn_{}_log_{}.json'.format('d-' if DOUBLE_DQN else '', args.env_name, TEST_ID)
callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=NB_STEPS/2)]
callbacks += [Logger(log_filename, environment=env, interval=100)]
dqn.fit(env, nb_steps=NB_STEPS, visualize=False, verbose=2, nb_max_episode_steps=None, callbacks=callbacks)
dqn.save_weights(weights_filename, overwrite=True)
dqn.test(env, nb_episodes=10, visualize=False)
elif args.mode == 'test':
weights_filename = './logs/dqn_{}_weights_{}.h5f'.format(args.env_name, TEST_ID)
if args.weights:
weights_filename = args.weights
dqn.load_weights(weights_filename)
dqn.test(env, nb_episodes=100, visualize=False)
print("****************************************"
" End of training {}-th "
"****************************************".format(TEST_ID))
| 45.682927 | 115 | 0.67165 |
a2fe2076a061b4411e718858d451c717a3acc756 | 318 | py | Python | Chapter01/displacy-save-as-image-1-4-5.py | indrasmartmob/Mastering-spaCy | 756876902eee8437d6d9ddcef2ba7ffabfc970a3 | [
"MIT"
] | 76 | 2021-07-07T14:32:42.000Z | 2022-03-27T17:15:15.000Z | Chapter01/displacy-save-as-image-1-4-5.py | indrasmartmob/Mastering-spaCy | 756876902eee8437d6d9ddcef2ba7ffabfc970a3 | [
"MIT"
] | 4 | 2021-08-18T18:08:23.000Z | 2022-03-27T03:30:27.000Z | Chapter01/displacy-save-as-image-1-4-5.py | indrasmartmob/Mastering-spaCy | 756876902eee8437d6d9ddcef2ba7ffabfc970a3 | [
"MIT"
] | 38 | 2021-07-09T22:23:38.000Z | 2022-03-12T07:11:37.000Z | #!/usr/bin/env python3
import spacy
from spacy import displacy
from pathlib import Path
nlp = spacy.load("en_core_web_md")
doc = nlp("I'm a butterfly.")
svg = displacy.render(doc, style="dep", jupyter=False)
filename = "butterfly.svg"
output_path = Path(filename)
output_path.open("w", encoding="utf-8").write(svg)
| 22.714286 | 54 | 0.735849 |
a2fe69feb718bafa1d3ea491a261e3b0356c764f | 3,485 | py | Python | mask_detector/opencv/camera_ver2.py | osamhack2021/AI_Mask_Detector | 1d71980bd7b7168a9d006f03325fb51783c7f877 | [
"MIT"
] | null | null | null | mask_detector/opencv/camera_ver2.py | osamhack2021/AI_Mask_Detector | 1d71980bd7b7168a9d006f03325fb51783c7f877 | [
"MIT"
] | null | null | null | mask_detector/opencv/camera_ver2.py | osamhack2021/AI_Mask_Detector | 1d71980bd7b7168a9d006f03325fb51783c7f877 | [
"MIT"
] | 1 | 2021-11-21T08:19:54.000Z | 2021-11-21T08:19:54.000Z | import cv2
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
model = "./AI_Mask_Detector/res10_300x300_ssd_iter_140000_fp16.caffemodel"
config = "./AI_Mask_Detector/deploy.prototxt"
# model = './AI_Mask_Detector/opencv_face_detector_uint8.pb'
# config = './AI_Mask_Detector/opencv_face_detector.pbtxt'
mask_model = tf.keras.models.load_model("./AI_Mask_Detector/model.h5")
probability_model = tf.keras.Sequential([mask_model])
width = 64
height = 64
# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture("./AI_Mask_Detector/demoVideo/test2.mp4")
if not cap.isOpened():
print("Camera open failed!")
exit()
net = cv2.dnn.readNet(model, config)
if net.empty():
print("Net open failed!")
exit()
categories = ["mask", "none"]
print("len(categories) = ", len(categories))
while True:
ret, frame = cap.read()
if ret:
img = cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB)
blob = cv2.dnn.blobFromImage(img, 1, (300, 300), (104, 177, 123))
net.setInput(blob)
detect = net.forward()
detect = detect[0, 0, :, :]
(h, w) = frame.shape[:2]
# print('--------------------------')
for i in range(detect.shape[0]):
confidence = detect[i, 2]
if confidence < 0.4:
break
x1 = int(detect[i, 3] * w)
y1 = int(detect[i, 4] * h)
x2 = int(detect[i, 5] * w)
y2 = int(detect[i, 6] * h)
# cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0))
margin = 0
face = img[y1 - margin : y2 + margin, x1 - margin : x2 + margin]
resize = cv2.resize(face, (width, height))
# print(x1, y1, x2, y2, width, height)
# cv2.imshow("frame1", resize)
# np_image_data = np.asarray(inp)
rgb_tensor = tf.convert_to_tensor(resize, dtype=tf.float32)
rgb_tensor /= 255.0
rgb_tensor = tf.expand_dims(rgb_tensor, 0)
#
predictions = probability_model.predict(rgb_tensor)
# print(categories[predictions[i][1]], ' ' , np.argmax(predictions[i]))
# lebel = categories[predictions[i]]
if predictions[0][0] > predictions[0][1]: # and predictions[0][0] > 0.7:
label = "Mask " + str(predictions[0][0])
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0))
cv2.putText(
frame,
label,
(x1, y1 - 1),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 255, 0),
1,
cv2.LINE_AA,
)
if predictions[0][0] < predictions[0][1]: # and predictions[0][1] > 0.7:
label = "No Mask " + str(predictions[0][1])
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255))
cv2.putText(
frame,
label,
(x1, y1 - 1),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 0, 255),
1,
cv2.LINE_AA,
)
# print(predictions[0][0], ' ', predictions[0][1])
cv2.imshow("frame", frame)
if cv2.waitKey(30) == 27:
break
else:
print("error")
cap.release()
cv2.destroyAllWindows()
| 29.786325 | 85 | 0.505595 |
a2ff595beb35cc3bf63e8eee3f852f028caee135 | 55,499 | py | Python | pipelines/head-pose-pipeline/training/models.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | pipelines/head-pose-pipeline/training/models.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | pipelines/head-pose-pipeline/training/models.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | import sys
import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from capsulelayers import CapsuleLayer
from capsulelayers import MatMulLayer
from loupe_keras import NetVLAD
sys.setrecursionlimit(2**20)
np.random.seed(2**10)
# Custom layers
# Note - Usage of Lambda layers prevent the convertion
# and the optimizations by the underlying math engine (tensorflow in this case)
# Capsule FSANetworks
# NetVLAD models
# // Metric models
| 40.658608 | 131 | 0.568479 |
0c0064090948d111bf7fd540d7adcc81adb3d655 | 2,537 | py | Python | remijquerytools/__init__.py | kdahlhaus/remi-jquery-tools | 3ecc78d6a39edc7a77b89dd8ed08649f759b503a | [
"Apache-2.0"
] | null | null | null | remijquerytools/__init__.py | kdahlhaus/remi-jquery-tools | 3ecc78d6a39edc7a77b89dd8ed08649f759b503a | [
"Apache-2.0"
] | null | null | null | remijquerytools/__init__.py | kdahlhaus/remi-jquery-tools | 3ecc78d6a39edc7a77b89dd8ed08649f759b503a | [
"Apache-2.0"
] | null | null | null | import remi.gui as gui
import os
import logging
log = logging.getLogger('remi.gui.remijquerytools.overlay')
def get_res_path():
    """Return the absolute path of the 'res' directory bundled with this library.

    The directory sits next to this module; static resources needed by the
    jQuery Tools widgets are resolved relative to it.
    """
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res')
| 32.948052 | 109 | 0.581395 |
0c01e08aaee863025867488824fa6692ef88b661 | 468 | py | Python | Python_Advanced_Softuni/Comprehensions_Exericises/venv/number_classification.py | borisboychev/SoftUni | 22062312f08e29a1d85377a6d41ef74966d37e99 | [
"MIT"
] | 1 | 2020-12-14T23:25:19.000Z | 2020-12-14T23:25:19.000Z | Python_Advanced_Softuni/Comprehensions_Exericises/venv/number_classification.py | borisboychev/SoftUni | 22062312f08e29a1d85377a6d41ef74966d37e99 | [
"MIT"
] | null | null | null | Python_Advanced_Softuni/Comprehensions_Exericises/venv/number_classification.py | borisboychev/SoftUni | 22062312f08e29a1d85377a6d41ef74966d37e99 | [
"MIT"
] | null | null | null | elements = [int(x) for x in input().split(', ')]
even_numbers = [x for x in elements if x % 2 == 0]
odd_numbers = [x for x in elements if x % 2 != 0]
positive = [x for x in elements if x >= 0]
negative = [x for x in elements if x < 0]
print(f"Positive: {', '.join(str(x) for x in positive)}")
print(f"Negative: {', '.join(str(x) for x in negative)}")
print(f"Even: {', '.join(str(x) for x in even_numbers)}")
print(f"Odd: {', '.join(str(x) for x in odd_numbers)}")
| 36 | 57 | 0.613248 |
0c02d2fcd975ca2fafbae393016b1ddc2ddcf6b5 | 2,048 | py | Python | src/probnum/type.py | ralfrost/probnum | 6b0988009a9dd7ecda87ba28c9d5c0b8019981b6 | [
"MIT"
] | null | null | null | src/probnum/type.py | ralfrost/probnum | 6b0988009a9dd7ecda87ba28c9d5c0b8019981b6 | [
"MIT"
] | 2 | 2020-12-28T19:37:16.000Z | 2020-12-28T19:37:31.000Z | src/probnum/type.py | admdev8/probnum | 792b6299bac247cf8b1b5056756f0f078855d83a | [
"MIT"
] | null | null | null | import numbers
from typing import Iterable, Tuple, Union
import numpy as np
########################################################################################
# API Types
########################################################################################

# Canonical, fully-normalised shape: a tuple of ints (what as_shape produces).
ShapeType = Tuple[int, ...]

RandomStateType = Union[np.random.RandomState, np.random.Generator]
"""Type of a random number generator."""

########################################################################################
# Argument Types
########################################################################################

# Accepted "integer-like" / "float-like" values at the public API boundary;
# both Python numbers and their NumPy scalar counterparts are allowed.
IntArgType = Union[int, numbers.Integral, np.integer]
FloatArgType = Union[float, numbers.Real, np.floating]

ShapeArgType = Union[IntArgType, Iterable[IntArgType]]
"""Type of a public API argument for supplying a shape. Values of this type should
always be converted into :class:`ShapeType` using the function
:func:`probnum.utils.as_shape` before further internal processing."""

DTypeArgType = Union[np.dtype, str]
"""Type of a public API argument for supplying a dtype. Values of this type should
always be converted into :class:`np.dtype` using the function
:func:`np.dtype` before further internal processing."""

ScalarArgType = Union[int, float, complex, numbers.Number, np.float_]
"""Type of a public API argument for supplying a scalar value. Values of this type
should always be converted into :class:`np.generic` using the function
:func:`probnum.utils.as_scalar` before further internal processing."""

# Anything usable to index an array-like object (``arr[key]``): ints, slices,
# index arrays, Ellipsis, and ``np.newaxis``/``None`` for axis insertion.
# NOTE: ``np.newaxis`` *is* ``None``, so those two union members are
# redundant; both are kept to document intent.
ArrayLikeGetitemArgType = Union[
    int,
    slice,
    np.ndarray,
    np.newaxis,
    None,
    type(Ellipsis),
    Tuple[Union[int, slice, np.ndarray, np.newaxis, None, type(Ellipsis)], ...],
]

RandomStateArgType = Union[None, int, np.random.RandomState, np.random.Generator]
"""Type of a public API argument for supplying a random number generator. Values of this
type should always be converted into :class:`RandomStateType` using the function
:func:`probnum.utils.as_random_state` before further internal processing."""
| 40.156863 | 88 | 0.626953 |
0c03aa3f4a41bc42ddd522aaf547cfa062e47c23 | 12,279 | py | Python | src/socialprofile/views.py | DLRSP/django-sp | 9079358a4fc054f1a5afb056ccfd6a8b8afb36fa | [
"MIT"
] | 1 | 2022-01-11T07:25:17.000Z | 2022-01-11T07:25:17.000Z | src/socialprofile/views.py | DLRSP/django-sp | 9079358a4fc054f1a5afb056ccfd6a8b8afb36fa | [
"MIT"
] | 16 | 2021-12-20T01:30:34.000Z | 2022-03-31T01:38:59.000Z | src/socialprofile/views.py | DLRSP/django-sp | 9079358a4fc054f1a5afb056ccfd6a8b8afb36fa | [
"MIT"
] | null | null | null | """Django Views for the socialprofile module"""
import json
import logging
import sweetify
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.generic import DeleteView, TemplateView, UpdateView
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope
from rest_framework import permissions, viewsets
from social_core.backends.oauth import BaseOAuth1, BaseOAuth2
from social_core.backends.utils import load_backends
from social_django.utils import psa
from .decorators import render_to
from .forms import SocialProfileForm
from .models import SocialProfile
# from .serializers import SocialProfileSerializer, GroupSerializer
from .serializers import SocialProfileSerializer
# Module-level logger for these views.
LOGGER = logging.getLogger(name="socialprofile.views")

# Where to send users after auth when no explicit "next" URL is supplied;
# may be overridden via settings.DEFAULT_RETURNTO_PATH.
DEFAULT_RETURNTO_PATH = getattr(settings, "DEFAULT_RETURNTO_PATH", "/")
# ViewSets define the view behavior.
# class GroupViewSet(viewsets.ModelViewSet):
# """Serialize Groups"""
# permission_classes = [permissions.IsAuthenticated, TokenHasScope]
# required_scopes = ['groups']
# queryset = Group.objects.all()
# serializer_class = GroupSerializer
def logout(request):
    """End the current user's session and redirect to the provider-selection
    page (URL name ``sp_select_page``)."""
    auth_logout(request)
    return redirect("sp_select_page")
def context(**extra):
    """Assemble the base template context for the auth pages.

    Starts from the set of configured social-auth backends and merges in
    any caller-supplied overrides (caller keys win on collision).
    """
    base = {
        "available_backends": load_backends(settings.AUTHENTICATION_BACKENDS),
    }
    return {**base, **extra}
class SelectAuthView(TemplateView):
    """
    Lets users choose how they want to request access.

    url: /select
    """

    template_name = "socialprofile/sp_account_select.html"

    def get_context_data(self, **kwargs):
        """Propagate the post-login redirect target into the template context."""
        LOGGER.debug("socialprofile.views.SelectAuthView.get_context_data")
        ctx = super().get_context_data(**kwargs)
        ctx.update(
            next_param=REDIRECT_FIELD_NAME,
            next_url=self.request.GET.get(REDIRECT_FIELD_NAME, DEFAULT_RETURNTO_PATH),
            available_backends=load_backends(settings.AUTHENTICATION_BACKENDS),
        )
        return ctx
class SocialProfileWelcome(TemplateView):
    """
    Render the welcome page shown after a new profile is created.

    url: /sp/new-profile
    """

    # Template used by TemplateView to render the page.
    template_name = "socialprofile/sp_new_profile.html"
# class SocialProfileView(DetailView):
| 33.186486 | 96 | 0.582784 |
0c042004c2d10428499c1e729e50d34d388b3eb9 | 519 | py | Python | sources/101_test.py | Painatalman/python101 | 9727ca03da46f81813fc2d338b8ba22fc0d8b78b | [
"Apache-2.0"
] | null | null | null | sources/101_test.py | Painatalman/python101 | 9727ca03da46f81813fc2d338b8ba22fc0d8b78b | [
"Apache-2.0"
] | null | null | null | sources/101_test.py | Painatalman/python101 | 9727ca03da46f81813fc2d338b8ba22fc0d8b78b | [
"Apache-2.0"
] | null | null | null | from fruits import validate_fruit
fruits = ["banana", "lemon", "apple", "orange", "batman"]
print fruits
list_fruits(fruits)
print fruits
| 22.565217 | 100 | 0.628131 |
0c04e662d416158f9b46ddaf7846e7bfe2b9fca2 | 3,439 | py | Python | tests/test_cms_config.py | Aiky30/djangocms-content-expiry | da7d348bcdafbf1a9862e4cc69a8363b3305a31a | [
"BSD-3-Clause"
] | null | null | null | tests/test_cms_config.py | Aiky30/djangocms-content-expiry | da7d348bcdafbf1a9862e4cc69a8363b3305a31a | [
"BSD-3-Clause"
] | 4 | 2021-09-27T10:15:13.000Z | 2021-11-23T17:18:04.000Z | tests/test_cms_config.py | Aiky30/djangocms-content-expiry | da7d348bcdafbf1a9862e4cc69a8363b3305a31a | [
"BSD-3-Clause"
] | 4 | 2021-09-06T20:13:45.000Z | 2021-10-02T15:00:58.000Z | from unittest.mock import Mock
from django.apps import apps
from django.contrib import admin
from django.test import RequestFactory, TestCase
from djangocms_moderation.cms_config import ModerationExtension
from djangocms_moderation.models import ModerationRequestTreeNode
from djangocms_content_expiry.cms_config import (
ContentExpiryAppConfig,
ContentExpiryExtension,
)
from djangocms_content_expiry.constants import CONTENT_EXPIRY_EXPIRE_FIELD_LABEL
| 40.458824 | 111 | 0.756325 |
0c0689f206c41c5e5d28c78e11446ccb008b17b1 | 4,466 | py | Python | tilequeue/format/OSciMap4/StaticVals/__init__.py | ducdk90/tilequeue | c664b5c89a9f0e6743405ab266aa9ca80b57806e | [
"MIT"
] | 29 | 2016-11-03T18:39:21.000Z | 2022-02-27T17:42:37.000Z | tilequeue/format/OSciMap4/StaticVals/__init__.py | ducdk90/tilequeue | c664b5c89a9f0e6743405ab266aa9ca80b57806e | [
"MIT"
] | 146 | 2016-07-07T16:41:07.000Z | 2021-12-11T00:27:20.000Z | tilequeue/format/OSciMap4/StaticVals/__init__.py | ducdk90/tilequeue | c664b5c89a9f0e6743405ab266aa9ca80b57806e | [
"MIT"
] | 28 | 2016-08-19T16:08:52.000Z | 2021-07-26T10:16:29.000Z | vals = {
"yes" : 0,
"residential" : 1,
"service" : 2,
"unclassified" : 3,
"stream" : 4,
"track" : 5,
"water" : 6,
"footway" : 7,
"tertiary" : 8,
"private" : 9,
"tree" : 10,
"path" : 11,
"forest" : 12,
"secondary" : 13,
"house" : 14,
"no" : 15,
"asphalt" : 16,
"wood" : 17,
"grass" : 18,
"paved" : 19,
"primary" : 20,
"unpaved" : 21,
"bus_stop" : 22,
"parking" : 23,
"parking_aisle" : 24,
"rail" : 25,
"driveway" : 26,
"8" : 27,
"administrative" : 28,
"locality" : 29,
"turning_circle" : 30,
"crossing" : 31,
"village" : 32,
"fence" : 33,
"grade2" : 34,
"coastline" : 35,
"grade3" : 36,
"farmland" : 37,
"hamlet" : 38,
"hut" : 39,
"meadow" : 40,
"wetland" : 41,
"cycleway" : 42,
"river" : 43,
"school" : 44,
"trunk" : 45,
"gravel" : 46,
"place_of_worship" : 47,
"farm" : 48,
"grade1" : 49,
"traffic_signals" : 50,
"wall" : 51,
"garage" : 52,
"gate" : 53,
"motorway" : 54,
"living_street" : 55,
"pitch" : 56,
"grade4" : 57,
"industrial" : 58,
"road" : 59,
"ground" : 60,
"scrub" : 61,
"motorway_link" : 62,
"steps" : 63,
"ditch" : 64,
"swimming_pool" : 65,
"grade5" : 66,
"park" : 67,
"apartments" : 68,
"restaurant" : 69,
"designated" : 70,
"bench" : 71,
"survey_point" : 72,
"pedestrian" : 73,
"hedge" : 74,
"reservoir" : 75,
"riverbank" : 76,
"alley" : 77,
"farmyard" : 78,
"peak" : 79,
"level_crossing" : 80,
"roof" : 81,
"dirt" : 82,
"drain" : 83,
"garages" : 84,
"entrance" : 85,
"street_lamp" : 86,
"deciduous" : 87,
"fuel" : 88,
"trunk_link" : 89,
"information" : 90,
"playground" : 91,
"supermarket" : 92,
"primary_link" : 93,
"concrete" : 94,
"mixed" : 95,
"permissive" : 96,
"orchard" : 97,
"grave_yard" : 98,
"canal" : 99,
"garden" : 100,
"spur" : 101,
"paving_stones" : 102,
"rock" : 103,
"bollard" : 104,
"convenience" : 105,
"cemetery" : 106,
"post_box" : 107,
"commercial" : 108,
"pier" : 109,
"bank" : 110,
"hotel" : 111,
"cliff" : 112,
"retail" : 113,
"construction" : 114,
"-1" : 115,
"fast_food" : 116,
"coniferous" : 117,
"cafe" : 118,
"6" : 119,
"kindergarten" : 120,
"tower" : 121,
"hospital" : 122,
"yard" : 123,
"sand" : 124,
"public_building" : 125,
"cobblestone" : 126,
"destination" : 127,
"island" : 128,
"abandoned" : 129,
"vineyard" : 130,
"recycling" : 131,
"agricultural" : 132,
"isolated_dwelling" : 133,
"pharmacy" : 134,
"post_office" : 135,
"motorway_junction" : 136,
"pub" : 137,
"allotments" : 138,
"dam" : 139,
"secondary_link" : 140,
"lift_gate" : 141,
"siding" : 142,
"stop" : 143,
"main" : 144,
"farm_auxiliary" : 145,
"quarry" : 146,
"10" : 147,
"station" : 148,
"platform" : 149,
"taxiway" : 150,
"limited" : 151,
"sports_centre" : 152,
"cutline" : 153,
"detached" : 154,
"storage_tank" : 155,
"basin" : 156,
"bicycle_parking" : 157,
"telephone" : 158,
"terrace" : 159,
"town" : 160,
"suburb" : 161,
"bus" : 162,
"compacted" : 163,
"toilets" : 164,
"heath" : 165,
"works" : 166,
"tram" : 167,
"beach" : 168,
"culvert" : 169,
"fire_station" : 170,
"recreation_ground" : 171,
"bakery" : 172,
"police" : 173,
"atm" : 174,
"clothes" : 175,
"tertiary_link" : 176,
"waste_basket" : 177,
"attraction" : 178,
"viewpoint" : 179,
"bicycle" : 180,
"church" : 181,
"shelter" : 182,
"drinking_water" : 183,
"marsh" : 184,
"picnic_site" : 185,
"hairdresser" : 186,
"bridleway" : 187,
"retaining_wall" : 188,
"buffer_stop" : 189,
"nature_reserve" : 190,
"village_green" : 191,
"university" : 192,
"1" : 193,
"bar" : 194,
"townhall" : 195,
"mini_roundabout" : 196,
"camp_site" : 197,
"aerodrome" : 198,
"stile" : 199,
"9" : 200,
"car_repair" : 201,
"parking_space" : 202,
"library" : 203,
"pipeline" : 204,
"true" : 205,
"cycle_barrier" : 206,
"4" : 207,
"museum" : 208,
"spring" : 209,
"hunting_stand" : 210,
"disused" : 211,
"car" : 212,
"tram_stop" : 213,
"land" : 214,
"fountain" : 215,
"hiking" : 216,
"manufacture" : 217,
"vending_machine" : 218,
"kiosk" : 219,
"swamp" : 220,
"unknown" : 221,
"7" : 222,
"islet" : 223,
"shed" : 224,
"switch" : 225,
"rapids" : 226,
"office" : 227,
"bay" : 228,
"proposed" : 229,
"common" : 230,
"weir" : 231,
"grassland" : 232,
"customers" : 233,
"social_facility" : 234,
"hangar" : 235,
"doctors" : 236,
"stadium" : 237,
"give_way" : 238,
"greenhouse" : 239,
"guest_house" : 240,
"viaduct" : 241,
"doityourself" : 242,
"runway" : 243,
"bus_station" : 244,
"water_tower" : 245,
"golf_course" : 246,
"conservation" : 247,
"block" : 248,
"college" : 249,
"wastewater_plant" : 250,
"subway" : 251,
"halt" : 252,
"forestry" : 253,
"florist" : 254,
"butcher" : 255}
| 17.111111 | 26 | 0.59382 |