hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5a1066f50cc73cde3a7e5f65b4cffbe41ddedc46 | 575 | py | Python | 2020/04/Teil 1 - V01.py | HeWeMel/adventofcode | 90acb10f03f21ef388673bbcf132d04972175970 | [
"MIT"
] | 1 | 2020-12-12T19:34:59.000Z | 2020-12-12T19:34:59.000Z | 2020/04/Teil 1 - V01.py | HeWeMel/adventofcode | 90acb10f03f21ef388673bbcf132d04972175970 | [
"MIT"
] | null | null | null | 2020/04/Teil 1 - V01.py | HeWeMel/adventofcode | 90acb10f03f21ef388673bbcf132d04972175970 | [
"MIT"
] | null | null | null | import sys
# Advent of Code 2020 day 4 part 1: count "valid" passports.
# Passports in input.txt are separated by blank lines; one passport's fields
# may span several consecutive lines, which are joined into a single string.
lines=[]
new=True
lc=0
with open('input.txt', 'r') as f:
    for line in f:
        line=line[:-1] # remove new line char
        # NOTE(review): this drops the last char unconditionally -- if the
        # file has no trailing newline the final field loses a character.
        if line=='':
            # Blank line: advance to the next passport record.
            lc+=1
            new=True
        else:
            if new:
                lines.append(line)
                new=False
            else:
                # Continuation line: append to the current passport.
                lines[lc] = lines[lc] + " " + line
# A passport is valid when every required key is present ('cid' is optional).
# NOTE(review): substring search, so a key occurring inside a value would
# also match -- acceptable for this puzzle's input format.
valids=0
for line in lines:
    valid=True
    for must in ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']:
        if line.find(must)==-1:
            valid=False
    if valid:
        valids += 1
print(valids)
| 19.166667 | 66 | 0.452174 |
5a10f64e9ccb60f772e7fb4e5d093560ebd8cdb4 | 9,366 | py | Python | src/falconpy/quick_scan.py | CrowdStrike/falconpy | e7245202224647a2c8d134e72f27d2f6c667a1ce | [
"Unlicense"
] | 111 | 2020-11-19T00:44:18.000Z | 2022-03-03T21:02:32.000Z | src/falconpy/quick_scan.py | CrowdStrike/falconpy | e7245202224647a2c8d134e72f27d2f6c667a1ce | [
"Unlicense"
] | 227 | 2020-12-05T03:02:27.000Z | 2022-03-22T14:12:42.000Z | src/falconpy/quick_scan.py | CrowdStrike/falconpy | e7245202224647a2c8d134e72f27d2f6c667a1ce | [
"Unlicense"
] | 47 | 2020-11-23T21:00:14.000Z | 2022-03-28T18:30:19.000Z | """Falcon Quick Scan API Interface Class
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import force_default, process_service_request, handle_single_argument
from ._payload import generic_payload_list, aggregate_payload
from ._service_class import ServiceClass
from ._endpoint._quick_scan import _quick_scan_endpoints as Endpoints
# The legacy name for this class does not conform to PascalCase / PEP8
# It is defined here for backwards compatibility purposes only.
Quick_Scan = QuickScan # pylint: disable=C0103
| 40.025641 | 105 | 0.604527 |
5a118345944c61aa57f158d2bab247572f49c59f | 353 | py | Python | images/auth-service/settings.d/00-settings.py | ESGF/esgf-docker | 95f5b76c85be65920810795484786a13865f4ac1 | [
"Apache-2.0"
] | 3 | 2018-04-16T00:58:30.000Z | 2020-10-07T17:58:02.000Z | images/auth-service/settings.d/00-settings.py | ESGF/esgf-docker | 95f5b76c85be65920810795484786a13865f4ac1 | [
"Apache-2.0"
] | 115 | 2017-01-10T20:12:42.000Z | 2021-03-03T16:11:48.000Z | images/auth-service/settings.d/00-settings.py | ESGF/esgf-docker | 95f5b76c85be65920810795484786a13865f4ac1 | [
"Apache-2.0"
] | 21 | 2017-08-28T15:20:24.000Z | 2021-02-09T00:08:49.000Z | # Application definition
# Minimal app set for the auth service: static files, sessions, and the
# local 'authenticate' app.
INSTALLED_APPS = [
    'django.contrib.staticfiles',
    'django.contrib.sessions',
    'authenticate',
]
ROOT_URLCONF = 'auth_service.urls'
WSGI_APPLICATION = 'auth_service.wsgi.application'
# Use a non database session engine
# Sessions are stored entirely in signed cookies, so no DB table is needed.
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# NOTE(review): the session cookie is also sent over plain HTTP -- confirm
# TLS is terminated upstream before relying on this in production.
SESSION_COOKIE_SECURE = False
| 23.533333 | 66 | 0.776204 |
5a12d4be2ea76f2966c05949af40280a754ab4f5 | 3,641 | py | Python | tests/test_gru.py | nsuke/hyrnn | b3efcc7b004d8402344467bf319f1d86324d11e5 | [
"Apache-2.0"
] | 73 | 2019-04-08T08:17:39.000Z | 2022-03-29T03:48:07.000Z | tests/test_gru.py | nsuke/hyrnn | b3efcc7b004d8402344467bf319f1d86324d11e5 | [
"Apache-2.0"
] | 10 | 2019-03-19T04:24:07.000Z | 2021-02-25T00:19:24.000Z | tests/test_gru.py | nsuke/hyrnn | b3efcc7b004d8402344467bf319f1d86324d11e5 | [
"Apache-2.0"
] | 14 | 2019-05-06T09:42:37.000Z | 2021-07-17T17:18:05.000Z | import hyrnn
import torch.nn
| 31.938596 | 84 | 0.651744 |
5a12f3dfdcd98b07c6a9e2f6f164d8d44b462308 | 1,190 | py | Python | ecogvis/signal_processing/common_referencing.py | NKI-ECOG/ecogVIS | f65212fc238e5b2588a4674a6aa1236f99e7d833 | [
"BSD-3-Clause"
] | 13 | 2020-04-01T22:39:24.000Z | 2022-03-04T13:27:51.000Z | ecogvis/signal_processing/common_referencing.py | NKI-ECOG/ecogVIS | f65212fc238e5b2588a4674a6aa1236f99e7d833 | [
"BSD-3-Clause"
] | 56 | 2020-04-01T14:27:21.000Z | 2022-03-23T21:33:06.000Z | ecogvis/signal_processing/common_referencing.py | luiztauffer/ecogVIS | c97e79a20b3af1074a3a5e1f1ad864a580c97e04 | [
"BSD-3-Clause"
] | 11 | 2020-05-15T17:48:53.000Z | 2022-02-01T23:55:12.000Z | from __future__ import division
import numpy as np
__all__ = ['subtract_CAR',
'subtract_common_median_reference']
def subtract_CAR(X, b_size=16):
    """
    Subtract the common average reference (CAR) from each block of
    ``b_size`` channels.

    Channels are grouped into consecutive blocks of ``b_size``; the
    NaN-aware mean of each block is removed from its channels. Any leftover
    channels (when the channel count is not a multiple of ``b_size``) form
    their own smaller reference block. The input array is not modified.
    """
    n_channels, n_samples = X.shape
    n_full_blocks = n_channels // b_size
    remainder = n_channels % b_size

    # Demean each complete block of b_size channels.
    full_part = X[:n_channels - remainder].copy()
    full_part = full_part.reshape((n_full_blocks, b_size, n_samples))
    full_part -= np.nanmean(full_part, axis=1, keepdims=True)

    if remainder == 0:
        return full_part.reshape((n_full_blocks * b_size, n_samples))

    # Leftover channels are referenced against their own mean.
    tail = X[n_channels - remainder:].copy()
    tail -= np.nanmean(tail, axis=0, keepdims=True)
    return np.vstack([full_part.reshape((n_full_blocks * b_size, n_samples)),
                      tail])
def subtract_common_median_reference(X, channel_axis=-2):
    """
    Subtract the common median reference across the whole grid.

    The NaN-aware median along ``channel_axis`` is removed from every
    channel, leaving the time axis untouched.

    Parameters
    ----------
    X : ndarray (..., n_channels, n_time)
        Data to common median reference.

    Returns
    -------
    Xp : ndarray (..., n_channels, n_time)
        Common median referenced data.
    """
    return X - np.nanmedian(X, axis=channel_axis, keepdims=True)
| 23.8 | 71 | 0.616807 |
5a13150c841953f716f3e772e7c48bc269734ed8 | 3,701 | py | Python | rackspace/heat_store/catalog/tests.py | rohithkumar-rackspace/rcbops | fb690bc528499bbf9aebba3ab0cce0b4dffd9e35 | [
"Apache-2.0"
] | null | null | null | rackspace/heat_store/catalog/tests.py | rohithkumar-rackspace/rcbops | fb690bc528499bbf9aebba3ab0cce0b4dffd9e35 | [
"Apache-2.0"
] | null | null | null | rackspace/heat_store/catalog/tests.py | rohithkumar-rackspace/rcbops | fb690bc528499bbf9aebba3ab0cce0b4dffd9e35 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import unittest
import mox
import six.moves.urllib.request as urlrequest
from six import StringIO
from solution import Solution
if __name__ == '__main__':
unittest.main()
| 39.795699 | 80 | 0.599838 |
5a13d8b3614f878639aab1f5c25f37f50a754ad3 | 17 | py | Python | tests/errors/semantic/ex4.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | tests/errors/semantic/ex4.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | tests/errors/semantic/ex4.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | x is 1
y is None
| 5.666667 | 9 | 0.647059 |
5a15278549975dbd09dff5e97bcd011523d42479 | 4,784 | py | Python | GetTopK.py | unsuthee/SemanticHashingWeekSupervision | 2b2498c70ad3184203855222efde861211edcaea | [
"MIT"
] | 19 | 2018-10-30T08:36:49.000Z | 2020-09-11T08:08:47.000Z | GetTopK.py | unsuthee/SemanticHashingWeekSupervision | 2b2498c70ad3184203855222efde861211edcaea | [
"MIT"
] | 1 | 2019-10-12T07:03:06.000Z | 2020-03-08T09:22:00.000Z | GetTopK.py | unsuthee/SemanticHashingWeekSupervision | 2b2498c70ad3184203855222efde861211edcaea | [
"MIT"
] | 6 | 2018-09-05T09:07:34.000Z | 2020-04-07T16:58:08.000Z | ################################################################################################################
# Author: Suthee Chaidaroon
# schaidaroon@scu.edu
################################################################################################################
import numpy as np
import os
from utils import *
from tqdm import tqdm
import scipy.io
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
#################################################################################################################
#################################################################################################################
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--gpunum")
parser.add_argument("--dataset")
parser.add_argument("--usetrain", action='store_true')
args = parser.parse_args()
# Pin the process to the requested GPU before any CUDA context is created.
if args.gpunum:
    print("Use GPU #:{}".format(args.gpunum))
    gpunum = args.gpunum
else:
    print("Use GPU #0 as a default gpu")
    gpunum = "0"
os.environ["CUDA_VISIBLE_DEVICES"]=gpunum
if args.dataset:
    print("load {} dataset".format(args.dataset))
    dataset = args.dataset
else:
    parser.error("Need to provide the dataset.")
# NOTE(review): the data path is hard-coded to ng20 -- the '--dataset'
# argument only affects the output filename. Confirm this is intentional.
data = Load_Dataset("data/ng20.mat")
print("num train:{} num tests:{}".format(data.n_trains, data.n_tests))
# Choose which split supplies the queries; candidate documents always come
# from the training split (see the retrieval call below this block).
if args.usetrain:
    print("use train as a query corpus")
    query_corpus = data.train
    out_fn = "bm25/{}_train_top101.txt".format(dataset)
else:
    print("use test as a query corpus")
    query_corpus = data.test
    out_fn = "bm25/{}_test_top101.txt".format(dataset)
print("save the result to {}".format(out_fn))
GetTopK_UsingCosineSim(out_fn, query_corpus, data.train, TopK=101, queryBatchSize=500, docBatchSize=100) | 36.519084 | 114 | 0.567517 |
5a16e96d11bf3bbabd290d8e7eb17ada9e705ea1 | 1,065 | py | Python | migrate_from_fnordcredit.py | stratum0/Matekasse | 9b48a8a07978a150e1df1b13b394791044cce82e | [
"MIT"
] | 1 | 2019-07-13T16:25:06.000Z | 2019-07-13T16:25:06.000Z | migrate_from_fnordcredit.py | stratum0/Matekasse | 9b48a8a07978a150e1df1b13b394791044cce82e | [
"MIT"
] | 10 | 2020-01-09T16:14:19.000Z | 2021-03-07T17:04:30.000Z | migrate_from_fnordcredit.py | stratum0/Matekasse | 9b48a8a07978a150e1df1b13b394791044cce82e | [
"MIT"
] | 1 | 2021-06-01T07:21:03.000Z | 2021-06-01T07:21:03.000Z | from matekasse import create_app, db
from matekasse.models import User, Transaction
import sqlite3
import argparse
# Copy user accounts (and, when re-enabled, transactions) from a fnordcredit
# SQLite database into the matekasse database.
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("-p", "--path", action='store', type=str, required=True, help="Path to fnordcredit database")
inp = parser.parse_args()

app = create_app()
ctx = app.app_context()
ctx.push()

# BUG FIX: 'conn' must exist before the try block; otherwise a failure in
# sqlite3.connect() made the finally-block raise NameError and mask the
# original error.
conn = None
try:
    conn = sqlite3.connect(inp.path)
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM user')
    rows = cursor.fetchall()
    for r in rows:
        # fnordcredit stores credit as fractional currency units;
        # matekasse uses integer cents, hence the * 100.
        user = r[5]
        credit = r[1] * 100
        newuser = User(username=user, credit=credit)
        db.session.add(newuser)
    '''cursor.execute('SELECT * FROM transaction')
    rows = cursor.fetchall()
    for r in rows:
        user = r[5]
        trans = r[2] * 100
        newtrans = Transaction(userid=user, credit=trans)
        db.session.add(newtrans)'''
    db.session.commit()
except sqlite3.Error as error:
    print(error)
finally:
    if conn:
        conn.close()
print('Migration complete')
ctx.pop()
exit()
| 25.97561 | 113 | 0.650704 |
5a17b8b4d053d2409ae3602977dee83dcbebc0b2 | 4,340 | py | Python | scripts/lc/ARES/testing/run_rose_tool.py | ouankou/rose | 76f2a004bd6d8036bc24be2c566a14e33ba4f825 | [
"BSD-3-Clause"
] | 488 | 2015-01-09T08:54:48.000Z | 2022-03-30T07:15:46.000Z | scripts/lc/ARES/testing/run_rose_tool.py | ouankou/rose | 76f2a004bd6d8036bc24be2c566a14e33ba4f825 | [
"BSD-3-Clause"
] | 174 | 2015-01-28T18:41:32.000Z | 2022-03-31T16:51:05.000Z | scripts/lc/ARES/testing/run_rose_tool.py | ouankou/rose | 76f2a004bd6d8036bc24be2c566a14e33ba4f825 | [
"BSD-3-Clause"
] | 146 | 2015-04-27T02:48:34.000Z | 2022-03-04T07:32:53.000Z | #!/usr/bin/env python
"""Runs a ROSE tool. If the tool does not return status 0, then runs the
corresponding non-ROSE compiler. Records whether the tool succeeded, in
passed.txt and failed.txt, but always returns status 0.
"""
import argparse
import inspect
import os
from support.local_logging import Logger
from support.runner import Runner
_SEPARATOR = "================================================================================"
if __name__ == '__main__':
main()
| 41.333333 | 118 | 0.653917 |
5a18641e63b3fcad6914df89d4ba92c48cbaed17 | 951 | py | Python | source/odp/migrations/0003_auto_20201121_0919.py | kssvrk/BhoonidhiODP | e222087629250ea4ccd1ae8d8903d9ff400c13b4 | [
"BSD-3-Clause"
] | null | null | null | source/odp/migrations/0003_auto_20201121_0919.py | kssvrk/BhoonidhiODP | e222087629250ea4ccd1ae8d8903d9ff400c13b4 | [
"BSD-3-Clause"
] | null | null | null | source/odp/migrations/0003_auto_20201121_0919.py | kssvrk/BhoonidhiODP | e222087629250ea4ccd1ae8d8903d9ff400c13b4 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-21 09:19
from django.db import migrations, models
| 32.793103 | 114 | 0.59306 |
5a18bfbbcf6c30bc2b6197bebec5c6f5638d264b | 935 | py | Python | test/auth/test_client_credentials.py | membranepotential/mendeley-python-sdk | 0336f0164f4d409309e813cbd0140011b5b2ff8f | [
"Apache-2.0"
] | 103 | 2015-01-12T00:40:51.000Z | 2022-03-29T07:02:06.000Z | test/auth/test_client_credentials.py | membranepotential/mendeley-python-sdk | 0336f0164f4d409309e813cbd0140011b5b2ff8f | [
"Apache-2.0"
] | 26 | 2015-01-10T04:08:41.000Z | 2021-02-05T16:31:37.000Z | test/auth/test_client_credentials.py | membranepotential/mendeley-python-sdk | 0336f0164f4d409309e813cbd0140011b5b2ff8f | [
"Apache-2.0"
] | 43 | 2015-03-04T18:11:06.000Z | 2022-03-13T02:33:34.000Z | from oauthlib.oauth2 import InvalidClientError, MissingTokenError
import pytest
from test import configure_mendeley, cassette
| 34.62963 | 115 | 0.764706 |
5a18ed1bc8e6b13c94274ea7e8252580407f9a6b | 338 | py | Python | problem/01000~09999/06137/6137.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/06137/6137.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/06137/6137.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | s=[]
# BOJ 6137 (Best Cow Line): read the characters, then repeatedly emit the
# lexicographically smaller of the two ends of the remaining sequence.
for i in range(int(input())):
    s.append(input())
cnt=0
while s:
    flag=True
    # Compare the sequence with its reverse, from the outside in, to decide
    # which end to take next.
    for i in range(len(s)//2):
        if s[i]<s[-(i+1)]:
            # Front reads smaller: take from the front.
            print(s[0],end='')
            s.pop(0)
            flag=False
            break
        elif s[-(i+1)]<s[i]:
            # Back reads smaller: take from the back.
            print(s[-1],end='')
            s.pop()
            flag=False
            break
    if flag:
        # Complete tie (palindromic remainder): either end works.
        print(s[-1],end='')
        s.pop()
    cnt+=1
if cnt%80==0: print() | 15.363636 | 29 | 0.52071 |
5a1bae372e9a9d499e2d0814cd4b789a6fdb51ad | 2,072 | py | Python | test/test_thirty.py | jakubtuchol/dailycodingproblem | 9f0f3193f1746e949e16febace5aa5622dc5d4dc | [
"MIT"
] | 1 | 2020-10-13T20:54:37.000Z | 2020-10-13T20:54:37.000Z | test/test_thirty.py | jakubtuchol/dailycodingproblem | 9f0f3193f1746e949e16febace5aa5622dc5d4dc | [
"MIT"
] | null | null | null | test/test_thirty.py | jakubtuchol/dailycodingproblem | 9f0f3193f1746e949e16febace5aa5622dc5d4dc | [
"MIT"
] | null | null | null | from src.thirty import edit_distance
from src.thirty import find_second_largest_node
from src.thirty import make_palindrome
from src.thirty import powerset
from src.data_structures import BinaryNode
| 22.769231 | 55 | 0.605695 |
5a1be255168c22e03a6a98004add6394315035a9 | 3,947 | py | Python | src/google_music_proto/musicmanager/utils.py | ddboline/google-music-proto | d3af3a1fe911edcd083482c9a6e8bde5a2902462 | [
"MIT"
] | null | null | null | src/google_music_proto/musicmanager/utils.py | ddboline/google-music-proto | d3af3a1fe911edcd083482c9a6e8bde5a2902462 | [
"MIT"
] | null | null | null | src/google_music_proto/musicmanager/utils.py | ddboline/google-music-proto | d3af3a1fe911edcd083482c9a6e8bde5a2902462 | [
"MIT"
] | null | null | null | __all__ = [
'generate_client_id',
'get_album_art',
'get_transcoder',
'transcode_to_mp3',
]
import os
import shutil
import subprocess
from base64 import b64encode
from binascii import unhexlify
from hashlib import md5
import audio_metadata
# The id is found by: getting md5sum of audio, base64 encode md5sum, removing trailing '='.
def get_transcoder():
	"""Return the path to a transcoder (ffmpeg or avconv) with MP3 support.

	Returns:
		str: Path of the first transcoder on the PATH whose build includes
			the ``libmp3lame`` MP3 encoder.

	Raises:
		ValueError: If neither ffmpeg nor avconv with MP3 support is found.
	"""

	transcoders = ['ffmpeg', 'avconv']
	transcoder_details = {}

	for transcoder in transcoders:
		command_path = shutil.which(transcoder)
		if command_path is None:
			transcoder_details[transcoder] = 'Not installed.'
			continue

		# '-codecs' lists every codec compiled into the binary.
		stdout = subprocess.run(
			[command_path, '-codecs'],
			stdout=subprocess.PIPE,
			stderr=subprocess.DEVNULL,
			universal_newlines=True,
		).stdout

		# 'disable-libmp3lame' in the build banner means lame support was
		# explicitly compiled out even though the string appears.
		mp3_encoding_support = (
			'libmp3lame' in stdout
			and 'disable-libmp3lame' not in stdout
		)

		if mp3_encoding_support:
			transcoder_details[transcoder] = "MP3 encoding support."
			break
		else:
			transcoder_details[transcoder] = "No MP3 encoding support."
	else:
		# BUG FIX: the details part was a plain string literal, so the
		# {transcoder_details} placeholder was never interpolated.
		raise ValueError(
			"ffmpeg or avconv must be in the path and support mp3 encoding."
			f"\nDetails: {transcoder_details}"
		)

	return command_path
| 23.777108 | 91 | 0.701292 |
5a1c4a115bd07e61146d8a14b7bb3639da60f1ea | 8,731 | py | Python | Other/SocialNetwork/Solver.py | lesyk/Evolife | 8e3dd1aab84061f7ce082f3a4b1bac0b2e31bc4a | [
"MIT"
] | null | null | null | Other/SocialNetwork/Solver.py | lesyk/Evolife | 8e3dd1aab84061f7ce082f3a4b1bac0b2e31bc4a | [
"MIT"
] | null | null | null | Other/SocialNetwork/Solver.py | lesyk/Evolife | 8e3dd1aab84061f7ce082f3a4b1bac0b2e31bc4a | [
"MIT"
] | null | null | null | ## {{{ http://code.activestate.com/recipes/303396/ (r1)
'''equation solver using attributes and introspection'''
from __future__ import division
## end of http://code.activestate.com/recipes/303396/ }}}
######### Example ############
##from math import cos
##
##def toto(x,A):
## return A-cos(x)
##
##T = Solver(toto)
##T.A = 0
##print 2 * T.x
######### Fin Example ############
def competence(BottomCompetence, Quality):
	# Linear interpolation between the competence floor and perfect skill:
	# Quality = 0 gives the floor, Quality = 1 gives 1.
	return BottomCompetence + Quality * (1 - BottomCompetence)
def Profit(b, K, friendQuality, r, NFriends):
	# Probability that at least one of NFriends helps successfully; the
	# friend at rank f contributes with geometrically discounted weight r**f.
	failure = 1
	for rank in range(NFriends):
		failure *= 1 - K * r ** rank * competence(b, friendQuality)
	return 1 - failure
def IntegralProfit(b, K, friendQuality, r, NFriends):
	# Riemann sum of Profit over friend qualities from 0 up to friendQuality,
	# in steps of about 0.01.
	# NOTE(review): the 100.01 / 100.1 constants differ slightly -- presumably
	# rounding guards; preserved verbatim.
	total = 0
	for step in range(int(friendQuality * 100.01)):
		total += Profit(b, K, step / 100.1, r, NFriends)
	return total / 100.01
def CompetitiveSignal(b, K, q, r, NFriends, cost):
	# Competitive signalling level for an individual of quality q.
	# BUG FIX: 'ralProfit' was an undefined name (truncated 'integralProfit'),
	# which made every call raise NameError.
	profit = Profit(b, K, q, r, NFriends)
	integralProfit = IntegralProfit(b, K, q, r, NFriends)
	return (competence(b, q) * profit - (1-b) * integralProfit) / cost
def CompetitiveSignal1FriendsB0(K, q, cost):
	# Closed form of the competitive signal: one friend, zero bottom
	# competence (b = 0).
	return K * q ** 2 / (2 * cost)
def CompetitiveSignal2FriendsB0(K, q, r, cost):
	# Closed form of the competitive signal: two friends, zero bottom
	# competence (b = 0).
	cubic_term = -2 * K ** 2 * r * q ** 3 / (3 * cost)
	quadratic_term = K * q ** 2 * (1 + r) / (2 * cost)
	return cubic_term + quadratic_term
def CompetitiveSignal3Friends(b, q, r, cost):
	# special case 3 friends
	# Fully expanded closed form of the 3-friend competitive signal for
	# general bottom competence b. Kept as one expression on purpose; see
	# CompetitiveSignal3FriendsB0 below for the simpler b = 0 case.
	return (1-b)*((1+r+r**2)*(b*q+(1-b)*q**2/2) - 2*r*(1+r+r**2)*(b**2*q + (1-b)**2*q**3/3 +2*b*(1-b)*q**2/2) + 3*r**3 *(b**3*q + 3*b**2*(1-b)*q**2/2 + 3*b*(1-b)**2*q**3/3 + (1-b)**3*q**4/4) )/cost
def CompetitiveSignal3FriendsB0(q, r, cost):
	# Closed form of the competitive signal: three friends, zero bottom
	# competence (b = 0).
	rank_weight = 1 + r + r ** 2
	quadratic = rank_weight * q ** 2 / (2 * cost)
	cubic = 2 * r * rank_weight * q ** 3 / (3 * cost)
	quartic = 3 * r ** 3 * q ** 4 / (4 * cost)
	return quadratic - cubic + quartic
def Equil2Friends(b, K, C, eta, r, deltag):
	# for two friends
	# Equilibrium condition specialised to two friends; a root of this
	# function in one variable gives the signalling threshold.
	# NOTE(review): 'ro', 'bh' and 'bl' are computed but unused by the
	# active return -- earlier formulations kept for reference.
	ro = 0
	S_eta = competence(b, eta)
	S_tau = competence(b,(1+eta)/2)
	bh = K * S_tau *(1+r) - K**2 * r * S_tau**2 - C * deltag
	bl = (1 - (1 - (1-ro)*K*S_tau - ro * K * S_eta)*(1 - K*r*(1-ro)*S_tau - ro*K*r*S_eta)) + C * deltag
	# return bh-bl
	#return (1-S_eta)*(1 + r/2 - 3*r*S_eta/2) - 4 * C * deltag
	return K*(1-b)*(1-eta)*(1+r-K*r*(2*b + (1-b)*(3*eta+1)/2))/4 - C*deltag
def EquilManyFriends(b, K, C, eta, r, deltag, NF):
	# Equilibrium condition for NF friends: the profit gained by signalling
	# at the mid-quality tau = (1+eta)/2 rather than at threshold eta must
	# offset twice the learning cost C*deltag.
	S_eta = competence(b, eta)
	S_tau = competence(b,(1+eta)/2)
	return Profit(b,K,S_tau,r,NF) - Profit(b,K,S_eta,r,NF) - 2*C*deltag
def SubEquilManyFriends(b, K, C, eta, tau, theta, r, deltag, NF):
	# Sub-equilibrium condition for a second threshold theta below eta.
	# NOTE(review): 'S_eta' is unused by the active return statement -- the
	# commented variant divided the cost term by it.
	S_eta = competence(b, eta)
	S_tau = competence(b, tau)
	SubMiddle = competence(b, (theta+eta)/2)
##	return Profit(b,K,S_tau,r,NF) - Profit(b,K,SubMiddle,r,NF) - 2*C*deltag / S_eta
	return Profit(b,K,S_tau,r,NF) - Profit(b,K,SubMiddle,r,NF) - 2*C*deltag
def UniformSignalB0(K, C, eta, r, deltag, NF):
	# Uniform signal level above threshold eta, valid only for zero bottom
	# competence -- hence the local override of b below.
	b = 0 # otherwise false
	S_eta = competence(b, eta)
	S_tau = competence(b,(1+eta)/2)
	# Pu: average profit between the threshold and mid-quality signallers.
	Pu = (Profit(b,K,S_tau,r,NF) + Profit(b,K,S_eta,r,NF)) /2
	# Pc: integrated profit of the competitive (quality-revealing) strategy.
	Pc = IntegralProfit(b, K, eta, r, NF)
	return ( S_eta * Pu - Pc ) / C
def UniformBenefitB0(K, C, eta, theta, r, deltag, NF, sm, ro):
	# Expected benefit of sending the uniform signal 'sm', shifted by 'ro',
	# for an individual of quality theta (zero-bottom-competence case only).
	# NOTE(review): 'S_eta' is computed but unused here.
	b = 0 # otherwise false
	S_eta = competence(b, eta)
	S_theta = competence(b, theta)
	S_tau = competence(b,(1+eta)/2)
	# Probability weight of being treated as a mid-quality (tau) signaller.
	Ptau = (1 + ro + ro*ro/4)/2
	#Ptau = (1 + ro)/2
	return Ptau * Profit(b,K,S_tau,r,NF) + (1-Ptau) * Profit(b,K,theta,r,NF) - (C*sm+ro*deltag)/S_theta
def DiffBenefitB0(K, C, eta, theta, r, deltag, NF, sm, ro):
	# Net benefit of the uniform strategy over the competitive integral.
	# NOTE(review): 'b' is not a parameter here -- it resolves to the module
	# level global assigned in the main script below; confirm this is intended.
	S_theta = competence(b, theta)
	return UniformBenefitB0(K, C, eta, theta, r, deltag, NF, sm, ro) \
		- IntegralProfit(b, K, theta, r, NF)/S_theta
# ---- Main script (Python 2): solve the signalling equilibria numerically.
# Judging by the example at the top of the file, 'Solver' appears to invert
# a function of one unknown: set every attribute but one, then reading the
# missing attribute triggers the numerical solve -- TODO confirm against
# the Solver class definition.
Equil = Solver(EquilManyFriends)
#Equil = Solver(Equil2Friends)
Equil.deltag = deltag = 1.2 * 0.11 # Learning noise
Equil.b = b = 0.0 # BottomCompetence
Equil.K = K = 1.0
Equil.r = r = 0.6 # RankEffect
Equil.NF = NF = 2 # Number of friends
Equil.C = C = 0.6 # Cost
#Equil.ro = 0.0 # shift from uniform signal
Equil.eta = 0.1 # threshold for uniform signal (initialization)
ETA = Equil.eta
OffSet = 0
print Equil
#print '%d\t%d' % (int(round(100* ETA)),(100*CompetitiveSignal(b,K,ETA, r, NF, C)))
#print 'Eta: %d\t competitive signal in Eta: %d' % (int(round(100* ETA)),(100*CompetitiveSignal3FriendsB0(ETA, r, C))),
print 'Eta: %d\t competitive signal in Eta: %d' % (int(round(100* ETA)),(100*CompetitiveSignal2FriendsB0(1,ETA, r, C))),
# sm = CompetitiveSignal3FriendsB0(ETA, r, C)
sm = UniformSignalB0(K, C, ETA, r, deltag, NF)
print 'sm: %d' % (100 * sm),
# Second solver: sub-equilibrium threshold theta below eta.
SubEquil = Solver(SubEquilManyFriends)
SubEquil.deltag = deltag # Learning noise
SubEquil.b = b # BottomCompetence
SubEquil.K = K
SubEquil.r = r # RankEffect
SubEquil.NF = NF # Number of friends
SubEquil.C = C # Cost
SubEquil.eta = ETA
SubEquil.tau = ETA
SubEquil.theta = 0.1 # initialization
THETA = SubEquil.theta
print 'Theta: %d' % (100 * THETA),
### 2nd iteration
##SubEquil.eta = THETA
##SubEquil.theta = 0.1 # initialization
##THTHETA = SubEquil.theta
##print 'ThTheta: %d' % (100 * THTHETA),
print
#print SubEquil
"""
for q in range(1,int(round(100* THETA)),1):
# print CompetitiveSignal3Friends(0,q/100.0,0.6,0.6),
print "%01.1f\t" % (100 * CompetitiveSignal1FriendsB0(K,q/100.0,C)),
#print "%01.1f\t" % (10000 * CompetitiveSignal3FriendsB0(q/100.0,r,C)/q),
for q in range(int(round(100* THETA)),101,1):
print "%01.1f\t" % (100 * sm),
#print "%01.1f\t" % (10000 * sm/q),
print
"""
##for q in range(1,int(round(100* ETA)),1):
## print "%01.2f\t" % (100 * IntegralProfit(b,K,q/100.0,r,NF)),
##print
##for ro in range(-190, 190, 1):
## Equil.ro = ro/100.0
## Equil.eta = 0.01
## ETA = Equil.eta
## print '%d\t' % (int(round(100* ETA))),
##print
##for dtheta in range(-5, 10):
## theta = ETA - dtheta / 100.0
## print theta
## for ro in range(-5,5,1):
## print "%01.1f\t" % (100 * DiffBenefitB0(K, C, ETA, theta, r, deltag, NF, sm, ro/100.0)),
## print
#print "%01.1f\t" % (100 * DiffBenefitB0(K, C, ETA, ETA, r, deltag, NF, sm, 0.0))
__author__ = 'Dessalles'
| 31.293907 | 195 | 0.611499 |
5a1c596e4aa4f0daea1821382fed5edc2f1a2f2c | 15,459 | py | Python | server/graph.py | Alpacron/vertex-cover | cfdace128f1578f9613e30990b9a87cc64ffb988 | [
"MIT"
] | null | null | null | server/graph.py | Alpacron/vertex-cover | cfdace128f1578f9613e30990b9a87cc64ffb988 | [
"MIT"
] | 15 | 2021-04-03T08:28:58.000Z | 2021-06-07T15:08:08.000Z | server/graph.py | Alpacron/vertex-cover | cfdace128f1578f9613e30990b9a87cc64ffb988 | [
"MIT"
] | 1 | 2021-05-21T13:16:51.000Z | 2021-05-21T13:16:51.000Z | import json
import random
| 37.982801 | 113 | 0.559545 |
5a1caac07eb9f441668b6c4d0592a3fd8fa4aefc | 576 | py | Python | ex4.py | AyeAyeZin/python_exercises | 77079dcd7809dd2967180ffd30df0166dd53edb4 | [
"MIT"
] | null | null | null | ex4.py | AyeAyeZin/python_exercises | 77079dcd7809dd2967180ffd30df0166dd53edb4 | [
"MIT"
] | null | null | null | ex4.py | AyeAyeZin/python_exercises | 77079dcd7809dd2967180ffd30df0166dd53edb4 | [
"MIT"
] | null | null | null | cars=100
cars_in_space=5
drivers=20
pasengers=70
car_not_driven=cars-drivers
cars_driven=drivers
carpool_capacity=cars_driven*space_in_a_car
average_passengers_percar=passengers/cars_driven
print("There are", cars,"cars availble")
print("There are only",drivers,"drivers availble")
print("There will be",car_not_driven,"empty cars today")
print("There are",cars_in_space,"space availble in car")
print("We can transport",carpool_capacity,"peopletoday.")
print("We have", passengers,"to carpool today.")
print("We need to put about", average_passengers_per_car,"in each car.")
| 36 | 72 | 0.805556 |
5a1cb185553a265ef90a6017854334865e3cc339 | 304 | py | Python | python_docs/05Functions/01Definition.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | python_docs/05Functions/01Definition.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | python_docs/05Functions/01Definition.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | # Definio de funo
# Programa principal
a = int(input('Digite um valor para A: '))
b = int(input('Digite um valor para B: '))
soma(a, b)
| 25.333333 | 63 | 0.582237 |
5a1d0c02ec27af98a78a24a6f4e896b2268b6a0f | 852 | py | Python | python/number.py | Dahercode/datalumni-test | 9587400bddafd1c32e97655727c5d3dbbfd17574 | [
"MIT"
] | 1 | 2020-02-18T16:56:38.000Z | 2020-02-18T16:56:38.000Z | python/number.py | Dahercode/datalumni-test | 9587400bddafd1c32e97655727c5d3dbbfd17574 | [
"MIT"
] | null | null | null | python/number.py | Dahercode/datalumni-test | 9587400bddafd1c32e97655727c5d3dbbfd17574 | [
"MIT"
] | null | null | null | # Your code goes here
tab=[]
for i in range(1000) :
tab.append(i)
tab2=[]
for i in range(len(tab)):
if sum([ int(c) for c in str(tab[i]) ]) <= 10:
tab2.append(tab[i])
tab3=[]
for i in range(len(tab2)):
a=str(tab2[i])
if a[len(a)-2] == '4':
tab3.append(tab2[i])
tab4=[]
for i in range(len(tab3)):
if len(str(tab3[i]))>=2 :
tab4.append(tab3[i])
tab5=[]
for i in range(len(tab4)):
a=str(tab4[i])
if a.find('7')==-1 and a.find('1')==-1:
tab5.append(tab4[i])
tab6=[]
for i in range(len(tab5)):
a=str(tab5[i])
if ((int(a[0])+int(a[1]))%2) != 0:
tab6.append(tab5[i])
tab7=[]
for i in range(len(tab6)):
a=str(tab6[i])
if str(len(a)) == a[len(a)-1]:
tab7.append(tab6[i])
print(tab7)
mystery_number=tab7[0]
print(f'Le nombre mystre est le : {mystery_number}')
| 18.933333 | 53 | 0.543427 |
5a1d385aaac2b104c89e97a052215f1dccd44141 | 3,885 | py | Python | backend/src/baserow/contrib/database/migrations/0016_token_tokenpermission.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | backend/src/baserow/contrib/database/migrations/0016_token_tokenpermission.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | backend/src/baserow/contrib/database/migrations/0016_token_tokenpermission.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.11 on 2020-10-23 08:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 32.647059 | 88 | 0.344916 |
5a1f561a631ec5529e54bc7090d1958be5eb6f6f | 1,639 | py | Python | setup.py | davidharvey1986/rrg | 26b4658f14279af21af1a61d57e9936daf315a71 | [
"MIT"
] | 2 | 2019-11-18T12:51:09.000Z | 2019-12-11T03:13:51.000Z | setup.py | davidharvey1986/rrg | 26b4658f14279af21af1a61d57e9936daf315a71 | [
"MIT"
] | 5 | 2017-06-09T10:06:27.000Z | 2019-07-19T11:28:18.000Z | setup.py | davidharvey1986/rrg | 26b4658f14279af21af1a61d57e9936daf315a71 | [
"MIT"
] | 2 | 2017-07-19T15:48:33.000Z | 2017-08-09T16:07:20.000Z | #!/usr/local/bin/python3
import sys,os,string,glob,subprocess
from setuptools import setup,Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
import numpy
# Long description shown on the PyPI project page.
long_description = """\
This module uses the RRG method to measure the shapes of galaxies
in Hubble Space Telescope data
"""
# Release recipe kept for reference:
#sudo python3 setup.py sdist upload -r pypi
version='0.1.2'
INCDIRS=['.']
# NOTE(review): INCDIRS, numpy and the imported setuptools command classes
# are unused in this file -- possibly left over from a C-extension build.
packages = ['pyRRG', 'RRGtools','asciidata']
package_dir = {'RRGtools':'./lib/RRGtools',
               'pyRRG':'./src',
               'asciidata':'./lib/asciidata'}
# Ship PSF libraries, SExtractor config files and pickled models with the
# pyRRG package.
package_data = {'pyRRG': ['psf_lib/*/*','sex_files/*','*.pkl']}
setup ( name = "pyRRG",
        version = version,
        author = "David Harvey",
        author_email = "david.harvey@epfl.ch",
        description = "pyRRG module",
        license = 'MIT',
        packages = packages,
        package_dir = package_dir,
        package_data = package_data,
        scripts = ['scripts/pyRRG'],
        url = 'https://github.com/davidharvey1986/pyRRG', # use the URL to the github repo
        download_url = 'https://github.com/davidharvey1986/pyRRG/archive/'+version+'.tar.gz',
        install_requires=['scikit-learn',\
                          'numpy', \
                          'ipdb', 'pyraf',\
                          'scipy'],
        )
| 32.78 | 101 | 0.494814 |
5a2032ec76e617ec97b415d06f3a42408d534a65 | 635 | py | Python | restbed/core/api.py | mr-tenders/restbed | 68d36536286203048ce01f1467d3db7ee108bebb | [
"MIT"
] | null | null | null | restbed/core/api.py | mr-tenders/restbed | 68d36536286203048ce01f1467d3db7ee108bebb | [
"MIT"
] | null | null | null | restbed/core/api.py | mr-tenders/restbed | 68d36536286203048ce01f1467d3db7ee108bebb | [
"MIT"
] | null | null | null | """
restbed core api
"""
import pyinsane2
from typing import List
| 22.678571 | 63 | 0.63937 |
5a20458f16a895f14563ad81b494f0d3c1292dbf | 736 | py | Python | secure_notes_client/thread_pool.py | rlee287/secure-notes-client | 56d5fcce1d2eeb46de22aac63131fe7214b6f185 | [
"MIT"
] | null | null | null | secure_notes_client/thread_pool.py | rlee287/secure-notes-client | 56d5fcce1d2eeb46de22aac63131fe7214b6f185 | [
"MIT"
] | 4 | 2019-07-10T01:34:12.000Z | 2019-08-20T01:52:31.000Z | secure_notes_client/thread_pool.py | rlee287/secure-notes-client | 56d5fcce1d2eeb46de22aac63131fe7214b6f185 | [
"MIT"
] | null | null | null | from concurrent.futures import ThreadPoolExecutor
import time
from PySide2.QtCore import QCoreApplication
thread_pool=None
#TODO: find a less hacky way to keep events processed
| 26.285714 | 58 | 0.762228 |
5a2274118fccaff1a7fc9becbc4b24e208209e91 | 10,463 | py | Python | Fairness_attack/data_utils.py | Ninarehm/attack | 0d5a6b842d4e81484540151d879036e9fe2184f1 | [
"MIT"
] | 8 | 2021-03-08T17:13:42.000Z | 2022-03-31T00:57:53.000Z | Fairness_attack/data_utils.py | lutai14/attack | 773024c7b86be112521a2243f2f809a54891c81f | [
"MIT"
] | null | null | null | Fairness_attack/data_utils.py | lutai14/attack | 773024c7b86be112521a2243f2f809a54891c81f | [
"MIT"
] | 1 | 2022-02-10T22:36:11.000Z | 2022-02-10T22:36:11.000Z | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import numpy as np
import scipy.sparse as sparse
import defenses
import upper_bounds
# Can speed this up if necessary
# Can speed this up if necessary
def project_onto_slab(X, Y, v, radii, centroids, class_map):
"""
v^T x needs to be within radius of v^T centroid.
v is 1 x d and normalized.
"""
v = np.reshape(v / np.linalg.norm(v), (1, -1))
for y in set(Y):
idx = class_map[y]
radius = radii[idx]
centroid = centroids[idx, :]
# If v^T x is too large, then dists_along_v is positive
# If it's too small, then dists_along_v is negative
dists_along_v = (X[Y == y, :] - centroid).dot(v.T)
shifts_along_v = np.reshape(
dists_along_v - np.clip(dists_along_v, -radius, radius),
(1, -1))
X[Y == y, :] -= shifts_along_v.T.dot(v)
print("Number of (%s) points projected onto slab: %s" % (y, np.sum(np.abs(dists_along_v) > radius)))
return X
| 31.610272 | 136 | 0.581 |
5a23d3f4e52679a350233bbde834e4fd8f3310ec | 74 | py | Python | pytracetable/__init__.py | filwaitman/pytracetable | eb884953e179fc65677a9e3b3c70fde1b1439ccb | [
"MIT"
] | 1 | 2016-02-10T20:28:00.000Z | 2016-02-10T20:28:00.000Z | pytracetable/__init__.py | filwaitman/pytracetable | eb884953e179fc65677a9e3b3c70fde1b1439ccb | [
"MIT"
] | 1 | 2020-05-27T18:12:10.000Z | 2020-05-27T18:12:10.000Z | pytracetable/__init__.py | filwaitman/pytracetable | eb884953e179fc65677a9e3b3c70fde1b1439ccb | [
"MIT"
] | null | null | null | from pytracetable.core import tracetable
__all__ = [
'tracetable',
]
| 12.333333 | 40 | 0.716216 |
5a24938eab3876854f2631917fd72abe26cefe64 | 1,518 | py | Python | quandl_data_retriever/server.py | fabiomolinar/quandl-data-retriever | d9359922cb222ac519f7d9e4dd892bbcf6b1b2d0 | [
"MIT"
] | null | null | null | quandl_data_retriever/server.py | fabiomolinar/quandl-data-retriever | d9359922cb222ac519f7d9e4dd892bbcf6b1b2d0 | [
"MIT"
] | null | null | null | quandl_data_retriever/server.py | fabiomolinar/quandl-data-retriever | d9359922cb222ac519f7d9e4dd892bbcf6b1b2d0 | [
"MIT"
] | null | null | null | """ Server module
Quandl API limits:
Authenticated users have a limit of 300 calls per 10 seconds,
2,000 calls per 10 minutes and a limit of 50,000 calls per day.
"""
import urllib
import logging
from twisted.internet import reactor
from twisted.web.client import Agent, readBody
from . import settings
from . import resources
logger = logging.getLogger(settings.LOG_NAME + ".server")
if __name__ == "__main__":
main() | 26.172414 | 94 | 0.614625 |
5a24a53e97cdff184ba28a85fbb3b5ee4e244277 | 4,696 | py | Python | actingweb/deprecated_db_gae/db_subscription_diff.py | gregertw/actingweb | e1c8f66451f547c920c64c4e2a702698e3a0d299 | [
"BSD-3-Clause"
] | null | null | null | actingweb/deprecated_db_gae/db_subscription_diff.py | gregertw/actingweb | e1c8f66451f547c920c64c4e2a702698e3a0d299 | [
"BSD-3-Clause"
] | null | null | null | actingweb/deprecated_db_gae/db_subscription_diff.py | gregertw/actingweb | e1c8f66451f547c920c64c4e2a702698e3a0d299 | [
"BSD-3-Clause"
] | null | null | null | from builtins import object
from google.appengine.ext import ndb
import logging
"""
DbSubscriptionDiff handles all db operations for a subscription diff
DbSubscriptionDiffList handles list of subscriptions diffs
Google datastore for google is used as a backend.
"""
__all__ = [
'DbSubscriptionDiff',
'DbSubscriptionDiffList',
]
| 33.304965 | 134 | 0.5296 |
5a24c50dca0ab02ce229e044f402eb5085a1288a | 1,703 | py | Python | azure-mgmt-iothub/azure/mgmt/iothub/models/ip_filter_rule.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-iothub/azure/mgmt/iothub/models/ip_filter_rule.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-iothub/azure/mgmt/iothub/models/ip_filter_rule.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
| 36.234043 | 77 | 0.603641 |
5a29b2a7e94aa859d4fcd87428416a71deaf7e01 | 551 | py | Python | 4344.py | yzkim9501/Baekjoon | 222e55d0bd65cbb66f1f5486652ad8c697817844 | [
"Unlicense"
] | null | null | null | 4344.py | yzkim9501/Baekjoon | 222e55d0bd65cbb66f1f5486652ad8c697817844 | [
"Unlicense"
] | null | null | null | 4344.py | yzkim9501/Baekjoon | 222e55d0bd65cbb66f1f5486652ad8c697817844 | [
"Unlicense"
] | null | null | null | # 90% . .
# C .
# N(1 N 1000, N ) , N . 0 , 100 .
# .
t=int(input())
for _ in range(t):
a=list(map(int,input().split()))
c=0
for j in range(1,a[0]+1):
if a[j]>avg(a):
c+=1
print(str('{:,.3f}'.format(round(c/a[0]*100,3)))+"%")
| 25.045455 | 114 | 0.555354 |
5a29f8551225fbef514f169502222d4f73af2984 | 4,531 | py | Python | tests/dualtor/test_standby_tor_upstream_mux_toggle.py | AndoniSanguesa/sonic-mgmt | bac8b9bf7c51008ceab75e83ce68fa9473a7d2ec | [
"Apache-2.0"
] | 1 | 2021-09-24T08:40:57.000Z | 2021-09-24T08:40:57.000Z | tests/dualtor/test_standby_tor_upstream_mux_toggle.py | AndoniSanguesa/sonic-mgmt | bac8b9bf7c51008ceab75e83ce68fa9473a7d2ec | [
"Apache-2.0"
] | null | null | null | tests/dualtor/test_standby_tor_upstream_mux_toggle.py | AndoniSanguesa/sonic-mgmt | bac8b9bf7c51008ceab75e83ce68fa9473a7d2ec | [
"Apache-2.0"
] | null | null | null | import pytest
import logging
import ipaddress
import json
import re
import time
from tests.common.dualtor.dual_tor_mock import *
from tests.common.helpers.assertions import pytest_assert as pt_assert
from tests.common.dualtor.dual_tor_utils import rand_selected_interface, verify_upstream_traffic, get_crm_nexthop_counter
from tests.common.utilities import compare_crm_facts
from tests.common.config_reload import config_reload
from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports
from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service, run_icmp_responder
logger = logging.getLogger(__file__)
pytestmark = [
pytest.mark.topology('t0'),
pytest.mark.usefixtures('apply_mock_dual_tor_tables', 'apply_mock_dual_tor_kernel_configs', 'run_garp_service', 'run_icmp_responder')
]
PAUSE_TIME = 10
def get_l2_rx_drop(host, itfs):
"""
Return L2 rx packet drop counter for given interface
"""
res = {}
stdout = host.shell("portstat -j")['stdout']
match = re.search("Last cached time was.*\n", stdout)
if match:
stdout = re.sub("Last cached time was.*\n", "", stdout)
data = json.loads(stdout)
return int(data[itfs]['RX_DRP'])
| 41.568807 | 154 | 0.670492 |
5a2a01adbfb1b632775069e902a5a1facd9c2f69 | 3,308 | py | Python | birdsong_recognition/dataset.py | YingyingF/birdsong_recognition | 4f8a2ccb900898a02d4454a5f1c206125f23fa44 | [
"Apache-2.0"
] | null | null | null | birdsong_recognition/dataset.py | YingyingF/birdsong_recognition | 4f8a2ccb900898a02d4454a5f1c206125f23fa44 | [
"Apache-2.0"
] | null | null | null | birdsong_recognition/dataset.py | YingyingF/birdsong_recognition | 4f8a2ccb900898a02d4454a5f1c206125f23fa44 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: dataset.ipynb (unless otherwise specified).
__all__ = ['load_mp3', 'get_sample_label', 'preprocess_file', 'pad_by_zeros', 'split_file_by_window_size',
'wrapper_split_file_by_window_size', 'create_dataset_fixed_size', 'get_spectrogram', 'add_channel_dim']
# Cell
# Cell
# Cell
# Cell
# Cell
# Cell
# Cell
# Cell
# Cell | 39.380952 | 145 | 0.703144 |
5a2a02d3be8c76f34df0d751d6f767892052893d | 492 | py | Python | Lib/objc/_SplitKit.py | kanishpatel/Pyto | feec7a1a54f635a6375fa7ede074ff35afbfbb95 | [
"MIT"
] | null | null | null | Lib/objc/_SplitKit.py | kanishpatel/Pyto | feec7a1a54f635a6375fa7ede074ff35afbfbb95 | [
"MIT"
] | null | null | null | Lib/objc/_SplitKit.py | kanishpatel/Pyto | feec7a1a54f635a6375fa7ede074ff35afbfbb95 | [
"MIT"
] | null | null | null | '''
Classes from the 'SplitKit' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
PodsDummy_SplitKit = _Class('PodsDummy_SplitKit')
InstantPanGestureRecognizer = _Class('SplitKit.InstantPanGestureRecognizer')
HandleView = _Class('SplitKit.HandleView')
SPKSplitViewController = _Class('SPKSplitViewController')
| 21.391304 | 76 | 0.731707 |
5a2a102330d36f9fe8e0e169c14680aef835ac84 | 3,743 | py | Python | wiki_music/gui_lib/search_and_replace.py | marian-code/wikipedia-music-tags | e8836c23b7b7e43661b59afd1bfc18d381b95d4a | [
"MIT"
] | 5 | 2019-01-28T21:53:14.000Z | 2020-06-27T08:52:36.000Z | wiki_music/gui_lib/search_and_replace.py | marian-code/wikipedia-music-tags | e8836c23b7b7e43661b59afd1bfc18d381b95d4a | [
"MIT"
] | 4 | 2019-01-15T16:33:59.000Z | 2020-05-20T08:09:02.000Z | wiki_music/gui_lib/search_and_replace.py | marian-code/wikipedia-music-tags | e8836c23b7b7e43661b59afd1bfc18d381b95d4a | [
"MIT"
] | 1 | 2020-04-15T11:00:20.000Z | 2020-04-15T11:00:20.000Z | """Module controling search and replace tab."""
import logging
from wiki_music.constants import GUI_HEADERS
from wiki_music.gui_lib import BaseGui, CheckableListModel
from wiki_music.gui_lib.qt_importer import QMessageBox, QPushButton, QIcon, QStyle
__all__ = ["Replacer"]
log = logging.getLogger(__name__)
log.debug("finished gui search & replace imports")
| 35.647619 | 82 | 0.6428 |
5a2a33ed323999913f0d3da3c440981176e3bcd7 | 159 | py | Python | Dashboard with Django/updates/forms.py | reddyprasade/Data-Analysis-with-Python- | 2440e23486856eea5556c8262467b3a618032bc2 | [
"MIT"
] | 1 | 2021-06-29T23:15:05.000Z | 2021-06-29T23:15:05.000Z | Dashboard with Django/updates/forms.py | reddyprasade/Data-Analysis-with-Python- | 2440e23486856eea5556c8262467b3a618032bc2 | [
"MIT"
] | null | null | null | Dashboard with Django/updates/forms.py | reddyprasade/Data-Analysis-with-Python- | 2440e23486856eea5556c8262467b3a618032bc2 | [
"MIT"
] | 1 | 2021-12-20T10:04:53.000Z | 2021-12-20T10:04:53.000Z | from django.forms import ModelForm
from updates.models import Post
| 17.666667 | 34 | 0.72956 |
5a2a74b028d05464645069f119b32c24c0d83261 | 1,965 | py | Python | main.py | neuroidss/eeglstm | 693d39347afb3c7fa8272e813ce1f841b892a212 | [
"MIT"
] | 21 | 2018-11-17T11:46:46.000Z | 2021-12-15T01:54:31.000Z | main.py | neuroidss/eeglstm | 693d39347afb3c7fa8272e813ce1f841b892a212 | [
"MIT"
] | 1 | 2018-05-15T14:04:49.000Z | 2018-05-15T14:04:49.000Z | main.py | neuroidss/eeglstm | 693d39347afb3c7fa8272e813ce1f841b892a212 | [
"MIT"
] | 4 | 2018-12-21T03:16:20.000Z | 2020-05-02T09:37:39.000Z | #%% [markdown]
#
# We will load EEG data from the lab and attemp to build a classifier that distinguishes between learners and non-learners
#%%
import mne
import numpy as np
import os.path
import glob
import re
import pandas as pd
# try to enable cuda support to speed up filtering, make sure the MNE_USE_CUDA environment variable is set to true
mne.cuda.init_cuda()
DATA_DIR = "../../EEGdata/Fish_5Block"
event_dict = {
"cat":{
"1": 20,
"2": 21
}
}
data_path = os.path.join(DATA_DIR, "Tail/Learner/126670_EXP_FISH.bdf")
test_data = mne.io.read_raw_edf(data_path, preload=True)
# find the related behavioral data
participant_number = re.search(r"^(\d+)_EXP_FISH", os.path.basename(data_path))[1]
behav_path = [filename for filename in glob.glob(os.path.join(DATA_DIR, "EXP_fish2_Tomy/Cat_data/*.csv")) if participant_number in filename][0]
behav_df = pd.read_csv(behav_path)
learning_curve = behav_df["Resultat"].rolling(20).mean() # our in house definition of current learning performance
learning_time = (learning_curve >= 0.8).idxmax() # using a 80% correct categorization threshold
#%% [markdown]
# We now need to find the event times and give the same code to all stimulus presentation events since we don't want to differentiate among category 1 or 2
#%%
events = mne.find_events(test_data)
events = np.array(events)
events[events[:,2]==event_dict["cat"]["2"],2] = 20
events = events.tolist()
#%% [markdown]
# visualize data
#%%
#test_data.plot()
#%%
test_data.set_eeg_reference("average", projection=False)
test_data.filter(0.1, 50.0, n_jobs="cuda")
stim_epochs = mne.Epochs(test_data, events=events, event_id={"stimulus presentation":20}, tmin=-0.2, tmax=0.8, reject={"eeg":200-6})
# do basic cleaning by bandpass filtering, we will need to load the data
stim_epochs.load_data()
stim_epochs.resample(256)
#%% building the pytorch model
pass
| 31.190476 | 156 | 0.707379 |
5a2a96f1206233db3ee9862dbb3187153e48e3d9 | 241 | py | Python | ex066.py | dsjocimar/python | 5716f46a9fa7f64aa78a39df9c262c5392571340 | [
"MIT"
] | null | null | null | ex066.py | dsjocimar/python | 5716f46a9fa7f64aa78a39df9c262c5392571340 | [
"MIT"
] | null | null | null | ex066.py | dsjocimar/python | 5716f46a9fa7f64aa78a39df9c262c5392571340 | [
"MIT"
] | null | null | null | # Exerccio 066
soma = total = 0
while True:
n = int(input('Digite um valor [999 para parar]: '))
if n == 999:
break
soma += n
total += 1
print(f'O total de nmeros digitados foi {total} e a soma deles vale {soma}') | 24.1 | 78 | 0.59751 |
5a2aef76ad354c4dafd74c644c7cdf56a923d14d | 749 | py | Python | test/test_api_data_utils.py | onap/optf-osdf | 2b9e7f4fca3d510a201283a8561f6ff3424f5fd6 | [
"Apache-2.0"
] | 3 | 2019-04-15T13:33:57.000Z | 2019-10-21T17:19:19.000Z | test/test_api_data_utils.py | onap/optf-osdf | 2b9e7f4fca3d510a201283a8561f6ff3424f5fd6 | [
"Apache-2.0"
] | null | null | null | test/test_api_data_utils.py | onap/optf-osdf | 2b9e7f4fca3d510a201283a8561f6ff3424f5fd6 | [
"Apache-2.0"
] | null | null | null | import json
import os
from osdf.utils import api_data_utils
from collections import defaultdict
BASE_DIR = os.path.dirname(__file__)
with open(os.path.join(BASE_DIR, "placement-tests/request.json")) as json_data:
req_json = json.load(json_data)
| 34.045455 | 147 | 0.750334 |
5a2b70864ff65608d3a0ed95eba0ce2781b1326a | 10,396 | py | Python | Model_SIR/no.py | AP-2020-1S/covid-19-guaya-kilera | f307d17b6540e881a93596ecd4b7857f5d7d9a18 | [
"CC-BY-3.0",
"MIT"
] | null | null | null | Model_SIR/no.py | AP-2020-1S/covid-19-guaya-kilera | f307d17b6540e881a93596ecd4b7857f5d7d9a18 | [
"CC-BY-3.0",
"MIT"
] | null | null | null | Model_SIR/no.py | AP-2020-1S/covid-19-guaya-kilera | f307d17b6540e881a93596ecd4b7857f5d7d9a18 | [
"CC-BY-3.0",
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import integrate, optimize
from scipy.signal import savgol_filter
from dane import population as popu
dias_restar = 4 # Los ltimos das de informacin que no se tienen en cuenta
dias_pred = 31 # Das sobre los cules se har la prediccin a corto plazo
media_movil = 4 # Das que se promediaran en las series para mitigar errores en los datos
Ciudades_dicc = {'Bog': 'Bogot D.C.', 'Mde': 'Medelln', 'Cal': 'Cali', 'Brr': 'Barranquilla',
'Ctg': 'Cartagena de Indias'}
Ciudades = ['Bog','Mde','Cal', 'Brr', 'Ctg']
Covid_Col = pd.read_csv("https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD", sep=',',
encoding='utf-8', low_memory=False)
#%%
limpieza_datos()
#%%
#%%
casos()
#%%
# t = np.linspace(0,400,400)
# import plotly.offline as py
#
# for ciudad in Ciudades:
# py.iplot([{
# 'x': t,
# 'y': globals()['real_' + str(ciudad)]
# }], filename='cufflinks/multiple-lines-on-same-chart')
#
# max(globals()['real_' + str(ciudad)])
#%%
valores = [(popt[0],popt[1])]
import cufflinks as cf
import plotly.offline as py
py.iplot([{
'x':t,
'y': modelo(*valor),
'name': str(valor),
} for valor in valores], filename = 'cufflinks/multiple-lines-on-same-chart')
# plt.figure(figsize=(12, 8))
# #plt.plot(modelo(0.42715777117416, 0.36645292847392247)[0])
# plt.plot(modelo(0.42715777117416, 0.36645292847392247)[1])
# # plt.plot(modelo(0.42715777117416, 0.36645292847392247)[2])
# plt.ylabel('Poblacin')
# plt.legend(['Susceptible', 'Infectados', 'Recuperados'])
# plt.xlabel('Das')
# plt.show()
| 47.907834 | 194 | 0.60379 |
5a2c7e2ea60e80d086779df6d65e7f9d20374ff7 | 733 | py | Python | backend/cw_backend/views/admin_courses.py | veronikks/pyladies-courseware | e1151a704159141e0b1cb649c17cfdd5ca5f689b | [
"MIT"
] | null | null | null | backend/cw_backend/views/admin_courses.py | veronikks/pyladies-courseware | e1151a704159141e0b1cb649c17cfdd5ca5f689b | [
"MIT"
] | null | null | null | backend/cw_backend/views/admin_courses.py | veronikks/pyladies-courseware | e1151a704159141e0b1cb649c17cfdd5ca5f689b | [
"MIT"
] | null | null | null | import aiohttp
from aiohttp import web
from aiohttp_session import get_session
import asyncio
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
routes = web.RouteTableDef()
| 28.192308 | 76 | 0.721692 |
5a2e4a10cc2ee782907da20e988df75437125628 | 751 | py | Python | duplicate_csv.py | AronFreyr/de1-project | 9e95346db9a6955ee017d59c73c83251d529d8ff | [
"Apache-2.0"
] | null | null | null | duplicate_csv.py | AronFreyr/de1-project | 9e95346db9a6955ee017d59c73c83251d529d8ff | [
"Apache-2.0"
] | null | null | null | duplicate_csv.py | AronFreyr/de1-project | 9e95346db9a6955ee017d59c73c83251d529d8ff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[7]:
import os
write_to_csv_file = 'million_song_subset.csv'
csv_file_read = open(write_to_csv_file,'r')
csv_file_write = open(write_to_csv_file,'a')
while True:
next_line = csv_file_read.readline()
if not next_line:
break
csv_file_size = os.path.getsize(write_to_csv_file)
print("file size: {}".format(str(csv_file_size/1048576)))
# if the csv file larger than or euqal to 5GB exist for loop
if csv_file_size >= 5368709120:
break
if next_line.startswith("song_id"):
continue
csv_file_write.write(next_line)
print("appended: {}".format(next_line))
csv_file_read.close()
csv_file_write.close()
# In[ ]:
| 17.465116 | 64 | 0.660453 |
5a2e5a469bcfb11fd51f01901cb6f4cfecb26b08 | 4,444 | py | Python | src/main/python/graphing-scripts/utils.py | DistributedSystemsGroup/cluster-scheduler-simulator | 9733dc644736dd0f8c2e1baff09efeb680d6a4d8 | [
"BSD-3-Clause"
] | 2 | 2018-06-28T04:31:55.000Z | 2019-06-24T02:18:24.000Z | src/main/python/graphing-scripts/utils.py | DistributedSystemsGroup/cluster-scheduler-simulator | 9733dc644736dd0f8c2e1baff09efeb680d6a4d8 | [
"BSD-3-Clause"
] | null | null | null | src/main/python/graphing-scripts/utils.py | DistributedSystemsGroup/cluster-scheduler-simulator | 9733dc644736dd0f8c2e1baff09efeb680d6a4d8 | [
"BSD-3-Clause"
] | 3 | 2017-06-22T11:32:41.000Z | 2019-10-28T01:22:26.000Z | # Copyright (c) 2013, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution. Neither the name of the University of California, Berkeley
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission. THIS
# SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import os
from matplotlib import use, rc
use('Agg')
import matplotlib.pyplot as plt
# plot saving utility function
# plt.savefig("%s.%s" % (filename_base, fmt), format=fmt)
# Append e to the array at position (i,k).
# d - a dictionary of dictionaries of arrays, essentially a 2d dictionary.
# i, k - essentially a 2 element tuple to use as the key into this 2d dict.
# e - the value to add to the array indexed by key (i,k).
# Append e to the array at position (i,k).
# d - a dictionary of dictionaries of arrays, essentially a 2d dictionary.
# i, k - essentially a 2 element tuple to use as the key into this 2d dict.
# e - the value to add to the array indexed by key (i,k).
| 33.666667 | 84 | 0.645815 |
5a2f26092de22be2a78e6f158531b00a44283d31 | 4,351 | py | Python | ror/CopelandVoter.py | jakub-tomczak/ror | cf9ab38a2d66f4816a1289b9726911960059fce7 | [
"MIT"
] | null | null | null | ror/CopelandVoter.py | jakub-tomczak/ror | cf9ab38a2d66f4816a1289b9726911960059fce7 | [
"MIT"
] | null | null | null | ror/CopelandVoter.py | jakub-tomczak/ror | cf9ab38a2d66f4816a1289b9726911960059fce7 | [
"MIT"
] | null | null | null | from typing import List, Tuple
import numpy as np
import pandas as pd
import os
import logging
| 54.3875 | 154 | 0.652264 |
5a2fba5afd104e89bb7c06d80b25ac575e16cde2 | 2,528 | py | Python | app/auth/forms/__init__.py | jg-725/IS219-FlaskAppProject | 316aa298eda1bcda766ed085bb6f26ca7da7dfee | [
"BSD-3-Clause"
] | null | null | null | app/auth/forms/__init__.py | jg-725/IS219-FlaskAppProject | 316aa298eda1bcda766ed085bb6f26ca7da7dfee | [
"BSD-3-Clause"
] | null | null | null | app/auth/forms/__init__.py | jg-725/IS219-FlaskAppProject | 316aa298eda1bcda766ed085bb6f26ca7da7dfee | [
"BSD-3-Clause"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import validators
from wtforms.fields import *
| 32.410256 | 120 | 0.679589 |
5a31ca41c47a23fa18c352e7e70fee2a9750f1a1 | 11,220 | py | Python | tern/analyze/default/dockerfile/lock.py | mzachar/tern | ac9dea4c907f27c9a3b7d85d79806c8fdab1d7e7 | [
"BSD-2-Clause"
] | 2 | 2020-05-21T00:00:36.000Z | 2020-12-28T20:43:25.000Z | tern/analyze/default/dockerfile/lock.py | mzachar/tern | ac9dea4c907f27c9a3b7d85d79806c8fdab1d7e7 | [
"BSD-2-Clause"
] | null | null | null | tern/analyze/default/dockerfile/lock.py | mzachar/tern | ac9dea4c907f27c9a3b7d85d79806c8fdab1d7e7 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
Docker specific functions - used when trying to retrieve packages when
given a Dockerfile
"""
import logging
import os
import re
import sys
from tern.classes.docker_image import DockerImage
from tern.classes.notice import Notice
from tern.utils import constants
from tern.utils import general
from tern.report import errors
from tern.report import formats
from tern.analyze.default import filter as fltr
from tern.analyze.default.command_lib import command_lib
from tern.analyze.default.dockerfile import parse
from tern.utils.general import check_image_string
# dockerfile
dockerfile_global = ''
# dockerfile commands
docker_commands = []
# global logger
logger = logging.getLogger(constants.logger_name)
def load_docker_commands(dfobj):
'''Given a dockerfile object get a persistent list of docker commands'''
if not os.path.isfile(dfobj.filepath):
raise IOError('{} does not exist'.format(dfobj.filepath))
global docker_commands
docker_commands = dfobj.structure
global dockerfile_global
dockerfile_global = dfobj.filepath
def get_dockerfile_base():
'''Get the base image object from the dockerfile base instructions
1. get the instructions around FROM
2. get the base image and tag
3. Make notes based on what the image and tag rules are
4. Return an image object and the base instructions string
NOTE: Potential ARG values in the Dockerfile object have already been
expanded at this point. However, Dockerfile rules say that if no
--build-arg is passed during docker build and ARG has no default, the
build will fail. We assume for now that we will not be passing build
arguments in which case if there is no default ARG, we will raise an
exception indicating that since the build arguments are determined by
the user we will not be able to determine what the user wanted'''
try:
# Get the base image tag.
# NOTE: ARG values have already been expanded.
base_image_string, from_line = get_base_image_tag(docker_commands)
# check for scratch
if base_image_string == 'scratch':
# there is no base image to pull
raise ValueError("Cannot pull 'scratch' base image.")
# there should be some image object here
base_image = DockerImage(base_image_string)
base_image.origins.add_notice_origin(from_line)
base_image.name = base_image_string.split(':')[0]
# check if there is a tag
if not check_image_string(base_image_string):
message_string = errors.dockerfile_no_tag.format(
dockerfile_line=from_line)
base_image.origins.add_notice_to_origins(
docker_commands, Notice(message_string, 'warning'))
base_image.tag = 'latest'
else:
base_image.tag = base_image_string.split(':')[1]
# check if the tag is 'latest'
if base_image.tag == 'latest':
message_string = errors.dockerfile_using_latest.format(
dockerfile_line=from_line)
base_image.origins.add_notice_to_origins(
docker_commands, Notice(message_string, 'warning'))
return base_image, from_line
except ValueError as e:
logger.fatal("%s", errors.cannot_parse_base_image.format(
dockerfile=dockerfile_global, error_msg=e))
sys.exit(1)
def get_base_image_tag(dockerfile_lines):
'''Get the instructions around FROM, return the base image string
and the line containing FROM command'''
base_image_string = ''
from_line = ''
for i, cmd_dict in enumerate(dockerfile_lines):
if cmd_dict['instruction'] == 'FROM':
# Account for "as" keyword in FROM line
base_image_string = re.split(" as", cmd_dict['value'],
flags=re.IGNORECASE)[0]
from_line = 'FROM' + base_image_string
# Check that potential ARG values has default
if i != 0 and dockerfile_lines[i-1]['instruction'] == 'ARG':
if len(dockerfile_lines[i-1]['value'].split('=')) == 1:
raise ValueError('No ARG default value to pass to '
'FROM command in Dockerfile.')
break
return base_image_string, from_line
def get_dockerfile_image_tag():
'''Return the image and tag used to build an image from the dockerfile'''
image_tag_string = constants.image + parse.tag_separator + \
constants.tag
return image_tag_string
def created_to_instruction(created_by):
'''The 'created_by' key in a Docker image config gives the shell
command that was executed unless it is a #(nop) instruction which is
for the other Docker directives. Convert this line into a Dockerfile
instruction'''
instruction = re.sub('/bin/sh -c ', '', created_by).strip()
instruction = re.sub(re.escape('#(nop) '), '', instruction).strip()
first = instruction.split(' ').pop(0)
if first and first not in parse.directives and \
'RUN' not in instruction:
instruction = 'RUN ' + instruction
return instruction
def get_commands_from_history(image_layer):
'''Given the image layer object and the shell, get the list of command
objects that created the layer'''
# set up notice origin for the layer
origin_layer = 'Layer {}'.format(image_layer.layer_index)
if image_layer.created_by:
instruction = created_to_instruction(image_layer.created_by)
image_layer.origins.add_notice_to_origins(origin_layer, Notice(
formats.dockerfile_line.format(dockerfile_instruction=instruction),
'info'))
command_line = instruction.split(' ', 1)[1]
else:
instruction = ''
image_layer.origins.add_notice_to_origins(origin_layer, Notice(
formats.no_created_by, 'warning'))
command_line = instruction
# Image layers are created with the directives RUN, ADD and COPY
# For ADD and COPY instructions, there is no information about the
# packages added
if 'ADD' in instruction or 'COPY' in instruction:
image_layer.origins.add_notice_to_origins(origin_layer, Notice(
errors.unknown_content.format(files=command_line), 'warning'))
# return an empty list as we cannot find any commands
return []
# for RUN instructions we can return a list of commands
command_list, msg = fltr.filter_install_commands(command_line)
if msg:
image_layer.origins.add_notice_to_origins(origin_layer, Notice(
msg, 'warning'))
return command_list
def set_imported_layers(docker_image):
'''Given a Docker image object that was built from a Dockerfile, set the
layers that were imported using the Dockerfile's FROM command or the ones
that came before it'''
index = -1
from_line = ''
dockerfile_lines = docker_commands
for cmd in dockerfile_lines:
if cmd['instruction'] == 'FROM':
from_line = cmd['content'].rstrip()
break
command_list = parse.get_command_list(dockerfile_lines)
for layer in docker_image.layers:
instr = created_to_instruction(layer.created_by)
if instr in command_list:
index = docker_image.layers.index(layer)
break
if index != -1:
# index was set so all layers before this index has been imported
for i in range(0, index-1):
docker_image.layers[i].import_str = from_line
def get_env_vars(image_obj):
'''Given a docker image object, return the list of environment variables,
if any, based on their values in the config.'''
config = image_obj.get_image_config(image_obj.get_image_manifest())
# replace '\t' with '\\t' in the ENV
for idx, env_str in enumerate(config['config']['Env']):
config['config']['Env'][idx] = env_str.replace('\t', '\\t')
return config['config']['Env']
def lock_layer_instruction(dfobj, line_index, commands, image_layer):
"""Given the Dockerfile object, the line index that we are replacing,
the list command objects that installed packages, and the image layer,
rewrite the corresponding line in the Dockerfile with the package and
the version installed"""
for command in commands:
# get the version separator
vsep = command_lib.check_pinning_separator(command.name)
# replace the packages with package separators for each of the words
for word in command.words:
for pkg in image_layer.packages:
if pkg.name == word:
parse.expand_package(
dfobj.structure[line_index], pkg.name, pkg.version,
vsep)
return dfobj
def lock_dockerfile(dfobj, image_obj):
    """Given a Dockerfile object and the corresponding Image object, rewrite
    the content to pin packages to their versions"""
    # collect all the RUN commands present in the Dockerfile
    run_list = parse.get_run_layers(dfobj)
    # walk the image layers and match each one to its RUN line
    for layer in image_obj.layers:
        if layer.import_str:
            # this layer came from a FROM line; nothing to pin
            continue
        cmd, instr = fltr.get_run_command(layer.created_by)
        if instr != 'RUN':
            continue
        # find the Dockerfile line whose value matches this layer's command
        for run_dict in run_list:
            if run_dict['value'] != cmd:
                continue
            # keep only the sub-commands that install packages
            command_list, _ = fltr.filter_install_commands(
                general.clean_command(run_dict['value']))
            # pin the packages installed by each command
            run_index = dfobj.structure.index(run_dict)
            dfobj = lock_layer_instruction(
                dfobj, run_index, command_list, layer)
    return dfobj
def create_locked_dockerfile(dfobj):
    '''Given a dockerfile object, the information in a new Dockerfile object
    Copy the dfobj info to the destination output Dockerfile location'''
    # packages in RUN lines, ENV, and ARG values are already expanded
    parse.expand_from_images(dfobj)
    parse.expand_add_command(dfobj)
    # rebuild the file text, re-inserting the blank lines that separated
    # the original commands so line numbering stays aligned
    pieces = []
    previous_end = 0
    for command_dict in dfobj.structure:
        current_end = command_dict["endline"]
        gap = current_end - previous_end
        # a gap of g lines means (g - 1) newlines precede this content
        separator = "\n" * (gap - 1) if gap > 1 else ""
        pieces.append(separator + command_dict['content'])
        previous_end = current_end
    return "".join(pieces)
def write_locked_dockerfile(dfile, destination=None):
    '''Write the pinned Dockerfile to a file'''
    # fall back to the project's default locked-Dockerfile path when the
    # caller gave no explicit destination
    target = destination if destination is not None \
        else constants.locked_dockerfile
    with open(target, 'w') as out_file:
        out_file.write(dfile)
| 41.555556 | 79 | 0.668717 |
5a3209a99cbad4e38fb7649cdcdb53c050ccbf17 | 2,003 | py | Python | utils/firebase.py | YangWanjun/sales-encrypt | dcf0975164f60dd53385661029c4a270abdfd30e | [
"Apache-2.0"
] | null | null | null | utils/firebase.py | YangWanjun/sales-encrypt | dcf0975164f60dd53385661029c4a270abdfd30e | [
"Apache-2.0"
] | null | null | null | utils/firebase.py | YangWanjun/sales-encrypt | dcf0975164f60dd53385661029c4a270abdfd30e | [
"Apache-2.0"
] | null | null | null | import os
import firebase_admin
from firebase_admin import credentials, messaging
from django.conf import settings
from utils import common, constants
# Shared application logger for Firebase-related events.
logger = common.get_system_logger()
# Service-account credentials for the Firebase Admin SDK; the JSON key file
# lives under <BASE_DIR>/data/.
cred = credentials.Certificate(os.path.join(
    settings.BASE_DIR,
    'data',
    'sales-yang-firebase-adminsdk-2ga7e-17745491f0.json'
))
# Initialize the default Firebase app once at import time.
firebase_admin.initialize_app(credential=cred)
# def subscribe_to_topic(registration_tokens, topic):
# """
#
# :param registration_tokens: Instance ID
# :param topic:
# :return:
# """
# res = messaging.subscribe_to_topic(registration_tokens, topic)
# return res.success_count, res.failure_count, res.errors
#
#
# def unsubscribe_from_topic(registration_tokens, topic):
# """
#
# :param registration_tokens: Instance ID
# :param topic:
# :return:
# """
# res = messaging.unsubscribe_from_topic(registration_tokens, topic)
# return res.success_count, res.failure_count, res.errors
def send_message_to_topic(topic, title, body, forward=None):
    """Record a notification for a topic and push it to registered devices.

    :param topic: topic instance; its ``users`` are the recipients
    :param title: notification title
    :param body: notification body text
    :param forward: optional forwarding target stored with the notification
    :return: None
    """
    # imported lazily to avoid circular imports at module load time
    from account.models import Notification
    from master.models import FirebaseDevice
    # persist the notification first so it exists even with no devices
    Notification.add_by_topic(topic.name, title, body, forward=forward)
    device_qs = FirebaseDevice.objects.filter(user__in=topic.users.all())
    if device_qs.count() == 0:
        # no registered device for any of the topic's users
        logger.info(constants.INFO_FIREBASE_NO_DEVICE.format(topic=topic.name))
        return
    # fan the message out to every registered device token
    multicast = messaging.MulticastMessage(data={
        'title': title,
        'body': body
    }, tokens=[device.token for device in device_qs])
    messaging.send_multicast(multicast)
    logger.info(constants.INFO_FIREBASE_SEND_MESSAGE.format(topic=topic.name))
5a3220a6933b741f74449b702618162293bca339 | 1,944 | py | Python | tests/settings.py | matrixorz/firefly | fb8082ccc525bf7b266960ae49fc0b15e522fd92 | [
"MIT"
] | 247 | 2015-04-13T05:58:10.000Z | 2021-01-21T07:31:58.000Z | tests/settings.py | qiluosheng/firefly | fb8082ccc525bf7b266960ae49fc0b15e522fd92 | [
"MIT"
] | 57 | 2015-04-13T15:10:50.000Z | 2016-04-08T09:15:27.000Z | tests/settings.py | qiluosheng/firefly | fb8082ccc525bf7b266960ae49fc0b15e522fd92 | [
"MIT"
] | 94 | 2015-04-12T06:03:30.000Z | 2020-05-11T14:26:56.000Z | # coding=utf-8
DEBUG = True
TESTING = True
SECRET_KEY = 'secret_key for test'
# mongodb
MONGODB_SETTINGS = {
'db': 'firefly_test',
'username': '',
'password': '',
'host': '127.0.0.1',
'port': 27017
}
# redis cache
CACHE_TYPE = 'redis'
CACHE_REDIS_HOST = '127.0.0.1'
CACHE_REDIS_PORT = 6379
CACHE_REDIS_DB = 9
CACHE_REDIS_PASSWORD = ''
# mail sender
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = 'MAIL_USERNAME'
MAIL_PASSWORD = 'MAIL_PASSWORD'
MAIL_DEFAULT_SENDER = 'admin@python-cn.org'
SECURITY_PASSWORD_SALT = "abc"
SECURITY_PASSWORD_HASH = "bcrypt"
# SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
SECURITY_EMAIL_SENDER = "support@python-cn.org"
SECURITY_CONFIRM_SALT = "570be5f24e690ce5af208244f3e539a93b6e4f05"
SECURITY_REMEMBER_SALT = "de154140385c591ea771dcb3b33f374383e6ea47"
# Set secret keys for CSRF protection
CSRF_ENABLED = False
WTF_CSRF_ENABLED = False
SERVER_EMAIL = 'Python-China <support@python-cn.org>'
# Flask-SocialBlueprint
SOCIAL_BLUEPRINT = {
# https://developers.facebook.com/apps/
"flask_social_blueprint.providers.Facebook": {
# App ID
'consumer_key': '197',
# App Secret
'consumer_secret': 'c956c1'
},
# https://apps.twitter.com/app/new
"flask_social_blueprint.providers.Twitter": {
# Your access token from API Keys tab
'consumer_key': 'bkp',
# access token secret
'consumer_secret': 'pHUx'
},
# https://console.developers.google.com/project
"flask_social_blueprint.providers.Google": {
# Client ID
'consumer_key': '797.apps.googleusercontent.com',
# Client secret
'consumer_secret': 'bDG'
},
# https://github.com/settings/applications/new
"flask_social_blueprint.providers.Github": {
# Client ID
'consumer_key': '6f6',
# Client Secret
'consumer_secret': '1a9'
},
}
| 25.578947 | 67 | 0.679012 |
5a3481b2ed60e03ed802eb9ef17136804b5ee7a0 | 981 | py | Python | pyhack/boris_stag.py | Krissmedt/runko | 073306de9284f1502d0538d33545bc14c80e8b93 | [
"MIT"
] | null | null | null | pyhack/boris_stag.py | Krissmedt/runko | 073306de9284f1502d0538d33545bc14c80e8b93 | [
"MIT"
] | null | null | null | pyhack/boris_stag.py | Krissmedt/runko | 073306de9284f1502d0538d33545bc14c80e8b93 | [
"MIT"
] | null | null | null | import numpy as np
from pyhack.py_runko_aux import *
from pyhack.boris import *
| 18.509434 | 48 | 0.579001 |
5a36eda2f990b0b613ca5b9070e7a670400461bc | 1,806 | py | Python | mbed_connector_api/tests/mock_data.py | ARMmbed/mbed-connector-python | a5024a01dc67cc192c8bf7a70b251fcf0a3f279b | [
"Apache-2.0"
] | 2 | 2017-01-05T07:16:03.000Z | 2018-09-04T02:26:19.000Z | mbed_connector_api/tests/mock_data.py | ARMmbed/mbed-connector-python | a5024a01dc67cc192c8bf7a70b251fcf0a3f279b | [
"Apache-2.0"
] | 13 | 2016-02-29T17:31:56.000Z | 2017-02-07T22:46:17.000Z | mbed_connector_api/tests/mock_data.py | ARMmbed/mbed-connector-python | a5024a01dc67cc192c8bf7a70b251fcf0a3f279b | [
"Apache-2.0"
] | 2 | 2017-02-07T22:10:41.000Z | 2017-03-06T06:38:58.000Z | # Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
| 42 | 448 | 0.613511 |
5a37802b395a4a964c1285e03e992f8b1712b575 | 2,134 | py | Python | examples/demo/eager_demo/src/demo_1_pybullet.py | eager-dev/eager | f10ccbd7452acb3a29881ecd95c759f632c91da9 | [
"Apache-2.0"
] | 16 | 2021-07-02T14:48:53.000Z | 2022-02-23T02:53:01.000Z | examples/demo/eager_demo/src/demo_1_pybullet.py | eager-dev/eager | f10ccbd7452acb3a29881ecd95c759f632c91da9 | [
"Apache-2.0"
] | 37 | 2021-06-30T12:10:29.000Z | 2022-02-02T09:46:34.000Z | examples/demo/eager_demo/src/demo_1_pybullet.py | eager-dev/eager | f10ccbd7452acb3a29881ecd95c759f632c91da9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import rospy
# Import eager packages
from eager_core.utils.file_utils import launch_roscore, load_yaml
from eager_core.eager_env import EagerEnv
from eager_core.objects import Object
from eager_core.wrappers.flatten import Flatten
from eager_bridge_pybullet.pybullet_engine import PyBulletEngine # noqa: F401
# Required for action processor
from eager_process_safe_actions.safe_actions_processor import SafeActionsProcessor
if __name__ == '__main__':
    # roscore must be running before any rospy call below
    roscore = launch_roscore()  # First launch roscore
    rospy.init_node('eager_demo', anonymous=True, log_level=rospy.WARN)
    rate = rospy.Rate(1/0.08)  # step the control loop at 12.5 Hz (0.08 s)
    # Define the engine
    engine = PyBulletEngine(gui=True)
    # Create robot
    robot = Object.create('robot', 'eager_robot_vx300s', 'vx300s')
    # Add action preprocessing (vel_limit / collision_height — presumably
    # safety limits for the arm; confirm against SafeActionsProcessor docs)
    processor = SafeActionsProcessor(robot_type='vx300s',
                                     vel_limit=0.25,
                                     collision_height=0.15,
                                     )
    robot.actuators['joints'].add_preprocess(
        processor=processor,
        observations_from_objects=[robot],
    )
    # Add a camera for rendering; its pose comes from the calibration YAML
    calibration = load_yaml('eager_demo', 'calibration')
    cam = Object.create('cam', 'eager_sensor_realsense', 'd435',
                        position=calibration['position'],
                        orientation=calibration['orientation'],
                        )
    # Create environment
    env = EagerEnv(name='demo_env',
                   engine=engine,
                   objects=[robot, cam],
                   render_sensor=cam.sensors['camera_rgb'],
                   )
    env = Flatten(env)
    env.render()
    obs = env.reset()  # TODO: if code does not close properly, render seems to keep a thread open....
    # run 200 random-action steps, resetting whenever an episode ends
    for i in range(200):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        if done:
            obs = env.reset()
        rate.sleep()
    # todo: create a env.close(): close render screen, and env.shutdown() to shutdown the environment cleanly.
    env.close()
| 33.873016 | 110 | 0.627929 |
5a3924093bca8ec08e3a6779656c4151c0bb55bf | 3,811 | py | Python | kerastuner/engine/tuner_utils.py | DL-2020-Shakespeare/keras-tuner | 5f35f101883a7884e9521de7db4eb632ab659775 | [
"Apache-2.0"
] | 1 | 2021-06-08T01:19:58.000Z | 2021-06-08T01:19:58.000Z | kerastuner/engine/tuner_utils.py | DL-2020-Shakespeare/keras-tuner | 5f35f101883a7884e9521de7db4eb632ab659775 | [
"Apache-2.0"
] | null | null | null | kerastuner/engine/tuner_utils.py | DL-2020-Shakespeare/keras-tuner | 5f35f101883a7884e9521de7db4eb632ab659775 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Tuner class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
import tensorflow as tf
from tensorflow import keras
from ..abstractions import display
# TODO: Add more extensive display.
def average_histories(histories):
    """Averages the per-epoch metrics from multiple executions.

    Args:
        histories: Non-empty list of dicts mapping a metric name to a list
            of per-epoch values. Executions may have run for different
            numbers of epochs; shorter runs are simply ignored for the
            epochs they did not reach.

    Returns:
        A list with one dict per epoch, mapping each metric name to the
        mean of that metric over all executions that reached the epoch.
    """
    # stdlib replacement for six.moves.zip_longest; `six` is only needed
    # for Python 2, which this function no longer requires.
    from itertools import zip_longest

    averaged = {}
    metrics = histories[0].keys()
    for metric in metrics:
        values = []
        for epoch_values in zip_longest(*[h[metric] for h in histories],
                                        fillvalue=np.nan):
            # nanmean skips the NaN fill values from shorter executions
            values.append(np.nanmean(epoch_values))
        averaged[metric] = values
    # Convert {str: [float]} to [{str: float}]
    averaged = [dict(zip(metrics, vals)) for vals in zip(*averaged.values())]
    return averaged
| 31.758333 | 78 | 0.680399 |
5a395024f625042332e48560226cfb73aaa1b4a7 | 14,129 | py | Python | angr/procedures/definitions/win32_d3dcompiler_47.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_d3dcompiler_47.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_d3dcompiler_47.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("d3dcompiler_47.dll")
prototypes = \
{
#
'D3DDisassemble11Trace': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeBottom(label="ID3D11ShaderTrace"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pTrace", "StartStep", "NumSteps", "Flags", "ppDisassembly"]),
#
'D3DReadFileToBlob': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pFileName", "ppContents"]),
#
'D3DWriteBlobToFile': SimTypeFunction([SimTypeBottom(label="ID3DBlob"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pBlob", "pFileName", "bOverwrite"]),
#
'D3DCompile': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Name": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Definition": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="D3D_SHADER_MACRO", pack=False, align=None), offset=0), SimTypeBottom(label="ID3DInclude"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pSourceName", "pDefines", "pInclude", "pEntrypoint", "pTarget", "Flags1", "Flags2", "ppCode", "ppErrorMsgs"]),
#
'D3DCompile2': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Name": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Definition": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="D3D_SHADER_MACRO", pack=False, align=None), offset=0), SimTypeBottom(label="ID3DInclude"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pSourceName", "pDefines", "pInclude", "pEntrypoint", "pTarget", "Flags1", "Flags2", "SecondaryDataFlags", "pSecondaryData", "SecondaryDataSize", "ppCode", "ppErrorMsgs"]),
#
'D3DCompileFromFile': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"Name": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Definition": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="D3D_SHADER_MACRO", pack=False, align=None), offset=0), SimTypeBottom(label="ID3DInclude"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pFileName", "pDefines", "pInclude", "pEntrypoint", "pTarget", "Flags1", "Flags2", "ppCode", "ppErrorMsgs"]),
#
'D3DPreprocess': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Name": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Definition": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="D3D_SHADER_MACRO", pack=False, align=None), offset=0), SimTypeBottom(label="ID3DInclude"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pSourceName", "pDefines", "pInclude", "ppCodeText", "ppErrorMsgs"]),
#
'D3DGetDebugInfo': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "ppDebugInfo"]),
#
'D3DReflect': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pInterface", "ppReflector"]),
#
'D3DReflectLibrary': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "riid", "ppReflector"]),
#
'D3DDisassemble': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Flags", "szComments", "ppDisassembly"]),
#
'D3DDisassembleRegion': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Flags", "szComments", "StartByteOffset", "NumInsts", "pFinishByteOffset", "ppDisassembly"]),
#
'D3DCreateLinker': SimTypeFunction([SimTypePointer(SimTypeBottom(label="ID3D11Linker"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["ppLinker"]),
#
'D3DLoadModule': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3D11Module"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "cbSrcDataSize", "ppModule"]),
#
'D3DCreateFunctionLinkingGraph': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3D11FunctionLinkingGraph"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["uFlags", "ppFunctionLinkingGraph"]),
#
'D3DGetTraceInstructionOffsets': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), label="LPArray", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Flags", "StartInstIndex", "NumInsts", "pOffsets", "pTotalInsts"]),
#
'D3DGetInputSignatureBlob': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "ppSignatureBlob"]),
#
'D3DGetOutputSignatureBlob': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "ppSignatureBlob"]),
#
'D3DGetInputAndOutputSignatureBlob': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "ppSignatureBlob"]),
#
'D3DStripShader': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pShaderBytecode", "BytecodeLength", "uStripFlags", "ppStrippedBlob"]),
#
'D3DGetBlobPart': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="D3D_BLOB_PART"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Part", "Flags", "ppPart"]),
#
'D3DSetBlobPart': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="D3D_BLOB_PART"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Part", "Flags", "pPart", "PartSize", "ppNewShader"]),
#
'D3DCreateBlob': SimTypeFunction([SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Size", "ppBlob"]),
#
'D3DCompressShaders': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"pBytecode": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "BytecodeLength": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="D3D_SHADER_DATA", pack=False, align=None), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["uNumShaders", "pShaderData", "uFlags", "ppCompressedData"]),
#
'D3DDecompressShaders': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "uNumShaders", "uStartIndex", "pIndices", "uFlags", "ppShaders", "pTotalShaders"]),
#
'D3DDisassemble10Effect': SimTypeFunction([SimTypeBottom(label="ID3D10Effect"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pEffect", "Flags", "ppDisassembly"]),
}
lib.set_prototypes(prototypes)
| 190.932432 | 1,222 | 0.737561 |
5a3bb304d53c998d16ff4c3d532be4b3380720b2 | 16,392 | py | Python | explorer/explorer.py | holarchy/Holon | 2a557b300bce10fb2c2ab85a1db4bdfd5df470aa | [
"MIT"
] | null | null | null | explorer/explorer.py | holarchy/Holon | 2a557b300bce10fb2c2ab85a1db4bdfd5df470aa | [
"MIT"
] | null | null | null | explorer/explorer.py | holarchy/Holon | 2a557b300bce10fb2c2ab85a1db4bdfd5df470aa | [
"MIT"
] | null | null | null | from flask import Flask, render_template, flash, abort, redirect, url_for, request
import os
import common
import json
import numbers
import urllib.parse
import pandas as pd
from datetime import datetime
from math import log10, floor
# Root directory containing all prediction/prior run outputs.
base_dir = '/home/nick/Data/_ensembles'
app = Flask(__name__)
app.config['ENV'] = 'development'
app.config['DEBUG'] = True
app.config['TESTING'] = True
app.config.from_mapping(
    SECRET_KEY='dev'
)
# Earlier runs are kept commented out for quick switching.
# predictions_home_dir = os.path.join(base_dir, 'outlier-predictions-2019_11_13-15_38_28')
predictions_home_dir = os.path.join(base_dir, 'outlier-predictions-2020_01_03-11_15_41')
file_config = common.load_file_config(predictions_home_dir)
labels_dir = os.path.join(predictions_home_dir, 'labels')
# priors_parent_dir = os.path.join(base_dir, 'priors-2019_11_12-19_33_13')
priors_parent_dir = os.path.join(base_dir, 'priors-2019_12_30-18_30_22')
predictions_dir = os.path.join(predictions_home_dir, 'predictions')
priors_dir = os.path.join(priors_parent_dir, 'priors')
# Load the run summary once at import time, highest prediction first.
prediction_summary = pd.read_csv(os.path.join(predictions_home_dir, 'summary.csv'))
prediction_summary = prediction_summary.sort_values('prediction', ascending=False)
prediction_summary = prediction_summary.reset_index()
def resolve_user_label(flow, request):
    """Apply a label action submitted from the flow detail form.

    On POST, either creates a new user label (when the form carries a
    'threatLevel' field) or deletes the label whose 1-based position was
    submitted as a form key. Non-POST requests are ignored.

    :param flow: the flow whose labels are being edited
    :param request: the Flask request carrying the submitted form
    """
    if request.method != "POST":
        return
    if request.form.get('threatLevel') is not None:  # user added a new label
        make_label(flow, username=request.form.get('userName'),
                   threat_level=request.form.get('threatLevel'),
                   classifier=request.form.get('classifier'),
                   description=request.form.get('description'))
    else:  # user is trying to delete a label
        # Snapshot the count once: the old loop re-evaluated
        # len(get_labels(flow)) after removal, shrinking the bound and
        # skipping positions; it also left a debug print() behind.
        label_count = len(get_labels(flow))
        for position in range(1, label_count + 1):
            if request.form.get(str(position)) is not None:
                remove_label(flow, position - 1)
                break  # one delete button per submission
5a3c1f4058904f112a823d0ce1fa4d2ba743c174 | 6,151 | py | Python | models/grammateus.py | monotasker/Online-Critical-Pseudepigrapha | 456ef828834aeaedda8204a6107729f277063b9f | [
"W3C"
] | 1 | 2017-09-03T12:59:19.000Z | 2017-09-03T12:59:19.000Z | models/grammateus.py | OnlineCriticalPseudepigrapha/Online-Critical-Pseudepigrapha | 456ef828834aeaedda8204a6107729f277063b9f | [
"W3C"
] | 18 | 2018-05-11T17:08:48.000Z | 2018-06-29T20:15:37.000Z | models/grammateus.py | monotasker/Online-Critical-Pseudepigrapha | 456ef828834aeaedda8204a6107729f277063b9f | [
"W3C"
] | 1 | 2017-09-17T16:13:45.000Z | 2017-09-17T16:13:45.000Z | #! /usr/bin/python2.7
# -*- coding: utf8 -*-
import datetime
# from plugin_ajaxselect import AjaxSelect
if 0:
from gluon import db, Field, auth, IS_EMPTY_OR, IS_IN_DB, current, URL
response = current.response
response.files.insert(5, URL('static',
'plugin_ajaxselect/plugin_ajaxselect.js'))
#response.files.append(URL('static', 'plugin_ajaxselect/plugin_ajaxselect.css'))
response.files.append(URL('static', 'plugin_listandedit/plugin_listandedit.css'))
# Lookup table of literary genres referenced by (draft) documents.
db.define_table('genres',
                Field('genre', 'string'),
                format='%(genre)s')
# Lookup table of biblical figures referenced by (draft) documents.
db.define_table('biblical_figures',
                Field('figure', 'string'),
                format='%(figure)s')
# Draft (not yet published) critical editions; mirrors the 'docs' table.
db.define_table('draftdocs',
                Field('name'),
                Field('filename'),
                Field('editor', db.auth_user),
                Field('editor2', db.auth_user),
                Field('editor3', db.auth_user),
                Field('editor4', db.auth_user),
                Field('assistant_editor', db.auth_user),
                Field('assistant_editor2', db.auth_user),
                Field('assistant_editor3', db.auth_user),
                Field('proofreader', db.auth_user),
                Field('proofreader2', db.auth_user),
                Field('proofreader3', db.auth_user),
                Field('version', 'double'),
                Field('introduction', 'text'),
                Field('provenance', 'text'),
                Field('themes', 'text'),
                Field('status', 'text'),
                Field('manuscripts', 'text'),
                Field('bibliography', 'text'),
                Field('corrections', 'text'),
                Field('sigla', 'text'),
                Field('copyright', 'text'),
                Field('citation_format', 'text'),
                Field('genres', 'list:reference genres'),
                Field('figures', 'list:reference biblical_figures'),
                format='%(name)s')
# Every staff field on draftdocs takes the same optional auth_user
# validator; assign it in a loop instead of ten copy-pasted lines.
for _staff_field in ('editor', 'editor2', 'editor3', 'editor4',
                     'assistant_editor', 'assistant_editor2',
                     'assistant_editor3',
                     'proofreader', 'proofreader2', 'proofreader3'):
    db.draftdocs[_staff_field].requires = IS_EMPTY_OR(
        IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
# Multi-select references into the lookup tables.
db.draftdocs.genres.requires = IS_EMPTY_OR(IS_IN_DB(db, 'genres.id',
                                                    db.genres._format,
                                                    multiple=True))
db.draftdocs.figures.requires = IS_EMPTY_OR(IS_IN_DB(db, 'biblical_figures.id',
                                                     db.biblical_figures._format,
                                                     multiple=True))
# Published critical editions; same schema as 'draftdocs'.
db.define_table('docs',
                Field('name'),
                Field('filename'),
                Field('editor', db.auth_user),
                Field('editor2', db.auth_user),
                Field('editor3', db.auth_user),
                Field('editor4', db.auth_user),
                Field('assistant_editor', db.auth_user),
                Field('assistant_editor2', db.auth_user),
                Field('assistant_editor3', db.auth_user),
                Field('proofreader', db.auth_user),
                Field('proofreader2', db.auth_user),
                Field('proofreader3', db.auth_user),
                Field('version', 'double'),
                Field('introduction', 'text'),
                Field('provenance', 'text'),
                Field('themes', 'text'),
                Field('status', 'text'),
                Field('manuscripts', 'text'),
                Field('bibliography', 'text'),
                Field('corrections', 'text'),
                Field('sigla', 'text'),
                Field('copyright', 'text'),
                Field('citation_format', 'text'),
                Field('genres', 'list:reference genres'),
                Field('figures', 'list:reference biblical_figures'),
                format='%(name)s')
# Same validator pattern as draftdocs: all staff fields are optional
# references to auth_user; assign in a loop to avoid duplication.
for _staff_field in ('editor', 'editor2', 'editor3', 'editor4',
                     'assistant_editor', 'assistant_editor2',
                     'assistant_editor3',
                     'proofreader', 'proofreader2', 'proofreader3'):
    db.docs[_staff_field].requires = IS_EMPTY_OR(
        IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
# Multi-select references into the lookup tables.
db.docs.genres.requires = IS_EMPTY_OR(IS_IN_DB(db, 'genres.id',
                                               db.genres._format,
                                               multiple=True))
db.docs.figures.requires = IS_EMPTY_OR(IS_IN_DB(db, 'biblical_figures.id',
                                                db.biblical_figures._format,
                                                multiple=True))
# Free-form bibliography records.
db.define_table('biblio',
                Field('record'),
                format='%(record)s')
# Static site pages keyed by page_label.
# NOTE(review): default=datetime.datetime.utcnow() is evaluated once when
# the model is defined, not per insert — confirm this is intended (web2py
# re-executes model files per request, which masks the issue).
db.define_table('pages',
                Field('page_label', 'string'),
                Field('title', 'string'),
                Field('body', 'text'),
                Field('poster', db.auth_user, default=auth.user_id),
                Field('post_date', 'datetime', default=datetime.datetime.utcnow()),
                format='%(title)s')
# News items shown on the front page.
db.define_table('news',
                Field('news_token', 'string'),
                Field('title', 'string'),
                Field('body', 'text'),
                Field('poster', db.auth_user, default=auth.user_id),
                Field('post_date', 'datetime', default=datetime.datetime.utcnow()),
                format='%(title)s')
# Bug reports.
# NOTE(review): unlike pages/news, post_date here has no default — verify
# whether callers always set it explicitly.
db.define_table('bugs',
                Field('title'),
                Field('body', 'text'),
                Field('poster', db.auth_user, default=auth.user_id),
                Field('post_date', 'datetime'),
                format='%(title)s')
| 44.572464 | 105 | 0.662656 |
5a3ccdb8281af1ea0b8a669045afc2025efc659b | 12,559 | py | Python | interface.py | Kryptagora/pysum | 5281d47b7fa4d5500230b6b30797ab1a3adabcc2 | [
"MIT"
] | 3 | 2021-01-08T21:07:37.000Z | 2021-11-29T19:26:56.000Z | interface.py | Kryptagora/pysum | 5281d47b7fa4d5500230b6b30797ab1a3adabcc2 | [
"MIT"
] | null | null | null | interface.py | Kryptagora/pysum | 5281d47b7fa4d5500230b6b30797ab1a3adabcc2 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import filedialog
from urllib.request import urlopen
from pathlib import Path
from tkinter import ttk
import numpy as np
import base64
import io
import re
from src.theme import theme
from src.algorithm import blosum
from src.utils import RichText
def qopen(path: str):
    """Read the file at *path* and return its entire contents as a string."""
    with open(path, 'r') as handle:
        return handle.read()
| 40.124601 | 155 | 0.597022 |
5a3d662e5f34dbe67eeb69437b64718da7a2b8ce | 4,050 | py | Python | view/python_core/movies/colorizer/aux_funcs.py | galizia-lab/pyview | 07bef637b0c60fae8830c1b3947e4a7bcd14bb2c | [
"BSD-3-Clause"
] | 2 | 2021-11-07T10:17:16.000Z | 2021-11-07T10:17:19.000Z | view/python_core/movies/colorizer/aux_funcs.py | galizia-lab/pyview | 07bef637b0c60fae8830c1b3947e4a7bcd14bb2c | [
"BSD-3-Clause"
] | 5 | 2021-11-03T12:43:03.000Z | 2021-12-16T10:34:52.000Z | view/python_core/movies/colorizer/aux_funcs.py | galizia-lab/pyview | 07bef637b0c60fae8830c1b3947e4a7bcd14bb2c | [
"BSD-3-Clause"
] | 1 | 2021-09-23T15:46:26.000Z | 2021-09-23T15:46:26.000Z | import numpy as np
import re
def apply_colormaps_based_on_mask(mask, data_for_inside_mask, data_for_outside_mask,
colormap_inside_mask, colormap_outside_mask):
"""
Returns the combination of applying two colormaps to two datasets on two mutually exclusive sets of pixels
as follows. Applies <colormap_inside_mask> to <data_for_inside_mask> for pixels where <thresh_mask> is True and applies
<colormap_outside_mask> to <data_for_outside_mask> for pixels where <thresh_mask> is False.
:param mask: boolean numpy.ndarray
:param data_for_inside_mask: float numpy.ndarray, having the same shape as thresh_mask
:param data_for_outside_mask: float numpy.ndarray, having the same shape as thresh_mask
:param colormap_inside_mask: matplotlib colormap
:param colormap_outside_mask: matplotlib colormap
:return: numpy.ndarray, having the same shape as thresh_mask
"""
assert data_for_inside_mask.shape == data_for_outside_mask.shape, f"data_within_mask and data_outside_mask " \
f"must have " \
f"the same shape. Given: {data_for_inside_mask.shape} " \
f"and {data_for_outside_mask.shape}"
assert mask.shape == data_for_inside_mask.shape, f"The shape of given thresh_mask ({mask.shape}) " \
f"does not match shape of data given " \
f"({data_for_inside_mask.shape})"
data_colorized = np.empty(list(data_for_inside_mask.shape) + [4])
data_colorized[mask, :] = colormap_inside_mask(data_for_inside_mask[mask])
data_colorized[~mask, :] = colormap_outside_mask(data_for_outside_mask[~mask])
return data_colorized
#
# data_masked_inside = np.ma.MaskedArray(data_for_outside_mask, mask, fill_value=0)
# data_masked_outside = np.ma.MaskedArray(data_for_inside_mask, ~mask, fill_value=0)
#
# data_colorized_outside = colormap_outside_mask(data_masked_inside)
# data_colorized_inside = colormap_inside_mask(data_masked_outside)
#
# return data_colorized_inside + data_colorized_outside
def stack_duplicate_frames(frame, depth):
    """
    Return a 3D array consisting of <depth> copies of <frame> along axis 2.

    :param frame: numpy.ndarray of 2 dimensions
    :param depth: int, number of copies
    :return: numpy.ndarray of shape (frame.shape[0], frame.shape[1], depth)
    """
    # Insert a trailing axis and replicate along it; equivalent to stacking
    # `depth` references of the frame on a new third axis.
    return np.repeat(frame[:, :, np.newaxis], depth, axis=2)
def resolve_thresholdOnValue(data, mv_thresholdOnValue):
    """
    Interprets <mv_thresholdOnValue> in the context of <data>, calculates the
    threshold and returns it.

    The indicator is a letter followed by a number: 'rxxx' is a relative
    threshold (xxx percent of the data range, clipped to [0, 100]); 'ayyy'
    is an absolute threshold (yyy used as-is).

    :param data: numpy.ndarray
    :param mv_thresholdOnValue: str, e.g. "a123.123", "r0.4" or "r-0.12533"
    :return: float
    """
    # The assert message previously concatenated its pieces without spaces
    # ("...validthreshold...", "...and'yyy'..."); fixed here.
    assert re.fullmatch(r"[ra][\-\.0-9]+", mv_thresholdOnValue) is not None, \
        f"{mv_thresholdOnValue} is not a valid threshold indicator. " \
        f"Valid formats are 'rxxx' for relative threshold and 'ayyy' " \
        f"for absolute threshold where 'xxx' and 'yyy' represent numbers. " \
        f"E.g.: a123.123, r0.4 and r-0.12533"

    threshold_value = float(mv_thresholdOnValue[1:])

    if mv_thresholdOnValue.startswith("r"):
        # Relative: percentage of the [min, max] data range.
        thres_pc = np.clip(threshold_value, 0, 100)
        data_min, data_max = data.min(), data.max()
        threshold = data_min + 0.01 * thres_pc * (data_max - data_min)
    else:
        # The regex above guarantees the only other prefix is "a": absolute.
        # (The former trailing `raise ValueError()` branch was unreachable.)
        threshold = threshold_value

    return threshold
| 46.551724 | 123 | 0.604691 |
5a3e53b2797ea32423806b35230113ec63c34d58 | 4,242 | py | Python | bigml/tests/create_cluster_steps.py | javinp/python | bdec1e206ed028990503ed4bebcbc7023d3ff606 | [
"Apache-2.0"
] | 1 | 2021-06-20T11:51:22.000Z | 2021-06-20T11:51:22.000Z | bigml/tests/create_cluster_steps.py | javinp/python | bdec1e206ed028990503ed4bebcbc7023d3ff606 | [
"Apache-2.0"
] | null | null | null | bigml/tests/create_cluster_steps.py | javinp/python | bdec1e206ed028990503ed4bebcbc7023d3ff606 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime, timedelta
from world import world
from read_cluster_steps import i_get_the_cluster
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
#@step(r'I create a cluster$')
#@step(r'I create a cluster from a dataset list$')
#@step(r'I create a cluster with options "(.*)"$')
#@step(r'I wait until the cluster status code is either (\d) or (-\d) less than (\d+)')
#@step(r'I wait until the cluster is ready less than (\d+)')
#@step(r'I make the cluster shared')
#@step(r'I get the cluster sharing info')
#@step(r'I check the cluster status using the model\'s shared url')
#@step(r'I check the cluster status using the model\'s shared key')
| 37.539823 | 87 | 0.698963 |
5a3f02391584923bfc3115e774e687008ccfb69b | 3,649 | py | Python | tests/ptp_clock_sim_time/test_ptp_clock_sim_time.py | psumesh/cocotbext-eth | 39c585a8dd8dcdcfd56822a4f879ef059653757b | [
"MIT"
] | 15 | 2020-11-26T14:40:54.000Z | 2022-03-25T06:42:30.000Z | tests/ptp_clock_sim_time/test_ptp_clock_sim_time.py | psumesh/cocotbext-eth | 39c585a8dd8dcdcfd56822a4f879ef059653757b | [
"MIT"
] | 1 | 2021-03-24T06:28:20.000Z | 2021-03-25T06:10:02.000Z | tests/ptp_clock_sim_time/test_ptp_clock_sim_time.py | psumesh/cocotbext-eth | 39c585a8dd8dcdcfd56822a4f879ef059653757b | [
"MIT"
] | 7 | 2020-12-06T09:59:39.000Z | 2021-08-25T04:15:37.000Z | #!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.utils import get_sim_time
from cocotbext.eth import PtpClockSimTime
# cocotb-test
tests_dir = os.path.dirname(__file__)
| 28.960317 | 107 | 0.693615 |
5a3f1fd52edcbc6a770d3bea9dab8192d49a92e5 | 1,838 | py | Python | dex/section/section.py | callmejacob/dexfactory | 2de996927ee9f036b2c7fc6cb04f43ac790f35af | [
"BSD-2-Clause"
] | 7 | 2018-06-14T10:40:47.000Z | 2021-05-18T08:55:34.000Z | dex/section/section.py | callmejacob/dexfactory | 2de996927ee9f036b2c7fc6cb04f43ac790f35af | [
"BSD-2-Clause"
] | 1 | 2020-05-28T08:59:50.000Z | 2020-05-28T08:59:50.000Z | dex/section/section.py | callmejacob/dexfactory | 2de996927ee9f036b2c7fc6cb04f43ac790f35af | [
"BSD-2-Clause"
] | 3 | 2018-02-28T02:08:06.000Z | 2018-09-12T03:09:18.000Z | # -- coding: utf-8 --
from section_base import *
from section_map_item import *
from section_header import *
from section_string_id import *
from section_type_id import *
from section_proto_id import *
from section_field_id import *
from section_method_id import *
from section_class_def import *
from section_type_list import *
from section_class_data import *
from section_annotation_set_ref_list import *
from section_annotation_set_item import *
from section_annotation_item import *
from section_string_list import *
from section_encoded_array import *
from section_annotations_directory import *
from section_code import *
from section_debug_info import *
'''
section: (Section)
'''
# Dispatch table: maps each DEX map-item type constant (TYPE_*) to the
# Section subclass that handles items of that type.  Both the constants and
# the classes come from the star-imported section_* modules above.
section_class_map = {
    TYPE_HEADER_ITEM: HeaderSection,
    TYPE_STRING_ID_ITEM: StringIdListSection,
    TYPE_TYPE_ID_ITEM: TypeIdListSection,
    TYPE_PROTO_ID_ITEM: ProtoIdListSection,
    TYPE_FIELD_ID_ITEM: FieldIdListSection,
    TYPE_METHOD_ID_ITEM: MethodIdListSection,
    TYPE_CLASS_DEF_ITEM: ClassDefListSection,
    TYPE_MAP_LIST: MapItemListSection,
    TYPE_TYPE_LIST: TypeListSection,
    TYPE_ANNOTATION_SET_REF_LIST: AnnotationSetRefListSection,
    TYPE_ANNOTATION_SET_ITEM: AnnotationSetItemSection,
    TYPE_CLASS_DATA_ITEM: ClassDataListSection,
    TYPE_CODE_ITEM: CodeSection,
    TYPE_STRING_DATA_ITEM: StringListSection,
    TYPE_DEBUG_INFO_ITEM: DebugInfoSection,
    TYPE_ANNOTATION_ITEM: AnnotationItemSection,
    TYPE_ENCODED_ARRAY_ITEM: EncodedArraySection,
    TYPE_ANNOTATIONS_DIRECTORY_ITEM: AnnotationsDirectorySection,
}
5a408ec9d28877bdb362b94265d0d74be34141c1 | 91 | py | Python | Code coach problems/Easy/Python/Skee-Ball.py | Djivs/sololearn-code-solutions | 7727dd97f79863a88841548770481f6f2abdc7bf | [
"MIT"
] | 1 | 2020-07-27T07:32:57.000Z | 2020-07-27T07:32:57.000Z | Code coach problems/Easy/Python/Skee-Ball.py | Djivs/sololearn-code-solutions | 7727dd97f79863a88841548770481f6f2abdc7bf | [
"MIT"
] | null | null | null | Code coach problems/Easy/Python/Skee-Ball.py | Djivs/sololearn-code-solutions | 7727dd97f79863a88841548770481f6f2abdc7bf | [
"MIT"
] | 1 | 2020-11-07T12:45:21.000Z | 2020-11-07T12:45:21.000Z | a = int(input())
b = int(input())
if a >=b*12:
print("Buy it!")
else:
print("Try again")
| 13 | 19 | 0.56044 |
5a41217fc99d7ef188d90f55041a7803b426c258 | 22 | py | Python | gsb/rest/__init__.py | pfrancois/grisbi_django | 4e27149522847c78ab9c0f0a06f0b1d371f7c205 | [
"BSD-3-Clause"
] | null | null | null | gsb/rest/__init__.py | pfrancois/grisbi_django | 4e27149522847c78ab9c0f0a06f0b1d371f7c205 | [
"BSD-3-Clause"
] | null | null | null | gsb/rest/__init__.py | pfrancois/grisbi_django | 4e27149522847c78ab9c0f0a06f0b1d371f7c205 | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
# init
| 7.333333 | 14 | 0.636364 |
5a4164758499f35ed2ad174d38480235b72e03a1 | 4,416 | py | Python | chris_turtlebot_dashboard/src/chris_turtlebot_dashboard/dashboard.py | xabigarde/chris_ros_turtlebot | ca26db3eafcb8aba7a322cca8fd44443f015e125 | [
"BSD-3-Clause"
] | null | null | null | chris_turtlebot_dashboard/src/chris_turtlebot_dashboard/dashboard.py | xabigarde/chris_ros_turtlebot | ca26db3eafcb8aba7a322cca8fd44443f015e125 | [
"BSD-3-Clause"
] | null | null | null | chris_turtlebot_dashboard/src/chris_turtlebot_dashboard/dashboard.py | xabigarde/chris_ros_turtlebot | ca26db3eafcb8aba7a322cca8fd44443f015e125 | [
"BSD-3-Clause"
] | 1 | 2021-07-23T14:09:18.000Z | 2021-07-23T14:09:18.000Z | import roslib;roslib.load_manifest('kobuki_dashboard')
import rospy
import diagnostic_msgs
from rqt_robot_dashboard.dashboard import Dashboard
from rqt_robot_dashboard.widgets import ConsoleDashWidget, MenuDashWidget, IconToolButton
from python_qt_binding.QtWidgets import QMessageBox, QAction
from python_qt_binding.QtCore import QSize,QTimer
from .battery_widget import BatteryWidget
from .led_widget import LedWidget
from .motor_widget import MotorWidget
from .monitor_dash_widget import MonitorDashWidget
| 47.483871 | 147 | 0.673234 |
5a421a3520f2cd9636eea2d36b206d6735096aca | 3,339 | py | Python | msgpack_lz4block/__init__.py | AlsidOfficial/python-msgpack-lz4block | 4cfa6fc69799530c72b73c660d0beabb4ebd5a81 | [
"MIT"
] | 1 | 2021-07-01T12:41:41.000Z | 2021-07-01T12:41:41.000Z | msgpack_lz4block/__init__.py | AlsidOfficial/python-msgpack-lz4block | 4cfa6fc69799530c72b73c660d0beabb4ebd5a81 | [
"MIT"
] | null | null | null | msgpack_lz4block/__init__.py | AlsidOfficial/python-msgpack-lz4block | 4cfa6fc69799530c72b73c660d0beabb4ebd5a81 | [
"MIT"
] | null | null | null | import msgpack
import lz4.block
from msgpack.ext import Timestamp, ExtType
import re
# Pre-compiled bytes pattern matching the .NET typed-array header embedded by
# MessagePack-CSharp's typeless formatter: a msgpack str8 marker (\xd9, length
# byte 'j' = 0x6a) followed by the "System.Object[], System.Private.CoreLib,
# ..." type name, then one more short-str header byte; everything after is
# captured as "payload".  Not referenced by the code visible in this chunk --
# presumably consumed by helpers defined elsewhere in this module; TODO
# confirm against callers.
PATTERN_1 = re.compile(
    rb'\xd9jSystem.Object\[\], System.Private.CoreLib, Version=[0-9][0-9.]*, Culture=neutral, PublicKeyToken=7cec85d7bea7798e.*?\xd9.(?P<payload>.*)')
def deserialize(bytes_data, key_map=None, buffer_size=100 * 1024 * 1024):
    """
    Deserialize a bytes array produced by the MessagePack-CSharp lib with
    lz4block compression.

    :param bytes_data: serialized bytes generated by MessagePack-CSharp
        using lz4block compression.
    :param key_map: optional key list used to produce a key/value dict.
    :param buffer_size: decompression buffer size, in bytes.
    :return: deserialized data
    """
    # The outer envelope is a msgpack sequence whose bytes elements are
    # lz4block-compressed chunks of the real payload; decompress each chunk
    # and concatenate them in order.
    chunks = [
        lz4.block.decompress(element, uncompressed_size=buffer_size)
        for element in msgpack.unpackb(bytes_data)
        if isinstance(element, bytes)
    ]
    obj = jsonify(msgpack.unpackb(b''.join(chunks), ext_hook=ext_hook, raw=False))
    return obj if key_map is None else __map_obj(obj, key_map)
| 42.265823 | 150 | 0.634921 |
5a42367cb5c3c6ae30a847d5d4575149e7bc2d38 | 2,169 | py | Python | scilpy/version.py | fullbat/scilpy | 8f5b95a0b298ac95268c94d04a162b14fe2773ad | [
"MIT"
] | null | null | null | scilpy/version.py | fullbat/scilpy | 8f5b95a0b298ac95268c94d04a162b14fe2773ad | [
"MIT"
] | null | null | null | scilpy/version.py | fullbat/scilpy | 8f5b95a0b298ac95268c94d04a162b14fe2773ad | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import glob
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
# Individual version components, combined below into __version__.
_version_major = 0
_version_minor = 1
_version_micro = ''  # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = ''  # Uncomment this for full releases
# Construct full version string from these; empty micro/extra parts are
# skipped, so the default yields "0.1.dev".
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
# Trove classifiers published to PyPI.
CLASSIFIERS = ["Development Status :: 3 - Alpha",
               "Environment :: Console",
               "Intended Audience :: Science/Research",
               "License :: OSI Approved :: MIT License",
               "Operating System :: OS Independent",
               "Programming Language :: Python",
               "Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "Scilpy: diffusion MRI tools and utilities"
# Long description will go up on the pypi page.
# BUGFIX: restored the mis-encoded accented character in
# "Université de Sherbrooke" (the é had been dropped).
long_description = """
Scilpy
========
Scilpy is a small library mainly containing small tools and utilities
to quickly work with diffusion MRI. Most of the tools are based
on or wrapper of the Dipy_ library.
.. _Dipy: http://dipy.org
License
=======
``scilpy`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2012--, Sherbrooke Connectivity Imaging Lab [SCIL],
Université de Sherbrooke.
"""
# Packaging metadata consumed by setup.py.
NAME = "scilpy"
MAINTAINER = "Jean-Christophe Houde"
MAINTAINER_EMAIL = "jean.christophe.houde@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "https://github.com/scilus/scilpy"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "The SCIL developers"
AUTHOR_EMAIL = ""
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
REQUIRES = ["numpy"]
SCRIPTS = glob.glob("scripts/*.py")
5a448e7214b3790abd510a4b2f97d52ddcfd5d87 | 3,765 | py | Python | fireflies.py | dvsd/Firefly-Synchronization | 89aec8513a386cf274f333ba8b4fa64555766619 | [
"MIT"
] | 1 | 2021-04-22T14:04:19.000Z | 2021-04-22T14:04:19.000Z | fireflies.py | dvsd/Firefly-Synchronization | 89aec8513a386cf274f333ba8b4fa64555766619 | [
"MIT"
] | null | null | null | fireflies.py | dvsd/Firefly-Synchronization | 89aec8513a386cf274f333ba8b4fa64555766619 | [
"MIT"
] | null | null | null | from graphics import *
import math
import random
windowWidth = 400
windowHeight = 400
fireflyRadius = 3
win = GraphWin("Fireflies",windowWidth,windowHeight,autoflush=False)
win.setBackground('black')
closeWindow = False
fireflies = []
flashedFliesOpenSet = [] # flies that need to reset urge of neighbors
flashedFliesClosedSet = [] # flies that have already flashed and reset its urge
colorTraits = [
[255,0,0], #red
[0,255,0], # green
[0,0,255], # blue
[255,255,0], # yellow
[255,0,255], # purple
[0,255,255], # cyan
[232, 30, 99], # pink
[255, 152, 0], # orange
[96, 125, 139], # blue gray
[255,87,51] # blood orange
]
# As time progresses, increase urge every second
for i in range(random.randint(40,85)): # randomly generate Firefly instances at random coordinates within frame
fireflies.append(Firefly(random.randint(fireflyRadius,windowWidth-fireflyRadius),random.randint(fireflyRadius,windowHeight-fireflyRadius)))
for fly in fireflies:
fly.draw()
previousTime = time.time()
while not closeWindow:
currentTime = time.time() # get currentTime in seconds
if (currentTime-previousTime) > .1: # if one second has elapsed
previousTime = currentTime # previous time becomes the old current time
for fly in fireflies: # for all fireflies
if fly.flashed:
fly.flashed = False
fly.compute_hue(fly.colorTrait)
fly.circle.setFill(color_rgb(fly.hue[0],fly.hue[1],fly.hue[2]))
fly.circle.setOutline(color_rgb(fly.hue[0],fly.hue[1],fly.hue[2]))
fly.currentUrge += 1 # increase urge by one every one second
win.flush()
if fly.currentUrge >= fly.threshold: # if current urge exceeds the fireflies' threshold
fly.flashed = True
flashedFliesOpenSet.append(fly)
fly.currentUrge = 0 # reset phase/currentUrge
for flashedFly in flashedFliesOpenSet:
# TODO: alter this loop to eliminate every visited fly to reduce iterations.
# Would need to reset the list of flies on the outside of the loop to ensure every fly is visitied.
for fly in fireflies:
if fly not in flashedFliesOpenSet and fly not in flashedFliesClosedSet:
if distbetween(flashedFly,fly) <= 50 and (flashedFly!= fly) and fly.currentUrge < fly.threshold and fly.currentUrge != 0:
fly.currentUrge = 0
fly.colorTrait = flashedFly.colorTrait
flashedFliesOpenSet.append(fly)
flashedFliesOpenSet.remove(flashedFly)
flashedFliesClosedSet.append(flashedFly)
if win.checkKey():
closeWindow = True
win.getMouse()
| 34.227273 | 140 | 0.712882 |
5a44e929a11797422604acb7129e5a00747b908f | 2,350 | py | Python | gb/tests/test_gibbs_sampler.py | myozka/granger-busca | e6922f85aa58ab0809951ec4d60b5df43d6c74e8 | [
"BSD-3-Clause"
] | 5 | 2018-09-06T13:37:04.000Z | 2019-12-16T13:53:26.000Z | gb/tests/test_gibbs_sampler.py | myozka/granger-busca | e6922f85aa58ab0809951ec4d60b5df43d6c74e8 | [
"BSD-3-Clause"
] | 1 | 2021-06-09T06:08:25.000Z | 2021-07-13T18:10:09.000Z | gb/tests/test_gibbs_sampler.py | myozka/granger-busca | e6922f85aa58ab0809951ec4d60b5df43d6c74e8 | [
"BSD-3-Clause"
] | 4 | 2020-03-30T14:54:27.000Z | 2021-09-23T18:48:14.000Z | # -*- coding: utf8
from gb.randomkit.random import RNG
from gb.samplers import BaseSampler
from gb.samplers import CollapsedGibbsSampler
from gb.stamps import Timestamps
from gb.sloppy import SloppyCounter
from numpy.testing import assert_equal
import numpy as np
| 28.313253 | 73 | 0.631064 |
5a44f541b7846b979545c92ddcc2e62d26b600d3 | 9,163 | py | Python | python/tHome/sma/Link.py | ZigmundRat/T-Home | 5dc8689f52d87dac890051e540b338b009293ced | [
"BSD-2-Clause"
] | 18 | 2016-04-17T19:39:28.000Z | 2020-11-19T06:55:20.000Z | python/tHome/sma/Link.py | ZigmundRat/T-Home | 5dc8689f52d87dac890051e540b338b009293ced | [
"BSD-2-Clause"
] | 6 | 2016-10-31T13:53:45.000Z | 2019-03-20T20:47:03.000Z | python/tHome/sma/Link.py | ZigmundRat/T-Home | 5dc8689f52d87dac890051e540b338b009293ced | [
"BSD-2-Clause"
] | 12 | 2016-10-31T12:29:08.000Z | 2021-12-28T12:18:28.000Z | #===========================================================================
#
# Primary SMA API.
#
#===========================================================================
import socket
from .. import util
from . import Auth
from . import Reply
from . import Request
#==============================================================================
#==============================================================================
| 38.020747 | 79 | 0.460984 |
5a453d50864469ccb2ceb29c181778bf81f77b45 | 1,988 | py | Python | src/tinerator/visualize/qt_app.py | lanl/tinerator | b34112f01d64801b6539650af2e40edff33f9f9b | [
"BSD-3-Clause"
] | 2 | 2021-09-13T17:10:25.000Z | 2021-09-17T18:36:21.000Z | src/tinerator/visualize/qt_app.py | lanl/tinerator | b34112f01d64801b6539650af2e40edff33f9f9b | [
"BSD-3-Clause"
] | 15 | 2021-08-16T18:23:58.000Z | 2022-02-03T04:38:24.000Z | src/tinerator/visualize/qt_app.py | lanl/tinerator | b34112f01d64801b6539650af2e40edff33f9f9b | [
"BSD-3-Clause"
] | null | null | null | import sys
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtCore import QCoreApplication, QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWebEngineWidgets import QWebEngineProfile
| 26.864865 | 86 | 0.65493 |
5a46d6b1d5ad18765586dcbd1b433a5a6d49394a | 2,487 | py | Python | openstack/tests/unit/clustering/v1/test_receiver.py | anton-sidelnikov/openstacksdk | 98f0c67120b65814c3bd1663415e302551a14536 | [
"Apache-2.0"
] | null | null | null | openstack/tests/unit/clustering/v1/test_receiver.py | anton-sidelnikov/openstacksdk | 98f0c67120b65814c3bd1663415e302551a14536 | [
"Apache-2.0"
] | null | null | null | openstack/tests/unit/clustering/v1/test_receiver.py | anton-sidelnikov/openstacksdk | 98f0c67120b65814c3bd1663415e302551a14536 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.clustering.v1 import receiver
from openstack.tests.unit import base
# Identifier and name reused across the canned fixture below.
FAKE_ID = 'ae63a10b-4a90-452c-aef1-113a0b255ee3'
FAKE_NAME = 'test_receiver'
# Canned attribute dict for a clustering (senlin) webhook receiver --
# presumably consumed as the resource body by the test cases that follow
# (not visible in this chunk).
FAKE = {
    'id': FAKE_ID,
    'name': FAKE_NAME,
    'type': 'webhook',
    'cluster_id': 'FAKE_CLUSTER',
    'action': 'CLUSTER_RESIZE',
    'created_at': '2015-10-10T12:46:36.000000',
    'updated_at': '2016-10-10T12:46:36.000000',
    'actor': {},
    'params': {
        'adjustment_type': 'CHANGE_IN_CAPACITY',
        'adjustment': 2
    },
    'channel': {
        'alarm_url': 'http://host:port/webhooks/AN_ID/trigger?V=1',
    },
    'user': 'FAKE_USER',
    'project': 'FAKE_PROJECT',
    'domain': '',
}
| 34.541667 | 75 | 0.668275 |
5a48e8486f10a1984a1d5c43962af125191eae02 | 4,137 | py | Python | gan/kdd_utilities.py | mesarcik/Efficient-GAN-Anomaly-Detection | 15568abb57d2965ce70d4fd0dc70f3fe00c68d1b | [
"MIT"
] | 408 | 2018-02-27T05:10:49.000Z | 2022-03-24T10:32:07.000Z | gan/kdd_utilities.py | phuccuongngo99/Efficient-GAN-Anomaly-Detection | 849ffd91436f4ab8908e0d0ae9e6eadff5f67110 | [
"MIT"
] | 21 | 2018-05-21T09:18:02.000Z | 2021-08-30T21:51:38.000Z | gan/kdd_utilities.py | phuccuongngo99/Efficient-GAN-Anomaly-Detection | 849ffd91436f4ab8908e0d0ae9e6eadff5f67110 | [
"MIT"
] | 139 | 2018-03-05T13:42:11.000Z | 2022-03-20T09:02:41.000Z | import tensorflow as tf
"""Class for KDD10 percent GAN architecture.
Generator and discriminator.
"""
learning_rate = 0.00001
batch_size = 50
layer = 1
latent_dim = 32
dis_inter_layer_dim = 128
init_kernel = tf.contrib.layers.xavier_initializer()
def generator(z_inp, is_training=False, getter=None, reuse=False):
    """ Generator architecture in tensorflow
    Generates data from the latent space: two ReLU hidden layers (64 and
    128 units) followed by a linear 121-unit output layer.
    Args:
        z_inp (tensor): variable in the latent space
        is_training (bool): unused in this network (no batch-norm/dropout);
            kept for API symmetry with the discriminator
        getter: custom variable getter passed to the variable scope (e.g.
            to fetch moving-average shadow variables)
        reuse (bool): sharing variables or not
    Returns:
        (tensor): last activation layer of the generator
    """
    with tf.variable_scope('generator', reuse=reuse, custom_getter=getter):
        # layer_1: latent -> 64 units, ReLU.
        name_net = 'layer_1'
        with tf.variable_scope(name_net):
            net = tf.layers.dense(z_inp,
                                  units=64,
                                  kernel_initializer=init_kernel,
                                  name='fc')
            net = tf.nn.relu(net, name='relu')
        # layer_2: 64 -> 128 units, ReLU.
        name_net = 'layer_2'
        with tf.variable_scope(name_net):
            net = tf.layers.dense(net,
                                  units=128,
                                  kernel_initializer=init_kernel,
                                  name='fc')
            net = tf.nn.relu(net, name='relu')
        # Output layer (scope named 'layer_4'; 'layer_3' is skipped in the
        # original naming): linear 121-unit output, no activation.
        name_net = 'layer_4'
        with tf.variable_scope(name_net):
            net = tf.layers.dense(net,
                                  units=121,
                                  kernel_initializer=init_kernel,
                                  name='fc')
    return net
def discriminator(x_inp, is_training=False, getter=None, reuse=False):
    """ Discriminator architecture in tensorflow
    Discriminates between real data and generated data
    Args:
        x_inp (tensor): input data for the encoder.
        is_training (bool): enables the dropout layers when True
        getter: custom variable getter passed to the variable scope
        reuse (bool): sharing variables or not
    Returns:
        logits (tensor): last activation layer of the discriminator (shape 1)
        intermediate_layer (tensor): intermediate layer for feature matching
    """
    with tf.variable_scope('discriminator', reuse=reuse, custom_getter=getter):
        # layer_1: input -> 256 units, leaky ReLU + dropout.
        name_net = 'layer_1'
        with tf.variable_scope(name_net):
            net = tf.layers.dense(x_inp,
                                  units=256,
                                  kernel_initializer=init_kernel,
                                  name='fc')
            net = leakyReLu(net)
            net = tf.layers.dropout(net, rate=0.2, name='dropout',
                                    training=is_training)
        # layer_2: 256 -> 128 units, leaky ReLU + dropout.
        name_net = 'layer_2'
        with tf.variable_scope(name_net):
            net = tf.layers.dense(net,
                                  units=128,
                                  kernel_initializer=init_kernel,
                                  name='fc')
            net = leakyReLu(net)
            net = tf.layers.dropout(net, rate=0.2, name='dropout',
                                    training=is_training)
        # layer_3: 128 -> dis_inter_layer_dim units; its (post-dropout)
        # activation is kept as the feature-matching layer returned below.
        name_net = 'layer_3'
        with tf.variable_scope(name_net):
            net = tf.layers.dense(net,
                                  units=dis_inter_layer_dim,
                                  kernel_initializer=init_kernel,
                                  name='fc')
            net = leakyReLu(net)
            net = tf.layers.dropout(net,
                                    rate=0.2,
                                    name='dropout',
                                    training=is_training)
        intermediate_layer = net
        # layer_4: single linear unit producing the logit, squeezed to
        # drop the trailing size-1 dimension.
        name_net = 'layer_4'
        with tf.variable_scope(name_net):
            net = tf.layers.dense(net,
                                  units=1,
                                  kernel_initializer=init_kernel,
                                  name='fc')
        net = tf.squeeze(net)
    return net, intermediate_layer
5a48f16367b8db551ede0ba75c39ecf9f879f676 | 646 | py | Python | setup.py | jhakonen/wotdisttools | 2194761baaf1f6ade5fa740d134553b77300211b | [
"MIT"
] | 9 | 2019-08-15T14:59:39.000Z | 2021-06-24T22:03:31.000Z | setup.py | jhakonen/wotdisttools | 2194761baaf1f6ade5fa740d134553b77300211b | [
"MIT"
] | 1 | 2019-08-06T19:22:44.000Z | 2019-08-11T09:23:31.000Z | setup.py | jhakonen/setuptools-wotmod | 2194761baaf1f6ade5fa740d134553b77300211b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='setuptools-wotmod',
version='0.2',
packages=find_packages(),
description='setuptools integration for creating World of Tanks mods',
long_description=open('README.md').read(),
author='jhakonen',
url='https://github.com/jhakonen/setuptools-wotmod/',
license='MIT License',
setup_requires=['pytest-runner'],
tests_require=[
'mock',
'nose',
'pytest<5',
],
entry_points={
"distutils.commands": [
"bdist_wotmod = setuptools_wotmod.bdist_wotmod:bdist_wotmod",
],
},
)
| 24.846154 | 74 | 0.630031 |
5a492602297201d4f7e69fbf52b8fafe45beb71d | 2,264 | py | Python | services/prepare_snps_data.py | eliorav/Population-Genotype-Frequency | 11780b182bf417ac10ae86919ee313e39158267d | [
"Apache-2.0"
] | null | null | null | services/prepare_snps_data.py | eliorav/Population-Genotype-Frequency | 11780b182bf417ac10ae86919ee313e39158267d | [
"Apache-2.0"
] | null | null | null | services/prepare_snps_data.py | eliorav/Population-Genotype-Frequency | 11780b182bf417ac10ae86919ee313e39158267d | [
"Apache-2.0"
] | null | null | null | import os
from glob import glob
import pandas as pd
from tqdm import tqdm
from constants import SNPS_DATA_PATH, SNPS_DATA_FOLDER, SNPS_DATA_FILE_NAME
from services.docker_runner import Hg38dbDockerRunner
def fetch_snps_data(snps_file_path):
    """
    Fetch SNPs data (chrom, position) from the hg38 db, querying in batches
    of 500 SNP names and writing one CSV per batch into SNPS_DATA_FOLDER.

    :param snps_file_path: the path of the SNPs list (tab-separated file
        with columns snp, allele)
    """
    print("retrieving SNPs data (chrom, position)")
    snps_df = pd.read_csv(snps_file_path, sep="\t", names=['snp', 'allele'])
    snps = snps_df['snp'].unique()
    step_size = 500
    # Ceiling division: exactly enough batches, no empty trailing batch
    # (the old int(len/step)+1 produced an extra empty query when len was
    # a multiple of step_size).
    steps = (len(snps) + step_size - 1) // step_size
    hg38db_docker_runner = Hg38dbDockerRunner()
    with tqdm(total=len(snps)) as pbar:
        for step in range(steps):
            start = step * step_size
            # BUGFIX: the last batch previously used the slice
            # snps[start:-1], silently dropping the final SNP from the
            # query; clamp to len(snps) instead.
            end = min((step + 1) * step_size, len(snps))
            snps_query = '", "'.join(snps[start:end])
            pbar.set_description(f"Processing snps in range {start} - {end}")
            hg38db_docker_runner(environment={
                'QUERY': f'select chrom, chromEnd, name from snp150 where name in ("{snps_query}")',
                'FILE_NAME': f'{SNPS_DATA_FOLDER}/snps_data_{step}'
            })
            pbar.update(end - start)
def merge_snps_data():
    """
    Combine the per-batch hg38 db CSV files into one sorted SNPs data file.
    """
    print("merge SNPs data to a single file")
    frames = [pd.read_csv(csv_path) for csv_path in SNPS_DATA_PATH.glob('*.csv')]
    merged = pd.concat(frames, ignore_index=True)
    # Drop alternate-contig rows, order by genomic position, then rename the
    # columns to the output schema before writing.
    merged = merged[~merged['chrom'].str.contains('alt')]
    merged = merged.sort_values(by=['chrom', 'chromEnd'])
    merged = merged.rename(columns={"chrom": "#chrom", "chromEnd": "position", "name": "rsid"})
    merged.to_csv(SNPS_DATA_PATH / SNPS_DATA_FILE_NAME, index=False)
def prepare_snps_data(args):
    """
    Build the SNPs data file unless it has already been generated.

    :param args: script args - should include snps_file_path - the path of the SNPs list
    """
    if SNPS_DATA_PATH.exists():
        # A previous run already produced the data directory; nothing to do.
        print(f"SNPs data: {SNPS_DATA_PATH} already exist")
        return
    SNPS_DATA_PATH.mkdir(exist_ok=True, parents=True)
    fetch_snps_data(args.snps_file_path)
    merge_snps_data()
| 39.034483 | 105 | 0.659452 |
5a4bcf1b59efc03b155e47a1a800ec05299ddea9 | 258 | py | Python | lab1/lab1/views/home.py | ZerocksX/Service-Oriented-Computing-2019 | eac6b0e9a40eed76b452f6524fd899e7107b0f69 | [
"Apache-2.0"
] | null | null | null | lab1/lab1/views/home.py | ZerocksX/Service-Oriented-Computing-2019 | eac6b0e9a40eed76b452f6524fd899e7107b0f69 | [
"Apache-2.0"
] | null | null | null | lab1/lab1/views/home.py | ZerocksX/Service-Oriented-Computing-2019 | eac6b0e9a40eed76b452f6524fd899e7107b0f69 | [
"Apache-2.0"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render, redirect
from lab1.views import login
| 23.454545 | 45 | 0.763566 |
5a4c04b5d165286adafed51f08e73b407e82dac3 | 2,154 | py | Python | ssdp/socketserver.py | vintozver/ssdp | ab3199068e3af93d95b00dcd79fbb444aa4ba13b | [
"MIT"
] | null | null | null | ssdp/socketserver.py | vintozver/ssdp | ab3199068e3af93d95b00dcd79fbb444aa4ba13b | [
"MIT"
] | null | null | null | ssdp/socketserver.py | vintozver/ssdp | ab3199068e3af93d95b00dcd79fbb444aa4ba13b | [
"MIT"
] | null | null | null | import logging
import socket
import socketserver
import struct
import typing
from ssdp.entity import *
from ssdp.network import *
logger = logging.getLogger("ssdp.socketserver")
| 31.217391 | 87 | 0.606778 |
5a4c53204a1b7bd48e50214561ae151641713f7f | 1,040 | py | Python | giggleliu/tba/hgen/multithreading.py | Lynn-015/Test_01 | 88be712b2d17603f7a3c38836dabe8dbdee2aba3 | [
"MIT"
] | 2 | 2015-11-12T01:11:20.000Z | 2015-11-12T23:32:28.000Z | giggleliu/tba/hgen/multithreading.py | Lynn-015/Test_01 | 88be712b2d17603f7a3c38836dabe8dbdee2aba3 | [
"MIT"
] | 3 | 2015-10-28T02:25:48.000Z | 2015-11-25T18:21:22.000Z | giggleliu/tba/hgen/multithreading.py | Lynn-015/NJU_DMRG | 88be712b2d17603f7a3c38836dabe8dbdee2aba3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from numpy import *
from mpi4py import MPI
from matplotlib.pyplot import *
#MPI setting
try:
COMM=MPI.COMM_WORLD
SIZE=COMM.Get_size()
RANK=COMM.Get_rank()
except:
COMM=None
SIZE=1
RANK=0
__all__=['mpido']
def mpido(func,inputlist,bcastouputmesh=True):
    '''
    Distribute `func` over `inputlist` across MPI ranks and gather results.

    func:
        The function applied to each element of inputlist.
    inputlist:
        The input list; it is split into contiguous chunks, one per rank.
    bcastouputmesh:
        broadcast the gathered output list to all ranks if True.

    NOTE(review): this module targets Python 2 (`xrange`, integer `/`).
    Under Python 3 the divisions below would produce floats and break the
    chunk assignment - confirm before porting.
    '''
    N=len(inputlist)
    # Chunk size: ceil(N / SIZE) via integer arithmetic (Python-2 `/`).
    ntask=(N-1)/SIZE+1
    datas=[]
    for i in xrange(N):
        # Element i belongs to rank floor(i / ntask); each rank only
        # evaluates its own contiguous slice.
        if i/ntask==RANK:
            datas.append(func(inputlist[i]))
    # Collect the per-rank partial result lists onto rank 0.
    datal=COMM.gather(datas,root=0)
    if RANK==0:
        datas=[]
        for datai in datal:
            datas+=datai
    # Broadcast the concatenated result so every rank returns the full list
    # (otherwise only rank 0 holds the complete output).
    if bcastouputmesh:
        datas=COMM.bcast(datas,root=0)
    return datas
if __name__=='__main__':
test_mpido()
| 19.622642 | 46 | 0.6 |
5a4c57677f4df8cc0dad6ecf21973ff01725bd89 | 1,480 | py | Python | manage.py | forestmonster/flask-microservices-users | 84b6edb1d57bd5882a48346bba5ff67a2ce44d9c | [
"MIT"
] | null | null | null | manage.py | forestmonster/flask-microservices-users | 84b6edb1d57bd5882a48346bba5ff67a2ce44d9c | [
"MIT"
] | null | null | null | manage.py | forestmonster/flask-microservices-users | 84b6edb1d57bd5882a48346bba5ff67a2ce44d9c | [
"MIT"
] | null | null | null | import unittest
import coverage
from flask_script import Manager
from project import create_app, db
from project.api.models import User
# Configure branch coverage for the application code, excluding tests,
# the config module and package __init__ files.
COV = coverage.coverage(
    branch=True,
    include='project/*',
    omit=[
        'project/tests/*',
        'project/server/config.py',
        'project/server/*/__init__.py'
    ]
)
COV.start()  # Start collecting coverage as soon as this module is imported.
app = create_app()
manager = Manager(app)  # Flask-Script CLI manager wrapping the Flask app.
if __name__ == '__main__':
    manager.run()
5a4d72f7295e946813a914b8b8596cf8a6802ccb | 2,691 | py | Python | cocotb/_py_compat.py | lavanyajagan/cocotb | 2f98612016e68510e264a2b4963303d3588d8404 | [
"BSD-3-Clause"
] | 350 | 2015-01-09T12:50:13.000Z | 2019-07-12T09:08:17.000Z | cocotb/_py_compat.py | lavanyajagan/cocotb | 2f98612016e68510e264a2b4963303d3588d8404 | [
"BSD-3-Clause"
] | 710 | 2015-01-05T16:42:29.000Z | 2019-07-16T13:40:00.000Z | cocotb/_py_compat.py | lavanyajagan/cocotb | 2f98612016e68510e264a2b4963303d3588d8404 | [
"BSD-3-Clause"
] | 182 | 2015-01-08T09:35:20.000Z | 2019-07-12T18:41:37.000Z | # Copyright (c) cocotb contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Backports and compatibility shims for newer python features.
These are for internal use - users should use a third party library like `six`
if they want to use these shims in their own code
"""
import sys
# backport of Python 3.7's contextlib.nullcontext
# On python 3.7 onwards, `dict` is guaranteed to preserve insertion order.
# Since `OrderedDict` is a little slower that `dict`, we prefer the latter
# when possible.
# `insertion_ordered_dict` is a dict type guaranteed to preserve insertion
# order: the plain builtin `dict` from CPython 3.7 onwards (where ordering
# is part of the language spec), otherwise `collections.OrderedDict`.
if sys.version_info[:2] >= (3, 7):
    insertion_ordered_dict = dict
else:
    import collections

    insertion_ordered_dict = collections.OrderedDict
| 42.046875 | 81 | 0.751394 |
5a4dfb65c9293913510af1677af7923d5236e918 | 8,259 | py | Python | tomopal/crtomopy/demo/pjt_demo.py | robinthibaut/TomoPal | bb3d1f9d56afc53c641a72b47e4419ee0cfd587b | [
"BSD-3-Clause"
] | 2 | 2021-03-01T11:06:17.000Z | 2021-09-24T11:49:31.000Z | tomopal/crtomopy/demo/pjt_demo.py | robinthibaut/TomoPal | bb3d1f9d56afc53c641a72b47e4419ee0cfd587b | [
"BSD-3-Clause"
] | 53 | 2021-03-30T14:05:17.000Z | 2022-03-31T09:55:14.000Z | tomopal/crtomopy/demo/pjt_demo.py | robinthibaut/TomoPal | bb3d1f9d56afc53c641a72b47e4419ee0cfd587b | [
"BSD-3-Clause"
] | 1 | 2020-06-16T11:16:39.000Z | 2020-06-16T11:16:39.000Z | # Copyright (c) 2020. Robin Thibaut, Ghent University
from os.path import join as jp
import numpy as np
from tomopal.crtomopy.crtomo.crc import (
Crtomo,
datread,
import_res,
mesh_geometry,
mtophase,
)
from ..parent import inventory
from ...geoview.diavatly import model_map # To plot results
# %% Directories
# Input here the folders to structure your project. It is not necessary to previously create them
# (except the data folder)
# they will be automatically generated once you initialize a crtomo object.
# Note: the function 'jp' simply joins the arguments to build a path.
main_dir = inventory.hello() # Current working directory of the project
data_dir = jp(main_dir, "data", "demo") # Data files directory
mesh_dir = jp(main_dir, "mesh", "demo") # Mesh files directory
iso_dir = jp(main_dir, "iso", "demo") # ISO file dir
ref_dir = jp(main_dir, "ref", "demo") # Reference model files dir
start_dir = jp(main_dir, "start", "demo") # Start model files dir
results_dir = jp(main_dir, "results", "demo") # Results files directory
# %% Exe names
# Input here the path to your exe files.
mesh_exe_name = jp(main_dir, "mesh.exe")
crtomo_exe_name = jp(main_dir, "crtomo.exe")
# %% Create crtomo object
# Folders will be generated here if they don't exist already.
myinv = Crtomo(
working_dir=main_dir,
data_dir=data_dir,
mesh_dir=mesh_dir,
iso_dir=iso_dir,
ref_dir=ref_dir,
start_dir=start_dir,
crtomo_exe=crtomo_exe_name,
mesh_exe=mesh_exe_name,
)
# %% Generating the mesh
# Data file name A B M N in meters
df = jp(data_dir, "demo_elecs.dat") # Path to electrode configuration file
dat = datread(df) # Use built-in function to extract data (optional)
# Electrode spacing in meters
es = 5
# Electrodes elevation
# Data elevation file name X Z in meters
ef = jp(data_dir, "demo_elevation.dat")
elev = datread(ef) # Use built-in function to extract data (optional)
# %% Build the mesh
# The following command generates the mesh in the folder indicated previously.
# It requires 3 arguments:
# the numpy array of electrodes position of shape (n, 4) (required)
# the electrode spacing (required)
# the elevation data (optional)
myinv.meshmaker(abmn=dat[:, [0, 1, 2, 3]], electrode_spacing=es, elevation_data=elev)
# If you already have generated a mesh, comment the line above and instead
# load the previously generated Mesh.dat file as described below.
# %% Read the mesh data (number of cells, blocks coordinates, x-y coordinates of the center of the blocks) from Mesh.dat
mshf = jp(mesh_dir, "Mesh.dat") # Path to the generated 'Mesh.dat' file.
ncol, nlin, nelem, blocks, centerxy = mesh_geometry(mshf) # Extract mesh properties
# %% Build configuration file
# 0 Mesh.dat file
mesh_file = mshf
# 1 elec.dat file
elec_file = jp(mesh_dir, "elec.dat")
# 2 Data file
data_file = jp(data_dir, "demo_data.dat")
# 3 Results folder file
# Specify the path where the results will be loaded
frname = (
"" # If you want to save the results in a sub-folder in the main results folder
)
result_folder = jp(results_dir, frname)
# 8 Flag for reference model constraint (0/1)
reference_model = 0
#
reference_model_file = None
# %% 12 File for reference model (model weights)
reference_weights_file = None
# You can use the tool ModelMaker from mohinh to interactively create prior models, and automatically save the results
# in a dat file if you provide a file name.
# Otherwise you can access the final results with (ModelMaker object).final_results and export it yourself.
# Example with a background resistivity of 100 ohm.m :
# rfwm = ModelMaker(blocks=blocks, values_log=1, bck=100)
# my_model = rfwm.final_results
# Alternatively, use a simpler approach to produce a reference model file:
# with open(reference_weights_file, 'w') as rw:
# rw.write(str(nelem)+'\n')
# [rw.write('0.1'+'\n') for i in range(nelem)]
# rw.close()
# %% 22 Maximum numbers of iterations
iterations = 20
# 23 Min data RMS
rms = 1.0000
# 24 Flag for DC inversion (0 = with IP / 1 = only DC)
dc = 1
# 25 Flag for robust inversion (0/1)
robust = 1
# 26 Flag for checking polarity (0/1)
check_polarity = 1
# 27 Flag for final phase improvement (0/1)
final_phase_improvement = 1
# 29 Relative magnitude error level (%)
error_level = 2.5
# 30 Minimum absolute magnitude error (ohm)
min_abs_error = 0.00015
# 31 Error in phase (mrad)
phase_error = 0.5
# 36 Flag for MGS inversion (0/1)
mgs = 0
# 37 Beta value
beta = 0.002
# 38 Flag for starting model (0/1)
starting_model = 0
# 39 Starting model file
starting_model_file = None
# %% 19 ISO file 1
iso_file1 = jp(iso_dir, "iso.dat")
# dm = datread(starting_model_file, start=1)[:, 0]
# isom = ModelMaker(blocks=blocks, values=dm, values_log=1, bck=1)
# #
# with open(iso_file1, 'w') as rw:
# rw.write(str(nelem)+'\n')
# [rw.write('{} 1'.format(str(i))+'\n') for i in isom.final_results]
# rw.close()
# %% Generate configuration file
# If erase = 1, every item in the result folder will be deleted. If you don't want that, pick 0 instead.
# Use help(Crtomo.write_config) to see which parameters you can implement.
myinv.write_config(
erase=1,
mesh_file=mesh_file,
elec_file=elec_file,
data_file=data_file,
result_folder=result_folder,
reference_model=reference_model,
reference_model_file=reference_model_file,
reference_weights_file=reference_weights_file,
iso_file1=iso_file1,
iterations=iterations,
rms=rms,
dc=dc,
robust=robust,
check_polarity=check_polarity,
final_phase_improvement=final_phase_improvement,
error_level=error_level,
min_abs_error=min_abs_error,
phase_error=phase_error,
mgs=mgs,
beta=beta,
starting_model=starting_model,
starting_model_file=starting_model_file,
)
# Forward modeling example :
# # Results folder file
# fwname = 'fwd' # If you want to save the results in a sub-folder in the main results folder
#
# result_folder_fwd = jp(results_dir, fwname)
#
# myfwd = Crtomo(working_dir=cwd,
# data_dir=data_dir,
# mesh_dir=mesh_dir,
# crtomo_exe=crtomo_exe_name)
#
# # # res2mod(jp(result_folder, 'rho1.txt'))
# myfwd.write_config(mesh_file=mesh_file,
# elec_file=elec_file,
# fwd_only=1,
# result_folder=result_folder_fwd,
# starting_model_file=jp(cwd, 'rho1.dat'))
# myfwd.run()
# %% Run CRTOMO
# This will make your Crtomo object run the inversion. The configuration files are
# automatically saved in the results folder
myinv.run()
# %% Import results
if dc == 0: # If you have IP results to load
res, ip = import_res(result_folder=result_folder)
m2p = mtophase(ncycles=1, pulse_l=3.5, tmin=0.02, tmax=2.83)
ipt = ip[:] * m2p
else: # if you only have resistivity data to load
res, files = import_res(result_folder=result_folder, return_file=1)
rest = np.copy(res[0])
# If you want to convert a crtomo result file in a prior model for future inversions for example:
# modf = res2mod(files[0])
# Let's plot the results:
# Remove outliers (arbitrary)
cut = np.log10(4500)
rest[rest > cut] = cut
# Define a linear space for the color map
res_levels = 10 ** np.linspace(min(rest), cut, 10)
rtp = 10 ** np.copy(rest)
# Use the model_map function to display the computed resistivity:
# log=1 because we want a logarithmic scale.
# cbpos is for the position of the color bar.
model_map(
polygons=blocks,
vals=rtp,
log=1,
cbpos=0.4,
levels=res_levels,
folder=result_folder,
figname="demo_res_levels",
)
# %% if IP
if dc == 0:
    ip = np.copy(res[1])
    # crtomo works in phase so we perform the conversion to go back to "mv/v".
    m2p = mtophase(ncycles=1, pulse_l=3.5, tmin=0.02, tmax=2.83)
    ipt = np.copy(np.abs(ip / m2p))
    # Arbitrarily cut outliers.
    # NOTE(review): `hist` is computed but never used afterwards - presumably
    # kept for manual inspection of the chargeability distribution.
    hist = np.histogram(ipt, bins="auto")
    cut = 260
    ipt[ipt > cut] = cut
    # Define levels to be plotted
    ip_levels = [0, 10, 20, 30, 40, 50, 60, 70, 260]
    # Plot the chargeability section with a linear (log=0) colour scale.
    model_map(
        polygons=blocks,
        vals=ipt,
        log=0,
        levels=ip_levels,
        folder=result_folder,
        figname="demo_ip_level",
    )
| 27.808081 | 120 | 0.698632 |
5a4e07f2b94ab476e5ae09d4fd2d5f84fb6f63e2 | 72 | py | Python | __init__.py | VASemenov/Genetica | 5f51159e182a628c2d33c8a401719924b3611df5 | [
"MIT"
] | null | null | null | __init__.py | VASemenov/Genetica | 5f51159e182a628c2d33c8a401719924b3611df5 | [
"MIT"
] | null | null | null | __init__.py | VASemenov/Genetica | 5f51159e182a628c2d33c8a401719924b3611df5 | [
"MIT"
] | null | null | null | from genetica.dna import DNA, genify
from genetica.model import Genetica | 36 | 36 | 0.847222 |
5a4ed98e41bcfbfb4f87bc36a45fc26e1aa68177 | 1,015 | py | Python | client_code/utils/__init__.py | daviesian/anvil-extras | 84fd5ca5144808d4ce2b333995e801a4ddff60e6 | [
"MIT"
] | null | null | null | client_code/utils/__init__.py | daviesian/anvil-extras | 84fd5ca5144808d4ce2b333995e801a4ddff60e6 | [
"MIT"
] | null | null | null | client_code/utils/__init__.py | daviesian/anvil-extras | 84fd5ca5144808d4ce2b333995e801a4ddff60e6 | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
from functools import cache
__version__ = "1.4.0"
| 26.710526 | 83 | 0.715271 |
5a50502deca1083175f893a1ac12f341ff7d78ec | 13,984 | py | Python | evaluate/evaluate_debug.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 4 | 2020-08-15T02:14:03.000Z | 2021-01-30T08:18:18.000Z | evaluate/evaluate_debug.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 23 | 2020-01-24T07:25:40.000Z | 2021-06-02T00:50:32.000Z | evaluate/evaluate_debug.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 1 | 2020-07-02T12:26:45.000Z | 2020-07-02T12:26:45.000Z | import os
import os.path as op
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
import settings
from config import opts
from tfrecords.tfrecord_reader import TfrecordReader
import utils.util_funcs as uf
import utils.convert_pose as cp
from model.synthesize.synthesize_base import SynthesizeMultiScale
from model.train_val import ModelValidater, merge_results
from model.model_main import set_configs, get_dataset, create_training_parts
from model.model_util.logger import stack_reconstruction_images
import model.loss_and_metric.losses as lm
def evaluate_for_debug(data_dir_name, model_name):
    """
    function to check if learning process is going right
    to evaluate current model, save losses and error metrics to csv files and save debugging images
    - debug_depth.csv: predicted depth error smootheness loss
    - debug_pose.csv: photometric loss, trajectory error, rotation error
    - trajectory.csv: gt trajectory, pred trajectory
    - debug_imgs(directory): loss metric inspection view
        1) target image
        2) reconstructed target from gt
        3) reconstructed target from pred
        4) source image
        5) predicted target depth

    :param data_dir_name: name of the tfrecord directory under opts.DATAPATH_TFR
    :param model_name: name of the trained model checkpoint to evaluate
    """
    # Evaluation needs ground-truth pose and depth in the tfrecords.
    if not uf.check_tfrecord_including(op.join(opts.DATAPATH_TFR, data_dir_name), ["pose_gt", "depth_gt"]):
        print("Evaluation is NOT possible without pose_gt and depth_gt")
        return

    set_configs(model_name)
    # NOTE(review): `create_model` and `try_load_weights` are not among the
    # imports visible here - presumably defined elsewhere in this module.
    model = create_model()
    model = try_load_weights(model, model_name)
    model.compile(optimizer="sgd", loss="mean_absolute_error")

    # batch_size=1 so that per-sample losses map 1:1 to dataset frames.
    dataset = TfrecordReader(op.join(opts.DATAPATH_TFR, data_dir_name), batch_size=1).get_dataset()
    depth_result = []
    pose_result = []
    trajectory = []
    steps_per_epoch = uf.count_steps(data_dir_name, 1)
    # First pass: compute per-sample depth/pose metrics and trajectories.
    for i, x in enumerate(dataset):
        uf.print_numeric_progress(i, steps_per_epoch)
        depth_res, pose_res, traj = evaluate_batch(i, x, model)
        depth_result.append(depth_res)
        pose_result.append(pose_res)
        trajectory.append(traj)

    print("")
    depth_result = save_depth_result_and_get_df(depth_result, model_name)
    pose_result = save_pose_result_and_get_df(pose_result, model_name)
    save_trajectories(trajectory, model_name)

    # Pick the worst-performing samples for visual inspection.
    depth_sample_inds = find_worst_depth_samples(depth_result, 5)
    print("worst depth sample indices\n", depth_sample_inds[0])
    pose_sample_inds = find_worst_pose_samples(pose_result, 5)
    print("worst pose sample indices\n", pose_sample_inds[0])
    worst_sample_inds = depth_sample_inds + pose_sample_inds

    pathname = op.join(opts.DATAPATH_EVL, model_name, 'debug_imgs')
    os.makedirs(pathname, exist_ok=True)
    # Second pass: re-iterate the dataset and dump debug images for the
    # previously selected worst samples.
    for i, x in enumerate(dataset):
        uf.print_numeric_progress(i, steps_per_epoch)
        for sample_inds in worst_sample_inds:
            # sample_inds: df['frame', 'srcidx', metric or loss]
            save_worst_views(i, x, model, sample_inds, pathname)
def compute_trajectory_error(pose_pred_mat, pose_true_mat, scale):
    """Compute per-frame trajectory error and ground-truth path length.

    :param pose_pred_mat: predicted snippet pose matrices, [batch, numsrc, 4, 4]
    :param pose_true_mat: ground truth snippet pose matrices, [batch, numsrc, 4, 4]
    :param scale: factor applied to the predicted translation so that it is
        in real (metric) scale
    :return: (trajectory error in meter [batch, numsrc],
              ground truth trajectory length [batch, numsrc])
    """
    # Translation components of the homogeneous 4x4 pose matrices.
    trans_pred = pose_pred_mat[:, :, :3, 3]
    trans_true = pose_true_mat[:, :, :3, 3]
    # The predicted translation is only known up to an unknown global scale,
    # so it is rescaled by `scale` before being compared to the ground truth.
    diff = trans_true - trans_pred * tf.constant([[[scale]]])
    traj_error = tf.sqrt(tf.reduce_sum(diff ** 2, axis=2))
    traj_len = tf.sqrt(tf.reduce_sum(trans_true ** 2, axis=2))
    return traj_error, traj_len
if __name__ == "__main__":
    # Compact numpy printing for readable metric tables.
    np.set_printoptions(precision=3, suppress=True, linewidth=100)
    # NOTE(review): `inspect_results` is not defined in the visible imports -
    # presumably defined elsewhere in this module.
    inspect_results()
    # evaluate_for_debug('kitti_raw_test', 'vode1')
| 43.974843 | 112 | 0.693292 |
5a50e3662524ec61048e74d97bc09d7305717136 | 7,018 | py | Python | tests/test_utils.py | h4ck3rm1k3/requests | 46184236dc177fb68c7863445609149d0ac243ea | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | h4ck3rm1k3/requests | 46184236dc177fb68c7863445609149d0ac243ea | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | h4ck3rm1k3/requests | 46184236dc177fb68c7863445609149d0ac243ea | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import os
from io import BytesIO
import pytest
from requests import compat
from requests.utils import (
address_in_network, dotted_netmask,
get_auth_from_url, get_encodings_from_content,
get_environ_proxies, guess_filename,
is_ipv4_address, is_valid_cidr, requote_uri,
select_proxy, super_len)
from .compat import StringIO, cStringIO
USER = PASSWORD = "%!*'();:@&=+$,/?#[] "
ENCODED_USER = compat.quote(USER, '')
ENCODED_PASSWORD = compat.quote(PASSWORD, '')
| 30.25 | 95 | 0.58008 |
5a5102204d83caa3f795bc8eb2cf30cd51108dd9 | 37,008 | py | Python | clorm/orm/factbase.py | florianfischer91/clorm | 3569a91daa1d691f0a7f5a9534db925e027cdbf9 | [
"MIT"
] | 10 | 2019-01-11T03:31:17.000Z | 2019-12-18T08:18:44.000Z | clorm/orm/factbase.py | florianfischer91/clorm | 3569a91daa1d691f0a7f5a9534db925e027cdbf9 | [
"MIT"
] | 21 | 2018-12-06T04:06:53.000Z | 2019-12-17T00:04:56.000Z | clorm/orm/factbase.py | florianfischer91/clorm | 3569a91daa1d691f0a7f5a9534db925e027cdbf9 | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Clorm ORM FactBase implementation. FactBase provides a set-like container
# specifically for storing facts (Predicate instances).
# ------------------------------------------------------------------------------
import abc
import io
import itertools
import sys
from typing import (Any, Callable, Iterable, Iterator, List, Optional, TextIO,
Tuple, Type, Union, cast, overload)
from ._typing import _T0, _T1, _T2, _T3, _T4
from ._queryimpl import UnGroupedQuery
from .core import (Predicate, PredicateDefn, PredicatePath, and_,
validate_root_paths)
from .factcontainers import FactMap, factset_equality
from .query import (QueryExecutor, QuerySpec, make_query_plan, process_orderby,
process_where)
__all__ = [
'FactBase',
'Select',
'Delete',
]
#------------------------------------------------------------------------------
# Global
#------------------------------------------------------------------------------
_Facts = Union[Iterable[Predicate], Callable[[], Iterable[Predicate]]]
#------------------------------------------------------------------------------
# Support function for printing ASP facts: Note: _trim_docstring() is taken from
# PEP 257 (modified for Python 3): https://www.python.org/dev/peps/pep-0257/
# ------------------------------------------------------------------------------
# Alias the builtin before `asp_str`'s keyword parameter `sorted` shadows it.
_builtin_sorted=sorted
#------------------------------------------------------------------------------
# A FactBase consisting of facts of different types
#------------------------------------------------------------------------------
def remove(self, arg: Predicate) -> None:
"""Remove a fact from the fact base (raises an exception if no fact). """
self._check_init() # Check for delayed init
return self._remove(arg, raise_on_missing=True)
def discard(self, arg: Predicate) -> None:
"""Remove a fact from the fact base. """
self._check_init() # Check for delayed init
return self._remove(arg, raise_on_missing=False)
def pop(self) -> Predicate:
"""Pop an element from the FactBase. """
self._check_init() # Check for delayed init
for pt, fm in self._factmaps.items():
if fm: return fm.pop()
raise KeyError("pop from an empty FactBase")
def clear(self):
"""Clear the fact base of all facts."""
self._check_init() # Check for delayed init
for pt, fm in self._factmaps.items(): fm.clear()
#--------------------------------------------------------------------------
# Special FactBase member functions
#--------------------------------------------------------------------------
    def select(self, root):
        """Define a select query using the old Query API.

        .. note::

           This interface will eventually be deprecated when the new
           :class:`Query API<Query>` is finalised. The entry point to this Query
           API is through the :meth:`FactBase.query` method.

        Args:
          predicate: The predicate to query.

        Returns:
          Returns a Select query object for specifying a query.

        """
        self._check_init() # Check for delayed init
        # `roots` is a single-element list; the loop variable deliberately
        # re-binds the `root` parameter to the validated path.
        roots = validate_root_paths([root])
        ptypes = set([ root.meta.predicate for root in roots])
        # Make sure there are factmaps for each referenced predicate type
        for ptype in ptypes: self._factmaps.setdefault(ptype, FactMap(ptype))
        return SelectImpl(self, QuerySpec(roots=roots))
# START OVERLOADED FUNCTIONS self.query;UnGroupedQuery[{0}];1;5;Type;
# code within this block is **programmatically,
# statically generated** by generate_overloads.py
# END OVERLOADED FUNCTIONS self.query
    def query(self, *roots):
        """Define a query using the new Query API :class:`Query`.

        The parameters consist of predicates (or aliases) to query (like an
        SQL FROM clause).

        Args:
          *roots: predicate or predicate aliases

        Returns:
          Returns a Query object for specifying a query.

        """
        self._check_init() # Check for delayed init

        # Make sure there are factmaps for each referenced predicate type
        ptypes = set([r.meta.predicate for r in validate_root_paths(roots)])
        for ptype in ptypes: self._factmaps.setdefault(ptype, FactMap(ptype))

        # The query is evaluated lazily against the live fact maps.
        qspec = QuerySpec(roots=roots)
        return UnGroupedQuery(self._factmaps, qspec)
def facts(self) -> List[Predicate]:
"""Return all facts."""
self._check_init() # Check for delayed init
tmp = [ fm.factset for fm in self._factmaps.values() if fm]
return list(itertools.chain(*tmp))
    def asp_str(self, *, width: int = 0, commented: bool = False, sorted: bool = False) -> str:
        """Return a ASP string representation of the fact base.

        The generated ASP string representation is syntactically correct ASP
        code so is suitable for adding as the input to to an ASP program (or
        writing to a file for later use in an ASP program).

        By default the order of the facts in the string is arbitrary. Because
        `FactBase` is built on a `OrderedDict` (which preserves insertion
        order) the order of the facts will be deterministic between runs of the
        same program. However two FactBases containing the same facts but
        constructed in different ways will not produce the same output
        string. In order to guarantee the same output the `sorted` flag can be
        specified.

        Args:
          width: tries to fill to a given width by putting more than one
                 fact on a line if necessary (default: 0).
          commented: produces commented ASP code by adding a predicate
                 signature and turning the Predicate sub-class docstring
                 into a ASP comments (default: False).
          sorted: sort the output facts, first by predicates (name,arity) and
                 then by the natural order of the instances for that
                 predicate (default :False).
        """
        self._check_init() # Check for delayed init
        out = io.StringIO()
        first=True
        if sorted:
            # `sorted` (the parameter) shadows the builtin here, hence the
            # module-level `_builtin_sorted` alias.
            names = _builtin_sorted(self._factmaps.keys(),key=lambda pt:
                                    (pt.meta.name, pt.meta.arity,pt.__name__))
            fms = [self._factmaps[n] for n in names]
        else:
            fms = self._factmaps.values()
        for fm in fms:
            if commented:
                # Blank line between the commented sections of consecutive
                # predicates (but not before the first one).
                if first: first=False
                else: print("",file=out)
                _format_commented(fm,out)
            if sorted:
                _format_asp_facts(_builtin_sorted(fm.factset),out,width)
            else:
                _format_asp_facts(fm.factset,out,width)

        data = out.getvalue()
        out.close()
        return data
def __str__(self) -> str:
self._check_init() # Check for delayed init
tmp = ", ".join([str(f) for f in self])
return '{' + tmp + '}'
    def __repr__(self):
        """Use the ``__str__`` representation for repr() as well."""
        return self.__str__()
#--------------------------------------------------------------------------
# Special functions to support set and container operations
#--------------------------------------------------------------------------
def __contains__(self, fact):
"""Implemement set 'in' operator."""
self._check_init() # Check for delayed init
if not isinstance(fact,Predicate): return False
ptype = type(fact)
if ptype not in self._factmaps: return False
return fact in self._factmaps[ptype].factset
def __bool__(self):
"""Implemement set bool operator."""
self._check_init() # Check for delayed init
for fm in self._factmaps.values():
if fm: return True
return False
    def __eq__(self, other):
        """Equality: both sides contain exactly the same facts.

        `other` may be any iterable of facts; it is converted to a FactBase.
        """
        # If other is not a FactBase then create one
        if not isinstance(other, self.__class__): other=FactBase(other)
        self._check_init(); other._check_init() # Check for delayed init

        # Only non-empty fact maps are relevant for the comparison.
        self_fms = { p: fm for p,fm in self._factmaps.items() if fm }
        other_fms = { p: fm for p,fm in other._factmaps.items() if fm }
        if self_fms.keys() != other_fms.keys(): return False

        # Compare the per-predicate fact sets via the dedicated helper.
        for p, fm1 in self_fms.items():
            fm2 = other_fms[p]
            if not factset_equality(fm1.factset,fm2.factset): return False

        return True
def __lt__(self,other):
"""Implemement set < operator."""
# If other is not a FactBase then create one
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init() ; other._check_init() # Check for delayed init
self_fms = { p: fm for p,fm in self._factmaps.items() if fm }
other_fms = { p: fm for p,fm in other._factmaps.items() if fm }
if len(self_fms) > len(other_fms): return False
known_ne=False
for p, spfm in self_fms.items():
if p not in other_fms: return False
opfm = other_fms[p]
if spfm.factset < opfm.factset: known_ne=True
elif spfm.factset > opfm.factset: return False
if known_ne: return True
return False
def __le__(self,other):
"""Implemement set <= operator."""
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init() ; other._check_init() # Check for delayed init
self_fms = { p: fm for p,fm in self._factmaps.items() if fm }
other_fms = { p: fm for p,fm in other._factmaps.items() if fm }
if len(self_fms) > len(other_fms): return False
for p, spfm in self_fms.items():
if p not in other_fms: return False
opfm = other_fms[p]
if spfm.factset > opfm.factset: return False
return True
def __gt__(self,other):
"""Implemement set > operator."""
if not isinstance(other, self.__class__): other=FactBase(other)
return other.__lt__(self)
def __ge__(self,other):
"""Implemement set >= operator."""
if not isinstance(other, self.__class__): other=FactBase(other)
return other.__le__(self)
    def __or__(self,other):
        """Implement the set ``|`` (union) operator."""
        return self.union(other)
    def __and__(self,other):
        """Implement the set ``&`` (intersection) operator."""
        return self.intersection(other)
    def __sub__(self,other):
        """Implement the set ``-`` (difference) operator."""
        return self.difference(other)
    def __xor__(self,other):
        """Implement the set ``^`` (symmetric difference) operator."""
        return self.symmetric_difference(other)
    def __ior__(self,other):
        """Implement the in-place set ``|=`` (union) operator."""
        self.update(other)
        return self
    def __iand__(self,other):
        """Implement the in-place set ``&=`` (intersection) operator."""
        self.intersection_update(other)
        return self
    def __isub__(self,other):
        """Implement the in-place set ``-=`` (difference) operator."""
        self.difference_update(other)
        return self
    def __ixor__(self,other):
        """Implement the in-place set ``^=`` (symmetric difference) operator."""
        self.symmetric_difference_update(other)
        return self
#--------------------------------------------------------------------------
# Set functions
#--------------------------------------------------------------------------
    def union(self, *others: _Facts) -> 'FactBase':
        """Implements the set union() function.

        Returns a new FactBase containing the facts of `self` and all
        `others` (each of which may be a FactBase or any iterable of facts).
        """
        factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
        self._check_init() # Check for delayed init
        for fb in factbases: fb._check_init()

        fb = FactBase()
        # Union over every predicate type appearing on either side.
        predicates = set(self._factmaps.keys())
        for o in factbases: predicates.update(o._factmaps.keys())

        for p in predicates:
            pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
            # Seed with self's map when present, otherwise an empty map.
            if p in self._factmaps:
                fb._factmaps[p] = self._factmaps[p].union(*pothers)
            else:
                fb._factmaps[p] = FactMap(p).union(*pothers)
        return fb
    def intersection(self, *others: _Facts) -> 'FactBase':
        """Implements the set intersection() function.

        Returns a new FactBase with the facts common to `self` and all
        `others` (each of which may be a FactBase or any iterable of facts).
        """
        factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
        self._check_init() # Check for delayed init
        for fb in factbases: fb._check_init()

        fb = FactBase()
        # Only predicate types present on every side can intersect.
        predicates = set(self._factmaps.keys())
        for fb_ in factbases: predicates.intersection_update(fb_._factmaps.keys())

        for p in predicates:
            pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
            fb._factmaps[p] = self._factmaps[p].intersection(*pothers)
        return fb
    def difference(self, *others: _Facts) -> 'FactBase':
        """Implements the set difference() function.

        Returns a new FactBase with the facts of `self` that appear in none
        of the `others`.
        """
        factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
        self._check_init() # Check for delayed init
        for fb in factbases: fb._check_init()

        fb = FactBase()
        # Only self's predicate types can survive a difference.
        predicates = set(self._factmaps.keys())

        for p in predicates:
            pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
            fb._factmaps[p] = self._factmaps[p].difference(*pothers)
        return fb
def symmetric_difference(self, other: _Facts) -> 'FactBase':
"""Implements the set symmetric_difference() function"""
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init() # Check for delayed init
other._check_init()
fb = FactBase()
predicates = set(self._factmaps.keys())
predicates.update(other._factmaps.keys())
for p in predicates:
in_self = p in self._factmaps ; in_other = p in other._factmaps
if in_self and in_other:
fb._factmaps[p] = self._factmaps[p].symmetric_difference(other._factmaps[p])
elif in_self:
fb._factmaps[p] = self._factmaps[p].copy()
elif in_other:
fb._factmaps[p] = other._factmaps[p].copy()
return fb
def update(self, *others: _Facts) -> None:
"""Implements the set update() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
for fb in factbases:
for p,fm in fb._factmaps.items():
if p in self._factmaps: self._factmaps[p].update(fm)
else: self._factmaps[p] = fm.copy()
def intersection_update(self, *others: _Facts) -> None:
"""Implements the set intersection_update() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
predicates = set(self._factmaps.keys())
for fb in factbases: predicates.intersection_update(fb._factmaps.keys())
pred_to_delete = set(self._factmaps.keys()) - predicates
for p in pred_to_delete: self._factmaps[p].clear()
for p in predicates:
pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
self._factmaps[p].intersection_update(*pothers)
def difference_update(self, *others: _Facts) -> None:
"""Implements the set difference_update() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
for p in self._factmaps.keys():
pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
self._factmaps[p].difference_update(*pothers)
def symmetric_difference_update(self, other: _Facts) -> None:
"""Implements the set symmetric_difference_update() function"""
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init() # Check for delayed init
other._check_init()
predicates = set(self._factmaps.keys())
predicates.update(other._factmaps.keys())
for p in predicates:
if p in self._factmaps and p in other._factmaps:
self._factmaps[p].symmetric_difference_update(other._factmaps[p])
else:
if p in other._factmaps: self._factmaps[p] = other._factmaps[p].copy()
def copy(self) -> 'FactBase':
"""Implements the set copy() function"""
self._check_init() # Check for delayed init
fb = FactBase()
for p, _ in self._factmaps.items():
fb._factmaps[p] = self._factmaps[p].copy()
return fb
#------------------------------------------------------------------------------
# Select is an interface query over a FactBase.
# ------------------------------------------------------------------------------
    def count(self, *args, **kwargs):
        """Return the number of matches.

        NOTE(review): the body is just ``pass``, so as written this always
        returns ``None`` — presumably a placeholder implemented/overridden
        elsewhere; confirm before relying on it.
        """
        pass
#------------------------------------------------------------------------------
# Delete is an interface to perform a query delete from a FactBase.
# ------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Query API version 1 with new query engine
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# The Delete class
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # Library module only: fail fast if someone tries to execute it directly.
    raise RuntimeError('Cannot run modules')
| 37.879222 | 114 | 0.568769 |
5a53a6326b7c2b2399d98404ebe43ef902465e91 | 13,470 | py | Python | blender/2.79/scripts/addons/modules/extensions_framework/__init__.py | uzairakbar/bpy2.79 | 3a3e0004ac6783c4e4b89d939e4432de99026a85 | [
"MIT"
] | 2 | 2019-11-27T09:05:42.000Z | 2020-02-20T01:25:23.000Z | blender/2.79/scripts/addons/modules/extensions_framework/__init__.py | uzairakbar/bpy2.79 | 3a3e0004ac6783c4e4b89d939e4432de99026a85 | [
"MIT"
] | null | null | null | blender/2.79/scripts/addons/modules/extensions_framework/__init__.py | uzairakbar/bpy2.79 | 3a3e0004ac6783c4e4b89d939e4432de99026a85 | [
"MIT"
] | 4 | 2020-02-19T20:02:26.000Z | 2022-02-11T18:47:56.000Z | # -*- coding: utf-8 -*-
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
# --------------------------------------------------------------------------
# Blender 2.5 Extensions Framework
# --------------------------------------------------------------------------
#
# Authors:
# Doug Hammond
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENCE BLOCK *****
#
import time
import bpy
from extensions_framework.ui import EF_OT_msg
bpy.utils.register_class(EF_OT_msg)
del EF_OT_msg
def log(str, popup=False, module_name='EF'):
"""Print a message to the console, prefixed with the module_name
and the current time. If the popup flag is True, the message will
be raised in the UI as a warning using the operator bpy.ops.ef.msg.
"""
print("[%s %s] %s" %
(module_name, time.strftime('%Y-%b-%d %H:%M:%S'), str))
if popup:
bpy.ops.ef.msg(
msg_type='WARNING',
msg_text=str
)
added_property_cache = {}
def init_properties(obj, props, cache=True):
"""Initialise custom properties in the given object or type.
The props list is described in the declarative_property_group
class definition. If the cache flag is False, this function
will attempt to redefine properties even if they have already been
added.
"""
if not obj in added_property_cache.keys():
added_property_cache[obj] = []
for prop in props:
try:
if cache and prop['attr'] in added_property_cache[obj]:
continue
if prop['type'] == 'bool':
t = bpy.props.BoolProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","options","subtype","update"]}
elif prop['type'] == 'bool_vector':
t = bpy.props.BoolVectorProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","options","subtype","size",
"update"]}
elif prop['type'] == 'collection':
t = bpy.props.CollectionProperty
a = {k: v for k,v in prop.items() if k in ["ptype","name",
"description","default","options"]}
a['type'] = a['ptype']
del a['ptype']
elif prop['type'] == 'enum':
t = bpy.props.EnumProperty
a = {k: v for k,v in prop.items() if k in ["items","name",
"description","default","options","update"]}
elif prop['type'] == 'float':
t = bpy.props.FloatProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","min","max","soft_min","soft_max",
"step","precision","options","subtype","unit","update"]}
elif prop['type'] == 'float_vector':
t = bpy.props.FloatVectorProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","min","max","soft_min","soft_max",
"step","precision","options","subtype","size","update"]}
elif prop['type'] == 'int':
t = bpy.props.IntProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","min","max","soft_min","soft_max",
"step","options","subtype","update"]}
elif prop['type'] == 'int_vector':
t = bpy.props.IntVectorProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","min","max","soft_min","soft_max",
"options","subtype","size","update"]}
elif prop['type'] == 'pointer':
t = bpy.props.PointerProperty
a = {k: v for k,v in prop.items() if k in ["ptype", "name",
"description","options","update"]}
a['type'] = a['ptype']
del a['ptype']
elif prop['type'] == 'string':
t = bpy.props.StringProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","maxlen","options","subtype",
"update"]}
else:
continue
setattr(obj, prop['attr'], t(**a))
added_property_cache[obj].append(prop['attr'])
except KeyError:
# Silently skip invalid entries in props
continue
| 36.209677 | 89 | 0.587231 |
5a54a96d2f3cc1d14a3c5a24eab90fe8dfc58c84 | 16,305 | py | Python | tests/test_common.py | NOAA-GSL/adb_graphics | b9a3d567efa0de5a175be8404f351b901a8f382f | [
"MIT"
] | 2 | 2020-11-06T16:30:50.000Z | 2021-01-15T19:42:13.000Z | tests/test_common.py | NOAA-GSL/adb_graphics | b9a3d567efa0de5a175be8404f351b901a8f382f | [
"MIT"
] | 10 | 2020-11-20T16:02:57.000Z | 2021-03-31T23:35:56.000Z | tests/test_common.py | NOAA-GSL/adb_graphics | b9a3d567efa0de5a175be8404f351b901a8f382f | [
"MIT"
] | 1 | 2021-04-09T20:55:06.000Z | 2021-04-09T20:55:06.000Z | # pylint: disable=invalid-name
'''
Pytests for the common utilities included in this package. Includes:
- conversions.py
- specs.py
- utils.py
To run the tests, type the following in the top level repo directory:
python -m pytest --nat-file [path/to/gribfile] --prs-file [path/to/gribfile]
'''
from inspect import getfullargspec
from string import ascii_letters, digits
import warnings
from matplotlib import cm
from matplotlib import colors as mcolors
from metpy.plots import ctables
import numpy as np
import adb_graphics.conversions as conversions
import adb_graphics.specs as specs
import adb_graphics.utils as utils
import adb_graphics.datahandler.gribdata as gribdata
def test_conversion():
''' Test that conversions return at numpy array for input of np.ndarray,
list, or int '''
a = np.ones([3, 2]) * 300
c = a[0, 0]
# Check for the right answer
assert np.array_equal(conversions.k_to_c(a), a - 273.15)
assert np.array_equal(conversions.k_to_f(a), (a - 273.15) * 9/5 + 32)
assert np.array_equal(conversions.kgm2_to_in(a), a * 0.03937)
assert np.array_equal(conversions.m_to_dm(a), a / 10)
assert np.array_equal(conversions.m_to_in(a), a * 39.3701)
assert np.array_equal(conversions.m_to_kft(a), a / 304.8)
assert np.array_equal(conversions.m_to_mi(a), a / 1609.344)
assert np.array_equal(conversions.ms_to_kt(a), a * 1.9438)
assert np.array_equal(conversions.pa_to_hpa(a), a / 100)
assert np.array_equal(conversions.percent(a), a * 100)
assert np.array_equal(conversions.to_micro(a), a * 1E6)
assert np.array_equal(conversions.vvel_scale(a), a * -10)
assert np.array_equal(conversions.vort_scale(a), a / 1E-05)
assert np.array_equal(conversions.weasd_to_1hsnw(a), a * 10)
functions = [
conversions.k_to_c,
conversions.k_to_f,
conversions.kgm2_to_in,
conversions.m_to_dm,
conversions.m_to_in,
conversions.m_to_kft,
conversions.m_to_mi,
conversions.ms_to_kt,
conversions.pa_to_hpa,
conversions.percent,
conversions.to_micro,
conversions.vvel_scale,
conversions.vort_scale,
conversions.weasd_to_1hsnw,
]
# Check that all functions return a np.ndarray given a collection, or single float
for f in functions:
for collection in [a, c]:
assert isinstance(f(collection), type(collection))
def test_specs():
''' Test VarSpec properties. '''
config = 'adb_graphics/default_specs.yml'
varspec = MockSpecs(config)
# Ensure correct return type
assert isinstance(varspec.t_colors, np.ndarray)
assert isinstance(varspec.ps_colors, np.ndarray)
assert isinstance(varspec.yml, dict)
# Ensure the appropriate number of colors is returned
assert np.shape(varspec.t_colors) == (len(varspec.clevs), 4)
assert np.shape(varspec.ps_colors) == (105, 4)
def test_utils():
''' Test that utils works appropriately. '''
assert callable(utils.get_func('conversions.k_to_c'))
def is_a_contourf_dict(self, entry):
''' Set up the accepted arguments for plt.contourf, and check the given
arguments. '''
args = ['X', 'Y', 'Z', 'levels',
'corner_mask', 'colors', 'alpha', 'cmap', 'labels', 'norm', 'vmin',
'vmax', 'origin', 'extent', 'locator', 'extend', 'xunits',
'yunits', 'antialiased', 'nchunk', 'linewidths',
'hatches']
if entry is None:
return True
return self.check_kwargs(args, entry)
def is_a_color(self, color):
''' Returns true if color is contained in the list of recognized colors. '''
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS,
**ctables.colortables)
if color in colors.keys():
return True
if color in dir(self.varspec):
return True
return False
def is_a_key(self, key):
''' Returns true if key exists as a key in the config file. '''
return self.cfg.get(key) is not None
def is_callable(self, funcs):
''' Returns true if func in funcs list is the name of a callable function. '''
funcs = funcs if isinstance(funcs, list) else [funcs]
callables = []
for func in funcs:
callable_ = self.get_callable(func)
callable_ = callable_ if isinstance(callable_, list) else [callable_]
for clbl in callable_:
if isinstance(clbl, np.ndarray):
callables.append(True)
elif callable(clbl):
callables.append(True)
else:
callables.append(False)
return all(callables)
def is_wind(self, wind):
''' Returns true if wind is a bool or is_a_level. '''
return isinstance(wind, bool) or self.is_a_level(wind)
def check_keys(self, d, depth=0):
''' Helper function that recursively checks the keys in the dictionary by calling the
function defined in allowable. '''
max_depth = 2
# Only proceed if d is a dictionary
if not isinstance(d, dict):
return
# Proceed only up to max depth.
if depth >= max_depth:
return
level = depth+1
for k, v in d.items():
# Check that the key is allowable
assert (k in self.allowable.keys()) or self.is_a_level(k)
# Call a checker if one exists for the key, otherwise descend into
# next level of dict
checker = self.allowable.get(k)
if checker:
if isinstance(checker, bool):
assert checker
else:
assert checker(v)
else:
if isinstance(v, dict):
self.check_keys(v, depth=level)
def test_keys(self):
''' Tests each of top-level variables in the config file by calling the helper function. '''
for short_name, spec in self.cfg.items():
assert '_' not in short_name
self.check_keys(spec)
| 30.138632 | 100 | 0.557068 |
5a54ab45f8f150e828680b7baff870b193da03be | 6,448 | py | Python | ggpy/cruft/grammar.py | hobson/ggpy | 4e6e6e876c3a4294cd711647051da2d9c1836b60 | [
"MIT"
] | 1 | 2015-01-26T19:07:45.000Z | 2015-01-26T19:07:45.000Z | ggpy/cruft/grammar.py | hobson/ggpy | 4e6e6e876c3a4294cd711647051da2d9c1836b60 | [
"MIT"
] | null | null | null | ggpy/cruft/grammar.py | hobson/ggpy | 4e6e6e876c3a4294cd711647051da2d9c1836b60 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# package: org.ggp.base.util.symbol.grammar
import threading
| 32.079602 | 100 | 0.622519 |
5a57e614d9b55b36163878bad041ba8ed0614d30 | 948 | py | Python | cortical/models/context.py | npd15393/ResumeMiner | 9644ae97aaad869c3739b2b7b92e4e5a6f857206 | [
"BSD-2-Clause"
] | null | null | null | cortical/models/context.py | npd15393/ResumeMiner | 9644ae97aaad869c3739b2b7b92e4e5a6f857206 | [
"BSD-2-Clause"
] | null | null | null | cortical/models/context.py | npd15393/ResumeMiner | 9644ae97aaad869c3739b2b7b92e4e5a6f857206 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
/*******************************************************************************
* Copyright (c) cortical.io GmbH. All rights reserved.
*
* This software is confidential and proprietary information.
* You shall use it only in accordance with the terms of the
* license agreement you entered into with cortical.io GmbH.
******************************************************************************/
"""
from cortical.models.fingerprint import Fingerprint
| 43.090909 | 117 | 0.597046 |
5a58135dc9e13b466cba75e814598ea999f2751b | 705 | py | Python | COMP-2080/Week-11/knapRecursive.py | kbrezinski/Candidacy-Prep | f4610fb611e6300a7d657af124728d46a8659ba5 | [
"BSD-3-Clause"
] | null | null | null | COMP-2080/Week-11/knapRecursive.py | kbrezinski/Candidacy-Prep | f4610fb611e6300a7d657af124728d46a8659ba5 | [
"BSD-3-Clause"
] | null | null | null | COMP-2080/Week-11/knapRecursive.py | kbrezinski/Candidacy-Prep | f4610fb611e6300a7d657af124728d46a8659ba5 | [
"BSD-3-Clause"
] | null | null | null |
# [weight, value]
I = [[4, 8], [4, 7], [6, 14]]
k = 8
print(knapRecursive(I, k))
| 22.03125 | 69 | 0.455319 |
5a59bbf41d09d9b1b99e57b30f3e8db2c9734a9d | 232 | py | Python | digits/inference/__init__.py | PhysicsTeacher13/Digits-NVIDIA | 80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a | [
"BSD-3-Clause"
] | 111 | 2017-04-21T06:03:04.000Z | 2021-04-26T06:36:54.000Z | digits/inference/__init__.py | PhysicsTeacher13/Digits-NVIDIA | 80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a | [
"BSD-3-Clause"
] | 6 | 2017-05-15T22:02:49.000Z | 2018-03-16T10:25:26.000Z | digits/inference/__init__.py | PhysicsTeacher13/Digits-NVIDIA | 80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a | [
"BSD-3-Clause"
] | 40 | 2017-04-21T07:04:16.000Z | 2019-11-14T14:20:32.000Z | # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .images import ImageInferenceJob
from .job import InferenceJob
__all__ = [
'InferenceJob',
'ImageInferenceJob',
]
| 21.090909 | 63 | 0.762931 |
5a5a27414d864ca463175f98377b3d5b7fff1510 | 3,592 | py | Python | homework/homework 21.py | CoderLoveMath/Jeju-IOSEFTGS-python | 0efe26e3840817197c1584ac4cf90d35c3699988 | [
"FSFAP"
] | null | null | null | homework/homework 21.py | CoderLoveMath/Jeju-IOSEFTGS-python | 0efe26e3840817197c1584ac4cf90d35c3699988 | [
"FSFAP"
] | null | null | null | homework/homework 21.py | CoderLoveMath/Jeju-IOSEFTGS-python | 0efe26e3840817197c1584ac4cf90d35c3699988 | [
"FSFAP"
] | null | null | null | # Import a library of functions called 'pygame'
import pygame
# Initialize the game engine
pygame.init()
# Define the colors we will use in RGB format
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
# Set the height and width of the screen
size = [491, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Heron's Note")
# Loop until the user clicks the close button.
done = False
clock = pygame.time.Clock()
scene_count = 0
while not done:
scene_count += 0.1
clock.tick(10)
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
screen.fill(WHITE)
screen.blit(pygame.image.load('bg.png'), pygame.image.load('bg.png').get_rect())
font = pygame.font.Font('font.ttf', 80) # font setting
title = font.render(" ", True, (28, 0, 0))
font = pygame.font.Font('font.ttf', 20) # font setting
subtitle = font.render(" !", True, (28, 0, 0))
screen.blit(title, (120, 50))
screen.blit(subtitle, (170, 150))
pygame.draw.polygon(screen, BLACK, [[120, 400], [245.5, 200], [371, 400]], 5)
if scene_count < 3:
font = pygame.font.Font('font.ttf', 40)
text = font.render(" .", True, (28, 0, 0))
screen.blit(text, (50, 500))
elif scene_count < 6:
font = pygame.font.Font('font.ttf', 40)
text = font.render(" .", True, (28, 0, 0))
screen.blit(text, (30, 500))
elif scene_count < 9:
font = pygame.font.Font('font.ttf', 40)
text = font.render("3", True, (28, 0, 0))
screen.blit(text, (250, 500))
elif scene_count < 10:
font = pygame.font.Font('font.ttf', 40)
text = font.render("3, 14", True, (28, 0, 0))
screen.blit(text, (250, 500))
elif scene_count < 13:
font = pygame.font.Font('font.ttf', 40)
text = font.render("3, 14, 15", True, (28, 0, 0))
screen.blit(text, (200, 500))
elif scene_count < 15:
font = pygame.font.Font('font.ttf', 40)
text = font.render(" s ", True, (28, 0, 0))
screen.blit(text, (70, 500))
elif scene_count < 18:
font = pygame.font.Font('font.ttf', 30)
text = font.render(" s(s-3)(s-14)(2-5) .", True, (28, 0, 0))
screen.blit(text, (70, 500))
elif scene_count < 21:
font = pygame.font.Font('font.ttf', 30)
text = font.render(" , 20.4 .", True, (28, 0, 0))
screen.blit(text, (40, 500))
elif scene_count < 23:
font = pygame.font.Font('font.ttf', 30)
text = font.render(" ,", True, (28, 0, 0))
screen.blit(text, (200, 500))
elif scene_count < 26:
font = pygame.font.Font('font.ttf', 30)
text = font.render(" a, b, c ", True, (28, 0, 0))
screen.blit(text, (40, 500))
else:
font = pygame.font.Font('font.ttf', 30)
prev_text = font.render(" a, b, c ", True, (28, 0, 0))
screen.blit(prev_text, (40, 450))
font = pygame.font.Font('font.ttf', 40)
text = font.render("s(s-a)(s-b)(s-c) ", True, (28, 0, 0))
font = pygame.font.Font('font.ttf', 30)
subtext = font.render("(, s = (a+b+c) / 2)", True, (28, 0, 0))
screen.blit(text, (40, 500))
screen.blit(subtext, (200, 550))
pygame.display.flip()
# Be IDLE friendly
pygame.quit() | 35.92 | 84 | 0.578786 |
5a5bfad53218db468fff1b6bf7d577e4b9d5e32d | 2,929 | py | Python | pyte/ops/for_.py | Fuyukai/Pyte | 7ef04938d80f8b646bd73d976ac9787a5b88edd9 | [
"MIT"
] | 2 | 2020-01-10T22:08:38.000Z | 2021-06-21T15:34:47.000Z | pyte/ops/for_.py | Fuyukai/Pyte | 7ef04938d80f8b646bd73d976ac9787a5b88edd9 | [
"MIT"
] | 6 | 2016-04-17T21:28:14.000Z | 2016-08-24T02:14:01.000Z | pyte/ops/for_.py | SunDwarf/Pyte | 7ef04938d80f8b646bd73d976ac9787a5b88edd9 | [
"MIT"
] | null | null | null | from pyte import tokens, util
from pyte.superclasses import _PyteAugmentedValidator, _PyteOp
from pyte.util import PY36
| 31.494624 | 101 | 0.609082 |
5a5cd7e8aa4acb388f0ef7bcdc817349add0a810 | 1,212 | py | Python | web/hottubapi.py | pwschuurman/hottub_controller | be9faeabcaf9f5bb7aba3ec03eba60276b27cf80 | [
"MIT"
] | 1 | 2020-06-03T18:32:50.000Z | 2020-06-03T18:32:50.000Z | web/hottubapi.py | pwschuurman/hottub_controller | be9faeabcaf9f5bb7aba3ec03eba60276b27cf80 | [
"MIT"
] | null | null | null | web/hottubapi.py | pwschuurman/hottub_controller | be9faeabcaf9f5bb7aba3ec03eba60276b27cf80 | [
"MIT"
] | null | null | null | from gpioapi import GpioAPI
import rx
MAX_TEMP = 38
COOL_TEMP = 30
| 24.734694 | 82 | 0.710396 |
5a5e3e187f9834c9b5e31410232316fcaa6ec9f3 | 7,711 | py | Python | src/biocluster_pipeline.py | zocean/Norma | 4c45c1540f7d7d13f9b71a6772044d3772a451f8 | [
"MIT"
] | 1 | 2020-02-17T22:59:46.000Z | 2020-02-17T22:59:46.000Z | src/biocluster_pipeline.py | zocean/Norma | 4c45c1540f7d7d13f9b71a6772044d3772a451f8 | [
"MIT"
] | null | null | null | src/biocluster_pipeline.py | zocean/Norma | 4c45c1540f7d7d13f9b71a6772044d3772a451f8 | [
"MIT"
] | 2 | 2020-02-24T02:54:04.000Z | 2020-07-07T22:16:35.000Z | #!/usr/bin/python
# Programmer : Yang Zhang
# Contact: zocean636@gmail.com
# Last-modified: 24 Jan 2019 15:20:08
import os,sys,argparse
def parse_arg():
''' This Function Parse the Argument '''
p=argparse.ArgumentParser( description = 'Example: %(prog)s -h', epilog='Library dependency :')
p.add_argument('-v','--version',action='version',version='%(prog)s 0.1')
p.add_argument('--conf',type=str,dest="conf",help="configure file")
p.add_argument('--dry_run',dest="dry_run",action="store_true",help="set this parameter if just want to test environment. No real script will be procssed")
if len(sys.argv) < 2:
print p.print_help()
exit(1)
return p.parse_args()
if __name__=="__main__":
main()
| 46.451807 | 191 | 0.625989 |
5a5e3edccfdfe1e9cbd18ca904e258b6b8bd5b04 | 5,404 | py | Python | env/lib/python3.5/site-packages/cartopy/tests/test_shapereader.py | project-pantheon/pantheon_glob_planner | c0d50a53b36c4678192ec75ad7a4cd68c570daef | [
"BSD-3-Clause"
] | null | null | null | env/lib/python3.5/site-packages/cartopy/tests/test_shapereader.py | project-pantheon/pantheon_glob_planner | c0d50a53b36c4678192ec75ad7a4cd68c570daef | [
"BSD-3-Clause"
] | null | null | null | env/lib/python3.5/site-packages/cartopy/tests/test_shapereader.py | project-pantheon/pantheon_glob_planner | c0d50a53b36c4678192ec75ad7a4cd68c570daef | [
"BSD-3-Clause"
] | null | null | null | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import os.path
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
import cartopy.io.shapereader as shp
| 42.21875 | 79 | 0.622687 |
5a5f41145e46fd5342cd880863fcd045e36493b6 | 268 | py | Python | inmembrane/plugins/__init__.py | pansapiens/inmembrane | 382eee3b2bacc9c567f65d7c48f1ddf9a86c253c | [
"BSD-2-Clause"
] | 4 | 2015-03-09T02:08:34.000Z | 2021-02-06T13:52:21.000Z | inmembrane/plugins/__init__.py | pansapiens/inmembrane | 382eee3b2bacc9c567f65d7c48f1ddf9a86c253c | [
"BSD-2-Clause"
] | 5 | 2015-01-29T03:36:04.000Z | 2021-12-08T07:20:42.000Z | inmembrane/plugins/__init__.py | pansapiens/inmembrane | 382eee3b2bacc9c567f65d7c48f1ddf9a86c253c | [
"BSD-2-Clause"
] | 6 | 2015-03-09T02:08:43.000Z | 2021-06-07T17:33:16.000Z | # This little bit of magic fills the __all__ list
# with every plugin name, and means that calling:
# from plugins import *
# within inmembrane.py will import every plugin
import pkgutil
__all__ = []
for p in pkgutil.iter_modules(__path__):
__all__.append(p[1])
| 26.8 | 50 | 0.75 |
5a6488350ce9cd310eada5196eabccb1e9f79524 | 1,984 | py | Python | dvc/output/__init__.py | amjadsaadeh/dvc | f405168619c2bb85430c4ded2585b57ebfd01bd7 | [
"Apache-2.0"
] | null | null | null | dvc/output/__init__.py | amjadsaadeh/dvc | f405168619c2bb85430c4ded2585b57ebfd01bd7 | [
"Apache-2.0"
] | null | null | null | dvc/output/__init__.py | amjadsaadeh/dvc | f405168619c2bb85430c4ded2585b57ebfd01bd7 | [
"Apache-2.0"
] | null | null | null | import schema
from dvc.exceptions import DvcException
from dvc.config import Config
from dvc.dependency import SCHEMA, urlparse
from dvc.dependency.base import DependencyBase
from dvc.output.s3 import OutputS3
from dvc.output.gs import OutputGS
from dvc.output.local import OutputLOCAL
from dvc.output.hdfs import OutputHDFS
from dvc.output.ssh import OutputSSH
from dvc.remote import Remote
OUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]
OUTS_MAP = {'hdfs': OutputHDFS,
's3': OutputS3,
'gs': OutputGS,
'ssh': OutputSSH,
'': OutputLOCAL}
SCHEMA[schema.Optional(OutputLOCAL.PARAM_CACHE)] = bool
SCHEMA[schema.Optional(OutputLOCAL.PARAM_METRIC)] = OutputLOCAL.METRIC_SCHEMA
| 31.492063 | 77 | 0.621472 |
5a65af496e71e8ad9c61c888ed0b5d903da6928e | 343 | py | Python | company_logo.py | DomirScire/HackerRank_answers | 0432185a472aeae7062cf4e406d0e7a5ed2cc979 | [
"MIT"
] | 1 | 2021-03-19T13:05:16.000Z | 2021-03-19T13:05:16.000Z | company_logo.py | DomirScire/HackerRank_answers | 0432185a472aeae7062cf4e406d0e7a5ed2cc979 | [
"MIT"
] | null | null | null | company_logo.py | DomirScire/HackerRank_answers | 0432185a472aeae7062cf4e406d0e7a5ed2cc979 | [
"MIT"
] | null | null | null | # DomirScire
import math
import os
import random
import re
import sys
import collections
if __name__ == '__main__':
s = sorted(input().strip())
s_counter = collections.Counter(s).most_common()
s_counter = sorted(s_counter, key=lambda x: (x[1] * -1, x[0]))
for i in range(0, 3):
print(s_counter[i][0], s_counter[i][1])
| 22.866667 | 66 | 0.661808 |
5a65da8fa8ec5fbb64d2b18d96b4bb40c2a9a8c1 | 2,600 | py | Python | ltr/models/loss/kl_regression.py | Jee-King/ICCV2021_Event_Frame_Tracking | ea86cdd331748864ffaba35f5efbb3f2a02cdb03 | [
"MIT"
] | 15 | 2021-08-31T13:32:12.000Z | 2022-03-24T01:55:41.000Z | ltr/models/loss/kl_regression.py | Jee-King/ICCV2021_Event_Frame_Tracking | ea86cdd331748864ffaba35f5efbb3f2a02cdb03 | [
"MIT"
] | 2 | 2022-01-13T12:53:29.000Z | 2022-03-31T08:14:42.000Z | ltr/models/loss/kl_regression.py | Jee-King/ICCV2021_Event_Frame_Tracking | ea86cdd331748864ffaba35f5efbb3f2a02cdb03 | [
"MIT"
] | 2 | 2021-11-08T16:27:16.000Z | 2021-12-08T14:24:27.000Z | import math
import torch
import torch.nn as nn
from torch.nn import functional as F
| 36.619718 | 106 | 0.627308 |
5a6600ba347d74c16e50529d4d48201c7ed9b11e | 2,478 | py | Python | custom/mixins.py | luoyangC/django_template | e2fec854e2ba028b1d1981053b5398c21b9f9a25 | [
"Apache-2.0"
] | null | null | null | custom/mixins.py | luoyangC/django_template | e2fec854e2ba028b1d1981053b5398c21b9f9a25 | [
"Apache-2.0"
] | 8 | 2020-06-05T22:21:55.000Z | 2021-09-22T18:50:27.000Z | custom/mixins.py | luoyangC/django_template | e2fec854e2ba028b1d1981053b5398c21b9f9a25 | [
"Apache-2.0"
] | null | null | null | """
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from rest_framework import status
from rest_framework import mixins
from custom.response import JsonResponse
| 33.04 | 93 | 0.700565 |
5a66649d8c1a6d7c9c60e1d964b3f1eb9d459b10 | 893 | py | Python | rl_trainer/algo/network.py | jidiai/Competition_Olympics-Curling | a3f1e1316a9e9a060bcca623aff2004878c50c78 | [
"MIT"
] | 7 | 2022-02-01T14:45:03.000Z | 2022-02-28T08:21:13.000Z | rl_trainer/algo/network.py | jidiai/Competition_Olympics-Curling | a3f1e1316a9e9a060bcca623aff2004878c50c78 | [
"MIT"
] | 1 | 2022-02-19T15:03:56.000Z | 2022-02-25T08:59:22.000Z | rl_trainer/algo/network.py | jidiai/Competition_Olympics-Curling | a3f1e1316a9e9a060bcca623aff2004878c50c78 | [
"MIT"
] | 5 | 2022-02-08T14:16:12.000Z | 2022-03-08T01:56:37.000Z | import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
device = 'cuda' if torch.cuda.is_available() else 'cpu'
| 28.806452 | 66 | 0.666293 |