hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c084907ab6f7a66d8c89aefdff3de051c8499ac | 1,406 | py | Python | encryption_client.py | salmanhiro/fernet-rabbitmq | 8130514e6d21b7df9c78a28130c603512f500a23 | [
"MIT"
] | null | null | null | encryption_client.py | salmanhiro/fernet-rabbitmq | 8130514e6d21b7df9c78a28130c603512f500a23 | [
"MIT"
] | null | null | null | encryption_client.py | salmanhiro/fernet-rabbitmq | 8130514e6d21b7df9c78a28130c603512f500a23 | [
"MIT"
] | null | null | null | import pika
import uuid
import time
import json
message = {"text":"kulikuli"}
fernet_result = FernetRpc()
print(" [x] Requesting user")
start = time.time()
response = fernet_result.call(message)
end = time.time() - start
print(" [v] Got %r" % response)
print(" [.] Time elapsed %r s" %end) | 26.528302 | 69 | 0.619488 |
0c0862941d8ae706603317f21fde751ca0bd01fb | 3,225 | py | Python | services/cert_server/project/tests/test_cert_server.py | EvaldoNeto/openvpn-http | 73d75a990d5d7ed7f89a526c0ce324db42c37f1f | [
"MIT"
] | 5 | 2019-11-19T02:54:05.000Z | 2020-03-03T19:48:41.000Z | services/cert_server/project/tests/test_cert_server.py | EvaldoNeto/openvpn-http | 73d75a990d5d7ed7f89a526c0ce324db42c37f1f | [
"MIT"
] | 23 | 2019-10-31T12:00:37.000Z | 2019-11-22T21:00:28.000Z | services/cert_server/project/tests/test_cert_server.py | EvaldoNeto/openvpn-http | 73d75a990d5d7ed7f89a526c0ce324db42c37f1f | [
"MIT"
] | null | null | null | # services/ovpn_server/project/tests/test_ovpn_server.py
import os
import json
import io
from flask import current_app
from project.tests.base import BaseTestCase
| 37.068966 | 78 | 0.556279 |
0c08971682b47651e14df294d06cff25310ada7b | 956 | py | Python | powerline/lib/watcher/stat.py | MrFishFinger/powerline | 361534bafecf836e100eaff257c93eb4805f48db | [
"MIT"
] | 11,435 | 2015-01-01T03:32:34.000Z | 2022-03-31T20:39:05.000Z | powerline/lib/watcher/stat.py | ritiek/powerline | 82c1373ba0b424c57e8c12cb5f6f1a7ee3829c27 | [
"MIT"
] | 879 | 2015-01-02T11:59:30.000Z | 2022-03-24T09:52:17.000Z | powerline/lib/watcher/stat.py | ritiek/powerline | 82c1373ba0b424c57e8c12cb5f6f1a7ee3829c27 | [
"MIT"
] | 1,044 | 2015-01-05T22:37:53.000Z | 2022-03-17T19:43:16.000Z | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from threading import RLock
from powerline.lib.path import realpath
| 21.244444 | 84 | 0.706067 |
0c08a69ecbe4701e579ed0c55e6c61397156d087 | 2,531 | py | Python | refData/mlpy/mlpy-3.5.0/mlpy/bordacount/borda.py | xrick/DTW-Tutorial | bbbce1c2beff91384cdcb7dbf503f93ad2fa285c | [
"MIT"
] | null | null | null | refData/mlpy/mlpy-3.5.0/mlpy/bordacount/borda.py | xrick/DTW-Tutorial | bbbce1c2beff91384cdcb7dbf503f93ad2fa285c | [
"MIT"
] | null | null | null | refData/mlpy/mlpy-3.5.0/mlpy/bordacount/borda.py | xrick/DTW-Tutorial | bbbce1c2beff91384cdcb7dbf503f93ad2fa285c | [
"MIT"
] | null | null | null | ## This code is written by Davide Albanese, <albanese@fbk.eu>.
## (C) 2010 mlpy Developers.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['borda_count']
import numpy as np
import sys
if sys.version >= '3':
from . import cborda
else:
import cborda
def borda_count(x, k=None):
"""Given N ranked ids lists of length P compute the number of
extractions on top-k positions and the mean position for each id.
Sort the element ids with decreasing number of extractions, and
element ids with equal number of extractions will be sorted with
increasing mean positions.
:Parameters:
x : 2d array_like object integer (N, P)
ranked ids lists. For each list ids must be unique
in [0, P-1].
k : None or integer
compute borda on top-k position (None -> k = P)
:Returns:
borda : 1d numpy array objects
sorted-ids, number of extractions, mean positions
Example:
>>> import numpy as np
>>> import mlpy
>>> x = [[2,4,1,3,0], # first ranked list
... [3,4,1,2,0], # second ranked list
... [2,4,3,0,1], # third ranked list
... [0,1,4,2,3]] # fourth ranked list
>>> mlpy.borda_count(x=x, k=3)
(array([4, 1, 2, 3, 0]), array([4, 3, 2, 2, 1]), array([ 1.25 , 1.66666667, 0. , 1. , 0. ]))
* Id 4 is in the first position with 4 extractions and mean position 1.25.
* Id 1 is in the first position with 3 extractions and mean position 1.67.
* ...
"""
x_arr = np.asarray(x, dtype=np.int)
n, p = x_arr.shape
if k == None:
k = p
if k < 1 or k > p:
raise ValueError('k must be in [1, %d]' % p)
ext, pos = cborda.core(x_arr, k)
invpos = (pos + 1)**(-1) # avoid zero division
idx = np.lexsort(keys=(invpos, ext))[::-1]
return idx, ext[idx], pos[idx]
| 32.448718 | 126 | 0.614382 |
0c08ae96e8b31b452042a012ea2cbfe21f5f54d5 | 2,641 | py | Python | envs/base_mujoco_env.py | zaynahjaved/AWAC | e225eeb8c0cd3498ab55ce15a9de60cb4e957c50 | [
"MIT"
] | null | null | null | envs/base_mujoco_env.py | zaynahjaved/AWAC | e225eeb8c0cd3498ab55ce15a9de60cb4e957c50 | [
"MIT"
] | null | null | null | envs/base_mujoco_env.py | zaynahjaved/AWAC | e225eeb8c0cd3498ab55ce15a9de60cb4e957c50 | [
"MIT"
] | null | null | null | '''
All cartgripper env modules built on cartrgipper implementation in
https://github.com/SudeepDasari/visual_foresight
'''
from abc import ABC
from mujoco_py import load_model_from_path, MjSim
import numpy as np
from base_env import BaseEnv
| 29.344444 | 96 | 0.632336 |
0c097274adeceb2e1e44250ea00c4016e23c60ed | 191 | py | Python | Desafios/desafio009.py | LucasHenrique-dev/Exercicios-Python | b1f6ca56ea8e197a89a044245419dc6079bdb9c7 | [
"MIT"
] | 1 | 2020-04-09T23:18:03.000Z | 2020-04-09T23:18:03.000Z | Desafios/desafio009.py | LucasHenrique-dev/Exercicios-Python | b1f6ca56ea8e197a89a044245419dc6079bdb9c7 | [
"MIT"
] | null | null | null | Desafios/desafio009.py | LucasHenrique-dev/Exercicios-Python | b1f6ca56ea8e197a89a044245419dc6079bdb9c7 | [
"MIT"
] | null | null | null | n1 = int(input('Digite um nmero e veja qual a sua tabuada: '))
n = 0
print('{} X {:2} = {:2}'.format(n1, 0, n1*n))
while n < 10:
n += 1
print('{} X {:2} = {:2}'.format(n1, n, n1*n))
| 27.285714 | 63 | 0.502618 |
0c09891ffb40760a1dcac5e46984a7d055ce0caf | 2,587 | py | Python | web/app/djrq/admin/admin.py | bmillham/djrq2 | c84283b75a7c15da1902ebfc32b7d75159c09e20 | [
"MIT"
] | 1 | 2016-11-23T20:50:00.000Z | 2016-11-23T20:50:00.000Z | web/app/djrq/admin/admin.py | bmillham/djrq2 | c84283b75a7c15da1902ebfc32b7d75159c09e20 | [
"MIT"
] | 15 | 2017-01-15T04:18:40.000Z | 2017-02-25T04:13:06.000Z | web/app/djrq/admin/admin.py | bmillham/djrq2 | c84283b75a7c15da1902ebfc32b7d75159c09e20 | [
"MIT"
] | null | null | null | # encoding: utf-8
from web.ext.acl import when
from ..templates.admin.admintemplate import page as _page
from ..templates.admin.requests import requeststemplate, requestrow
from ..templates.requests import requestrow as rr
from ..send_update import send_update
import cinje
| 43.847458 | 198 | 0.709702 |
0c0c0154d635c140279cd61ef15b6dfc6c89cd23 | 755 | py | Python | test_knot_hasher.py | mmokko/aoc2017 | 0732ac440775f9e6bd4a8447c665c9b0e6969f74 | [
"MIT"
] | null | null | null | test_knot_hasher.py | mmokko/aoc2017 | 0732ac440775f9e6bd4a8447c665c9b0e6969f74 | [
"MIT"
] | null | null | null | test_knot_hasher.py | mmokko/aoc2017 | 0732ac440775f9e6bd4a8447c665c9b0e6969f74 | [
"MIT"
] | null | null | null | from unittest import TestCase
from day10 import KnotHasher
| 30.2 | 72 | 0.658278 |
0c0c55cfe0bc18dae70bf566cb7d439dd048fafe | 602 | py | Python | udp/src/server.py | matthewchute/net-prot | 82d2d92b3c88afb245161780fdd7909d7bf15eb1 | [
"MIT"
] | null | null | null | udp/src/server.py | matthewchute/net-prot | 82d2d92b3c88afb245161780fdd7909d7bf15eb1 | [
"MIT"
] | null | null | null | udp/src/server.py | matthewchute/net-prot | 82d2d92b3c88afb245161780fdd7909d7bf15eb1 | [
"MIT"
] | null | null | null | import constants, helpers, os
temp_msg = None
whole_msg = b''
file_path = None
helpers.sock.bind(constants.IP_PORT)
print "Server Ready"
# recieve
while temp_msg != constants.EOF:
datagram = helpers.sock.recvfrom(constants.BUFFER_SIZE)
temp_msg = datagram[0]
if file_path is None:
print("Receiving " + temp_msg.decode() + "...")
file_path = os.path.join(constants.SERVER_FILE_PATH, temp_msg.decode())
else:
whole_msg += temp_msg
whole_msg = whole_msg.strip(constants.EOF)
with open(file_path, 'wb') as sFile:
sFile.write(whole_msg)
print "Received"
| 22.296296 | 79 | 0.696013 |
0c0dcfc232bbe604e854e762de0825bd246ecc01 | 3,697 | py | Python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/hostname_configuration.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/hostname_configuration.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/hostname_configuration.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
| 48.012987 | 94 | 0.678117 |
0c0decf0160c2c2495315ba2014b0b8cb06458ac | 4,717 | py | Python | src/interactive_conditional_samples.py | 50417/gpt-2 | 0e0b3c97efb0048abffb2947aaa8573a783706ed | [
"MIT"
] | null | null | null | src/interactive_conditional_samples.py | 50417/gpt-2 | 0e0b3c97efb0048abffb2947aaa8573a783706ed | [
"MIT"
] | null | null | null | src/interactive_conditional_samples.py | 50417/gpt-2 | 0e0b3c97efb0048abffb2947aaa8573a783706ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
model_name='117M',
seed=None,
nsamples=1000,
batch_size=1,
length=None,
temperature=1,
top_k=0,
top_p=0.0
):
"""
Interactively run the model
:model_name=117M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
:nsamples=1 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
overriding top_k if set to a value > 0. A good setting is 0.9.
"""
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name)
hparams = model.default_hparams()
with open(os.path.join('models', model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
print(length)
#elif length > hparams.n_ctx:
# raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
#config = tf.ConfigProto(device_count={'GPU': 0})
config = tf.ConfigProto()
with tf.Session(graph=tf.Graph(),config=config) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
raw_text = """Model {"""
#input("Model prompt >>> ")
context_tokens = enc.encode(raw_text)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
saver.restore(sess, ckpt)
from datetime import datetime
#while True:
generated = 0
import time
grand_start = time.time()
for cnt in range(nsamples // batch_size):
start_per_sample = time.time()
output_text = raw_text
text = raw_text
context_tokens = enc.encode(text)
#raw_text = input("Model prompt >>> ")
# while not raw_text:
# print('Prompt should not be empty!')
# raw_text = input("Model prompt >>> ")
#print(context_tokens)
#file_to_save.write(raw_text)
#for cnt in range(nsamples // batch_size):
while "<|endoftext|>" not in text:
out = sess.run(output, feed_dict={context: [context_tokens for _ in range(batch_size)]})[:,
len(context_tokens):]
for i in range(batch_size):
#generated += 1
text = enc.decode(out[i])
if "<|endoftext|>" in text:
sep = "<|endoftext|>"
rest = text.split(sep, 1)[0]
output_text += rest
break
context_tokens = enc.encode(text)
output_text += text
print("=" * 40 + " SAMPLE " + str(cnt+12) + " " + "=" * 40)
minutes, seconds = divmod(time.time() - start_per_sample, 60)
print("Output Done : {:0>2}:{:05.2f}".format(int(minutes),seconds) )
print("=" * 80)
with open("Simulink_sample/sample__"+str(cnt+12)+".mdl","w+") as f:
f.write(output_text)
elapsed_total = time.time()-grand_start
hours, rem = divmod(elapsed_total,3600)
minutes, seconds = divmod(rem, 60)
print("Total time to generate 1000 samples :{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
if __name__ == '__main__':
fire.Fire(interact_model)
| 38.349593 | 116 | 0.606954 |
0c0e5be12d46a3b1b4e3d634643649fcf6a3f4da | 291 | py | Python | todofy/tests/conftest.py | bokiex/eti_todo | 1c636d0973c57d4253440b4528185dba0ecb9d05 | [
"BSD-3-Clause"
] | 1 | 2019-11-29T09:52:19.000Z | 2019-11-29T09:52:19.000Z | todofy/tests/conftest.py | bokiex/eti_todo | 1c636d0973c57d4253440b4528185dba0ecb9d05 | [
"BSD-3-Clause"
] | 28 | 2019-11-28T20:02:48.000Z | 2022-02-10T14:04:45.000Z | todofy/tests/conftest.py | bokiex/eti_todo | 1c636d0973c57d4253440b4528185dba0ecb9d05 | [
"BSD-3-Clause"
] | null | null | null | import pytest
| 16.166667 | 46 | 0.704467 |
0c0e6124651142c0387644ad144cc2392388c0c5 | 33 | py | Python | Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py | ruben69695/python-course | a3d3532279510fa0315a7636c373016c7abe4f0a | [
"MIT"
] | 1 | 2019-01-27T20:44:53.000Z | 2019-01-27T20:44:53.000Z | Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py | ruben69695/python-course | a3d3532279510fa0315a7636c373016c7abe4f0a | [
"MIT"
] | null | null | null | Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py | ruben69695/python-course | a3d3532279510fa0315a7636c373016c7abe4f0a | [
"MIT"
] | null | null | null | import saludos
saludos.saludar() | 11 | 17 | 0.818182 |
0c0ea1386a3f6993039b27ca1ae2f4e56ebc457c | 1,033 | py | Python | question_bank/split-array-into-fibonacci-sequence/split-array-into-fibonacci-sequence.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 9 | 2020-08-12T10:01:00.000Z | 2022-01-05T04:37:48.000Z | question_bank/split-array-into-fibonacci-sequence/split-array-into-fibonacci-sequence.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 1 | 2021-02-16T10:19:31.000Z | 2021-02-16T10:19:31.000Z | question_bank/split-array-into-fibonacci-sequence/split-array-into-fibonacci-sequence.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 4 | 2020-08-12T10:13:31.000Z | 2021-11-05T01:26:58.000Z | # -*- coding: utf-8 -*-
# @Author : LG
"""
148 ms, Python3 35.57%
13.7 MB, Python3 36.81%
""" | 30.382353 | 159 | 0.460794 |
0c111c07238e7921c9ce9cb0615b8ac96b16babf | 2,771 | py | Python | convert_bootswatch_vurple.py | douglaskastle/bootswatch | cb8f368c8d3671afddae487736d7cba6509b7f5b | [
"MIT"
] | null | null | null | convert_bootswatch_vurple.py | douglaskastle/bootswatch | cb8f368c8d3671afddae487736d7cba6509b7f5b | [
"MIT"
] | null | null | null | convert_bootswatch_vurple.py | douglaskastle/bootswatch | cb8f368c8d3671afddae487736d7cba6509b7f5b | [
"MIT"
] | null | null | null | import re
import os
values = {
'uc': 'Vurple',
'lc': 'vurple',
'cl': '#116BB7',
}
if __name__ == '__main__':
main()
| 35.075949 | 139 | 0.587153 |
0c117a09b3c94bdc715dd3e404e0bc7ed330ac20 | 721 | py | Python | python/interface_getPixel.py | BulliB/PixelTable | f08ff3a7908857583f3cbc1b689abf2e8739f7d8 | [
"BSD-2-Clause"
] | 2 | 2019-10-28T14:33:31.000Z | 2019-10-30T10:08:58.000Z | python/interface_getPixel.py | BulliB/PixelTable | f08ff3a7908857583f3cbc1b689abf2e8739f7d8 | [
"BSD-2-Clause"
] | 33 | 2019-10-28T14:17:26.000Z | 2020-02-22T11:04:02.000Z | python/interface_getPixel.py | BulliB/PixelTable | f08ff3a7908857583f3cbc1b689abf2e8739f7d8 | [
"BSD-2-Clause"
] | 2 | 2019-11-08T11:14:33.000Z | 2019-11-19T21:22:54.000Z | #!/usr/bin/python3
from validData import *
from command import *
from readback import *
import sys
import time
# Expected Input
# 1: Row -> 0 to 9
# 2: Column -> 0 to 19
if (
isInt(sys.argv[1]) and strLengthIs(sys.argv[1],1) and
isInt(sys.argv[2]) and (strLengthIs(sys.argv[2],1) or strLengthIs(sys.argv[2],2))
):
command = ["PixelToWeb"]
command.append(sys.argv[1])
command.append(sys.argv[2])
setNewCommand(command)
time.sleep(.3)
print(readbackGet())
readbackClear()
else:
f = open("/var/www/pixel/python/.error", "a")
f.write(sys.argv[0] + '\n')
f.write(sys.argv[1] + '\n')
f.write(sys.argv[2] + '\n')
f.write(sys.argv[3] + '\n')
f.close()
| 21.848485 | 89 | 0.601942 |
0c11d6edd1fa7404e67e7a29c7dcaef50cd598a8 | 1,834 | py | Python | FunTOTP/interface.py | Z33DD/FunTOTP | 912c1a4a307af6a495f12a82305ae7dbf49916a2 | [
"Unlicense"
] | 3 | 2020-01-19T17:10:37.000Z | 2022-02-19T18:39:20.000Z | FunTOTP/interface.py | Z33DD/FunTOTP | 912c1a4a307af6a495f12a82305ae7dbf49916a2 | [
"Unlicense"
] | null | null | null | FunTOTP/interface.py | Z33DD/FunTOTP | 912c1a4a307af6a495f12a82305ae7dbf49916a2 | [
"Unlicense"
] | 1 | 2020-01-19T20:25:18.000Z | 2020-01-19T20:25:18.000Z | from getpass import getpass
from colorama import init, Fore, Back, Style
yes = ['Y', 'y', 'YES', 'yes', 'Yes']
| 25.123288 | 76 | 0.490185 |
0c1456a33812aa7157896227520f3def0676ad91 | 885 | py | Python | envdsys/envcontacts/apps.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | 1 | 2021-11-06T19:22:53.000Z | 2021-11-06T19:22:53.000Z | envdsys/envcontacts/apps.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | 25 | 2019-06-18T20:40:36.000Z | 2021-07-23T20:56:48.000Z | envdsys/envcontacts/apps.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | null | null | null | from django.apps import AppConfig
| 29.5 | 64 | 0.523164 |
0c16dc36c44b72bd40c213bf05ac31ec7273fca3 | 8,053 | py | Python | tests/interpreter.py | AndrejHatzi/Haya | 31291142decf6a172149516f08a2f2d68115e2dc | [
"MIT"
] | null | null | null | tests/interpreter.py | AndrejHatzi/Haya | 31291142decf6a172149516f08a2f2d68115e2dc | [
"MIT"
] | 1 | 2019-02-14T16:47:10.000Z | 2019-02-14T16:47:10.000Z | tests/interpreter.py | AndrejHatzi/Haya | 31291142decf6a172149516f08a2f2d68115e2dc | [
"MIT"
] | null | null | null | from sly import Lexer
from sly import Parser
import sys
#--------------------------
# While Loop
# Del Var
# Print stmt
# EQEQ, LEQ
#--------------------------
#=> This version has parenthesis precedence!
if __name__ == '__main__':
lexer = BasicLexer()
parser = BasicParser()
env = {}
try:
file : str = sys.argv[1]
try:
with open(file, 'r', encoding="utf-8") as f:
line : str
for line in f:
try:
text = line
except EOFError:
break
if text:
tree = parser.parse(lexer.tokenize(text))
BasicExecute(tree, env)
except:
print('the specified file "{}" was not found!'.format(file))
except:
while True:
try:
text = input('haya development edition > ')
except EOFError:
break
if text:
tree = parser.parse(lexer.tokenize(text))
BasicExecute(tree, env)
#parsetree = parser.parse(lexer.tokenize(text))
#print(parsetree)
| 27.768966 | 83 | 0.46343 |
0c16fa03a21f3d8b261783bab62dd87a48e2c16d | 1,012 | py | Python | braintree/apple_pay_card.py | futureironman/braintree_python | 26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac | [
"MIT"
] | 182 | 2015-01-09T05:26:46.000Z | 2022-03-16T14:10:06.000Z | braintree/apple_pay_card.py | futureironman/braintree_python | 26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac | [
"MIT"
] | 95 | 2015-02-24T23:29:56.000Z | 2022-03-13T03:27:58.000Z | braintree/apple_pay_card.py | futureironman/braintree_python | 26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac | [
"MIT"
] | 93 | 2015-02-19T17:59:06.000Z | 2022-03-19T17:01:25.000Z | import braintree
from braintree.resource import Resource
| 30.666667 | 132 | 0.662055 |
0c1758002a3f4c2e5686dc0e50493960b4c98bea | 4,054 | py | Python | src/lda_without_tf_idf_sports.py | mspkvp/MiningOpinionTweets | 23f05b4cea22254748675e03a51844da1dff70ac | [
"MIT"
] | 1 | 2016-01-18T14:30:31.000Z | 2016-01-18T14:30:31.000Z | src/lda_without_tf_idf_sports.py | mspkvp/MiningOpinionTweets | 23f05b4cea22254748675e03a51844da1dff70ac | [
"MIT"
] | null | null | null | src/lda_without_tf_idf_sports.py | mspkvp/MiningOpinionTweets | 23f05b4cea22254748675e03a51844da1dff70ac | [
"MIT"
] | null | null | null | from __future__ import print_function
from time import time
import csv
import sys
import os
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import lda
import logging
logging.basicConfig(filename='lda_analyser.log', level=logging.DEBUG)
entities = ['jose_mourinho',
'cristiano_ronaldo',
'ruben_neves',
'pinto_da_costa',
'jorge_jesus',
'lionel_messi',
'eusebio',
'luisao',
'paulo_bento',
'iker_casillas',
'joao_moutinho',
'jorge_mendes',
'julen_lopetegui',
'rui_vitoria',
'ricardo',
'luis_figo',
'jose_socrates',
'antonio_costa',
'benfica',
'futebol_porto',
'sporting']
if not os.path.exists("results"):
os.makedirs("results")
for n_topics in [10, 20, 50, 100]:
n_features = 10000
n_top_words = int(sys.argv[1]) + 1
corpus = []
topics_write_file = csv.writer(open("results/lda_topics_{}topics_{}words_{}.csv".format(n_topics,
n_top_words - 1,
"sports"), "wb"),
delimiter="\t", quotechar='|', quoting=csv.QUOTE_MINIMAL)
write_file = csv.writer(open("results/lda_topics_{}topics_{}words_mapping_{}.csv".format(n_topics,
n_top_words - 1,
"sports"), "wb"),
delimiter="\t", quotechar='|', quoting=csv.QUOTE_MINIMAL)
entity_day_dict = dict()
# read all files and store their contents on a dictionary
for i in os.listdir(os.getcwd() + "/filtered_tweets"):
for filename in os.listdir(os.getcwd() + "/filtered_tweets" + "/" + i):
if(filename.split(".")[0] in entities):
entity_day_dict[i+" "+filename] = open(os.getcwd() + "/filtered_tweets" + "/" + i + "/" + filename, 'r').read()
entity_day_key_index = dict()
i = 0
for key in entity_day_dict:
entity_day_key_index[i] = key.split(" ")
corpus.append(entity_day_dict[key])
i += 1
# Use tf (raw term count) features for LDA.
logging.info("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(corpus)
logging.info("done in %0.3fs." % (time() - t0))
logging.info("Fitting LDA models with tf")
model = lda.LDA(n_topics=n_topics, n_iter=1500, random_state=1)
#LatentDirichletAllocation(n_topics=n_topics, max_iter=5, learning_method='online', #learning_offset=50., random_state=0)
t0 = time()
model.fit(tf)
logging.info("done in %0.3fs." % (time() - t0))
topic_word = model.topic_word_
doc_topic = model.doc_topic_
logging.info("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(topic_word, doc_topic, tf_feature_names, n_top_words, entity_day_key_index) | 37.192661 | 129 | 0.561914 |
0c18dab0a973e417315a5c146525d7d91b9da0fe | 4,476 | py | Python | glab_common/allsummary.py | gentnerlab/glab-common-py | 9ff87ac6ca5f07c0d550594da38080bd3ee916db | [
"BSD-3-Clause"
] | 3 | 2016-03-07T19:51:32.000Z | 2018-11-08T22:34:14.000Z | glab_common/allsummary.py | gentnerlab/glab-common-py | 9ff87ac6ca5f07c0d550594da38080bd3ee916db | [
"BSD-3-Clause"
] | 16 | 2015-02-19T04:32:01.000Z | 2018-11-14T20:09:09.000Z | glab_common/allsummary.py | gentnerlab/glab-common-py | 9ff87ac6ca5f07c0d550594da38080bd3ee916db | [
"BSD-3-Clause"
] | 4 | 2015-04-01T23:55:25.000Z | 2018-02-28T18:23:29.000Z | from __future__ import print_function
import re
import datetime as dt
from behav.loading import load_data_pandas
import warnings
import subprocess
import os
import sys
process_fname = "/home/bird/opdat/panel_subject_behavior"
box_nums = []
bird_nums = []
processes = []
with open(process_fname, "rt") as psb_file:
for line in psb_file.readlines():
if line.startswith("#") or not line.strip():
pass # skip comment lines & blank lines
else:
spl_line = line.split()
if spl_line[1] == "1": # box enabled
box_nums.append(spl_line[0])
bird_nums.append(int(spl_line[2]))
processes.append(spl_line[4])
# rsync magpis
hostname = os.uname()[1]
if "magpi" in hostname:
for box_num in box_nums:
box_hostname = box_num
rsync_src = "bird@{}:/home/bird/opdat/".format(box_hostname)
rsync_dst = "/home/bird/opdat/"
print("Rsync src: {}".format(rsync_src), file=sys.stderr)
print("Rsync dest: {}".format(rsync_dst), file=sys.stderr)
rsync_output = subprocess.run(["rsync", "-avz", "--exclude Generated_Songs/", rsync_src, rsync_dst])
subjects = ["B%d" % (bird_num) for bird_num in bird_nums]
data_folder = "/home/bird/opdat"
with open("/home/bird/all.summary", "w") as as_file:
as_file.write(
"this all.summary generated at %s\n" % (dt.datetime.now().strftime("%x %X"))
)
as_file.write(
"FeedErr(won't come up, won't go down, already up, resp during feed)\n"
)
# Now loop through each bird and grab the error info from each summaryDAT file
for (box, bird, proc) in zip(box_nums, bird_nums, processes):
try:
# make sure box is a string
box = str(box)
if proc in ("shape", "lights", "pylights", "lights.py"):
as_file.write("%s\tB%d\t %s\n" % (box, bird, proc))
else:
summaryfname = "/home/bird/opdat/B%d/%d.summaryDAT" % (bird, bird)
with open(summaryfname, "rt") as sdat:
sdata = sdat.read()
m = re.search(r"failures today: (\w+)", sdata)
hopper_failures = m.group(1)
m = re.search(r"down failures today: (\w+)", sdata)
godown_failures = m.group(1)
m = re.search(r"up failures today: (\w+)", sdata)
goup_failures = m.group(1)
m = re.search(r"Responses during feed: (\w+)", sdata)
resp_feed = m.group(1)
subj = "B%d" % (bird)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
behav_data = load_data_pandas([subj], data_folder)
df = behav_data[subj]
# df = df[~pd.isnull(data.index)]
todays_data = df[
(df.index.date - dt.datetime.today().date()) == dt.timedelta(days=0)
]
feeder_ops = sum(todays_data["reward"].values)
trials_run = len(todays_data)
noRs = sum(todays_data["response"].values == "none")
TOs = trials_run - feeder_ops - noRs
last_trial_time = todays_data.sort_index().tail().index[-1]
if last_trial_time.day != dt.datetime.now().day:
datediff = "(not today)"
else:
minutes_ago = (dt.datetime.now() - last_trial_time).seconds / 60
datediff = "(%d mins ago)" % (minutes_ago)
outline = (
"%s\tB%d\t %s \ttrls=%s \tfeeds=%d \tTOs=%d \tnoRs=%d \tFeedErrs=(%s,%s,%s,%s) \tlast @ %s %s\n"
% (
box,
bird,
proc,
trials_run,
feeder_ops,
TOs,
noRs,
hopper_failures,
godown_failures,
goup_failures,
resp_feed,
last_trial_time.strftime("%x %X"),
datediff,
)
)
as_file.write(outline)
except Exception as e:
as_file.write(
"%s\tB%d\t Error opening SummaryDat or incorrect format\n" % (box, bird)
)
print(e)
| 38.921739 | 122 | 0.50849 |
0c194bbda6bf427b571869e7619f91e9298b8f04 | 2,239 | py | Python | api/v1/circuits.py | tahoe/janitor | b6ce73bddc13c70079bdc7ba4c7a9b3ee0cad0bd | [
"Apache-2.0"
] | 52 | 2019-08-14T10:48:26.000Z | 2022-03-30T18:09:08.000Z | api/v1/circuits.py | tahoe/janitor | b6ce73bddc13c70079bdc7ba4c7a9b3ee0cad0bd | [
"Apache-2.0"
] | 18 | 2019-08-20T04:13:37.000Z | 2022-01-31T12:40:12.000Z | api/v1/circuits.py | tahoe/janitor | b6ce73bddc13c70079bdc7ba4c7a9b3ee0cad0bd | [
"Apache-2.0"
] | 12 | 2019-08-14T10:49:11.000Z | 2020-09-02T18:56:34.000Z | from app.models import Circuit, CircuitSchema, Provider
from flask import make_response, jsonify
from app import db
def read_all():
"""
This function responds to a request for /circuits
with the complete lists of circuits
:return: sorted list of circuits
"""
circuits = Circuit.query.all()
schema = CircuitSchema(many=True)
return schema.dump(circuits).data
def create(circuit):
"""
creates a circuit! checks to see if the provider_cid is unique and
that the provider exists.
:return: circuit
"""
provider_cid = circuit.get('provider_cid')
provider_id = circuit.get('provider_id')
circuit_exists = Circuit.query.filter(
Circuit.provider_cid == provider_cid
).one_or_none()
provider_exists = Provider.query.filter(Provider.id == provider_id).one_or_none()
if circuit_exists:
text = f'Circuit {provider_cid} already exists'
return make_response(jsonify(error=409, message=text), 409)
if not provider_exists:
text = f'Provider {provider_id} does not exist.' 'Unable to create circuit'
return make_response(jsonify(error=403, message=text), 403)
schema = CircuitSchema()
new_circuit = schema.load(circuit, session=db.session).data
db.session.add(new_circuit)
db.session.commit()
data = schema.dump(new_circuit).data
return data, 201
def update(circuit_id, circuit):
    """
    Update an existing circuit.

    :param circuit_id: primary key of the circuit to update
    :param circuit: dict of fields to merge into the circuit
    :return: (serialized circuit, 201) on success, or a 404 error response
             when the circuit does not exist
    """
    existing = Circuit.query.filter_by(id=circuit_id).one_or_none()
    if not existing:
        # BUG FIX: the body previously reported error=409 while the HTTP
        # status was 404; both now consistently say 404. The message was
        # also a pointless f-string with no placeholders.
        text = 'Can not update a circuit that does not exist!'
        return make_response(jsonify(error=404, message=text), 404)
    schema = CircuitSchema()
    # Renamed from ``update`` so the local no longer shadows this function.
    merged = schema.load(circuit, session=db.session).data
    db.session.merge(merged)
    db.session.commit()
    data = schema.dump(existing).data
    return data, 201
| 26.341176 | 85 | 0.676195 |
0c1a66a69d47f4abcbb592a1b69142a384d2f89b | 2,311 | py | Python | youtube_related/client.py | kijk2869/youtube-related | daabefc60277653098e1d8e266258b71567796d8 | [
"MIT"
] | 7 | 2020-07-13T00:15:37.000Z | 2021-12-06T14:35:14.000Z | youtube_related/client.py | kijk2869/youtube-related | daabefc60277653098e1d8e266258b71567796d8 | [
"MIT"
] | 11 | 2020-07-17T16:11:16.000Z | 2022-03-01T23:02:54.000Z | youtube_related/client.py | kijk2869/youtube-related | daabefc60277653098e1d8e266258b71567796d8 | [
"MIT"
] | 3 | 2020-11-04T11:44:50.000Z | 2022-01-11T04:21:01.000Z | import asyncio
import json
import re
from collections import deque
from typing import Deque, Dict, List, Match, Pattern
import aiohttp
from .error import RateLimited
# Request headers for fetching YouTube pages; presumably a desktop
# User-Agent is needed so the served HTML embeds ytInitialData — confirm.
headers: dict = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
}
# Matches the JSON blob assigned to ytInitialData in the page source
# (either the bare name or the window["ytInitialData"] form); group 1
# captures the raw JSON object text, non-greedily up to the first "};".
DATA_JSON: Pattern = re.compile(
    r'(?:window\["ytInitialData"\]|ytInitialData)\W?=\W?({.*?});'
)
| 29.253165 | 89 | 0.63479 |
0c1b29cfd60d9ee7d4e6451a8264af9459d2ddcb | 2,522 | py | Python | app/request/migrations/0001_initial.py | contestcrew/2019SeoulContest-Backend | 2e99cc6ec6a712911da3b79412ae84a9d35453e1 | [
"MIT"
] | null | null | null | app/request/migrations/0001_initial.py | contestcrew/2019SeoulContest-Backend | 2e99cc6ec6a712911da3b79412ae84a9d35453e1 | [
"MIT"
] | 32 | 2019-08-30T13:09:28.000Z | 2021-06-10T19:07:56.000Z | app/request/migrations/0001_initial.py | contestcrew/2019SeoulContest-Backend | 2e99cc6ec6a712911da3b79412ae84a9d35453e1 | [
"MIT"
] | 3 | 2019-09-19T10:12:50.000Z | 2019-09-30T15:59:13.000Z | # Generated by Django 2.2.5 on 2019-09-24 09:11
from django.db import migrations, models
import django.db.models.deletion
| 50.44 | 179 | 0.596352 |
0c1bfa28ddb2f6e0a2bc571eb9a019b7ef92cb0d | 690 | py | Python | field/FieldFactory.py | goph-R/NodeEditor | 5cc4749785bbd348f3db01b27c1533b4caadb920 | [
"Apache-2.0"
] | null | null | null | field/FieldFactory.py | goph-R/NodeEditor | 5cc4749785bbd348f3db01b27c1533b4caadb920 | [
"Apache-2.0"
] | null | null | null | field/FieldFactory.py | goph-R/NodeEditor | 5cc4749785bbd348f3db01b27c1533b4caadb920 | [
"Apache-2.0"
] | null | null | null | from PySide2.QtGui import QVector3D, QColor
from field.ColorField import ColorField
from field.FloatField import FloatField
from field.StringField import StringField
from field.Vector3Field import Vector3Field
| 23 | 44 | 0.618841 |
0c1cee1a04ba87b43d0454e7e5294887e53530fd | 1,348 | py | Python | scrapy/utils/engine.py | sulochanaviji/scrapy | 6071c82e7ac80136e844b56a09d5d31aa8f41296 | [
"BSD-3-Clause"
] | 8 | 2021-02-01T07:55:19.000Z | 2021-03-22T18:17:47.000Z | scrapy/utils/engine.py | sulochanaviji/scrapy | 6071c82e7ac80136e844b56a09d5d31aa8f41296 | [
"BSD-3-Clause"
] | 30 | 2021-02-17T14:17:57.000Z | 2021-03-03T16:57:16.000Z | scrapy/utils/engine.py | sulochanaviji/scrapy | 6071c82e7ac80136e844b56a09d5d31aa8f41296 | [
"BSD-3-Clause"
] | 3 | 2021-08-21T04:09:17.000Z | 2021-08-25T01:00:41.000Z | """Some debugging functions for working with the Scrapy engine"""
# used in global tests code
from time import time # noqa: F401
def get_engine_status(engine):
    """Return a report of the current engine status"""
    # Each entry is a Python expression evaluated against the live engine;
    # the expression string doubles as the label in the returned report.
    expressions = [
        "time()-engine.start_time",
        "engine.has_capacity()",
        "len(engine.downloader.active)",
        "engine.scraper.is_idle()",
        "engine.spider.name",
        "engine.spider_is_idle(engine.spider)",
        "engine.slot.closing",
        "len(engine.slot.inprogress)",
        "len(engine.slot.scheduler.dqs or [])",
        "len(engine.slot.scheduler.mqs)",
        "len(engine.scraper.slot.queue)",
        "len(engine.scraper.slot.active)",
        "engine.scraper.slot.active_size",
        "engine.scraper.slot.itemproc_size",
        "engine.scraper.slot.needs_backout()",
    ]
    report = []
    for expression in expressions:
        try:
            value = eval(expression)
        except Exception as e:
            # A failing probe is reported, not raised.
            value = f"{type(e).__name__} (exception)"
        report.append((expression, value))
    return report
| 27.510204 | 65 | 0.609792 |
0c1da110a449d15b92ca6653ffd9fc76029d3fee | 2,588 | py | Python | share/pegasus/init/population/scripts/full_res_pop_raster.py | hariharan-devarajan/pegasus | d0641541f2eccc69dd6cc5a09b0b51303686d3ac | [
"Apache-2.0"
] | null | null | null | share/pegasus/init/population/scripts/full_res_pop_raster.py | hariharan-devarajan/pegasus | d0641541f2eccc69dd6cc5a09b0b51303686d3ac | [
"Apache-2.0"
] | null | null | null | share/pegasus/init/population/scripts/full_res_pop_raster.py | hariharan-devarajan/pegasus | d0641541f2eccc69dd6cc5a09b0b51303686d3ac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from typing import Dict
import optparse
import numpy as np
import rasterio
from rasterio import features
def main(county_pop_file, spatial_dist_file, fname_out, no_data_val=-9999):
    '''
    Combine county-level population counts with a spatial distribution
    raster into a full-resolution population raster.

    county_pop_file: County level population estimates
    spatial_dist_file: Spatial projection of population distribution
    fname_out: path of the output GeoTIFF
    no_data_val: value written into pixels missing in either input

    NOTE(review): both rasters are assumed to share the same grid (shape,
    transform) and to be single-band after np.squeeze — confirm upstream.
    '''
    # -------------------------------------
    # Open and read raster file with county
    # level population estimates
    # -------------------------------------
    with rasterio.open(county_pop_file) as rastf:
        county_pop = rastf.read()
        nodatacp = rastf.nodata
    # --------------------------------------------------------------
    # Open and read raster file with spatial population distribution
    # --------------------------------------------------------------
    with rasterio.open(spatial_dist_file) as rastf:
        pop_dist = rastf.read()
        nodatasp = rastf.nodata
        prf = rastf.profile
    # Drop the leading band axis; output starts fully masked.
    county_pop = np.squeeze(county_pop)
    pop_dist = np.squeeze(pop_dist)
    pop_est = np.ones(pop_dist.shape)*no_data_val
    # Keep only pixels valid (non-nodata) in BOTH inputs.
    ind1 = np.where(county_pop.flatten() != nodatacp)[0]
    ind2 = np.where(pop_dist.flatten() != nodatasp)[0]
    ind = np.intersect1d(ind1, ind2)
    ind2d = np.unravel_index(ind, pop_dist.shape)
    # Per-pixel estimate = county total x pixel's share, rounded to people.
    pop_est[ind2d] = county_pop[ind2d] * pop_dist[ind2d]
    pop_est[ind2d] = np.round(pop_est[ind2d])
    # Update raster meta-data
    prf.update(nodata=no_data_val)
    # Write out spatially distributed population estimate to raster
    with open(fname_out, "wb") as fout:
        with rasterio.open(fout.name, 'w', **prf) as out_raster:
            out_raster.write(pop_est.astype(rasterio.float32), 1)
import sys  # BUG FIX: sys.exit is called below but sys was never imported

# Command-line interface: all three options are required.
argparser = optparse.OptionParser()
argparser.add_option('--population-file', action='store', dest='pop_file',
                     help='County level population estimates')
argparser.add_option('--dist-file', action='store', dest='dist_file',
                     help='Spatial projection of population distribution')
argparser.add_option('--out-file', action='store', dest='out_file',
                     help='Filename of the output')
(options, args) = argparser.parse_args()
if not options.pop_file:
    print('Please specify a population file with --population-file')
    sys.exit(1)
if not options.dist_file:
    print('Please specify a distribution file with --dist-file')
    sys.exit(1)
if not options.out_file:
    print('Please specify the name of the output with --out-file')
    sys.exit(1)
main(options.pop_file, options.dist_file, options.out_file)
0c1e7c3ccf6eceb66230761a4bde8362593a8064 | 9,557 | py | Python | TestCase/pr_test_case.py | openeuler-mirror/ci-bot | c50056ff73670bc0382e72cf8c653c01e1aed5e1 | [
"MulanPSL-1.0"
] | 1 | 2020-01-12T07:35:34.000Z | 2020-01-12T07:35:34.000Z | TestCase/pr_test_case.py | openeuler-mirror/ci-bot | c50056ff73670bc0382e72cf8c653c01e1aed5e1 | [
"MulanPSL-1.0"
] | null | null | null | TestCase/pr_test_case.py | openeuler-mirror/ci-bot | c50056ff73670bc0382e72cf8c653c01e1aed5e1 | [
"MulanPSL-1.0"
] | 2 | 2020-03-04T02:09:14.000Z | 2020-03-07T03:00:40.000Z | import os
import requests
import subprocess
import time
import yaml
if __name__ == '__main__':
    # Integration test driver for ci-bot pull-request handling: prepares a
    # real PR on the configured repo, then checks the bot's reactions.
    # Test configuration (target repo and forked owner) comes from config.yaml.
    with open('config.yaml', 'r') as f:
        info = yaml.load(f.read())['test case']
    owner = info[0]['owner']
    repo = info[1]['repo']
    local_owner = info[2]['local_owner']
    # NOTE(review): PullRequestOperation is defined elsewhere in this file
    # (not visible in this chunk); it wraps the hosting platform's PR API.
    pr = PullRequestOperation(owner, repo, local_owner)
    print('Prepare:')
    print('step 1/4: git clone')
    pr.git_clone()
    print('\nstep 2/4: change file')
    pr.change_file()
    print('\nstep 3/4: git push')
    pr.git_push()
    print('\nstep 4/4: pull request')
    number = pr.pull_request()
    print('the number of the pull request: {}'.format(number))
    # Each sleep gives the bot time to react to the event before asserting.
    time.sleep(10)
    print('\n\nTest:')
    print('test case 1: without comments by contributor')
    comments = pr.get_all_comments(number)
    labels = pr.get_all_labels(number)
    print('labels: {}'.format(labels))
    errors = 0
    if len(comments) == 0:
        print('no "Welcome to ci-bot Community."')
        print('no "Thanks for your pull request."')
    else:
        if 'Welcome to ci-bot Community.' not in comments[0]:
            print('no "Welcome to ci-bot Community."')
            errors += 1
        if 'Thanks for your pull request.' not in comments[-1]:
            print('no "Thanks for your pull request."')
            errors += 1
    # The bot must have labelled the PR with exactly one CLA state.
    if len(labels) == 0:
        print('no label "ci-bot-cla/yes" or "ci-bot-cla/no"')
        errors += 1
    elif len(labels) > 0:
        if 'ci-bot-cla/yes' not in labels:
            print('no label "ci-bot-cla/yes"')
            errors += 1
        if 'ci-bot-cla/no' not in labels:
            print('no label "ci-bot-cla/no"')
            errors += 1
    if errors == 0:
        print('test case 1 succeeded')
    else:
        print('test case 1 failed')
    # Self-review must be rejected: the PR author cannot /lgtm their own PR.
    print('\ntest case 2: /lgtm')
    pr.comment(number, '/lgtm')
    time.sleep(10)
    labels = pr.get_all_labels(number)
    print('labels: {}'.format(labels))
    comments = pr.get_all_comments(number)
    if 'can not be added in your self-own pull request' in comments[-1]:
        print('test case 2 succeeded')
    else:
        print('test case 2 failed')
        print(comments[-1])
    print('\ntest case 3: comment /lgtm by others')
    pr.comment_by_others(number, '/lgtm')
    time.sleep(10)
    labels = pr.get_all_labels(number)
    print('labels: {}'.format(labels))
    comments = pr.get_all_comments(number)
    if 'Thanks for your review' in comments[-1]:
        print('test case 3 succeeded')
    else:
        print('test case 3 failed')
        print(comments[-1])
    # Non-maintainers may /lgtm but must not be able to /approve.
    print('\ntest case 4: comment /approve by others')
    pr.comment_by_others(number, '/approve')
    time.sleep(10)
    labels = pr.get_all_labels(number)
    print('labels: {}'.format(labels))
    comments = pr.get_all_comments(number)
    if 'has no permission to add' in comments[-1]:
        print('test case 4 succeeded')
    else:
        print('test case 4 failed')
        print(comments[-1])
    print('\ntest case 5: /approve')
    pr.comment(number, '/approve')
    time.sleep(10)
    labels = pr.get_all_labels(number)
    print('labels: {}'.format(labels))
    comments = pr.get_all_comments(number)
    if '***approved*** is added in this pull request by' in comments[-1]:
        print('test case 5 succeeded')
    else:
        print('test case 5 failed')
        print(comments[-1])
    # A new commit should drop lgtm and tag the PR as needing a squash.
    print('\ntest case 6: tag stat/need-squash')
    labels_before_commit = pr.get_all_labels(number)
    print('labels_before_commit: {}'.format(labels_before_commit))
    pr.write_2_file()
    time.sleep(10)
    lables_after_commit = pr.get_all_labels(number)
    print('lables_after_commit: {}'.format(lables_after_commit))
    if 'lgtm' not in labels and 'stat/need-squash' in lables_after_commit:
        print('test case 6 succeeded')
    else:
        print('test case 6 failed')
    print('\ntest case 7: add labels')
    pr.add_labels_2_pr(number, '["lgtm"]')
    time.sleep(10)
    labels = pr.get_all_labels(number)
    print('labels: {}'.format(labels))
    if "lgtm" in labels:
        print('test case 7 succeeded')
    else:
        print('test case 7 failed')
    print('\ntest case 8: check-pr')
    pr.comment(number, '/check-pr')
    time.sleep(10)
    code = pr.get_pr_status(number)
    if code == 200:
        print('test case 8 succeeded')
    else:
        print('failed code: {}'.format(code))
        print('test case 8 failed')
| 38.381526 | 206 | 0.548917 |
0c1eb2fd9329de0c031fe686c52f4c0e67ec1227 | 1,103 | py | Python | tempest/api/hybrid_cloud/compute/flavors/test_flavors_operations.py | Hybrid-Cloud/hybrid-tempest | 319e90c6fa6e46925b495c93cd5258f088a30ec0 | [
"Apache-2.0"
] | null | null | null | tempest/api/hybrid_cloud/compute/flavors/test_flavors_operations.py | Hybrid-Cloud/hybrid-tempest | 319e90c6fa6e46925b495c93cd5258f088a30ec0 | [
"Apache-2.0"
] | null | null | null | tempest/api/hybrid_cloud/compute/flavors/test_flavors_operations.py | Hybrid-Cloud/hybrid-tempest | 319e90c6fa6e46925b495c93cd5258f088a30ec0 | [
"Apache-2.0"
] | null | null | null | import testtools
from oslo_log import log
from tempest.api.compute import base
import tempest.api.compute.flavors.test_flavors as FlavorsV2Test
import tempest.api.compute.flavors.test_flavors_negative as FlavorsListWithDetailsNegativeTest
import tempest.api.compute.flavors.test_flavors_negative as FlavorDetailsNegativeTest
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest.lib import decorators
from tempest import test
from tempest import config
CONF = config.CONF
LOG = log.getLogger(__name__)
| 36.766667 | 126 | 0.853128 |
0c1f09091be19e77ace869bcb2f31a8df0eb57b2 | 8,910 | py | Python | dycall/exports.py | demberto/DyCall | b234e7ba535eae71234723bb3d645eb986f96a30 | [
"MIT"
] | null | null | null | dycall/exports.py | demberto/DyCall | b234e7ba535eae71234723bb3d645eb986f96a30 | [
"MIT"
] | null | null | null | dycall/exports.py | demberto/DyCall | b234e7ba535eae71234723bb3d645eb986f96a30 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
dycall.exports
~~~~~~~~~~~~~~
Contains `ExportsFrame` and `ExportsTreeView`.
"""
from __future__ import annotations
import logging
import pathlib
from typing import TYPE_CHECKING
import ttkbootstrap as tk
from ttkbootstrap import ttk
from ttkbootstrap.dialogs import Messagebox
from ttkbootstrap.localization import MessageCatalog as MsgCat
from ttkbootstrap.tableview import Tableview
from dycall._widgets import _TrLabelFrame
from dycall.types import Export, PEExport
from dycall.util import StaticThemedTooltip, get_img
log = logging.getLogger(__name__)
| 34.534884 | 86 | 0.579237 |
0c1fb0aec727010060874060c9a7121a40357346 | 1,899 | py | Python | src/homologs/filter_by_occupancy.py | jlanga/smsk_selection | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | [
"MIT"
] | 4 | 2021-07-18T05:20:20.000Z | 2022-01-03T10:22:33.000Z | src/homologs/filter_by_occupancy.py | jlanga/smsk_selection | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | [
"MIT"
] | 1 | 2017-08-21T07:26:13.000Z | 2018-11-08T13:59:48.000Z | src/homologs/filter_by_occupancy.py | jlanga/smsk_orthofinder | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | [
"MIT"
] | 2 | 2021-07-18T05:20:26.000Z | 2022-03-31T18:23:31.000Z | #!/usr/bin/env python
"""
Filter a fasta alignment according to its occupancy:
filter_by_occupancy.py fasta_raw.fa fasta_trimmed.fa 0.5
"""
import os
import sys
from helpers import fasta_to_dict
def filter_by_occupancy(filename_in, filename_out, min_occupancy=0.5):
    """
    Filter an alignment in fasta format according to the occupancy of its
    columns and write the result in fasta format.

    A column is kept when the fraction of non-gap ("-") characters is at
    least min_occupancy. All sequences in the alignment are assumed to
    have the same length.

    filename_in: path of the input fasta alignment
    filename_out: path of the output fasta file
    min_occupancy: minimum fraction of non-gap characters per column
    """
    fasta_raw = fasta_to_dict(filename_in)
    n_sequences = len(fasta_raw)
    # Alignment length taken from the first sequence.
    alignment_length = len(next(iter(fasta_raw.values())))
    # Transpose the alignment: one string per column.
    columns = tuple(
        "".join(sequence[column_index] for sequence in fasta_raw.values())
        for column_index in range(alignment_length)
    )
    columns_to_keep = [
        column_number
        for column_number, column in enumerate(columns)
        if 1 - float(column.count("-")) / float(n_sequences) >= min_occupancy
    ]
    fasta_trimmed = {
        seqname: "".join(sequence[column_to_keep] for column_to_keep in columns_to_keep)
        for seqname, sequence in fasta_raw.items()
    }
    # BUG FIX: os.makedirs("") raised FileNotFoundError when filename_out
    # had no directory component; exist_ok also removes the exists/create
    # race the old exists() check had.
    out_dir = os.path.dirname(filename_out)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(filename_out, "w") as f_out:
        for seqname, sequence in fasta_trimmed.items():
            f_out.write(
                ">{seqname}\n{sequence}\n".format(seqname=seqname, sequence=sequence)
            )
# CLI entry point: filter_by_occupancy.py <fasta_in> <fasta_out> <min_occupancy>
if __name__ == "__main__":
    if len(sys.argv) != 4:
        sys.stderr.write(
            "ERROR: incorrect number of arguments.\n"
            "python filter_by_occupancy.py fastain fastaout min_occupancy\n"
        )
        sys.exit(1)
    FASTA_IN = sys.argv[1]
    FASTA_OUT = sys.argv[2]
    # min_occupancy arrives as a string; a non-numeric value raises ValueError.
    MIN_OCCUPANCY = float(sys.argv[3])
    filter_by_occupancy(FASTA_IN, FASTA_OUT, MIN_OCCUPANCY)
0c20b529cd83a9fd598afa8e482ff4d521f8b78a | 954 | py | Python | setup.py | eppeters/xontrib-dotenv | f866f557592d822d1ecb2b607c63c4cdecb580e4 | [
"BSD-2-Clause"
] | null | null | null | setup.py | eppeters/xontrib-dotenv | f866f557592d822d1ecb2b607c63c4cdecb580e4 | [
"BSD-2-Clause"
] | null | null | null | setup.py | eppeters/xontrib-dotenv | f866f557592d822d1ecb2b607c63c4cdecb580e4 | [
"BSD-2-Clause"
] | 1 | 2020-03-16T00:39:57.000Z | 2020-03-16T00:39:57.000Z | #!/usr/bin/env python
"""
xontrib-dotenv
-----
Automatically reads .env file from current working directory
or parentdirectories and push variables to environment.
"""
from setuptools import setup
# Packaging metadata for the xontrib-dotenv xonsh extension. The actual
# extension code ships as a .xsh file inside the shared "xontrib" package
# directory, hence package_data instead of regular modules.
setup(
    name='xontrib-dotenv',
    version='0.1',
    description='Reads .env files into environment',
    long_description=__doc__,
    license='BSD',
    url='https://github.com/urbaniak/xontrib-dotenv',
    author='Krzysztof Urbaniak',
    packages=['xontrib'],
    package_dir={'xontrib': 'xontrib'},
    package_data={'xontrib': ['*.xsh']},
    zip_safe=True,
    include_package_data=False,
    platforms='any',
    install_requires=[
        'xonsh>=0.4.6',
    ],
    classifiers=[
        'Environment :: Console',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: System :: Shells',
        'Topic :: System :: System Shells',
    ]
)
| 25.105263 | 60 | 0.631027 |
0c20b7c255ec391f7dad36b9c36ded1071de5e8b | 222 | py | Python | tests/sample_app/urls.py | dreipol/meta-tagger | c1a2f1f8b0c051018a5bb75d4e579d27bd2c27b2 | [
"BSD-3-Clause"
] | 3 | 2016-05-30T07:48:54.000Z | 2017-02-08T21:16:03.000Z | tests/sample_app/urls.py | dreipol/meta-tagger | c1a2f1f8b0c051018a5bb75d4e579d27bd2c27b2 | [
"BSD-3-Clause"
] | null | null | null | tests/sample_app/urls.py | dreipol/meta-tagger | c1a2f1f8b0c051018a5bb75d4e579d27bd2c27b2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import url
from tests.sample_app.views import NewsArticleDetailView
urlpatterns = [
url(r'^(?P<pk>\d+)/$', NewsArticleDetailView.as_view(), name='news-article-detail'),
]
| 27.75 | 88 | 0.702703 |
0c23261891d98100b6ddda0879a36f77857b6f48 | 624 | py | Python | utils/bbox_utils/center_to_corner.py | Jaskaran197/Red-blood-cell-detection-SSD | a33b330ad17454a7425aa7f57818c0a41b4e0ff9 | [
"MIT"
] | null | null | null | utils/bbox_utils/center_to_corner.py | Jaskaran197/Red-blood-cell-detection-SSD | a33b330ad17454a7425aa7f57818c0a41b4e0ff9 | [
"MIT"
] | null | null | null | utils/bbox_utils/center_to_corner.py | Jaskaran197/Red-blood-cell-detection-SSD | a33b330ad17454a7425aa7f57818c0a41b4e0ff9 | [
"MIT"
] | null | null | null | import numpy as np
def center_to_corner(boxes):
    """Convert bounding boxes from center format (cx, cy, width, height)
    to corner format (xmin, ymin, xmax, ymax).

    Args:
        - boxes: numpy array or tensor containing all the boxes to be converted

    Returns:
        - A numpy array or tensor of converted boxes (input is not modified)
    """
    half_widths = boxes[..., 2] / 2
    half_heights = boxes[..., 3] / 2
    corners = boxes.copy()
    corners[..., 0] = boxes[..., 0] - half_widths   # xmin
    corners[..., 1] = boxes[..., 1] - half_heights  # ymin
    corners[..., 2] = boxes[..., 0] + half_widths   # xmax
    corners[..., 3] = boxes[..., 1] + half_heights  # ymax
    return corners
0c24e2918c9577a7b38b38b7b54cfb7d7c91ca26 | 337 | py | Python | pytest/np.py | i0Ek3/disintegration | b59307f8166b93d76fab35af180a5cf3ffa51b09 | [
"MIT"
] | null | null | null | pytest/np.py | i0Ek3/disintegration | b59307f8166b93d76fab35af180a5cf3ffa51b09 | [
"MIT"
] | null | null | null | pytest/np.py | i0Ek3/disintegration | b59307f8166b93d76fab35af180a5cf3ffa51b09 | [
"MIT"
] | null | null | null | import numpy as np
# Sample arrays demonstrating common numpy construction routines.
# BUG FIX: the variable was named "list", shadowing the built-in list()
# type for the rest of the module; renamed to "samples".
samples = [np.linspace([1, 2, 3], 3),
           np.array([1, 2, 3]),
           np.arange(3),
           np.arange(8).reshape(2, 4),
           np.zeros((2, 3)),
           np.zeros((2, 3)).T,
           np.ones((3, 1)),
           np.eye(3),
           np.full((3, 3), 1),
           np.random.rand(3),
           np.random.rand(3, 3),
           np.random.uniform(5, 15, 3),
           np.random.randn(3),
           np.random.normal(3, 2.5, 3)]
print(samples)
| 17.736842 | 33 | 0.590504 |
0c25a90b221d6137090c0e77b536a592e4921a3d | 337 | py | Python | api/data_explorer/models/__init__.py | karamalhotra/data-explorer | 317f4d7330887969ab6bfe2ca23ec24163472c55 | [
"BSD-3-Clause"
] | null | null | null | api/data_explorer/models/__init__.py | karamalhotra/data-explorer | 317f4d7330887969ab6bfe2ca23ec24163472c55 | [
"BSD-3-Clause"
] | null | null | null | api/data_explorer/models/__init__.py | karamalhotra/data-explorer | 317f4d7330887969ab6bfe2ca23ec24163472c55 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from data_explorer.models.dataset_response import DatasetResponse
from data_explorer.models.facet import Facet
from data_explorer.models.facet_value import FacetValue
from data_explorer.models.facets_response import FacetsResponse
| 33.7 | 65 | 0.860534 |
0c2791187a63a4bcc6905cd731c3e9fbdcde2c2b | 2,288 | py | Python | seabird/cli.py | nicholas512/seabird | 23073b2b9a550b86ec155cbe43be9b50e50b8310 | [
"BSD-3-Clause"
] | 38 | 2015-04-15T08:57:44.000Z | 2022-03-13T02:51:53.000Z | seabird/cli.py | nicholas512/seabird | 23073b2b9a550b86ec155cbe43be9b50e50b8310 | [
"BSD-3-Clause"
] | 54 | 2015-01-28T03:53:43.000Z | 2021-12-11T07:37:24.000Z | seabird/cli.py | nicholas512/seabird | 23073b2b9a550b86ec155cbe43be9b50e50b8310 | [
"BSD-3-Clause"
] | 22 | 2015-09-22T12:24:22.000Z | 2022-01-31T22:27:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Command line utilities for package Seabird
"""
import click
from seabird.exceptions import CNVError
from .cnv import fCNV
from .netcdf import cnv2nc
| 28.962025 | 68 | 0.645979 |
0c2931d41844c5cadfbc0f4d8cd12cf1c0991cb4 | 1,752 | py | Python | main.py | Mitch-the-Fridge/pi | 70ab24dab9b06722084e93f783dc541747d46720 | [
"MIT"
] | null | null | null | main.py | Mitch-the-Fridge/pi | 70ab24dab9b06722084e93f783dc541747d46720 | [
"MIT"
] | null | null | null | main.py | Mitch-the-Fridge/pi | 70ab24dab9b06722084e93f783dc541747d46720 | [
"MIT"
] | 1 | 2020-05-31T17:13:42.000Z | 2020-05-31T17:13:42.000Z | #!/usr/bin/env python3
#import face_recognition
import cv2
import numpy as np
from datetime import datetime, timedelta
from buffer import Buffer
from collections import deque
import os
from copy import copy
import archive
# Minimum weight change (scale units) treated as a real fridge event.
WEIGHT_EPS = 5
TIMEOUT = 5 # in seconds
# with an fps we then have a "before" duration of 15 seconds
video_buffer = Buffer(300)
# building: True while frames are being accumulated into an event clip.
building = False
clip = None
# NOTE(review): poll_weight is defined elsewhere in this file (not visible
# here); presumably it reads the current scale value — confirm.
previous_weight = poll_weight()
last_weight_event = None
cap = cv2.VideoCapture(0)
# Main capture loop: keep a rolling pre-event buffer; when the weight
# changes by >= WEIGHT_EPS, start a clip and close it TIMEOUT seconds
# after the last weight change.
while True:
    archive.try_upload_buffer()
    # if enough_diff is true we will actually start the recording
    weight = poll_weight()
    weight_diff = weight - previous_weight
    enough_diff = abs(weight_diff) >= WEIGHT_EPS
    ret, frame = cap.read()
    # Half-size BGR->RGB copy (was used for face recognition, now disabled).
    rgb_frame = cv2.resize(frame, (0, 0), fx=.5, fy=.5)[:, :, ::-1]
    #face_locations = face_recognition.face_locations(rgb_frame)
    print(
        len(video_buffer.q),
        len(clip) if clip is not None else 0,
        building,
        #face_locations
    )
    point = {
        'time': datetime.now(),
        #'face_locations': face_locations,
        'frame': frame,
        'current_weight': weight,
    }
    # While building, frames go straight into the clip; otherwise into the
    # rolling pre-event buffer.
    if building:
        clip.append(point)
    else:
        video_buffer.add(point)
    if not building and enough_diff:
        # Event start: seed the clip with the buffered "before" frames.
        building = True
        clip = copy(video_buffer.q)
        video_buffer.clear()
    elif building and datetime.now() >= last_weight_event + timedelta(seconds=TIMEOUT):
        # Quiet for TIMEOUT seconds: finalize and archive the clip.
        frames = list(clip)
        clip = None
        building = False
        print("creating clip of len", len(frames))
        print(archive.create_from_clip(frames))
    previous_weight = weight
    if enough_diff:
        last_weight_event = datetime.now()
0c2a0afb31018189385f06e7bd9d48b8c0f6df9c | 2,895 | py | Python | OpenPNM/Network/models/pore_topology.py | Eng-RSMY/OpenPNM | a0a057d0f6346c515792459b1da97f05bab383c1 | [
"MIT"
] | 1 | 2021-03-30T21:38:26.000Z | 2021-03-30T21:38:26.000Z | OpenPNM/Network/models/pore_topology.py | Eng-RSMY/OpenPNM | a0a057d0f6346c515792459b1da97f05bab383c1 | [
"MIT"
] | null | null | null | OpenPNM/Network/models/pore_topology.py | Eng-RSMY/OpenPNM | a0a057d0f6346c515792459b1da97f05bab383c1 | [
"MIT"
] | null | null | null | r"""
===============================================================================
pore_topology -- functions for monitoring and adjusting topology
===============================================================================
"""
import scipy as _sp
def get_subscripts(network, shape, **kwargs):
    r"""
    Return the 3D subscripts (i,j,k) into the cubic network

    Parameters
    ----------
    shape : list
        The (i,j,k) shape of the network in number of pores in each direction

    Returns an (Np, 3) array; rows for non-internal pores are NaN. Prints
    a message and returns None when the shape does not match the network.
    """
    if network.num_pores('internal') != _sp.prod(shape):
        print('Supplied shape does not match Network size, cannot proceed')
        return None
    # Build an index grid of the requested shape (promoted to 3D) and
    # flatten each axis into one column of (i, j, k) subscripts.
    grid = _sp.indices(_sp.shape(_sp.atleast_3d(_sp.empty(shape))))
    subscripts = _sp.vstack([axis.flatten() for axis in grid]).T
    vals = _sp.ones((network.Np, 3)) * _sp.nan
    vals[network.pores('internal')] = subscripts
    return vals
def adjust_spacing(network, new_spacing, **kwargs):
    r"""
    Adjust the the pore-to-pore lattice spacing on a cubic network

    Parameters
    ----------
    new_spacing : float
        The new lattice spacing to apply

    Returns
    -------
    The rescaled 'pore.coords' array. Networks without a recorded
    ``_spacing`` attribute are returned unchanged.

    Notes
    -----
    At present this method only applies a uniform spacing in all directions.
    This is a limiation of OpenPNM Cubic Networks in general, and not of the
    method.
    """
    coords = network['pore.coords']
    try:
        spacing = network._spacing
        coords = coords/spacing*new_spacing
        network._spacing = new_spacing
    except AttributeError:
        # BUG FIX: this was a bare ``except`` that silently swallowed every
        # error. Only the intended failure — a network without ``_spacing``
        # (i.e. a non-Cubic network) — is tolerated now.
        pass
    return coords
def reduce_coordination(network, z, mode='random', **kwargs):
    r"""
    Reduce the coordination number to the specified z value

    Parameters
    ----------
    z : int
        The coordination number or number of throats connected a pore
    mode : string, optional
        Controls the logic used to trim connections. Options are:

        - 'random': (default) Throats will be randomly removed to achieve a
          coordination of z
        - 'max': All pores will be adjusted to have a maximum coordination of z
          (not implemented yet)

    Returns
    -------
    A label array indicating which throats should be trimmed to achieve desired
    coordination.

    Notes
    -----
    Pores with only 1 throat will be ignored in all calculations since these
    are generally boundary pores.
    """
    trim_mask = ~network['throat.all']
    coordination = network.num_neighbors(network.pores())
    # Throats touching single-connection (boundary) pores are protected.
    protected = network.find_neighbor_throats(pores=(coordination == 1))
    if mode == 'random':
        # Trim a random fraction of throats so the average coordination of
        # non-boundary pores drops from its current mean down to z.
        mean_z = _sp.average(coordination[coordination > 1])
        trim_fraction = (mean_z - z)/mean_z
        trim_mask = _sp.rand(network.Nt) < trim_fraction
        trim_mask = trim_mask*(~network.tomask(throats=protected))
    if mode == 'max':
        pass
    return trim_mask
| 29.242424 | 79 | 0.587219 |
0c2a4b436c5eaf17c454eecf85f7cdb41e8c152f | 9,559 | py | Python | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Tasks/Join.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | 2 | 2015-01-26T07:15:19.000Z | 2015-11-09T13:42:11.000Z | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Tasks/Join.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | null | null | null | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Tasks/Join.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | null | null | null | # Copyright (C) 2007 Samuel Abels
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from SpiffWorkflow.Task import Task
from SpiffWorkflow.Exception import WorkflowException
from SpiffWorkflow.Operators import valueof
from TaskSpec import TaskSpec
| 37.93254 | 80 | 0.61293 |
0c2c64f073f540439acf039ecdc1016885d5eb85 | 5,763 | py | Python | covsirphy/visualization/bar_plot.py | ardhanii/covid19-sir | 87881963c49a2fc5b6235c8b21269d216acaa941 | [
"Apache-2.0"
] | 97 | 2020-05-15T15:20:15.000Z | 2022-03-18T02:55:54.000Z | covsirphy/visualization/bar_plot.py | ardhanii/covid19-sir | 87881963c49a2fc5b6235c8b21269d216acaa941 | [
"Apache-2.0"
] | 970 | 2020-06-01T13:48:34.000Z | 2022-03-29T08:20:49.000Z | covsirphy/visualization/bar_plot.py | ardhani31/Covid19-SIRV-v3 | 59d95156b375c41259c46ce4e656b86903f92ec2 | [
"Apache-2.0"
] | 36 | 2020-05-15T15:36:43.000Z | 2022-02-25T17:59:08.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
import pandas as pd
from covsirphy.util.argument import find_args
from covsirphy.visualization.vbase import VisualizeBase
def bar_plot(df, title=None, filename=None, show_legend=True, **kwargs):
    """
    Wrapper function: show chronological change of the data.
    Args:
        df (pandas.DataFrame or pandas.Series): data to show
            Index
                Date (pandas.Timestamp)
            Columns
                variables to show
        title (str): title of the figure
        filename (str or None): filename to save the figure or None (display)
        show_legend (bool): whether show legend or not
        kwargs: keyword arguments of the following classes and methods.
            - covsirphy.BarPlot() and its methods,
            - matplotlib.pyplot.savefig(), matplotlib.pyplot.legend(),
            - pandas.DataFrame.plot()
    """
    # NOTE(review): BarPlot is expected to be defined elsewhere in this
    # module; find_args routes each kwarg to the method that accepts it.
    # The context manager saves/shows the figure on exit.
    with BarPlot(filename=filename, **find_args(plt.savefig, **kwargs)) as bp:
        bp.title = title
        bp.plot(data=df, **find_args([BarPlot.plot, pd.DataFrame.plot], **kwargs))
        # Axis
        bp.x_axis(**find_args([BarPlot.x_axis], **kwargs))
        bp.y_axis(**find_args([BarPlot.y_axis], **kwargs))
        # Vertical/horizontal lines
        bp.line(**find_args([BarPlot.line], **kwargs))
        # Legend
        if show_legend:
            bp.legend(**find_args([BarPlot.legend, plt.legend], **kwargs))
        else:
            bp.legend_hide()
| 36.942308 | 152 | 0.599167 |
0c2c8fc01f580afd1e737eea2d3f4a891285699e | 3,342 | py | Python | 03-process-unsplash-dataset.py | l294265421/natural-language-image-search | 71621f2208f345b922ed0f82d406526cef456d48 | [
"MIT"
] | null | null | null | 03-process-unsplash-dataset.py | l294265421/natural-language-image-search | 71621f2208f345b922ed0f82d406526cef456d48 | [
"MIT"
] | null | null | null | 03-process-unsplash-dataset.py | l294265421/natural-language-image-search | 71621f2208f345b922ed0f82d406526cef456d48 | [
"MIT"
] | null | null | null | import os
import math
from pathlib import Path
import clip
import torch
from PIL import Image
import numpy as np
import pandas as pd
from common import common_path
# Set the path to the photos
# dataset_version = "lite" # Use "lite" or "full"
# photos_path = Path("unsplash-dataset") / dataset_version / "photos"
photos_path = os.path.join(common_path.project_dir, 'unsplash-dataset/lite/photos')
# List all JPGs in the folder
photos_files = list(Path(photos_path).glob("*.jpg"))
# Print some statistics
print(f"Photos found: {len(photos_files)}")
# Load the open CLIP model (downloads weights on first use); prefer GPU when available
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
# Function that computes the feature vectors for a batch of images
# Define the batch size so that it fits on your GPU. You can also do the processing on the CPU, but it will be slower.
batch_size = 16
# Path where the feature vectors will be stored (one .npy/.csv pair per batch)
features_path = os.path.join(common_path.project_dir, 'unsplash-dataset/lite/features')
# Compute how many batches are needed
batches = math.ceil(len(photos_files) / batch_size)
# Process each batch
for i in range(batches):
    print(f"Processing batch {i + 1}/{batches}")
    batch_ids_path = os.path.join(features_path, f"{i:010d}.csv")
    batch_features_path = os.path.join(features_path, f"{i:010d}.npy")
    # Only do the processing if the batch wasn't processed yet
    if not os.path.exists(batch_features_path):
        try:
            # Select the photos for the current batch
            batch_files = photos_files[i * batch_size: (i + 1) * batch_size]
            # Compute the features and save to a numpy file
            batch_features = compute_clip_features(batch_files)
            np.save(batch_features_path, batch_features)
            # Save the photo IDs to a CSV file
            photo_ids = [photo_file.name.split(".")[0] for photo_file in batch_files]
            photo_ids_data = pd.DataFrame(photo_ids, columns=['photo_id'])
            photo_ids_data.to_csv(batch_ids_path, index=False)
        except Exception:
            # Catch problems with the processing to make the process more robust.
            # Was a bare `except:` -- that also swallowed KeyboardInterrupt and
            # SystemExit, making this long-running loop impossible to stop cleanly.
            print(f'Problem with batch {i}')
# Load all per-batch numpy files. Exclude the merged output file so that a
# re-run of the script does not double-count features already merged before.
features_list = [np.load(features_file)
                 for features_file in sorted(Path(features_path).glob("*.npy"))
                 if features_file.name != "features.npy"]
# Concatenate the features and store in a merged file
features = np.concatenate(features_list)
np.save(os.path.join(features_path, "features.npy"), features)
# Load all the photo IDs (again excluding the merged output file on re-runs)
photo_ids = pd.concat([pd.read_csv(ids_file)
                       for ids_file in sorted(Path(features_path).glob("*.csv"))
                       if ids_file.name != "photo_ids.csv"])
photo_ids.to_csv(os.path.join(features_path, "photo_ids.csv"), index=False)
| 36.326087 | 118 | 0.718731 |
0c2caff0890d29c7f470b93cedd466717f34705f | 4,612 | py | Python | treadmill_pipeline/treadmill.py | ttngu207/project-treadmill | 55b5241b1c0b2634da8c153bf9aaeb511f28b07f | [
"MIT"
] | null | null | null | treadmill_pipeline/treadmill.py | ttngu207/project-treadmill | 55b5241b1c0b2634da8c153bf9aaeb511f28b07f | [
"MIT"
] | null | null | null | treadmill_pipeline/treadmill.py | ttngu207/project-treadmill | 55b5241b1c0b2634da8c153bf9aaeb511f28b07f | [
"MIT"
] | 4 | 2020-03-05T15:44:36.000Z | 2020-03-18T15:18:11.000Z | import numpy as np
import datajoint as dj
from treadmill_pipeline import project_database_prefix
from ephys.utilities import ingestion, time_sync
from ephys import get_schema_name
# Schema owning this pipeline's own tables.
schema = dj.schema(project_database_prefix + 'treadmill_pipeline')
# Read-only virtual-module handles to schemas owned by the core ephys pipeline.
reference = dj.create_virtual_module('reference', get_schema_name('reference'))
acquisition = dj.create_virtual_module('acquisition', get_schema_name('acquisition'))
behavior = dj.create_virtual_module('behavior', get_schema_name('behavior'))
| 50.681319 | 130 | 0.624892 |
0c3043c88aed8f6a40aafefe3d1e9548537a28e3 | 1,324 | py | Python | management/api/v1/serializers.py | bwksoftware/cypetulip | 4ea5c56d2d48a311220e144d094280a275109316 | [
"MIT"
] | 3 | 2019-08-03T12:00:22.000Z | 2020-02-02T08:37:09.000Z | management/api/v1/serializers.py | basetwode/cypetulip | d6be294a288706c5661afb433215fe6c3ffea92b | [
"MIT"
] | 47 | 2019-08-03T16:17:41.000Z | 2022-03-11T23:15:48.000Z | management/api/v1/serializers.py | basetwode/cypetulip | d6be294a288706c5661afb433215fe6c3ffea92b | [
"MIT"
] | null | null | null | from rest_framework import serializers
from management.models.main import MailSetting, LdapSetting, ShopSetting, LegalSetting, Header, CacheSetting, Footer
| 22.066667 | 116 | 0.702417 |
0c309ee4537295e1c6db342512009ad9c9a55328 | 9,854 | py | Python | tests/test_optimal.py | craffer/fantasy-coty | 08903cb138fa1c2d160b90fc028c8ec55901040b | [
"MIT"
] | null | null | null | tests/test_optimal.py | craffer/fantasy-coty | 08903cb138fa1c2d160b90fc028c8ec55901040b | [
"MIT"
] | 2 | 2019-12-21T18:48:40.000Z | 2019-12-22T20:19:20.000Z | tests/test_optimal.py | craffer/fantasy-coty | 08903cb138fa1c2d160b90fc028c8ec55901040b | [
"MIT"
] | null | null | null | """Unit test optimal lineup functions."""
import unittest
import copy
import ff_espn_api # pylint: disable=import-error
from collections import defaultdict
from fantasy_coty.main import add_to_optimal
if __name__ == "__main__":
unittest.main()
| 46.046729 | 100 | 0.623909 |
0c31fa4744359e49cd3c719e5fe2aae79bc7f68a | 5,391 | py | Python | spark/explorer.py | Elavarasan17/Stack-Overflow-Data-Dump-Analysis | 3742a1eef17b211ddcda4bd5f41d8a8c42ec228f | [
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | spark/explorer.py | Elavarasan17/Stack-Overflow-Data-Dump-Analysis | 3742a1eef17b211ddcda4bd5f41d8a8c42ec228f | [
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | spark/explorer.py | Elavarasan17/Stack-Overflow-Data-Dump-Analysis | 3742a1eef17b211ddcda4bd5f41d8a8c42ec228f | [
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, functions, types
from pyspark.sql.functions import date_format
from pyspark.sql.functions import year, month, dayofmonth
import sys
import json
import argparse
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
# add more functions as necessary
if __name__ == '__main__':
    # NOTE: single-dash multi-character options (e.g. "-posts_src") are
    # accepted by argparse but unconventional; kept for interface stability.
    parser = argparse.ArgumentParser()
    parser.add_argument("-posts_src", action="store", dest="posts_src", type=str)
    parser.add_argument("-users_src", action="store", dest="users_src", type=str)
    parser.add_argument("-tempbucket_src", action="store", dest="tempbucket_src", type=str)
    parser.add_argument("-dataset_src", action="store", dest="dataset_src", type=str)
    args = parser.parse_args()
    posts_inputs = args.posts_src
    users_inputs = args.users_src
    temp_bucket_input = args.tempbucket_src
    dataset_input = args.dataset_src
    spark = SparkSession.builder.appName('Explorer DF').getOrCreate()
    # Compare numeric (major, minor) components instead of the raw version
    # string: lexicographic comparison would wrongly reject e.g. "10.0" < "3.0".
    spark_version = tuple(int(part) for part in spark.version.split('.')[:2])
    assert spark_version >= (3, 0), f"Spark 3.0+ required, got {spark.version}"
    spark.sparkContext.setLogLevel('WARN')
    main(posts_inputs, users_inputs, temp_bucket_input, dataset_input)
0c32795d8af79fcf1c3d723adbd4971a62b457ad | 2,177 | py | Python | self_supervised/loss.py | ravidziv/self-supervised-learning | f02c1639ce3c2119afa522e400d793e741fb68a0 | [
"MIT"
] | null | null | null | self_supervised/loss.py | ravidziv/self-supervised-learning | f02c1639ce3c2119afa522e400d793e741fb68a0 | [
"MIT"
] | null | null | null | self_supervised/loss.py | ravidziv/self-supervised-learning | f02c1639ce3c2119afa522e400d793e741fb68a0 | [
"MIT"
] | null | null | null | """Contrastive loss functions."""
from functools import partial
import tensorflow as tf
LARGE_NUM = 1e9
def add_supervised_loss(labels: tf.Tensor, logits: tf.Tensor):
  """Compute the mean supervised (categorical cross-entropy) loss over the local batch."""
  loss_fn = tf.keras.losses.CategoricalCrossentropy(
      from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
  # Per-example losses, then averaged over the batch.
  per_example_losses = loss_fn(labels, logits)
  return tf.reduce_mean(per_example_losses)
def add_contrastive_loss(hidden: tf.Tensor, hidden_norm: bool = True,
                         temperature: float = 1.0):
  """Compute loss for model.

  NT-Xent-style contrastive loss: `hidden` stacks the projections of two
  augmented views of the same batch, and each example's positive is its
  counterpart in the other view.

  Args:
    hidden: hidden vector (`Tensor`) of shape (bsz, dim).
    hidden_norm: whether or not to use normalization on the hidden vector.
    temperature: a `floating` number for temperature scaling.

  Returns:
    A loss scalar.
    The logits for contrastive prediction task.
    The labels for contrastive prediction task.
  """
  if hidden_norm:
    hidden = tf.math.l2_normalize(hidden, -1)
  # First half = view 1 of each example, second half = view 2.
  hidden1, hidden2 = tf.split(hidden, 2, 0)
  batch_size = tf.shape(hidden1)[0]
  hidden1_large = hidden1
  hidden2_large = hidden2
  # Target for row i is column i of the cross-view block (the first
  # `batch_size` columns of the concatenated logits below).
  labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
  masks = tf.one_hot(tf.range(batch_size), batch_size)
  # Same-view similarities; LARGE_NUM subtracted on the diagonal so an
  # example is never compared against itself.
  logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
  logits_aa = logits_aa - masks * LARGE_NUM
  logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
  logits_bb = logits_bb - masks * LARGE_NUM
  # Cross-view similarities: the diagonal holds the positive pairs.
  logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
  logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
  # Symmetric cross entropy over [cross-view block, same-view block].
  loss_a = tf.nn.softmax_cross_entropy_with_logits(
      labels, tf.concat([logits_ab, logits_aa], 1))
  loss_b = tf.nn.softmax_cross_entropy_with_logits(
      labels, tf.concat([logits_ba, logits_bb], 1))
  loss = tf.reduce_mean(loss_a + loss_b)
  return loss, logits_ab, labels
0c336dedc298c3448acb41a9e995e66ab5dfe2bf | 3,391 | py | Python | suzieq/engines/pandas/tables.py | zxiiro/suzieq | eca92820201c05bc80081599f69e41cd6b991107 | [
"Apache-2.0"
] | null | null | null | suzieq/engines/pandas/tables.py | zxiiro/suzieq | eca92820201c05bc80081599f69e41cd6b991107 | [
"Apache-2.0"
] | null | null | null | suzieq/engines/pandas/tables.py | zxiiro/suzieq | eca92820201c05bc80081599f69e41cd6b991107 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from suzieq.engines.pandas.engineobj import SqPandasEngine
from suzieq.sqobjects import get_sqobject
| 34.252525 | 76 | 0.529932 |
0c34007b8ed98fbad90350a4894f2960e309e1be | 3,306 | py | Python | connect_box/data.py | jtru/python-connect-box | 2d26923e966fbb319760da82e3e71103018ded0b | [
"MIT"
] | null | null | null | connect_box/data.py | jtru/python-connect-box | 2d26923e966fbb319760da82e3e71103018ded0b | [
"MIT"
] | null | null | null | connect_box/data.py | jtru/python-connect-box | 2d26923e966fbb319760da82e3e71103018ded0b | [
"MIT"
] | null | null | null | """Handle Data attributes."""
from datetime import datetime
from ipaddress import IPv4Address, IPv6Address, ip_address as convert_ip
from typing import Iterable, Union
import attr
| 24.857143 | 82 | 0.635209 |
0c34fc08f42c8637a1d795a07180d44a6de5c252 | 4,946 | py | Python | components/server/src/data_model/sources/jenkins.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | [
"Apache-2.0"
] | null | null | null | components/server/src/data_model/sources/jenkins.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | [
"Apache-2.0"
] | null | null | null | components/server/src/data_model/sources/jenkins.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | [
"Apache-2.0"
] | null | null | null | """Jenkins source."""
from ..meta.entity import Color, EntityAttributeType
from ..parameters import (
access_parameters,
Days,
FailureType,
MultipleChoiceParameter,
MultipleChoiceWithAdditionParameter,
TestResult,
)
from ..meta.source import Source
def jenkins_access_parameters(*args, **kwargs):
    """Create Jenkins specific access parameters.

    Overrides the generic access parameters: no private token, no landing
    URL, a Jenkins-flavored URL label (unless the caller provided one) and a
    basic-auth password/API-token parameter.
    """
    kwargs["include"] = {"private_token": False, "landing_url": False}
    inner_kwargs = kwargs.setdefault("kwargs", {})
    url_kwargs = inner_kwargs.setdefault("url", {})
    url_kwargs.setdefault("name", "URL to Jenkins job")
    inner_kwargs["password"] = {
        "name": "Password or API token for basic authentication",
        "help_url": "https://wiki.jenkins.io/display/JENKINS/Authenticating+scripted+clients",
    }
    return access_parameters(*args, **kwargs)
# Metrics that can be measured with a plain Jenkins source.
ALL_JENKINS_METRICS = ["failed_jobs", "source_up_to_dateness", "source_version", "unused_jobs"]
# Entity description shared by all job-based metrics below.
JOB_ENTITY = dict(
    name="job",
    attributes=[
        dict(name="Job name", key="name", url="url"),
        dict(
            name="Status of most recent build",
            key="build_status",
            color=dict(Success=Color.POSITIVE, Failure=Color.NEGATIVE, Aborted=Color.ACTIVE, Unstable=Color.WARNING),
        ),
        dict(name="Date of most recent build", key="build_date", type=EntityAttributeType.DATE),
    ],
)
# Source definition for a Jenkins CI server (job-based metrics).
JENKINS = Source(
    name="Jenkins",
    description="Jenkins is an open source continuous integration/continuous deployment server.",
    url="https://jenkins.io/",
    parameters=dict(
        inactive_days=Days(
            name="Number of days without builds after which to consider CI-jobs unused.",
            short_name="number of days without builds",
            default_value="90",
            metrics=["unused_jobs"],
        ),
        jobs_to_include=MultipleChoiceWithAdditionParameter(
            name="Jobs to include (regular expressions or job names)",
            short_name="jobs to include",
            help="Jobs to include can be specified by job name or by regular expression. "
            "Use {parent job name}/{child job name} for the names of nested jobs.",
            placeholder="all",
            metrics=["failed_jobs", "source_up_to_dateness", "unused_jobs"],
        ),
        jobs_to_ignore=MultipleChoiceWithAdditionParameter(
            name="Jobs to ignore (regular expressions or job names)",
            short_name="jobs to ignore",
            help="Jobs to ignore can be specified by job name or by regular expression. "
            "Use {parent job name}/{child job name} for the names of nested jobs.",
            metrics=["failed_jobs", "source_up_to_dateness", "unused_jobs"],
        ),
        result_type=MultipleChoiceParameter(
            name="Build result types",
            short_name="result types",
            help="Limit which build result types to include.",
            placeholder="all result types",
            values=["Aborted", "Failure", "Not built", "Success", "Unstable"],
            metrics=["source_up_to_dateness"],
        ),
        failure_type=FailureType(values=["Aborted", "Failure", "Not built", "Unstable"]),
        # Merge in the Jenkins-flavored access parameters (URL + basic auth).
        **jenkins_access_parameters(
            ALL_JENKINS_METRICS,
            kwargs=dict(
                url=dict(
                    name="URL",
                    help="URL of the Jenkins instance, with port if necessary, but without path. For example, "
                    "'https://jenkins.example.org'.",
                )
            ),
        )
    ),
    # All three job metrics share the same entity description.
    entities=dict(
        failed_jobs=JOB_ENTITY,
        source_up_to_dateness=JOB_ENTITY,
        unused_jobs=JOB_ENTITY,
    ),
)
# Metrics that can be measured with a Jenkins JUnit test report source.
ALL_JENKINS_TEST_REPORT_METRICS = ["source_up_to_dateness", "tests"]
# Source definition for a Jenkins job that publishes JUnit test results.
JENKINS_TEST_REPORT = Source(
    name="Jenkins test report",
    description="A Jenkins job with test results.",
    url="https://plugins.jenkins.io/junit",
    parameters=dict(
        test_result=TestResult(values=["failed", "passed", "skipped"]),
        **jenkins_access_parameters(
            ALL_JENKINS_TEST_REPORT_METRICS,
            kwargs=dict(
                url=dict(
                    # Fixed: the second example URL was missing its opening quote.
                    help="URL to a Jenkins job with a test report generated by the JUnit plugin. For example, "
                    "'https://jenkins.example.org/job/test' or 'https://jenkins.example.org/job/test/job/master' "
                    "in case of a pipeline job."
                )
            ),
        )
    ),
    entities=dict(
        tests=dict(
            name="test",
            attributes=[
                dict(name="Class name"),
                dict(name="Test case", key="name"),
                dict(
                    name="Test result", color=dict(failed=Color.NEGATIVE, passed=Color.POSITIVE, skipped=Color.WARNING)
                ),
                dict(name="Number of builds the test has been failing", key="age", type=EntityAttributeType.INTEGER),
            ],
        )
    ),
)
0c3529b1f848cd4aef129f241e7149c8c46fd8c6 | 3,155 | py | Python | tests/test_transducer.py | dandersonw/myouji-kenchi | 6a373d8626995bf5d4383dcac4fc8e6372135640 | [
"MIT"
] | 7 | 2019-10-22T10:09:12.000Z | 2022-01-31T07:49:07.000Z | tests/test_transducer.py | dandersonw/myouji-kenchi | 6a373d8626995bf5d4383dcac4fc8e6372135640 | [
"MIT"
] | null | null | null | tests/test_transducer.py | dandersonw/myouji-kenchi | 6a373d8626995bf5d4383dcac4fc8e6372135640 | [
"MIT"
] | 1 | 2021-07-09T18:10:17.000Z | 2021-07-09T18:10:17.000Z | import myouji_kenchi
# Given that the output depends on what goes into the attested myouji file I'm
# hesitant to write too many tests in the blast radius of changes to that file
| 44.43662 | 78 | 0.66878 |
0c36ba1ececdebf56f9ae6696bc5d261578450ca | 1,668 | py | Python | algorithms/named/mergesort.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | [
"MIT"
] | 1 | 2022-01-06T04:50:09.000Z | 2022-01-06T04:50:09.000Z | algorithms/named/mergesort.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | [
"MIT"
] | 1 | 2022-01-23T06:09:21.000Z | 2022-01-23T06:14:17.000Z | algorithms/named/mergesort.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | [
"MIT"
] | null | null | null | import unittest
from typing import List, Optional
# Consider making generic with:
# https://stackoverflow.com/a/47970232/4885590
if __name__ == "__main__":
    # Quick manual demo: merge_sort (defined earlier in this module, outside
    # this excerpt) appears to sort the list in place, so the second print
    # should show the sorted result -- confirm against its definition.
    x = [10, 5, 9, 10, 3]
    print(x)
    merge_sort(x)
    print(x)
    # Then run the unit tests defined in this file.
    unittest.main()
| 24.895522 | 80 | 0.576739 |
0c380570b168add317dd67b7037f3b6ec7e93c2b | 392 | py | Python | pages/main_page.py | thaidem/selenium-training-page-objects | 1f37a2b5287a502295bb57050c95455d68c2d3eb | [
"Apache-2.0"
] | null | null | null | pages/main_page.py | thaidem/selenium-training-page-objects | 1f37a2b5287a502295bb57050c95455d68c2d3eb | [
"Apache-2.0"
] | null | null | null | pages/main_page.py | thaidem/selenium-training-page-objects | 1f37a2b5287a502295bb57050c95455d68c2d3eb | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.support.wait import WebDriverWait
| 23.058824 | 67 | 0.670918 |
0c39dce08e2639d2b8e9721a52545154b1694858 | 196 | py | Python | src/frr/tests/lib/test_nexthop_iter.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | src/frr/tests/lib/test_nexthop_iter.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | src/frr/tests/lib/test_nexthop_iter.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | import frrtest
TestNexthopIter.onesimple("Simple test passed.")
# Register expected output lines on the test class defined above (class body
# not visible in this excerpt); presumably frrtest's `onesimple` asserts that
# the test binary prints the given line -- verify against frrtest.
TestNexthopIter.onesimple("Simple test passed.")
TestNexthopIter.onesimple("PRNG test passed.")
0c3b154bc332d251c3e35e98a56001cf94c27a53 | 1,319 | py | Python | setup.py | themis-project/themis-finals-checker-app-py | 12e70102bcca3d6e4082d96e676e364176c0da67 | [
"MIT"
] | null | null | null | setup.py | themis-project/themis-finals-checker-app-py | 12e70102bcca3d6e4082d96e676e364176c0da67 | [
"MIT"
] | null | null | null | setup.py | themis-project/themis-finals-checker-app-py | 12e70102bcca3d6e4082d96e676e364176c0da67 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import io
import os
# Load version metadata (e.g. __version__) from the package's __about__.py
# without importing the package itself.
about = {}
about_filename = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'themis', 'finals', 'checker', 'app', '__about__.py')
with io.open(about_filename, 'rb') as fp:
    exec(fp.read(), about)
setup(
    name='themis.finals.checker.app',
    version=about['__version__'],
    description='Themis Finals checker application',
    author='Alexander Pyatkin',
    author_email='aspyatkin@gmail.com',
    url='https://github.com/themis-project/themis-finals-checker-app-py',
    license='MIT',
    packages=find_packages('.'),
    install_requires=[
        'setuptools>=0.8',
        'Flask>=0.11.1,<0.12',
        'redis>=2.10.5,<2.11',
        'hiredis>=0.2.0,<0.3',
        'rq>=0.7.1,<0.8.0',
        'requests>=2.11.0',
        'python-dateutil>=2.5.3,<2.6',
        'themis.finals.checker.result==1.1.0',
        'raven>=5.26.0,<5.27.0',
        'PyJWT>=1.5.0,<1.6.0',
        'cryptography>=1.8.1,<1.9.0',
        'PyYAML>=3.11'
    ],
    # NOTE: pkg_resources-style namespace_packages is deprecated in modern
    # setuptools; kept as-is for compatibility with the existing layout.
    namespace_packages=[
        'themis',
        'themis.finals',
        'themis.finals.checker'
    ],
    entry_points=dict(
        console_scripts=[
            'themis-finals-checker-app-worker = themis.finals.checker.app:start_worker'
        ]
    )
)
| 26.918367 | 87 | 0.581501 |
0c3b1affbabd1c858deb93d0a0302a8d675091d1 | 8,090 | py | Python | tools/xenserver/cleanup_sm_locks.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | tools/xenserver/cleanup_sm_locks.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | tools/xenserver/cleanup_sm_locks.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z | begin_unit
comment|'#!/usr/bin/env python'
nl|'\n'
nl|'\n'
comment|'# Copyright 2013 OpenStack Foundation'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License");'
nl|'\n'
comment|'# you may not use this file except in compliance with the License.'
nl|'\n'
comment|'# You may obtain a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS,'
nl|'\n'
comment|'# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.'
nl|'\n'
comment|'# See the License for the specific language governing permissions and'
nl|'\n'
comment|'# limitations under the License.'
nl|'\n'
string|'"""\nScript to cleanup old XenServer /var/lock/sm locks.\n\nXenServer 5.6 and 6.0 do not appear to always cleanup locks when using a\nFileSR. ext3 has a limit of 32K inode links, so when we have 32K-2 (31998)\nlocks laying around, builds will begin to fail because we can\'t create any\nadditional locks. This cleanup script is something we can run periodically as\na stop-gap measure until this is fixed upstream.\n\nThis script should be run on the dom0 of the affected machine.\n"""'
newline|'\n'
name|'import'
name|'errno'
newline|'\n'
name|'import'
name|'optparse'
newline|'\n'
name|'import'
name|'os'
newline|'\n'
name|'import'
name|'sys'
newline|'\n'
name|'import'
name|'time'
newline|'\n'
nl|'\n'
DECL|variable|BASE
name|'BASE'
op|'='
string|"'/var/lock/sm'"
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|_get_age_days
name|'def'
name|'_get_age_days'
op|'('
name|'secs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'float'
op|'('
name|'time'
op|'.'
name|'time'
op|'('
op|')'
op|'-'
name|'secs'
op|')'
op|'/'
number|'86400'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|_parse_args
dedent|''
name|'def'
name|'_parse_args'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'parser'
op|'='
name|'optparse'
op|'.'
name|'OptionParser'
op|'('
op|')'
newline|'\n'
name|'parser'
op|'.'
name|'add_option'
op|'('
string|'"-d"'
op|','
string|'"--dry-run"'
op|','
nl|'\n'
name|'action'
op|'='
string|'"store_true"'
op|','
name|'dest'
op|'='
string|'"dry_run"'
op|','
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
name|'help'
op|'='
string|'"don\'t actually remove locks"'
op|')'
newline|'\n'
name|'parser'
op|'.'
name|'add_option'
op|'('
string|'"-l"'
op|','
string|'"--limit"'
op|','
nl|'\n'
name|'action'
op|'='
string|'"store"'
op|','
name|'type'
op|'='
string|"'int'"
op|','
name|'dest'
op|'='
string|'"limit"'
op|','
nl|'\n'
name|'default'
op|'='
name|'sys'
op|'.'
name|'maxint'
op|','
nl|'\n'
name|'help'
op|'='
string|'"max number of locks to delete (default: no limit)"'
op|')'
newline|'\n'
name|'parser'
op|'.'
name|'add_option'
op|'('
string|'"-v"'
op|','
string|'"--verbose"'
op|','
nl|'\n'
name|'action'
op|'='
string|'"store_true"'
op|','
name|'dest'
op|'='
string|'"verbose"'
op|','
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
name|'help'
op|'='
string|'"don\'t print status messages to stdout"'
op|')'
newline|'\n'
nl|'\n'
name|'options'
op|','
name|'args'
op|'='
name|'parser'
op|'.'
name|'parse_args'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'days_old'
op|'='
name|'int'
op|'('
name|'args'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
dedent|''
name|'except'
op|'('
name|'IndexError'
op|','
name|'ValueError'
op|')'
op|':'
newline|'\n'
indent|' '
name|'parser'
op|'.'
name|'print_help'
op|'('
op|')'
newline|'\n'
name|'sys'
op|'.'
name|'exit'
op|'('
number|'1'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'options'
op|','
name|'days_old'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|main
dedent|''
name|'def'
name|'main'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'options'
op|','
name|'days_old'
op|'='
name|'_parse_args'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'if'
name|'not'
name|'os'
op|'.'
name|'path'
op|'.'
name|'exists'
op|'('
name|'BASE'
op|')'
op|':'
newline|'\n'
indent|' '
name|'print'
op|'>>'
name|'sys'
op|'.'
name|'stderr'
op|','
string|'"error: \'%s\' doesn\'t exist. Make sure you\'re"'
string|'" running this on the dom0."'
op|'%'
name|'BASE'
newline|'\n'
name|'sys'
op|'.'
name|'exit'
op|'('
number|'1'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'lockpaths_removed'
op|'='
number|'0'
newline|'\n'
name|'nspaths_removed'
op|'='
number|'0'
newline|'\n'
nl|'\n'
name|'for'
name|'nsname'
name|'in'
name|'os'
op|'.'
name|'listdir'
op|'('
name|'BASE'
op|')'
op|'['
op|':'
name|'options'
op|'.'
name|'limit'
op|']'
op|':'
newline|'\n'
indent|' '
name|'nspath'
op|'='
name|'os'
op|'.'
name|'path'
op|'.'
name|'join'
op|'('
name|'BASE'
op|','
name|'nsname'
op|')'
newline|'\n'
nl|'\n'
name|'if'
name|'not'
name|'os'
op|'.'
name|'path'
op|'.'
name|'isdir'
op|'('
name|'nspath'
op|')'
op|':'
newline|'\n'
indent|' '
name|'continue'
newline|'\n'
nl|'\n'
comment|'# Remove old lockfiles'
nl|'\n'
dedent|''
name|'removed'
op|'='
number|'0'
newline|'\n'
name|'locknames'
op|'='
name|'os'
op|'.'
name|'listdir'
op|'('
name|'nspath'
op|')'
newline|'\n'
name|'for'
name|'lockname'
name|'in'
name|'locknames'
op|':'
newline|'\n'
indent|' '
name|'lockpath'
op|'='
name|'os'
op|'.'
name|'path'
op|'.'
name|'join'
op|'('
name|'nspath'
op|','
name|'lockname'
op|')'
newline|'\n'
name|'lock_age_days'
op|'='
name|'_get_age_days'
op|'('
name|'os'
op|'.'
name|'path'
op|'.'
name|'getmtime'
op|'('
name|'lockpath'
op|')'
op|')'
newline|'\n'
name|'if'
name|'lock_age_days'
op|'>'
name|'days_old'
op|':'
newline|'\n'
indent|' '
name|'lockpaths_removed'
op|'+='
number|'1'
newline|'\n'
name|'removed'
op|'+='
number|'1'
newline|'\n'
nl|'\n'
name|'if'
name|'options'
op|'.'
name|'verbose'
op|':'
newline|'\n'
indent|' '
name|'print'
string|"'Removing old lock: %03d %s'"
op|'%'
op|'('
name|'lock_age_days'
op|','
nl|'\n'
name|'lockpath'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'not'
name|'options'
op|'.'
name|'dry_run'
op|':'
newline|'\n'
indent|' '
name|'os'
op|'.'
name|'unlink'
op|'('
name|'lockpath'
op|')'
newline|'\n'
nl|'\n'
comment|'# Remove empty namespace paths'
nl|'\n'
dedent|''
dedent|''
dedent|''
name|'if'
name|'len'
op|'('
name|'locknames'
op|')'
op|'=='
name|'removed'
op|':'
newline|'\n'
indent|' '
name|'nspaths_removed'
op|'+='
number|'1'
newline|'\n'
nl|'\n'
name|'if'
name|'options'
op|'.'
name|'verbose'
op|':'
newline|'\n'
indent|' '
name|'print'
string|"'Removing empty namespace: %s'"
op|'%'
name|'nspath'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'not'
name|'options'
op|'.'
name|'dry_run'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'os'
op|'.'
name|'rmdir'
op|'('
name|'nspath'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'OSError'
op|','
name|'e'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'e'
op|'.'
name|'errno'
op|'=='
name|'errno'
op|'.'
name|'ENOTEMPTY'
op|':'
newline|'\n'
indent|' '
name|'print'
op|'>>'
name|'sys'
op|'.'
name|'stderr'
op|','
string|'"warning: directory \'%s\'"'
string|'" not empty"'
op|'%'
name|'nspath'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'raise'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
dedent|''
dedent|''
dedent|''
name|'if'
name|'options'
op|'.'
name|'dry_run'
op|':'
newline|'\n'
indent|' '
name|'print'
string|'"** Dry Run **"'
newline|'\n'
nl|'\n'
dedent|''
name|'print'
string|'"Total locks removed: "'
op|','
name|'lockpaths_removed'
newline|'\n'
name|'print'
string|'"Total namespaces removed: "'
op|','
name|'nspaths_removed'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
name|'if'
name|'__name__'
op|'=='
string|"'__main__'"
op|':'
newline|'\n'
indent|' '
name|'main'
op|'('
op|')'
newline|'\n'
dedent|''
endmarker|''
end_unit
| 13.758503 | 495 | 0.591595 |
0c3c5abd70c1f21c01879c6ec3f584ca3464ae2e | 13,324 | py | Python | clifun.py | tdimiduk/clifun | d7e5acae0a76506d9440ae86a15341b6cc1cf25e | [
"MIT"
] | 1 | 2022-01-04T17:58:19.000Z | 2022-01-04T17:58:19.000Z | clifun.py | tdimiduk/clifun | d7e5acae0a76506d9440ae86a15341b6cc1cf25e | [
"MIT"
] | 4 | 2022-01-04T17:17:33.000Z | 2022-01-04T17:26:12.000Z | clifun.py | tdimiduk/clifun | d7e5acae0a76506d9440ae86a15341b6cc1cf25e | [
"MIT"
] | null | null | null | import datetime as dt
import importlib.util
import inspect
import itertools
import json
import os
import pathlib
import sys
import types
import typing
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
S = TypeVar("S")
T = TypeVar("T")
O = TypeVar("O", Any, None)
StringInterpreters = Dict[Type[T], Callable[[str], T]]
def call(
    c: Callable[..., T],
    args: Optional[List[str]] = None,
    string_interpreters: Optional[StringInterpreters] = None,
) -> T:
    """
    Call a function from the command line.

    Gathers the function's inputs from command line arguments, environment
    variables and config files, then invokes the function and returns its
    result. Exits the process after printing usage when --help is requested,
    or with status 1 when unknown or missing arguments are detected.
    """
    cli_args = sys.argv if args is None else args
    if string_interpreters is None:
        converters = default_string_interpreters()
    else:
        converters = string_interpreters
    annotated = annotate_callable(c, converters, [])
    provided = assemble_input_sources(cli_args)
    if provided.args.help:
        print_usage(annotated, header=True)
        sys.exit(0)
    needed = all_needed_inputs(annotated)
    unknown = invalid_args(provided.args.keyword.keys(), needed)
    if unknown:
        print(f"Unknown arguments: {unknown}")
        print_usage(annotated)
        sys.exit(1)
    resolved, missing = resolve_inputs(needed, provided)
    if missing:
        print(f"Missing arguments: {missing}")
        print_usage(annotated)
        sys.exit(1)
    return annotated(resolved)
################################################################################
# Interpreting strings into python types
################################################################################
def interpret_bool(s: str) -> bool:
    """
    Slightly more intuitive bool interpretation.

    Raw python's `bool("false") == True` since it is a non-empty string;
    here "t"/"true"/"yes"/"y" (any case) mean True and "f"/"false"/"no"/"n"
    mean False. Anything else raises InterpretationError.
    """
    lowered = s.lower()
    if lowered in ("t", "true", "yes", "y"):
        return True
    if lowered in ("f", "false", "no", "n"):
        return False
    raise InterpretationError(s, bool)
def interpret_datetime(s: str) -> dt.datetime:
    """
    Date and time in ISO format.
    """
    if not hasattr(dt.datetime, "fromisoformat"):
        # Python 3.6: datetime.fromisoformat does not exist yet, so fall
        # back to the third-party isodate package.
        import isodate  # type: ignore

        return isodate.parse_datetime(s)
    return dt.datetime.fromisoformat(s)
def interpret_date(s: str) -> dt.date:
    """
    Dates in YYYY-MM-DD format (components need not be zero-padded).
    """
    fields = (int(field) for field in s.split("-"))
    return dt.date(*fields)
################################################################################
# Data classes
#
# these should really be dataclasses, and will be converted when clifun drops compatability
# with python 3.6
################################################################################
# Either a single annotated parameter or a whole annotated callable; the
# forward-referenced classes are defined later in the file.
Annotated = Union["AnnotatedParameter", "AnnotatedCallable"]
    def __str__(self) -> str:
        # Human-readable form; relies on the enclosing class exposing `name`
        # and `t` (type) attributes -- the class header is outside this excerpt.
        return f"<parameter: {self.name}: {self.t}>"
class InputSources:
    """Bundle of externally provided values: parsed CLI arguments plus config files."""

    def __init__(self, args: Arguments, config_files: ConfigFiles):
        self.config_files = config_files
        self.args = args
################################################################################
# Assemble inputs from the "outside world"
################################################################################
# Sentinel meaning "no value supplied"; reuses inspect's own marker for
# absent defaults/annotations so it matches inspect.Signature results.
NOT_SPECIFIED = inspect._empty
################################################################################
# Input validation and help
################################################################################
################################################################################
# Determine what inputs a function needs
################################################################################
################################################################################
# Make clifun.py usable as a script to call functions in any module
################################################################################
if __name__ == "__main__":
    # NOTE(review): unconditional echo of argv looks like leftover debug
    # output -- confirm it is intentional before removing.
    print(sys.argv)
    if len(sys.argv) < 3:
        print("Usage: clifun.py path_to_module function_name ...")
        sys.exit(1)
    # argv[1] is the module path, argv[2] the function to invoke.
    target = pathlib.Path(sys.argv[1]).resolve()
    function_name = sys.argv[2]
    # argv[2:] starts at the function name, so `call` sees it in the
    # program-name slot (argv[0]-style) -- presumably intentional; verify
    # against assemble_input_sources.
    arguments = sys.argv[2:]
    module = import_module_by_path(target)
    function = getattr(module, function_name)
    # Print the function's return value so results are visible in the shell.
    print(call(function, arguments))
| 29.283516 | 120 | 0.590513 |
0c3c8f51e10f9073a8d53d99be68ca016464578d | 2,758 | py | Python | pages/views.py | joshua-hashimoto/eigo-of-the-day-django | 68ec7fe4257c67689de596cf34e991a3750b7f36 | [
"MIT"
] | null | null | null | pages/views.py | joshua-hashimoto/eigo-of-the-day-django | 68ec7fe4257c67689de596cf34e991a3750b7f36 | [
"MIT"
] | 8 | 2021-04-08T19:45:15.000Z | 2022-03-12T00:49:25.000Z | pages/views.py | joshua-hashimoto/eigo-of-the-day-django | 68ec7fe4257c67689de596cf34e991a3750b7f36 | [
"MIT"
] | null | null | null | import os
import json
import uuid
from django.conf import settings
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
import cloudinary
| 34.049383 | 85 | 0.591008 |
0c3e673531e09903ae71e40dc82ffb45887a73df | 1,776 | py | Python | shc/log/in_memory.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | [
"Apache-2.0"
] | 5 | 2021-07-02T21:48:45.000Z | 2021-12-12T21:55:42.000Z | shc/log/in_memory.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | [
"Apache-2.0"
] | 49 | 2020-09-18T20:05:55.000Z | 2022-03-05T19:51:33.000Z | shc/log/in_memory.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | [
"Apache-2.0"
] | 1 | 2021-12-10T14:50:43.000Z | 2021-12-10T14:50:43.000Z | import datetime
from typing import Optional, Type, Generic, List, Tuple
from ..base import T
from .generic import PersistenceVariable
| 36.244898 | 96 | 0.606419 |
0c3ec0f29f7bce414073cc341dd9839fbf5fca06 | 1,393 | py | Python | guts/api/contrib/type_actions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | guts/api/contrib/type_actions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | guts/api/contrib/type_actions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | 1 | 2022-03-03T05:41:31.000Z | 2022-03-03T05:41:31.000Z | # Copyright (c) 2015 Aptira Pty Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from guts.api import extensions
from guts.api.openstack import wsgi
# Module-level handles for oslo configuration and logging.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Policy-check helper; presumably scopes checks to the 'types' extension with
# an empty action suffix -- confirm against guts.api.extensions.
authorize = extensions.extension_authorizer('types', '')
| 29.638298 | 78 | 0.724336 |
0c4122d4f0b749136bdf171cdb6e696eecf404bd | 8,579 | py | Python | models/StyleTransfer/AdaIN.py | mtroym/pytorch-train | 3b303b6c7e364a58cb88d7142da942a30cc2b255 | [
"Apache-2.0"
] | 2 | 2019-12-21T14:40:11.000Z | 2020-05-26T09:26:52.000Z | models/StyleTransfer/AdaIN.py | mtroym/pytorch-train | 3b303b6c7e364a58cb88d7142da942a30cc2b255 | [
"Apache-2.0"
] | null | null | null | models/StyleTransfer/AdaIN.py | mtroym/pytorch-train | 3b303b6c7e364a58cb88d7142da942a30cc2b255 | [
"Apache-2.0"
] | 1 | 2020-10-16T12:03:19.000Z | 2020-10-16T12:03:19.000Z | """
Author: Yiming Mao - mtroym@github
Description: Transplant from "https://github.com/xunhuang1995/AdaIN-style/blob/master/train.lua"
"""
import functools
import os
from collections import OrderedDict
import torch
import torch.nn as nn
from torchvision.models import vgg19
from datasets.utils import denorm
from models.blocks import AdaptiveInstanceNorm2d
from models.blocks.vgg import rename_sequential
from models.helpers import init_weights
if __name__ == '__main__':
    # Smoke test: push one random batch through encoder -> AdaIN -> decoder
    # and print the decoded shape.
    bs = 10
    w, h = 128, 128
    image = torch.rand((bs, 3, w, h))
    # g = _Generator_ResizeConv()
    e = _Encoder()
    d = _Decoder(e)
    adain = AdaptiveInstanceNorm2d(e.out_channels)
    # The encoder output is indexed like a dict of feature maps; the same image
    # supplies both AdaIN inputs here (content and style identical in this check).
    te = adain(e(image)["relu4_1"], e(image)["relu4_1"])
    print(d)
    print(d(te).shape)
    # print(e(image).shape)
    # print(d(e(image)).shape)
    # print(.out_channels)
    # fak = g(z)
    # print(fak.shape)
    # print(d(fak).shape)
| 40.852381 | 115 | 0.602518 |
0c418b56746d824c2d98f37af03cc0b209cd7415 | 1,099 | py | Python | airflow/migrations/versions/52d714495f0_job_id_indices.py | rubeshdcube/incubator-airflow | 5419fbb78a2ea2388456c356d2f899ea1991b2de | [
"Apache-2.0"
] | 6 | 2016-04-20T20:40:43.000Z | 2022-02-20T10:32:00.000Z | airflow/migrations/versions/52d714495f0_job_id_indices.py | curest0x1021/incubator-airflow | e6d3160a061dbaa6042d524095dcd1cbc15e0bcd | [
"Apache-2.0"
] | 13 | 2018-11-30T18:18:32.000Z | 2021-02-19T17:04:12.000Z | airflow/migrations/versions/52d714495f0_job_id_indices.py | curest0x1021/incubator-airflow | e6d3160a061dbaa6042d524095dcd1cbc15e0bcd | [
"Apache-2.0"
] | 9 | 2017-08-24T15:47:44.000Z | 2022-02-14T03:30:49.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""job_id indices
Revision ID: 52d714495f0
Revises: 338e90f54d61
Create Date: 2015-10-20 03:17:01.962542
"""
# Revision identifiers, used by Alembic to order migrations.
revision = '52d714495f0'          # this migration's id
down_revision = '338e90f54d61'    # migration applied directly before this one
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
| 27.475 | 98 | 0.755232 |
0c42822d78adfa0e7f0264f5d356cb0270939941 | 7,836 | py | Python | deep_semantic_similarity_keras.py | viksit/Deep-Semantic-Similarity-Model | 1dc94346801e711125fb573284a1984ce17fb90e | [
"MIT"
] | 3 | 2016-05-26T00:04:38.000Z | 2019-10-22T09:52:39.000Z | deep_semantic_similarity_keras.py | viksit/Deep-Semantic-Similarity-Model | 1dc94346801e711125fb573284a1984ce17fb90e | [
"MIT"
] | null | null | null | deep_semantic_similarity_keras.py | viksit/Deep-Semantic-Similarity-Model | 1dc94346801e711125fb573284a1984ce17fb90e | [
"MIT"
] | 1 | 2019-10-22T09:59:04.000Z | 2019-10-22T09:59:04.000Z | # Michael A. Alcorn (malcorn@redhat.com)
# An implementation of the Deep Semantic Similarity Model (DSSM) found in [1].
# [1] Shen, Y., He, X., Gao, J., Deng, L., and Mesnil, G. 2014. A latent semantic model
# with convolutional-pooling structure for information retrieval. In CIKM, pp. 101-110.
# http://research.microsoft.com/pubs/226585/cikm2014_cdssm_final.pdf
# [2] http://research.microsoft.com/en-us/projects/dssm/
# [3] http://research.microsoft.com/pubs/238873/wsdm2015.v3.pdf
import numpy as np
from keras import backend
from keras.layers import Input, merge
from keras.layers.core import Dense, Lambda, Reshape
from keras.layers.convolutional import Convolution1D
from keras.models import Model
def R(vects):
    """
    Calculate the cosine similarity of two vectors.

    :param vects: a two-element sequence ``(x, y)`` of backend tensors.
    :return: ``dot(x, y^T) / (||x||_2 * ||y||_2)`` -- see equation (4).
    """
    (x, y) = vects
    numerator = backend.dot(x, backend.transpose(y))
    denominator = x.norm(2) * y.norm(2)  # product of the L2 norms
    return numerator / denominator
# NOTE(review): the API used below (`merge(...)`, `border_mode=`,
# `Model(input=..., output=...)`) is Keras 1.x; the script would need
# porting before it runs on Keras 2+.
LETTER_GRAM_SIZE = 3 # See section 3.2.
WINDOW_SIZE = 3 # See section 3.2.
TOTAL_LETTER_GRAMS = int(3 * 1e4) # Determined from data. See section 3.2.
WORD_DEPTH = WINDOW_SIZE * TOTAL_LETTER_GRAMS # See equation (1).
K = 300 # Dimensionality of the max-pooling layer. See section 3.4.
L = 128 # Dimensionality of latent semantic space. See section 3.5.
J = 4 # Number of random unclicked documents serving as negative examples for a query. See section 4.
FILTER_LENGTH = 1 # We only consider one time step for convolutions.
# Input tensors holding the query, positive (clicked) document, and negative (unclicked) documents.
# The first dimension is None because the queries and documents can vary in length.
query = Input(shape = (None, WORD_DEPTH))
pos_doc = Input(shape = (None, WORD_DEPTH))
neg_docs = [Input(shape = (None, WORD_DEPTH)) for j in range(J)]
# Query model. The paper uses separate neural nets for queries and documents (see section 5.2).
# In this step, we transform each word vector with WORD_DEPTH dimensions into its
# convolved representation with K dimensions. K is the number of kernels/filters
# being used in the operation. Essentially, the operation is taking the dot product
# of a single weight matrix (W_c) with each of the word vectors (l_t) from the
# query matrix (l_Q), adding a bias vector (b_c), and then applying the tanh function.
# That is, h_Q = tanh(W_c l_Q + b_c). With that being said, that's not actually
# how the operation is being calculated here. To tie the weights of the weight
# matrix (W_c) together, we have to use a one-dimensional convolutional layer.
# Further, we have to transpose our query matrix (l_Q) so that time is the first
# dimension rather than the second (as described in the paper). That is, l_Q[0, :]
# represents our first word vector rather than l_Q[:, 0]. We can think of the weight
# matrix (W_c) as being similarly transposed such that each kernel is a column
# of W_c. Therefore, h_Q = tanh(l_Q W_c + b_c) with l_Q, W_c, and b_c being
# the transposes of the matrices described in the paper.
query_conv = Convolution1D(K, FILTER_LENGTH, border_mode = "same", input_shape = (None, WORD_DEPTH), activation = "tanh")(query) # See equation (2).
# Next, we apply a max-pooling layer to the convolved query matrix. Keras provides
# its own max-pooling layers, but they cannot handle variable length input (as
# far as I can tell). As a result, I define my own max-pooling layer here. In the
# paper, the operation selects the maximum value for each row of h_Q, but, because
# we're using the transpose, we're selecting the maximum value for each column.
query_max = Lambda(lambda x: x.max(axis = 1), output_shape = (K,))(query_conv) # See section 3.4.
# In this step, we generate the semantic vector representation of the query. This
# is a standard neural network dense layer, i.e., y = tanh(W_s v + b_s).
query_sem = Dense(L, activation = "tanh", input_dim = K)(query_max) # See section 3.5.
# The document equivalent of the above query model.  The layers are instantiated
# once and reused, so the positive and all negative documents share weights.
doc_conv = Convolution1D(K, FILTER_LENGTH, border_mode = "same", input_shape = (None, WORD_DEPTH), activation = "tanh")
doc_max = Lambda(lambda x: x.max(axis = 1), output_shape = (K,))
doc_sem = Dense(L, activation = "tanh", input_dim = K)
pos_doc_conv = doc_conv(pos_doc)
neg_doc_convs = [doc_conv(neg_doc) for neg_doc in neg_docs]
pos_doc_max = doc_max(pos_doc_conv)
neg_doc_maxes = [doc_max(neg_doc_conv) for neg_doc_conv in neg_doc_convs]
pos_doc_sem = doc_sem(pos_doc_max)
neg_doc_sems = [doc_sem(neg_doc_max) for neg_doc_max in neg_doc_maxes]
# This layer calculates the cosine similarity between the semantic representations of
# a query and a document.
R_layer = Lambda(R, output_shape = (1,)) # See equation (4).
R_Q_D_p = R_layer([query_sem, pos_doc_sem]) # See equation (4).
R_Q_D_ns = [R_layer([query_sem, neg_doc_sem]) for neg_doc_sem in neg_doc_sems] # See equation (4).
concat_Rs = merge([R_Q_D_p] + R_Q_D_ns, mode = "concat")
concat_Rs = Reshape((J + 1, 1))(concat_Rs)
# In this step, we multiply each R(Q, D) value by gamma. In the paper, gamma is
# described as a smoothing factor for the softmax function, and it's set empirically
# on a held-out data set. We're going to learn gamma's value by pretending it's
# a single, 1 x 1 kernel.
with_gamma = Convolution1D(1, 1, border_mode = "same", input_shape = (J + 1, 1), activation = "linear")(concat_Rs) # See equation (5).
# Next, we exponentiate each of the gamma x R(Q, D) values.
exponentiated = Lambda(lambda x: backend.exp(x), output_shape = (J + 1,))(with_gamma) # See equation (5).
exponentiated = Reshape((J + 1,))(exponentiated)
# Finally, we use the softmax function to calculate the P(D+|Q).
prob = Lambda(lambda x: x[0][0] / backend.sum(x[0]), output_shape = (1,))(exponentiated) # See equation (5).
# We now have everything we need to define our model.
model = Model(input = [query, pos_doc] + neg_docs, output = prob)
model.compile(optimizer = "adadelta", loss = "binary_crossentropy")
# Build a random data set.  Each sample is one variable-length query plus one
# positive (clicked) document; values are uniform noise standing in for the
# letter-trigram features described above.
sample_size = 10
l_Qs = []
pos_l_Ds = []
for i in range(sample_size):
    query_len = np.random.randint(1, 10)
    l_Q = np.random.rand(1, query_len, WORD_DEPTH)
    l_Qs.append(l_Q)
    doc_len = np.random.randint(50, 500)
    l_D = np.random.rand(1, doc_len, WORD_DEPTH)
    pos_l_Ds.append(l_D)
# Negative documents for sample i are J positive documents of other samples.
neg_l_Ds = []
for i in range(sample_size):
    possibilities = list(range(sample_size))
    possibilities.remove(i)
    negatives = np.random.choice(possibilities, J)
    neg_l_Ds.append([pos_l_Ds[negative] for negative in negatives])
# Because we're using the "binary_crossentropy" loss function, we can pretend that
# we're dealing with a binary classification problem and that every sample is a
# member of the "1" class.
y = np.ones(1)
# Train sample-by-sample, one epoch each (`nb_epoch` is the Keras 1.x spelling).
for i in range(sample_size):
    history = model.fit([l_Qs[i], pos_l_Ds[i]] + neg_l_Ds[i], y, nb_epoch = 1, verbose = 0)
# Here, I walk through an example of how to define a function for calculating output
# from the computational graph. Let's define a function that calculates R(Q, D+)
# for a given query and clicked document. The function depends on two inputs, query
# and pos_doc. That is, if you start at the point in the graph where R(Q, D+) is
# calculated and then backtrack as far as possible, you'll end up at two different
# starting points, query and pos_doc. As a result, we supply those inputs in a list
# to the function. This particular function only calculates a single output, but
# multiple outputs are possible (see the next example).
get_R_Q_D_p = backend.function([query, pos_doc], R_Q_D_p)
get_R_Q_D_p([l_Qs[0], pos_l_Ds[0]])
# A slightly more complex function. Notice that both neg_docs and the output are
# lists.
get_R_Q_D_ns = backend.function([query] + neg_docs, R_Q_D_ns)
get_R_Q_D_ns([l_Qs[0]] + neg_l_Ds[0])
| 49.910828 | 148 | 0.732006 |
0c4483174d1c4ff711dd1bd4cb802a150131d7f7 | 469 | py | Python | posthog/migrations/0087_fix_annotation_created_at.py | avoajaugochukwu/posthog | 7e7fd42b0542ebc4734aedb926df11d462e3dd4f | [
"MIT"
] | 7,409 | 2020-02-09T23:18:10.000Z | 2022-03-31T22:36:25.000Z | posthog/migrations/0087_fix_annotation_created_at.py | avoajaugochukwu/posthog | 7e7fd42b0542ebc4734aedb926df11d462e3dd4f | [
"MIT"
] | 5,709 | 2020-02-09T23:26:13.000Z | 2022-03-31T20:20:01.000Z | posthog/migrations/0087_fix_annotation_created_at.py | avoajaugochukwu/posthog | 7e7fd42b0542ebc4734aedb926df11d462e3dd4f | [
"MIT"
] | 647 | 2020-02-13T17:50:55.000Z | 2022-03-31T11:24:19.000Z | # Generated by Django 3.0.7 on 2020-10-14 07:46
import django.utils.timezone
from django.db import migrations, models
| 23.45 | 85 | 0.648188 |
0c476cbc9139db2d5b5477a2919a3f47a83b94b5 | 4,723 | py | Python | tumorevo/tumorfig/main.py | pedrofale/tumorevo | cf43f3854f6815c822cf4df71be82fc6dbae065b | [
"MIT"
] | 2 | 2022-02-08T12:54:58.000Z | 2022-03-04T12:21:06.000Z | tumorevo/tumorfig/main.py | pedrofale/tumorevo | cf43f3854f6815c822cf4df71be82fc6dbae065b | [
"MIT"
] | null | null | null | tumorevo/tumorfig/main.py | pedrofale/tumorevo | cf43f3854f6815c822cf4df71be82fc6dbae065b | [
"MIT"
] | null | null | null | """
Create a cartoon of a tumor given the frequencies of different genotypes.
"""
from .util import *
import pandas as pd
import matplotlib.pyplot as plt
import click
import os
from pathlib import Path
from pymuller import muller
if __name__ == "__main__":
    # Script entry point; `main` is presumably the Click command defined
    # elsewhere in this module (not visible in this excerpt).
    main()
| 28.624242 | 85 | 0.547957 |
0c48b673acc0ea7efa42fafb3fba6d032e5deab7 | 196 | py | Python | src/brouwers/online_users/urls.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 6 | 2015-03-03T13:23:07.000Z | 2021-12-19T18:12:41.000Z | src/brouwers/online_users/urls.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 95 | 2015-02-07T00:55:39.000Z | 2022-02-08T20:22:05.000Z | src/brouwers/online_users/urls.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 2 | 2016-03-22T16:53:26.000Z | 2019-02-09T22:46:04.000Z | from django.conf.urls import url
from .views import get_online_users, set_online
# URL namespace for reverse() lookups, e.g. 'online_users:<name>'.
app_name = 'online_users'
# Two short endpoints; judging by the view names, /so/ marks the current user
# online and /ous/ returns the online-user list -- confirm against .views.
urlpatterns = [
    url(r'^so/$', set_online),
    url(r'^ous/$', get_online_users),
]
| 19.6 | 47 | 0.704082 |
0c49d08e42802a84e6d6315644d21f43e88ce921 | 5,881 | py | Python | dftb+/plot_spline.py | hsulab/DailyScripts | 26b03cfb721fd66f39c86df50d2ec5866e651d6e | [
"MIT"
] | 2 | 2020-06-08T21:39:44.000Z | 2020-10-18T15:12:47.000Z | dftb+/plot_spline.py | hsulab/DailyScripts | 26b03cfb721fd66f39c86df50d2ec5866e651d6e | [
"MIT"
] | null | null | null | dftb+/plot_spline.py | hsulab/DailyScripts | 26b03cfb721fd66f39c86df50d2ec5866e651d6e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import argparse
import numpy as np
import matplotlib as mpl
mpl.use('Agg') #silent mode
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, InsetPosition, zoomed_inset_axes, mark_inset
MAXLINE = 10000
def plot_spline(skf='Pt-Pt.skf', skf2=None, rmin=1.0, pic='spl.png'):
    """Plot the spline repulsive potential stored in a Slater-Koster file.

    skf  -- path of the primary .skf file to plot.
    skf2 -- optional second .skf file overlaid for comparison.
    rmin -- lower bound (Bohr) of the zoomed inset region.
    pic  -- output path of the saved figure.
    """
    # read spline, turn into spline object
    SP_rep1 = read_spline(skf)
    # sample the repulsive potential on a dense grid just below the cutoff
    rs = np.linspace(0.,SP_rep1.cutoff-0.01,1000)
    reps = []
    for r in rs:
        reps.append(SP_rep1.calc_rep(r))
    # element-pair label, e.g. 'Pt-Pt' from 'Pt-Pt.skf'
    skf_name = os.path.basename(skf).split('.')[0]
    rs = np.array(rs)
    reps = np.array(reps)
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12,8))
    ax.set_title(r'$%s$ Spline Repulsive Potential' %skf_name, \
        fontsize=24, fontweight='bold')
    ax.set_xlabel(r'$r$ / Bohr', fontsize=20)
    ax.set_ylabel(r'$V_{rep}(r)$ / Hartree', fontsize=20)
    skf1_curve, = ax.plot(rs, reps, \
        color='g', linestyle='-', linewidth=2., \
        label='Skf-1')
    # inset axes zooming into the [rmin, cutoff] window
    ax2 = plt.axes([0,0,1,1])
    ip = InsetPosition(ax, [0.4,0.2,0.5,0.5])
    ax2.set_axes_locator(ip)
    mark_inset(ax, ax2, loc1=1, loc2=3, fc="none", ec='0.5')
    #ax2 = zoomed_inset_axes(ax, 1, loc=4)
    r_min, r_max = rmin, SP_rep1.cutoff
    indices = np.where((rs>r_min) & (rs<r_max))
    ax2.plot(rs[indices], reps[indices], color='g', linestyle='-', linewidth=2.)
    # optional second file, overlaid for comparison
    if skf2:
        SP_rep2 = read_spline(skf2)
        # re-sample on the second file's own grid
        rs = np.linspace(0.,SP_rep2.cutoff-0.01,1000)
        reps = []
        for r in rs:
            reps.append(SP_rep2.calc_rep(r))
        rs = np.array(rs)
        reps = np.array(reps)
        skf2_curve, = ax.plot(rs, reps, \
            color='orange', linestyle='--', linewidth=2., \
            label='Skf-2')
        # NOTE(review): `indices` was computed from the FIRST file's grid;
        # reusing it on the re-sampled `rs` is only consistent when both
        # cutoffs match -- confirm.
        ax2.plot(rs[indices], reps[indices], color='orange', linestyle='--', linewidth=2.)
        plt.legend(handles=[skf1_curve,skf2_curve])
    else:
        plt.legend(handles=[skf1_curve,])
    plt.savefig(pic)
if __name__ == '__main__':
    # Command-line driver: parse the file/figure options and render the plot.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--skf', required=True,\
        help='Slater-Koster File')
    parser.add_argument('-f2', '--skf2', default=None, \
        help='Second Slater-Koster File for Comparision')
    parser.add_argument('-p', '--pic', \
        default='spl.png', help='Spline Repulsive Potential Figure')
    parser.add_argument('-rmin', '--radius_min', type=float,\
        default=1.0, help='Minimum Radius for Zoom')
    args = parser.parse_args()
    plot_spline(args.skf, args.skf2, args.radius_min, args.pic)
    #plot_spline()
| 29.70202 | 106 | 0.530012 |
0c4b5ba22b3ba7761012b4918404bffd6258a269 | 370 | py | Python | network_monitor/__init__.py | brennanhfredericks/network-monitor-client | 618d222bb015662c3958f0100a965f3c71b29d32 | [
"MIT"
] | null | null | null | network_monitor/__init__.py | brennanhfredericks/network-monitor-client | 618d222bb015662c3958f0100a965f3c71b29d32 | [
"MIT"
] | null | null | null | network_monitor/__init__.py | brennanhfredericks/network-monitor-client | 618d222bb015662c3958f0100a965f3c71b29d32 | [
"MIT"
] | null | null | null | import argparse
import netifaces
import sys
import signal
import os
import asyncio
from asyncio import CancelledError, Task
from typing import Optional, List, Any
from .services import (
Service_Manager,
Packet_Parser,
Packet_Submitter,
Packet_Filter,
)
from .configurations import generate_configuration_template, DevConfig, load_config_from_file
| 16.818182 | 93 | 0.802703 |
0c4c75e50a5aeb0f4d0c50388de64676ac264483 | 1,516 | py | Python | investing_com/cs_pattern_list.py | filipecn/maldives | f20f17d817fc3dcad7f9674753744716d1d4c821 | [
"MIT"
] | 1 | 2021-09-17T18:04:33.000Z | 2021-09-17T18:04:33.000Z | investing_com/cs_pattern_list.py | filipecn/maldives | f20f17d817fc3dcad7f9674753744716d1d4c821 | [
"MIT"
] | null | null | null | investing_com/cs_pattern_list.py | filipecn/maldives | f20f17d817fc3dcad7f9674753744716d1d4c821 | [
"MIT"
] | 3 | 2021-09-17T18:04:43.000Z | 2022-03-18T20:04:07.000Z | #!/usr/bin/py
import pandas as pd
import os
# Holds investing.com candlestick patterns
| 29.72549 | 73 | 0.513852 |
0c4cdf64475499e51798185a532224a138493103 | 1,113 | py | Python | simpleTest04Client_.py | LaplaceKorea/APIClient | e772482c3d9cbedee98f46a3529dca5acc254f3c | [
"MIT"
] | null | null | null | simpleTest04Client_.py | LaplaceKorea/APIClient | e772482c3d9cbedee98f46a3529dca5acc254f3c | [
"MIT"
] | null | null | null | simpleTest04Client_.py | LaplaceKorea/APIClient | e772482c3d9cbedee98f46a3529dca5acc254f3c | [
"MIT"
] | null | null | null | from LaplaceWSAPIClient import *
from MarkowitzSerde import *
from TargetSerde import *
from Operators import *
from TargetOperators import *
from RLStructure import *
from ClientConfig import client_config
# Build a query over 2021-01-01..2021-01-21 with a starting portfolio of
# $100k cash plus 1.0 of each listed ticker (units presumably shares).
# NOTE(review): RLQuery/UserTokenSerde semantics come from the Laplace client
# libraries star-imported above -- field meanings are not verifiable here.
query = RLQuery("default", datetime(2021,1,1), datetime(2021,1,21), {
    "BankAccount": 100000.0,
    "MMM":1.0,
    "AA":1.0,
    "AXP":1.0,
    "BA":1.0,
    "BAC":1.0,
    "C":1.0,
    "CAT":1.0,
    "CVX":1.0,
    "DD":1.0,
    "DIS":1.0,
    "GE":1.0,
    "GM":1.0,
    "HD":1.0,
    "HPQ":1.0,
    "IBM":1.0,
    "JNJ":1.0,
    "JPM":1.0,
    "KO":1.0,
    "MCD":1.0,
    "MRK":1.0,
    "PFE":1.0,
    "PG":1.0,
    "T":1.0,
    "UTX":1.0,
    "VZ":1.0,
    "WMT":1.0,
    "XOM":1.0
    }, UserTokenSerde(client_config["user"],client_config["token"]))
# Send the query over the configured websocket endpoint and print selected
# fields of the first step.  NOTE(review): 203 is a magic index into
# Steps[0]; its meaning is unknown from this file -- confirm.
performQueryRLQuery(client_config["wss"], query, lambda x: print("yahoo: ", x.Steps[0][0], x.Steps[0][1], x.Steps[0][203]))
| 27.146341 | 123 | 0.442947 |
0c4fdea50a153837205a14c5c61c7d560b9d7a43 | 14,406 | py | Python | vdisk.py | cookpan001/vdisk | 1414e5c20eba3722ce99818fe48ddf0217fb25ca | [
"BSD-3-Clause"
] | 1 | 2016-01-11T06:46:11.000Z | 2016-01-11T06:46:11.000Z | vdisk.py | cookpan001/vdisk | 1414e5c20eba3722ce99818fe48ddf0217fb25ca | [
"BSD-3-Clause"
] | null | null | null | vdisk.py | cookpan001/vdisk | 1414e5c20eba3722ce99818fe48ddf0217fb25ca | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# author: cookpan001
import sys
import logging
import time
import mimetypes
import urllib
import urllib2
"""
oauth2 client
"""
"""
All the responses will be a Response object
"""
"""
The vdisk(weipan) client.
"""
| 36.470886 | 115 | 0.521102 |
0c50ef47cd53ea48685602b6b3d98c7fea184c96 | 263 | py | Python | setup.py | thevoxium/netspeed | 9e16a49d64da90a173ef9eaf491d4245c1023105 | [
"MIT"
] | null | null | null | setup.py | thevoxium/netspeed | 9e16a49d64da90a173ef9eaf491d4245c1023105 | [
"MIT"
] | null | null | null | setup.py | thevoxium/netspeed | 9e16a49d64da90a173ef9eaf491d4245c1023105 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='netspeed',
version='0.1',
py_modules=['netspeed'],
install_requires=[
'Click',
'pyspeedtest'
],
entry_points='''
[console_scripts]
netspeed=netspeed:cli
''',
)
| 16.4375 | 29 | 0.558935 |
0c51490cf6a9e00d3f171f44d583a875d050c2af | 244 | py | Python | store/admin.py | salemzii/ChopFast | 95ea88387ecfdb56bd643970b69425b1a1c6f388 | [
"MIT"
] | null | null | null | store/admin.py | salemzii/ChopFast | 95ea88387ecfdb56bd643970b69425b1a1c6f388 | [
"MIT"
] | null | null | null | store/admin.py | salemzii/ChopFast | 95ea88387ecfdb56bd643970b69425b1a1c6f388 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import (Dish, Payments, Order, Delivery, OrderItem)
# Register the store's models so they can be managed via the Django admin.
admin.site.register(Dish)
admin.site.register(Payments)
admin.site.register(Order)
admin.site.register(Delivery)
admin.site.register(OrderItem)
| 24.4 | 64 | 0.807377 |
0c517d2c976cb6c4a933b0a237cbe0bcc83aaacb | 31,109 | py | Python | hpedockerplugin/request_context.py | renovate-bot/python-hpedockerplugin | b7fa6b3193fa6dd42574585b4c621ff6a16babc9 | [
"Apache-2.0"
] | 49 | 2016-06-14T22:25:40.000Z | 2021-04-05T05:00:59.000Z | hpedockerplugin/request_context.py | imran-ansari/python-hpedockerplugin | e2726f48ac793dc894100e3772c40ce89bfe9bb8 | [
"Apache-2.0"
] | 550 | 2016-07-25T12:01:12.000Z | 2021-11-15T17:52:40.000Z | hpedockerplugin/request_context.py | imran-ansari/python-hpedockerplugin | e2726f48ac793dc894100e3772c40ce89bfe9bb8 | [
"Apache-2.0"
] | 96 | 2016-06-01T22:07:03.000Z | 2021-06-22T09:05:05.000Z | import abc
import json
import re
from collections import OrderedDict
from oslo_log import log as logging
import hpedockerplugin.exception as exception
from hpedockerplugin.hpe import share
LOG = logging.getLogger(__name__)
# To be implemented by derived class
def _default_req_ctxt_creator(self, contents):
    """Fallback request-context builder used when no option-specific builder
    applies.  Intentionally a no-op in this base implementation; concrete
    subclasses are expected to override it.

    :param contents: decoded request payload (structure not verifiable from
        this excerpt -- TODO confirm against the derived builders).
    """
    pass
def _check_is_valid_acl_string(self, fsMode):
    """Validate an ACL-style fsMode string of exactly three comma-separated
    clauses, delegating per-clause checks to
    ``self._check_valid_fsMode_string``.

    :param fsMode: comma-separated ACL specification.
    :returns: True when the string has three clauses and each one validates.
    :raises exception.InvalidInput: when the clause count is not three.
    """
    clauses = fsMode.split(',')
    if len(clauses) != 3:
        msg = ("Passed acl string is not valid. "
               "Pass correct acl string.")
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)
    for clause in clauses:
        self._check_valid_fsMode_string(clause)
    return True
# TODO: This is work in progress - can be taken up later if agreed upon
# class VolumeRequestContextBuilder(RequestContextBuilder):
# def __init__(self, backend_configs):
# super(VolumeRequestContextBuilder, self).__init__(backend_configs)
#
# def _get_build_req_ctxt_map(self):
# build_req_ctxt_map = OrderedDict()
# build_req_ctxt_map['virtualCopyOf,scheduleName'] = \
# self._create_snap_schedule_req_ctxt,
# build_req_ctxt_map['virtualCopyOf,scheduleFrequency'] = \
# self._create_snap_schedule_req_ctxt
# build_req_ctxt_map['virtualCopyOf,snaphotPrefix'] = \
# self._create_snap_schedule_req_ctxt
# build_req_ctxt_map['virtualCopyOf'] = \
# self._create_snap_req_ctxt
# build_req_ctxt_map['cloneOf'] = \
# self._create_clone_req_ctxt
# build_req_ctxt_map['importVol'] = \
# self._create_import_vol_req_ctxt
# build_req_ctxt_map['replicationGroup'] = \
# self._create_rcg_req_ctxt
# build_req_ctxt_map['help'] = self._create_help_req_ctxt
# return build_req_ctxt_map
#
# def _default_req_ctxt_creator(self, contents):
# return self._create_vol_create_req_ctxt(contents)
#
# @staticmethod
# def _validate_mutually_exclusive_ops(contents):
# mutually_exclusive_ops = ['virtualCopyOf', 'cloneOf', 'importVol',
# 'replicationGroup']
# if 'Opts' in contents and contents['Opts']:
# received_opts = contents.get('Opts').keys()
# diff = set(mutually_exclusive_ops) - set(received_opts)
# if len(diff) < len(mutually_exclusive_ops) - 1:
# mutually_exclusive_ops.sort()
# msg = "Operations %s are mutually exclusive and cannot be " \
# "specified together. Please check help for usage." % \
# mutually_exclusive_ops
# raise exception.InvalidInput(reason=msg)
#
# @staticmethod
# def _validate_opts(operation, contents, valid_opts, mandatory_opts=None):
# if 'Opts' in contents and contents['Opts']:
# received_opts = contents.get('Opts').keys()
#
# if mandatory_opts:
# diff = set(mandatory_opts) - set(received_opts)
# if diff:
# # Print options in sorted manner
# mandatory_opts.sort()
# msg = "One or more mandatory options %s are missing " \
# "for operation %s" % (mandatory_opts, operation)
# raise exception.InvalidInput(reason=msg)
#
# diff = set(received_opts) - set(valid_opts)
# if diff:
# diff = list(diff)
# diff.sort()
# msg = "Invalid option(s) %s specified for operation %s. " \
# "Please check help for usage." % \
# (diff, operation)
# raise exception.InvalidInput(reason=msg)
#
# def _create_vol_create_req_ctxt(self, contents):
# valid_opts = ['compression', 'size', 'provisioning',
# 'flash-cache', 'qos-name', 'fsOwner',
# 'fsMode', 'mountConflictDelay', 'cpg',
# 'snapcpg', 'backend']
# self._validate_opts("create volume", contents, valid_opts)
# return {'operation': 'create_volume',
# '_vol_orchestrator': 'volume'}
#
# def _create_clone_req_ctxt(self, contents):
# valid_opts = ['cloneOf', 'size', 'cpg', 'snapcpg',
# 'mountConflictDelay']
# self._validate_opts("clone volume", contents, valid_opts)
# return {'operation': 'clone_volume',
# 'orchestrator': 'volume'}
#
# def _create_snap_req_ctxt(self, contents):
# valid_opts = ['virtualCopyOf', 'retentionHours', 'expirationHours',
# 'mountConflictDelay', 'size']
# self._validate_opts("create snapshot", contents, valid_opts)
# return {'operation': 'create_snapshot',
# '_vol_orchestrator': 'volume'}
#
# def _create_snap_schedule_req_ctxt(self, contents):
# valid_opts = ['virtualCopyOf', 'scheduleFrequency', 'scheduleName',
# 'snapshotPrefix', 'expHrs', 'retHrs',
# 'mountConflictDelay', 'size']
# mandatory_opts = ['scheduleName', 'snapshotPrefix',
# 'scheduleFrequency']
# self._validate_opts("create snapshot schedule", contents,
# valid_opts, mandatory_opts)
# return {'operation': 'create_snapshot_schedule',
# 'orchestrator': 'volume'}
#
# def _create_import_vol_req_ctxt(self, contents):
# valid_opts = ['importVol', 'backend', 'mountConflictDelay']
# self._validate_opts("import volume", contents, valid_opts)
#
# # Replication enabled backend cannot be used for volume import
# backend = contents['Opts'].get('backend', 'DEFAULT')
# if backend == '':
# backend = 'DEFAULT'
#
# try:
# config = self._backend_configs[backend]
# except KeyError:
# backend_names = list(self._backend_configs.keys())
# backend_names.sort()
# msg = "ERROR: Backend '%s' doesn't exist. Available " \
# "backends are %s. Please use " \
# "a valid backend name and retry." % \
# (backend, backend_names)
# raise exception.InvalidInput(reason=msg)
#
# if config.replication_device:
# msg = "ERROR: Import volume not allowed with replication " \
# "enabled backend '%s'" % backend
# raise exception.InvalidInput(reason=msg)
#
# volname = contents['Name']
# existing_ref = str(contents['Opts']['importVol'])
# manage_opts = contents['Opts']
# return {'orchestrator': 'volume',
# 'operation': 'import_volume',
# 'args': (volname,
# existing_ref,
# backend,
# manage_opts)}
#
# def _create_rcg_req_ctxt(self, contents):
# valid_opts = ['replicationGroup', 'size', 'provisioning',
# 'backend', 'mountConflictDelay', 'compression']
# self._validate_opts('create replicated volume', contents, valid_opts)
#
# # It is possible that the user configured replication in hpe.conf
# # but didn't specify any options. In that case too, this operation
# # must fail asking for "replicationGroup" parameter
# # Hence this validation must be done whether "Opts" is there or not
# options = contents['Opts']
# backend = self._get_str_option(options, 'backend', 'DEFAULT')
# create_vol_args = self._get_create_volume_args(options)
# rcg_name = create_vol_args['replicationGroup']
# try:
# self._validate_rcg_params(rcg_name, backend)
# except exception.InvalidInput as ex:
# return json.dumps({u"Err": ex.msg})
#
# return {'operation': 'create_volume',
# 'orchestrator': 'volume',
# 'args': create_vol_args}
#
# def _get_fs_owner(self, options):
# val = self._get_str_option(options, 'fsOwner', None)
# if val:
# fs_owner = val.split(':')
# if len(fs_owner) != 2:
# msg = "Invalid value '%s' specified for fsOwner. Please " \
# "specify a correct value." % val
# raise exception.InvalidInput(msg)
# return fs_owner
# return None
#
# def _get_fs_mode(self, options):
# fs_mode_str = self._get_str_option(options, 'fsMode', None)
# if fs_mode_str:
# try:
# int(fs_mode_str)
# except ValueError as ex:
# msg = "Invalid value '%s' specified for fsMode. Please " \
# "specify an integer value." % fs_mode_str
# raise exception.InvalidInput(msg)
#
# if fs_mode_str[0] != '0':
# msg = "Invalid value '%s' specified for fsMode. Please " \
# "specify an octal value." % fs_mode_str
# raise exception.InvalidInput(msg)
#
# for mode in fs_mode_str:
# if int(mode) > 7:
# msg = "Invalid value '%s' specified for fsMode. Please"\
# " specify an octal value." % fs_mode_str
# raise exception.InvalidInput(msg)
# return fs_mode_str
#
# def _get_create_volume_args(self, options):
# ret_args = dict()
# ret_args['size'] = self._get_int_option(
# options, 'size', volume.DEFAULT_SIZE)
# ret_args['provisioning'] = self._get_str_option(
# options, 'provisioning', volume.DEFAULT_PROV,
# ['full', 'thin', 'dedup'])
# ret_args['flash-cache'] = self._get_str_option(
# options, 'flash-cache', volume.DEFAULT_FLASH_CACHE,
# ['true', 'false'])
# ret_args['qos-name'] = self._get_str_option(
# options, 'qos-name', volume.DEFAULT_QOS)
# ret_args['compression'] = self._get_str_option(
# options, 'compression', volume.DEFAULT_COMPRESSION_VAL,
# ['true', 'false'])
# ret_args['fsOwner'] = self._get_fs_owner(options)
# ret_args['fsMode'] = self._get_fs_mode(options)
# ret_args['mountConflictDelay'] = self._get_int_option(
# options, 'mountConflictDelay',
# volume.DEFAULT_MOUNT_CONFLICT_DELAY)
# ret_args['cpg'] = self._get_str_option(options, 'cpg', None)
# ret_args['snapcpg'] = self._get_str_option(options, 'snapcpg', None)
# ret_args['replicationGroup'] = self._get_str_option(
# options, 'replicationGroup', None)
#
# return ret_args
#
# def _validate_rcg_params(self, rcg_name, backend_name):
# LOG.info("Validating RCG: %s, backend name: %s..." % (rcg_name,
# backend_name))
# hpepluginconfig = self._backend_configs[backend_name]
# replication_device = hpepluginconfig.replication_device
#
# LOG.info("Replication device: %s" % six.text_type(
# replication_device))
#
# if rcg_name and not replication_device:
# msg = "Request to create replicated volume cannot be fulfilled"\
# "without defining 'replication_device' entry defined in"\
# "hpe.conf for the backend '%s'. Please add it and execute"\
# "the request again." % backend_name
# raise exception.InvalidInput(reason=msg)
#
# if replication_device and not rcg_name:
# backend_names = list(self._backend_configs.keys())
# backend_names.sort()
#
# msg = "'%s' is a replication enabled backend. " \
# "Request to create replicated volume cannot be fulfilled "\
# "without specifying 'replicationGroup' option in the "\
# "request. Please either specify 'replicationGroup' or use"\
# "a normal backend and execute the request again. List of"\
# "backends defined in hpe.conf: %s" % (backend_name,
# backend_names)
# raise exception.InvalidInput(reason=msg)
#
# if rcg_name and replication_device:
#
# def _check_valid_replication_mode(mode):
# valid_modes = ['synchronous', 'asynchronous', 'streaming']
# if mode.lower() not in valid_modes:
# msg = "Unknown replication mode '%s' specified. Valid "\
# "values are 'synchronous | asynchronous | " \
# "streaming'" % mode
# raise exception.InvalidInput(reason=msg)
#
# rep_mode = replication_device['replication_mode'].lower()
# _check_valid_replication_mode(rep_mode)
# if replication_device.get('quorum_witness_ip'):
# if rep_mode.lower() != 'synchronous':
# msg = "For Peer Persistence, replication mode must be "\
# "synchronous"
# raise exception.InvalidInput(reason=msg)
#
# sync_period = replication_device.get('sync_period')
# if sync_period and rep_mode == 'synchronous':
# msg = "'sync_period' can be defined only for 'asynchronous'"\
# " and 'streaming' replicate modes"
# raise exception.InvalidInput(reason=msg)
#
# if (rep_mode == 'asynchronous' or rep_mode == 'streaming')\
# and sync_period:
# try:
# sync_period = int(sync_period)
# except ValueError as ex:
# msg = "Non-integer value '%s' not allowed for " \
# "'sync_period'. %s" % (
# replication_device.sync_period, ex)
# raise exception.InvalidInput(reason=msg)
# else:
# SYNC_PERIOD_LOW = 300
# SYNC_PERIOD_HIGH = 31622400
# if sync_period < SYNC_PERIOD_LOW or \
# sync_period > SYNC_PERIOD_HIGH:
# msg = "'sync_period' must be between 300 and " \
# "31622400 seconds."
# raise exception.InvalidInput(reason=msg)
#
# @staticmethod
# def _validate_name(vol_name):
# is_valid_name = re.match("^[A-Za-z0-9]+[A-Za-z0-9_-]+$", vol_name)
# if not is_valid_name:
# msg = 'Invalid volume name: %s is passed.' % vol_name
# raise exception.InvalidInput(reason=msg)
| 43.387727 | 79 | 0.567746 |
0c51d4fd680a6be2f21491d3d55f99e1a13769ea | 32,369 | py | Python | scripts/train_image_.py | shafieelab/SPyDERMAN | 1b3fe1d0fcb33dcaed85fb110c88575ffa6fb7b6 | [
"MIT"
] | 1 | 2021-01-26T18:07:56.000Z | 2021-01-26T18:07:56.000Z | scripts/train_image_.py | Deeksha-K/SPyDERMAN | 8cb4a3efc2b8706133f81e7bf878439110402434 | [
"MIT"
] | null | null | null | scripts/train_image_.py | Deeksha-K/SPyDERMAN | 8cb4a3efc2b8706133f81e7bf878439110402434 | [
"MIT"
] | 3 | 2021-01-26T18:07:39.000Z | 2021-04-07T22:07:01.000Z | import argparse
import csv
import os
import os.path as osp
import statistics
import tqdm
import time
from datetime import datetime
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import helper_utils.network as network
import helper_utils.loss as loss
import helper_utils.pre_process as prep
from sklearn.metrics import confusion_matrix
from torch.utils.data import DataLoader
import helper_utils.lr_schedule as lr_schedule
from helper_utils.data_list_m import ImageList
from helper_utils.logger import Logger
from helper_utils.sampler import ImbalancedDatasetSampler
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
parser.add_argument('--dset', type=str, default='COVID19', help="The dataset or source dataset used")
parser.add_argument('--trail', type=str, default='mb', help="The dataset or source dataset used")
parser.add_argument('--lr', type=float, default=0.005)
args = parser.parse_args()
seed = 0
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
dt_string = dt_string.replace("/", "_").replace(" ", "_").replace(":", "_").replace(".", "_")
dataset = args.dset
valid_or_test = "" # "valid or "test" or "" if whole dataset
test_on_source_ed3 = False
is_training = True
if valid_or_test == "test":
is_training = False
testing = "testing"
# testing = "train"
if testing == "testing":
is_training = False
print(dataset)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
log_output_dir_root = '../logs/' + dataset + '/'
results_output_dir_root = '../experimental results/' + dataset + '/'
models_output_dir_root = '../models/' + dataset + '/'
trial_number = args.trail + "_" + dataset + "_" + testing + "_" + dt_string
if dataset == 'HCV':
source_path = "../Data/HCV/txt_80_20_tile/HCV_train_80.txt"
valid_source_path = "../Data/HCV/txt_80_20_tile/HCV_val_20.txt"
target_path = "../Data/HCV/HCV_target_tile.txt"
no_of_classes = 2
elif dataset == 'HIV':
source_path = "../Data/HIV/txt_80_20_tile/HIV_train_80.txt"
valid_source_path = "../Data/HIV/txt_80_20_tile/HIV_val_20.txt"
target_path = "../Data/HIV/HIV_target_tile.txt"
no_of_classes = 2
elif dataset == 'ZIKA':
source_path = "../Data/ZIKA/txt_90_10_tile/ZIKA_train_90.txt"
valid_source_path = "../Data/ZIKA/txt_90_10_tile/ZIKA_val_10.txt"
target_path = "../Data/ZIKA/ZIKA_target_tile.txt"
no_of_classes = 2
elif dataset == 'HBV':
source_path = "../Data/HBV/txt_80_20_tile/HBV_train_80.txt"
valid_source_path = "../Data/HBV/txt_80_20_tile/HBV_val_20.txt"
target_path = "../Data/HBV/HBV_target_tile.txt"
no_of_classes = 2
elif dataset == 'COVID19':
source_path = "../Data/COVID19/txt_80_20_tile/COVID19_train_80.txt"
valid_source_path = "../Data/COVID19/txt_80_20_tile/COVID19_val_20.txt"
target_path = "../Data/COVID19/COVID19_target_tile.txt"
no_of_classes = 2
elif dataset == 'CAS12':
source_path = "../Data/CAS12/txt_80_20_tile/CAS12_train_80.txt"
valid_source_path = "../Data/CAS12/txt_80_20_tile/CAS12_val_20.txt"
target_path = "../Data/CAS12/CAS12_target_tile.txt"
no_of_classes = 2
else:
no_of_classes = None
net = 'Xception'
# net = 'ResNet50'
dset = dataset
lr_ = args.lr
gamma = 0.001
power = 0.75
# power = 0.9
momentum = 0.9
weight_decay = 0.0005
nesterov = True
optimizer = optim.Adam
config = {}
config['method'] = 'CDAN+E'
config["gpu"] = '0'
config["num_iterations"] = 10000
config["test_interval"] = 50
config["snapshot_interval"] = 5000
batch_size = 8
batch_size_test = 128
use_bottleneck = False
bottleneck_dim = 256
adv_lay_random = False
random_dim = 1024
new_cls = True
if not is_training:
valid_source_path = "../Data/Test/mb1/mb_test.txt"
target_path = "../Data/Test/mb1/mb_test.txt"
model_path_for_testing = "../Final Models/CDAN + GAN/COVID19/model_1600.pth.tar"
config["num_iterations"] = 0
best_itr = "testing"
print("Testing:")
config["best_itr"] = "testing"
print("num_iterations", config["num_iterations"])
header_list = ["trail no ", 'metric name',
'source_val_accuracy', 'source_val_loss', 'best_cm',
# 'val_accuracy_target', 'val_loss_target', 'best_cm_target',
"best_classifier_loss", "best_transfer_loss", "best_total_loss"
,
"best_itr"] + \
["lr", "gamma", "power", "momentum", "weight_decay", "nesterov", "optimizer",
"batch_size", "batch_size_test", "use_bottleneck", "bottleneck_dim", "adv_lay_random",
"random_dim",
"no_of_classes", "new_cls", "dset", "net", "source_path", "target_path", "output_path",
"model_path"
, "logs_path", "gpu", "test_interval", "seed"]
log_output_path = log_output_dir_root + net + '/' + 'trial-' + trial_number + '/'
trial_results_path = net + '/trial-' + trial_number + '/'
config["output_path"] = results_output_dir_root + trial_results_path
config["model_path"] = models_output_dir_root + trial_results_path
config["logs_path"] = log_output_path
if not os.path.exists(config["logs_path"]):
os.makedirs(config["logs_path"])
if is_training:
if not os.path.exists(config["model_path"]):
os.makedirs(config["model_path"])
# if not os.path.exists(config["output_path"]):
# os.makedirs(config["output_path"])
if not os.path.isfile(osp.join(log_output_dir_root, "log.csv")):
with open(osp.join(log_output_dir_root, "log.csv"), mode='w') as param_log_file:
param_log_writer = csv.writer(param_log_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
param_log_writer.writerow(header_list)
config["out_file"] = open(osp.join(config["logs_path"], "log.txt"), "w")
config["trial_parameters_log"] = csv.writer(open(osp.join(log_output_dir_root, "log.csv"), "a"))
config["prep"] = {"test_10crop": False, 'params': {"resize_size": 224, "crop_size": 224, 'alexnet': False}}
config["loss"] = {"trade_off": 1.0}
if "Xception" in net:
config["network"] = \
{"name": network.XceptionFc,
"params":
{"use_bottleneck": use_bottleneck,
"bottleneck_dim": bottleneck_dim,
"new_cls": new_cls}}
elif "ResNet50" in net:
config["network"] = {"name": network.ResNetFc,
"params":
{"resnet_name": net,
"use_bottleneck": use_bottleneck,
"bottleneck_dim": bottleneck_dim,
"new_cls": new_cls}}
config["loss"]["random"] = adv_lay_random
config["loss"]["random_dim"] = random_dim
if optimizer == optim.SGD:
config["optimizer"] = {"type": optim.SGD, "optim_params": {'lr': lr_, "momentum": momentum,
"weight_decay": weight_decay, "nesterov": nesterov},
"lr_type": "inv",
"lr_param": {"lr": lr_, "gamma": gamma, "power": power}}
elif optimizer == optim.Adam:
config["optimizer"] = {"type": optim.Adam, "optim_params": {'lr': lr_,
"weight_decay": weight_decay},
"lr_type": "inv",
"lr_param": {"lr": lr_, "gamma": gamma, "power": power}}
config["dataset"] = dset
config["data"] = {"source": {"list_path": source_path, "batch_size": batch_size},
"target": {"list_path": target_path, "batch_size": batch_size},
"test": {"list_path": target_path, "batch_size": batch_size_test},
"valid_source": {"list_path": valid_source_path, "batch_size": batch_size}}
config["optimizer"]["lr_param"]["lr"] = lr_
config["network"]["params"]["class_num"] = no_of_classes
config["out_file"].write(str(config))
config["out_file"].flush()
training_parameters = [lr_, gamma, power, momentum, weight_decay, nesterov, optimizer,
batch_size, batch_size_test, use_bottleneck, bottleneck_dim, adv_lay_random, random_dim,
no_of_classes, new_cls, dset, net, source_path, target_path, config["output_path"],
config["model_path"]
, config["logs_path"], config["gpu"], config["test_interval"], str(seed)]
print("source_path", source_path)
print("target_path", target_path)
print("lr_", lr_)
print('GPU', os.environ["CUDA_VISIBLE_DEVICES"], config["gpu"])
train(config)
| 42.646904 | 170 | 0.575489 |
0c51eb0b9b67869087426ffee62488bbc0029d3f | 1,230 | py | Python | src/freshchat/client/configuration.py | twyla-ai/python-freshchat | 5bb0ea730f82b63292688be61315b6b880896e1f | [
"MIT"
] | 4 | 2019-10-15T11:03:28.000Z | 2021-08-19T01:14:12.000Z | src/freshchat/client/configuration.py | twyla-ai/python-freshchat | 5bb0ea730f82b63292688be61315b6b880896e1f | [
"MIT"
] | 137 | 2019-10-18T04:36:21.000Z | 2022-03-21T04:11:18.000Z | src/freshchat/client/configuration.py | twyla-ai/python-freshchat | 5bb0ea730f82b63292688be61315b6b880896e1f | [
"MIT"
] | 1 | 2021-08-19T01:14:14.000Z | 2021-08-19T01:14:14.000Z | import os
from dataclasses import dataclass, field
from typing import AnyStr, Dict, Optional
from urllib.parse import urljoin
| 28.604651 | 80 | 0.64065 |
0c521cb77fbca7152db05ece3eddd9a49ae59322 | 20,120 | py | Python | get_headers.py | rupendrab/py_unstr_parse | 3cece3fb7ca969734bf5e60fe5846a7148ce8be4 | [
"MIT"
] | null | null | null | get_headers.py | rupendrab/py_unstr_parse | 3cece3fb7ca969734bf5e60fe5846a7148ce8be4 | [
"MIT"
] | null | null | null | get_headers.py | rupendrab/py_unstr_parse | 3cece3fb7ca969734bf5e60fe5846a7148ce8be4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.5
import sys
import re
import os
import csv
import numpy as np
from operator import itemgetter
from time import time
from multiprocessing import Pool
from extract_toc import parseargs
from predict_using_toc_mapper import Mapper, get_topic, read_model_file
from find_topics import toc_entries, get_summary_map, read_topics
import dateparser
import check_309
import parse_name
from get_ratings import Ratings, Ratings2
import delimiterwriter
from predict_using_subtopic_mapper import SubtopicPredictor
from analyze_topic import SubtopicReader
from headerutil import *
# NEWLINE_WITHIN_COLUMN = '\r\n'
# NEWLINE_WITHIN_COLUMN = '\r\n'
# CSV_LINE_TERMINATOR = '\r\n'
# CSV_FIELD_DELIMITER = ','
# FD_REPLACED = None
p_region = re.compile('(^|.*\s+)region(\s*[:\']\s*|\s+)(.*)?\s*$', re.IGNORECASE)
p_region_with_other = re.compile('(.*)?\s{5,}(certificate\s+num[bh]e[ir]|certificate|charter\s+num[bh]er|charter|field\s+offic\s*e|url)\s*:?\s*(.*)?\s*$', re.IGNORECASE)
p_blank = re.compile('^\s*$')
p_from_first_uppercase_char = re.compile('^.*?([A-Z].*)$', re.MULTILINE)
p_cert_direct = re.compile('(^|^.*\s+)(certificate\s+number)(\s*:\s*|\s+)(\w+).*$', re.IGNORECASE)
p_region_direct = re.compile('(^|^.*\s+)(region)(\s*:\s*|\s+)(\w+).*$', re.IGNORECASE)
p_patterns_str = {
'bank_name' : [
'bank\s+name',
'institution\s+name',
'name'
],
'bank_location': [
'location'
],
'examiner_in_charge': [
'examiner[\s\-]*in[\s\-]*charge'
],
'exam_start_date': [
'examination[\s\-]*start[\s\-]*date'
],
'exam_date': [
'examination[\s\-]*date'
],
'exam_as_of_date': [
'examination[\s\-]*as[\s\-]*of[\s\-]*date'
]
}
all_matched = {}
for k,patterns in p_patterns_str.items():
all_matched[k] = []
p_patterns = {}
for k,patterns in p_patterns_str.items():
p_patterns[k] = [re.compile('(^|.*\s+)' + p + '(\s*[:\'][\'\s\.]*|[\'\s\.]+)' + '(.*)?\s*$', re.IGNORECASE) for p in patterns]
"""
def best_match(pat):
# print('In best match', pat)
all_m = all_matched.get(pat)
if (all_m):
l = sorted(all_matched.get(pat), key=lambda x: (-1 * p_patterns_str[pat].index(x[0]), x[2]), reverse=True)
# print('Best match sorted list', l)
if (l):
if (l[0][2] > 0): ## Quality more than zero
return l[0]
"""
"""
def best_match_text(pat):
bm_tuple = best_match(pat)
if (bm_tuple and len(bm_tuple) == 3):
return bm_tuple[1]
else:
return ""
"""
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
| 31.885895 | 174 | 0.655964 |
0c52238d0be9f0af598966fd7664c6c79e85f8cb | 6,214 | py | Python | dalle_pytorch/dalle_pytorch.py | tensorfork/DALLE-pytorch | 0e8f5d9a7fe054c587ed91d9c9616c7a883f393b | [
"MIT"
] | 1 | 2021-06-22T08:26:20.000Z | 2021-06-22T08:26:20.000Z | dalle_pytorch/dalle_pytorch.py | tensorfork/DALLE-pytorch | 0e8f5d9a7fe054c587ed91d9c9616c7a883f393b | [
"MIT"
] | null | null | null | dalle_pytorch/dalle_pytorch.py | tensorfork/DALLE-pytorch | 0e8f5d9a7fe054c587ed91d9c9616c7a883f393b | [
"MIT"
] | null | null | null | from math import log2
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from x_transformers import Encoder, Decoder
# helpers
# classes
# main classes
| 30.019324 | 117 | 0.587383 |
0c52795432861cbcf4e3ec45d893ec1acc331585 | 7,668 | py | Python | aiida_phonopy/parsers/phonopy.py | giovannipizzi/aiida-phonopy | 26e419c34415c68f815fa81ce2ac644aa387ae72 | [
"MIT"
] | null | null | null | aiida_phonopy/parsers/phonopy.py | giovannipizzi/aiida-phonopy | 26e419c34415c68f815fa81ce2ac644aa387ae72 | [
"MIT"
] | null | null | null | aiida_phonopy/parsers/phonopy.py | giovannipizzi/aiida-phonopy | 26e419c34415c68f815fa81ce2ac644aa387ae72 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from aiida.orm.data.folder import FolderData
from aiida.parsers.parser import Parser
from aiida.common.datastructures import calc_states
from aiida.parsers.exceptions import OutputParsingError
from aiida.common.exceptions import UniquenessError
import numpy
from aiida.orm.data.array import ArrayData
from aiida.orm.data.array.bands import BandsData
from aiida.orm.data.array.kpoints import KpointsData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.structure import StructureData
import json
from aiida_phonopy.calculations.phonopy import PhonopyCalculation
__copyright__ = u"Copyright (c), 2014-2015, cole Polytechnique Fdrale de Lausanne (EPFL), Switzerland, Laboratory of Theory and Simulation of Materials (THEOS). All rights reserved."
__license__ = "Non-Commercial, End-User Software License Agreement, see LICENSE.txt file"
__version__ = "0.4.1"
| 36.865385 | 185 | 0.594027 |
0c52883ec5869dd4ebaf9438c8845a04d78492ff | 1,128 | py | Python | bagua/bagua_define.py | jphgxq/bagua | 3444f79b8fe9c9d2975a8994a1a613ebd14c3d33 | [
"MIT"
] | 1 | 2021-07-12T03:33:38.000Z | 2021-07-12T03:33:38.000Z | bagua/bagua_define.py | jphgxq/bagua | 3444f79b8fe9c9d2975a8994a1a613ebd14c3d33 | [
"MIT"
] | null | null | null | bagua/bagua_define.py | jphgxq/bagua | 3444f79b8fe9c9d2975a8994a1a613ebd14c3d33 | [
"MIT"
] | null | null | null | import enum
from typing import List
import sys
if sys.version_info >= (3, 9):
from typing import TypedDict # pytype: disable=not-supported-yet
else:
from typing_extensions import TypedDict # pytype: disable=not-supported-yet
from pydantic import BaseModel
def get_tensor_declaration_bytes(td: TensorDeclaration) -> int:
dtype_unit_size = {
TensorDtype.F32.value: 4,
TensorDtype.F16.value: 2,
TensorDtype.U8.value: 1,
}
return td["num_elements"] * dtype_unit_size[td["dtype"]]
| 22.56 | 80 | 0.656915 |
0c53db086eb0eb7f6a00e60d3b14eacbfe7ba92e | 97 | py | Python | instaphotos/apps.py | LekamCharity/insta-IG | 0302440df3b2029297af54eb9c56090f82232973 | [
"MIT"
] | null | null | null | instaphotos/apps.py | LekamCharity/insta-IG | 0302440df3b2029297af54eb9c56090f82232973 | [
"MIT"
] | null | null | null | instaphotos/apps.py | LekamCharity/insta-IG | 0302440df3b2029297af54eb9c56090f82232973 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 16.166667 | 35 | 0.773196 |
0c5539475c0da1f3dfc53cbf5dc335c43077d9cf | 2,835 | py | Python | services/backend/expiring_links/tests/test_expiring_link_generator_serializer.py | patpio/drf_images_api | ef689bac10ce8b9d2f03d6b647fa4bbd70b02f1c | [
"Beerware"
] | 1 | 2022-02-27T16:34:46.000Z | 2022-02-27T16:34:46.000Z | services/backend/expiring_links/tests/test_expiring_link_generator_serializer.py | patpio/drf_images_api | ef689bac10ce8b9d2f03d6b647fa4bbd70b02f1c | [
"Beerware"
] | null | null | null | services/backend/expiring_links/tests/test_expiring_link_generator_serializer.py | patpio/drf_images_api | ef689bac10ce8b9d2f03d6b647fa4bbd70b02f1c | [
"Beerware"
] | null | null | null | import pytest
from expiring_links.serializers import ExpiringLinkGeneratorSerializer
| 42.954545 | 118 | 0.71358 |
0c553d8f4165e63fa177620f1fa3f79bb1b9cb45 | 91,609 | py | Python | com/vmware/nsx/trust_management_client.py | adammillerio/vsphere-automation-sdk-python | c07e1be98615201139b26c28db3aa584c4254b66 | [
"MIT"
] | null | null | null | com/vmware/nsx/trust_management_client.py | adammillerio/vsphere-automation-sdk-python | c07e1be98615201139b26c28db3aa584c4254b66 | [
"MIT"
] | null | null | null | com/vmware/nsx/trust_management_client.py | adammillerio/vsphere-automation-sdk-python | c07e1be98615201139b26c28db3aa584c4254b66 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.trust_management.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
| 44.40572 | 156 | 0.596492 |
0c5549700625606ae1bd959bf730c22c941eb303 | 4,255 | py | Python | bottleneck/tests/list_input_test.py | stroxler/bottleneck | 6e91bcb8a21170588ee9a3f2c425a4e307ae05de | [
"BSD-2-Clause"
] | 2 | 2015-05-26T09:06:32.000Z | 2015-05-26T09:06:46.000Z | bottleneck/tests/list_input_test.py | stroxler/bottleneck | 6e91bcb8a21170588ee9a3f2c425a4e307ae05de | [
"BSD-2-Clause"
] | null | null | null | bottleneck/tests/list_input_test.py | stroxler/bottleneck | 6e91bcb8a21170588ee9a3f2c425a4e307ae05de | [
"BSD-2-Clause"
] | null | null | null | "Test list input."
# For support of python 2.5
from __future__ import with_statement
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
import bottleneck as bn
# ---------------------------------------------------------------------------
# Check that functions can handle list input
def unit_maker(func, func0, args=tuple()):
"Test that bn.xxx gives the same output as bn.slow.xxx for list input."
msg = '\nfunc %s | input %s | shape %s\n'
msg += '\nInput array:\n%s\n'
for i, arr in enumerate(lists()):
argsi = tuple([list(arr)] + list(args))
actual = func(*argsi)
desired = func0(*argsi)
tup = (func.__name__, 'a'+str(i), str(np.array(arr).shape), arr)
err_msg = msg % tup
assert_array_almost_equal(actual, desired, err_msg=err_msg)
def test_nn():
"Test nn."
a = [[1, 2], [3, 4]]
a0 = [1, 2]
assert_equal(bn.nn(a, a0), bn.slow.nn(a, a0))
| 22.632979 | 77 | 0.636193 |
0c587de94c3ee270415110f012b7d77cb256c5a4 | 1,475 | py | Python | hanzo/warcindex.py | ukwa/warctools | f74061382d6bc37b6eec889a3aec26c5748d90d3 | [
"MIT"
] | 1 | 2020-09-03T00:51:50.000Z | 2020-09-03T00:51:50.000Z | hanzo/warcindex.py | martinsbalodis/warc-tools | d9d5e708e00bd0f6d9d0c2d95cbc9332f51b05e4 | [
"MIT"
] | null | null | null | hanzo/warcindex.py | martinsbalodis/warc-tools | d9d5e708e00bd0f6d9d0c2d95cbc9332f51b05e4 | [
"MIT"
] | 1 | 2021-04-12T01:45:14.000Z | 2021-04-12T01:45:14.000Z | #!/usr/bin/env python
"""warcindex - dump warc index"""
import os
import sys
import sys
import os.path
from optparse import OptionParser
from .warctools import WarcRecord, expand_files
parser = OptionParser(usage="%prog [options] warc warc warc")
parser.add_option("-l", "--limit", dest="limit")
parser.add_option("-O", "--output-format", dest="output_format", help="output format (ignored)")
parser.add_option("-o", "--output", dest="output_format", help="output file (ignored)")
parser.add_option("-L", "--log-level", dest="log_level")
parser.set_defaults(output=None, limit=None, log_level="info")
if __name__ == '__main__':
run()
| 23.412698 | 114 | 0.633898 |
0c599f149ff2c8a006a46a9e33e3ef181a3cc037 | 1,469 | py | Python | tsdata/migrations/0001_initial.py | OpenDataPolicingNC/Traffic-Stops | 74e0d16ad2ac32addca6f04d34c2ddf36d023990 | [
"MIT"
] | 25 | 2015-09-12T23:10:52.000Z | 2021-03-24T08:39:46.000Z | tsdata/migrations/0001_initial.py | OpenDataPolicingNC/Traffic-Stops | 74e0d16ad2ac32addca6f04d34c2ddf36d023990 | [
"MIT"
] | 159 | 2015-07-01T03:57:23.000Z | 2021-04-17T21:09:19.000Z | tsdata/migrations/0001_initial.py | copelco/NC-Traffic-Stops | 74e0d16ad2ac32addca6f04d34c2ddf36d023990 | [
"MIT"
] | 8 | 2015-10-02T16:56:40.000Z | 2020-10-18T01:16:29.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| 40.805556 | 153 | 0.582709 |
0c5b11a856de6baa5333d1f6f60e74187acb3fcd | 1,836 | py | Python | api/tests/opentrons/protocol_engine/execution/test_run_control_handler.py | mrod0101/opentrons | 6450edb0421f1c2484c292f8583602d8f6fd13b8 | [
"Apache-2.0"
] | 235 | 2017-10-27T20:37:27.000Z | 2022-03-30T14:09:49.000Z | api/tests/opentrons/protocol_engine/execution/test_run_control_handler.py | koji/opentrons | 0f339f45de238183b2c433e67f839363d5177582 | [
"Apache-2.0"
] | 8,425 | 2017-10-26T15:25:43.000Z | 2022-03-31T23:54:26.000Z | api/tests/opentrons/protocol_engine/execution/test_run_control_handler.py | mrod0101/opentrons | 6450edb0421f1c2484c292f8583602d8f6fd13b8 | [
"Apache-2.0"
] | 130 | 2017-11-09T21:02:37.000Z | 2022-03-15T18:01:24.000Z | """Run control side-effect handler."""
import pytest
from decoy import Decoy
from opentrons.protocol_engine.state import StateStore
from opentrons.protocol_engine.actions import ActionDispatcher, PauseAction
from opentrons.protocol_engine.execution.run_control import RunControlHandler
from opentrons.protocol_engine.state import EngineConfigs
async def test_pause(
decoy: Decoy,
state_store: StateStore,
action_dispatcher: ActionDispatcher,
subject: RunControlHandler,
) -> None:
"""It should be able to execute a pause."""
decoy.when(state_store.get_configs()).then_return(EngineConfigs(ignore_pause=False))
await subject.pause()
decoy.verify(
action_dispatcher.dispatch(PauseAction()),
await state_store.wait_for(condition=state_store.commands.get_is_running),
)
| 30.6 | 88 | 0.751634 |
0c5b7ae73a2b618a79092df65cc9600f76dbf5e0 | 510 | py | Python | Datasets/Generator/Healthcare/mergedrug.py | undraaa/m2bench | b661b61ca04470ed1c9c50531ce760a2cd5000d9 | [
"RSA-MD"
] | null | null | null | Datasets/Generator/Healthcare/mergedrug.py | undraaa/m2bench | b661b61ca04470ed1c9c50531ce760a2cd5000d9 | [
"RSA-MD"
] | null | null | null | Datasets/Generator/Healthcare/mergedrug.py | undraaa/m2bench | b661b61ca04470ed1c9c50531ce760a2cd5000d9 | [
"RSA-MD"
] | 1 | 2021-11-29T10:31:36.000Z | 2021-11-29T10:31:36.000Z | import json
import glob
| 26.842105 | 76 | 0.592157 |
0c5c225bea97b848df7068538bc1df5271634638 | 10,326 | py | Python | tests/test_rundramatiq_command.py | BradleyKirton/django_dramatiq | 93a4a9ae39aee643cc4a987b18030ad8d1fc8480 | [
"Apache-2.0"
] | null | null | null | tests/test_rundramatiq_command.py | BradleyKirton/django_dramatiq | 93a4a9ae39aee643cc4a987b18030ad8d1fc8480 | [
"Apache-2.0"
] | null | null | null | tests/test_rundramatiq_command.py | BradleyKirton/django_dramatiq | 93a4a9ae39aee643cc4a987b18030ad8d1fc8480 | [
"Apache-2.0"
] | null | null | null | import os
import sys
from io import StringIO
from unittest.mock import patch
from django.core.management import call_command
from django_dramatiq.management.commands import rundramatiq
def test_rundramatiq_command_autodiscovers_additional_modules(settings):
settings.DRAMATIQ_AUTODISCOVER_MODULES = ("services", )
assert rundramatiq.Command().discover_tasks_modules() == [
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
"tests.testapp4.services",
] | 35.122449 | 93 | 0.673833 |
0c5c924b0477b69417c6a0474627207f48573e2f | 3,620 | py | Python | WordRPG/data/states/new_game.py | ChristopherLBruce/WordRPG | e545cf313afc430e8191a7c813db9ee9759a6fd4 | [
"Apache-2.0"
] | 2 | 2018-12-15T15:06:35.000Z | 2022-02-09T00:19:28.000Z | WordRPG/data/states/new_game.py | ChristopherLBruce/WordRPG | e545cf313afc430e8191a7c813db9ee9759a6fd4 | [
"Apache-2.0"
] | null | null | null | WordRPG/data/states/new_game.py | ChristopherLBruce/WordRPG | e545cf313afc430e8191a7c813db9ee9759a6fd4 | [
"Apache-2.0"
] | null | null | null | """ 'new_game' state. Includes character creation. """
from ...engine.gui.screen import const, Screen
from ...engine.state_machine import State
| 27.424242 | 86 | 0.551381 |
0c5da302d0cfb597c70f8c34fe51028d86ae2e18 | 2,106 | py | Python | guestbook.py | Tycx2ry/FKRTimeline | 11e784f4a3800336abf19c42c15a06c86af970bd | [
"Apache-2.0"
] | null | null | null | guestbook.py | Tycx2ry/FKRTimeline | 11e784f4a3800336abf19c42c15a06c86af970bd | [
"Apache-2.0"
] | null | null | null | guestbook.py | Tycx2ry/FKRTimeline | 11e784f4a3800336abf19c42c15a06c86af970bd | [
"Apache-2.0"
] | null | null | null | #!/usr /bin/env python
# -*- coding: utf-8 -*-
__author__ = 'jiangge'
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask import Flask, request, render_template, redirect
application = Flask(__name__)
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///guestbook.db'
application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(application)
def save_data(name, comment, url, create_at):
"""
save data from form submitted
"""
db.session.add(posts(name, comment, url, create_at))
db.session.commit()
def load_data(page):
"""
load saved data
"""
record_list = posts.query.paginate(page, per_page=5, error_out=True)
return record_list
if __name__ == '__main__':
if True:
db.drop_all()
db.create_all()
db.session.add(posts(text=application.config["FIRST_MESSAGE"]))
db.session.commit()
application.run('0.0.0.0', port=80, debug=True)
| 27.350649 | 73 | 0.632479 |
0c5db28673060acc0246927ee800263dd3a7f124 | 707 | py | Python | dash_test_runner/testapp/migrations/0001_initial.py | Ilhasoft/dash | d9b900cc08d9238304a226d837a4c90dec6b46fc | [
"BSD-3-Clause"
] | null | null | null | dash_test_runner/testapp/migrations/0001_initial.py | Ilhasoft/dash | d9b900cc08d9238304a226d837a4c90dec6b46fc | [
"BSD-3-Clause"
] | null | null | null | dash_test_runner/testapp/migrations/0001_initial.py | Ilhasoft/dash | d9b900cc08d9238304a226d837a4c90dec6b46fc | [
"BSD-3-Clause"
] | 1 | 2018-04-12T20:18:34.000Z | 2018-04-12T20:18:34.000Z | from django.db import migrations, models
| 32.136364 | 114 | 0.575672 |
0c5e4893a61a507b2525a971a14202b85e75581a | 6,596 | py | Python | tests/test_integration.py | Radico/business-rules | 7dd0551e8b33234fcea0abaf04f9982eb6f3426f | [
"MIT"
] | null | null | null | tests/test_integration.py | Radico/business-rules | 7dd0551e8b33234fcea0abaf04f9982eb6f3426f | [
"MIT"
] | null | null | null | tests/test_integration.py | Radico/business-rules | 7dd0551e8b33234fcea0abaf04f9982eb6f3426f | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from business_rules.actions import rule_action, BaseActions
from business_rules.engine import check_condition, run_all
from business_rules.fields import FIELD_TEXT, FIELD_NUMERIC, FIELD_SELECT
from business_rules.variables import BaseVariables, string_rule_variable, numeric_rule_variable, boolean_rule_variable
from . import TestCase
| 27.256198 | 118 | 0.575045 |
0c5f5d9ac8242efc8ccf5bafaa6e567b8ee2cc86 | 5,808 | py | Python | cog/cli/user_argparser.py | Demonware/cog | b206066ebfd5faae000b1a1708988db8ca592b94 | [
"BSD-3-Clause"
] | 2 | 2016-06-02T02:15:56.000Z | 2016-08-16T08:37:27.000Z | cog/cli/user_argparser.py | Demonware/cog | b206066ebfd5faae000b1a1708988db8ca592b94 | [
"BSD-3-Clause"
] | null | null | null | cog/cli/user_argparser.py | Demonware/cog | b206066ebfd5faae000b1a1708988db8ca592b94 | [
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import argparse
arg_no = len(sys.argv)
tool_parser = argparse.ArgumentParser(add_help=False)
tool_subparsers = tool_parser.add_subparsers(help='commands', dest='command')
# The rename command.
rename_parser = tool_subparsers.add_parser('rename', help='rename an existing user account.')
rename_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
rename_parser.add_argument(
'--new-name', '-n', action='store', dest='newName', metavar='<new account name>'
)
# The add command.
add_parser = tool_subparsers.add_parser('add', help='add new user account to the directory.')
add_parser.add_argument(
'--type', '-t', action='store', default='generic', dest='account_type', metavar='<type of account>'
)
add_parser.add_argument(
'name', action='store', help='account name', metavar='<name>'
)
group1_parser = add_parser.add_argument_group('account specific')
group1_parser.add_argument(
'--password', '-P', action='store', dest='userPassword', metavar='<account\'s owner password>'
)
group1_parser.add_argument(
'--home', action='store', dest='homeDirectory', metavar='<path to the home directory>'
)
group1_parser.add_argument(
'--shell', action='store', dest='loginShell', metavar='<path to the shell interpreter>'
)
group1_parser = add_parser.add_argument_group('personal information')
group1_parser.add_argument(
'--phone-no', action='append', dest='telephoneNumber', metavar='<phone number>'
)
group1_parser.add_argument(
'--last-name', action='store', dest='sn', metavar='<account owner\'s last name>'
)
group1_parser.add_argument(
'--first-name', action='store', dest='givenName', metavar='<account owner\'s first name>'
)
group1_parser.add_argument(
'--organization', '-o', action='store', dest='o', metavar='<organization>'
)
group1_parser.add_argument(
'--email', action='append', dest='mail', metavar='<email>'
)
group1_parser.add_argument(
'--full-name', action='store', dest='cn', metavar='<account owner\'s full name>'
)
group1_parser = add_parser.add_argument_group('uid and group management')
group1_parser.add_argument(
'--uid', action='store', dest='uid', metavar='<user\'s uid>'
)
group1_parser.add_argument(
'--add-group', action='append', dest='group', metavar='<secondary group>'
)
group1_parser.add_argument(
'--uid-number', action='store', dest='uidNumber', metavar='<user id number>'
)
group1_parser.add_argument(
'--gid', action='store', dest='gidNumber', metavar='<primary group id>'
)
# The show command.
show_parser = tool_subparsers.add_parser('show', help='show account data')
show_parser.add_argument(
'name', action='append', nargs='*', help='account name'
)
show_parser.add_argument(
'--verbose', '-v', action='store_true', dest='verbose', help='be verbose about it'
)
# The edit command.
edit_parser = tool_subparsers.add_parser('edit', help='edit existing user data in the directory')
edit_parser.add_argument(
'--type', '-t', action='store', dest='account_type', metavar='<change account type>'
)
edit_parser.add_argument(
'name', action='store', help='account name'
)
group1_parser = edit_parser.add_argument_group('account specific')
group1_parser.add_argument(
'--reset-password', '-r', dest='resetPassword', action='store_true', help='<reset user\'s password>'
)
group1_parser.add_argument(
'--home', action='store', dest='homeDirectory', metavar='<new home directory path>'
)
group1_parser.add_argument(
'--shell', action='store', dest='loginShell', metavar='<new shell interpreter path>'
)
group1_parser = edit_parser.add_argument_group('personal information')
group1_parser.add_argument(
'--first-name', action='store', dest='givenName', metavar='<new first name>'
)
group1_parser.add_argument(
'--del-email', action='append', dest='delMail', metavar='<remove email address>'
)
group1_parser.add_argument(
'--last-name', action='store', dest='sn', metavar='<new last name>'
)
group1_parser.add_argument(
'--add-email', action='append', dest='addMail', metavar='<add new email address>'
)
group1_parser.add_argument(
'--del-phone-no', action='append', dest='delTelephoneNumber', metavar='<phone number to remove>'
)
group1_parser.add_argument(
'--organization', '-o', action='store', dest='o', metavar='<organization>'
)
group1_parser.add_argument(
'--add-phone-no', action='append', dest='addTelephoneNumber', metavar='<phone number to add>'
)
group1_parser.add_argument(
'--full-name', action='store', dest='cn', metavar='<new full name>'
)
group1_parser = edit_parser.add_argument_group('uid and group management')
group1_parser.add_argument(
'--del-group', action='append', dest='delgroup', metavar='<remove user from the group>'
)
group1_parser.add_argument(
'--group-id', action='store', dest='gidNumber', metavar='<change primary group ID>'
)
group1_parser.add_argument(
'--add-group', action='append', dest='addgroup', metavar='<add user to the group>'
)
group1_parser.add_argument(
'--uid-number', action='store', dest='uidNumber', metavar='<change user ID number>'
)
group1_parser.add_argument(
'--uid', action='store', dest='uid', metavar='<user\'s uid>'
)
# The retire command.
retire_parser = tool_subparsers.add_parser('retire', help='retire an existing account and remove all its privileges.')
retire_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
# The type command.
type_parser = tool_subparsers.add_parser('type', help='manage user types')
type_parser.add_argument(
'--list', '-l', action='store_true', dest='list_types', help='list user types'
)
# The remove command.
remove_parser = tool_subparsers.add_parser('remove', help='remove an existing account.')
remove_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
| 35.2 | 118 | 0.722968 |
0c605a349671fad2588ca9a0e3c2afed9c2453f5 | 6,235 | py | Python | custom_components/wisersmart/climate.py | tomtomfx/wiserSmartForHA | 9878840b073250302e583bd2f6040a825de97803 | [
"MIT"
] | 1 | 2020-10-06T19:49:59.000Z | 2020-10-06T19:49:59.000Z | custom_components/wisersmart/climate.py | tomtomfx/wiserSmartForHA | 9878840b073250302e583bd2f6040a825de97803 | [
"MIT"
] | 1 | 2020-10-06T20:18:32.000Z | 2020-10-24T19:50:53.000Z | custom_components/wisersmart/climate.py | tomtomfx/wiserSmartForHA | 9878840b073250302e583bd2f6040a825de97803 | [
"MIT"
] | 1 | 2021-04-12T16:37:40.000Z | 2021-04-12T16:37:40.000Z | """
Climate Platform Device for Wiser Smart
https://github.com/tomtomfx/wiserSmartForHA
thomas.fayoux@gmail.com
"""
import asyncio
import logging
import voluptuous as vol
from functools import partial
from ruamel.yaml import YAML as yaml
from homeassistant.components.climate import ClimateEntity
from homeassistant.core import callback
from homeassistant.components.climate.const import (
SUPPORT_TARGET_TEMPERATURE,
ATTR_CURRENT_TEMPERATURE,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import (
_LOGGER,
DOMAIN,
MANUFACTURER,
ROOM,
WISER_SMART_SERVICES,
)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
""" Definition of WiserSmartRoom """
| 28.865741 | 94 | 0.645549 |
0c60917a4d7a8f1d00442aa352ab85caf9e37f11 | 4,765 | py | Python | src/dataset/utils/process_df.py | Fkaneko/kaggle_g2net_gravitational_wave_detection- | 8bb32cc675e6b56171da8a3754fffeda41e934bb | [
"Apache-2.0"
] | null | null | null | src/dataset/utils/process_df.py | Fkaneko/kaggle_g2net_gravitational_wave_detection- | 8bb32cc675e6b56171da8a3754fffeda41e934bb | [
"Apache-2.0"
] | null | null | null | src/dataset/utils/process_df.py | Fkaneko/kaggle_g2net_gravitational_wave_detection- | 8bb32cc675e6b56171da8a3754fffeda41e934bb | [
"Apache-2.0"
] | null | null | null | import os
from functools import partial
from multiprocessing import Pool
from typing import Any, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
from tqdm import tqdm
from src.dataset.utils.waveform_preprocessings import preprocess_strain
def id_2_path(
    image_id: str,
    is_train: bool = True,
    data_dir: str = "../input/g2net-gravitational-wave-detection",
) -> str:
    """
    Build the .npy file path for a sample id: the first three characters
    of the id form the nested directory levels under train/ or test/.

    modify from https://www.kaggle.com/ihelon/g2net-eda-and-modeling
    """
    split = "train" if is_train else "test"
    first, second, third = image_id[0], image_id[1], image_id[2]
    return f"{data_dir}/{split}/{first}/{second}/{third}/{image_id}.npy"
def get_site_metrics(
    df: pd.DataFrame,
    interp_psd: Optional[Callable] = None,
    psds: Optional[np.ndarray] = None,
    window: str = "tukey",
    fs: int = 2048,
    fband: Optional[List[int]] = None,
    psd_cache_path_suffix: Optional[str] = None,
    num_workers: int = 8,
):
    """
    Compute per-id aggregate features (via ``get_agg_feats``) and merge
    them back onto *df*.

    df: the complete df; must contain "id" and "path" columns.
    interp_psd / psds: optional PSD information forwarded to get_agg_feats.
    window: taper window name forwarded to get_agg_feats.
    fs: sampling rate in Hz.
    fband: [low, high] band edges in Hz; defaults to [10, 912].
    psd_cache_path_suffix: optional cache-file suffix forwarded downstream.
    num_workers: >1 runs the extraction in a multiprocessing pool.

    modify from
    https://www.kaggle.com/andradaolteanu/g2net-searching-the-sky-pytorch-effnet-w-meta
    """
    if fband is None:
        # Avoid a shared mutable default argument.
        fband = [10, 912]
    func_ = partial(
        get_agg_feats,
        interp_psd=interp_psd,
        psds=psds,
        window=window,
        fs=fs,
        fband=fband,
        psd_cache_path_suffix=psd_cache_path_suffix,
    )
    paths = df["path"].tolist()
    if num_workers > 1:
        # imap preserves input order, so result rows stay aligned with df.
        with Pool(processes=num_workers) as pool:
            agg_dicts = list(
                tqdm(
                    pool.imap(func_, paths),
                    total=len(paths),
                )
            )
    else:
        # Sequential fallback (useful for debugging); the original looped
        # over (id, path) pairs but never used the id.
        agg_dicts = [func_(path) for path in tqdm(paths, total=len(paths))]
    agg_df = pd.DataFrame(agg_dicts)
    # NOTE(review): assumes each dict from get_agg_feats carries an "id"
    # key for the merge — confirm in get_agg_feats.
    df = pd.merge(df, agg_df, on="id")
    return df
| 28.532934 | 87 | 0.570619 |
0c60c978bb3233d48fef80aac1fbd85b7650f54f | 637 | py | Python | sa/migrations/0051_managedobject_set_profile.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/migrations/0051_managedobject_set_profile.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/migrations/0051_managedobject_set_profile.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# managedobject set profile
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.migration.base import BaseMigration
| 37.470588 | 90 | 0.470958 |
0c6180591c4611e118c4ac0d8c026f5d2d7c99fa | 305 | py | Python | oct2py/compat.py | sdvillal/oct2py | f7aa89b909cbb5959ddedf3ab3e743898eac3d45 | [
"MIT"
] | 8 | 2015-10-16T23:28:16.000Z | 2020-06-19T18:49:18.000Z | oct2py/compat.py | sdvillal/oct2py | f7aa89b909cbb5959ddedf3ab3e743898eac3d45 | [
"MIT"
] | 8 | 2015-06-25T20:57:56.000Z | 2020-04-03T22:33:16.000Z | oct2py/compat.py | sdvillal/oct2py | f7aa89b909cbb5959ddedf3ab3e743898eac3d45 | [
"MIT"
] | 6 | 2015-04-21T12:23:44.000Z | 2021-10-01T00:08:47.000Z | # -*- coding: utf-8 -*-
import sys
PY2 = sys.version[0] == '2'
PY3 = sys.version[0] == '3'
if PY2:
unicode = unicode
long = long
from StringIO import StringIO
import Queue as queue
else: # pragma : no cover
unicode = str
long = int
from io import StringIO
import queue
| 16.944444 | 33 | 0.606557 |
0c61d0a37539223a22a77c96706aa91e5bab6637 | 1,563 | py | Python | lambda_functions/compute/campaign/aws.py | pierrealixt/MapCampaigner | 7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad | [
"BSD-3-Clause"
] | null | null | null | lambda_functions/compute/campaign/aws.py | pierrealixt/MapCampaigner | 7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad | [
"BSD-3-Clause"
] | 1 | 2018-07-24T13:57:03.000Z | 2018-07-24T13:57:03.000Z | lambda_functions/compute/campaign/aws.py | pierrealixt/MapCampaigner | 7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad | [
"BSD-3-Clause"
] | null | null | null | import os
import json
import boto3
| 24.809524 | 78 | 0.515035 |