hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
127d60f439a2eeaeea97213b05b97e925b002613 | 15,790 | py | Python | osprofiler/tests/unit/drivers/test_ceilometer.py | charliebr30/osprofiler | cffca4e29e373e3f09f2ffdd458761183a851569 | [
"Apache-2.0"
] | null | null | null | osprofiler/tests/unit/drivers/test_ceilometer.py | charliebr30/osprofiler | cffca4e29e373e3f09f2ffdd458761183a851569 | [
"Apache-2.0"
] | 1 | 2017-04-15T22:16:06.000Z | 2017-04-15T22:16:06.000Z | osprofiler/tests/unit/drivers/test_ceilometer.py | shwsun/osprofiler | 46d29fc5ab8a4068217e399883f39cdd443a7500 | [
"Apache-2.0"
] | 1 | 2020-02-17T09:48:43.000Z | 2020-02-17T09:48:43.000Z | # Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osprofiler.drivers.ceilometer import Ceilometer
from osprofiler.tests import test
| 37.240566 | 79 | 0.338252 |
127dce97d99e34df63ba730d1cd14233e203885a | 2,271 | py | Python | threshold.py | jiep/unicode-similarity | a32a031f96dce2b8a52a8ff4b5365c768c016fc6 | [
"MIT"
] | 1 | 2019-02-22T10:31:51.000Z | 2019-02-22T10:31:51.000Z | threshold.py | jiep/unicode-similarity | a32a031f96dce2b8a52a8ff4b5365c768c016fc6 | [
"MIT"
] | null | null | null | threshold.py | jiep/unicode-similarity | a32a031f96dce2b8a52a8ff4b5365c768c016fc6 | [
"MIT"
] | 1 | 2020-12-15T15:34:43.000Z | 2020-12-15T15:34:43.000Z | from pathlib import Path
import numpy as np
import pickle
import argparse
import errno
import sys
if __name__ == '__main__':
main()
| 28.037037 | 79 | 0.589608 |
127def7299a4b8a5f141ed18533a55c708f10769 | 1,813 | py | Python | y2019/control_loops/python/wrist.py | Ewpratten/frc_971_mirror | 3a8a0c4359f284d29547962c2b4c43d290d8065c | [
"BSD-2-Clause"
] | null | null | null | y2019/control_loops/python/wrist.py | Ewpratten/frc_971_mirror | 3a8a0c4359f284d29547962c2b4c43d290d8065c | [
"BSD-2-Clause"
] | null | null | null | y2019/control_loops/python/wrist.py | Ewpratten/frc_971_mirror | 3a8a0c4359f284d29547962c2b4c43d290d8065c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
from aos.util.trapezoid_profile import TrapezoidProfile
from frc971.control_loops.python import control_loop
from frc971.control_loops.python import angular_system
from frc971.control_loops.python import controls
import copy
import numpy
import sys
from matplotlib import pylab
import gflags
import glog
FLAGS = gflags.FLAGS
try:
gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
except gflags.DuplicateFlagError:
pass
# Wrist alone
# 0.1348
# Wrist with ball
# 0.3007
# Wrist with hatch
# 0.446
kWrist = angular_system.AngularSystemParams(
name='Wrist',
motor=control_loop.BAG(),
G=(6.0 / 60.0) * (20.0 / 100.0) * (24.0 / 84.0),
J=0.30,
q_pos=0.20,
q_vel=5.0,
kalman_q_pos=0.12,
kalman_q_vel=2.0,
kalman_q_voltage=4.0,
kalman_r_position=0.05)
kWristBall = copy.copy(kWrist)
kWristBall.J = 0.4007
kWristBall.q_pos = 0.55
kWristBall.q_vel = 5.0
kWristPanel = copy.copy(kWrist)
kWristPanel.J = 0.446
kWristModel = copy.copy(kWrist)
kWristModel.J = 0.1348
if __name__ == '__main__':
argv = FLAGS(sys.argv)
glog.init()
sys.exit(main(argv))
| 24.835616 | 87 | 0.674021 |
12810e363b2fde4bb2f563894e88d9b033fc5d56 | 2,666 | py | Python | utils/tools.py | alipay/Pyraformer | 84af4dbd93b7b96975b5034f0dde412005260123 | [
"Apache-2.0"
] | 7 | 2022-03-24T03:42:14.000Z | 2022-03-27T16:27:31.000Z | utils/tools.py | alipay/Pyraformer | 84af4dbd93b7b96975b5034f0dde412005260123 | [
"Apache-2.0"
] | 1 | 2022-03-17T08:54:42.000Z | 2022-03-17T08:54:42.000Z | utils/tools.py | alipay/Pyraformer | 84af4dbd93b7b96975b5034f0dde412005260123 | [
"Apache-2.0"
] | 1 | 2022-03-29T16:33:44.000Z | 2022-03-29T16:33:44.000Z | from torch.nn.modules import loss
import torch
import numpy as np
def AE_loss(mu, labels, ignore_zero):
if ignore_zero:
indexes = (labels != 0)
else:
indexes = (labels >= 0)
ae = torch.abs(labels[indexes] - mu[indexes])
return ae
| 28.361702 | 112 | 0.62003 |
1282bd510ec173d21c0fd86f0dd67b09824e394a | 2,772 | py | Python | .venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_shift.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 115 | 2020-06-18T15:00:58.000Z | 2022-03-02T10:13:19.000Z | .venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_shift.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 37 | 2020-10-20T08:30:53.000Z | 2020-12-22T13:15:45.000Z | .venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_shift.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 60 | 2020-07-22T14:53:10.000Z | 2022-03-23T10:17:59.000Z | import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import TimedeltaIndex
import pandas._testing as tm
| 35.538462 | 82 | 0.544372 |
1282edeb2a30864dc3a5aa0e406d5fae2795f292 | 1,974 | py | Python | webScraping/Instagram/2a_selenium_corriere.py | PythonBiellaGroup/MaterialeSerate | 58b45ecda7b9a8a298b9ca966d2806618a277372 | [
"MIT"
] | 12 | 2021-12-12T22:19:52.000Z | 2022-03-18T11:45:17.000Z | webScraping/Instagram/2a_selenium_corriere.py | PythonGroupBiella/MaterialeLezioni | 58b45ecda7b9a8a298b9ca966d2806618a277372 | [
"MIT"
] | 1 | 2022-03-23T13:58:33.000Z | 2022-03-23T14:05:08.000Z | webScraping/Instagram/2a_selenium_corriere.py | PythonGroupBiella/MaterialeLezioni | 58b45ecda7b9a8a298b9ca966d2806618a277372 | [
"MIT"
] | 5 | 2021-11-30T19:38:41.000Z | 2022-01-30T14:50:44.000Z | # use selenium to scrape headlines from corriere.it
# pip install selenium
from re import L
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import pandas as pd
import time
import sys
HOME = "https://corriere.it"
# open Firefox
driver = webdriver.Firefox()
# navigate to corriere.it
driver.get(HOME)
# In order to extract the information that youre looking to scrape,
# you need to locate the elements XPath.
# An XPath is a syntax used for finding any element on a webpage.
# We can see the headline
#<a class="has-text-black" href="https://www.corriere.it/sport/calcio/coppa-italia/22_aprile_19/inter-milan-formazioni-news-risultato-f607f438-bfef-11ec-9f78-c9d279c21b38.shtml">Inter-Milan, doppio Lautaro e Gosens, nerazzurri in finale di Coppa Italia </a>
# --> [@class=name]
# all great but we need to sort out this coxokie pop-up
#driver.find_element_by_xpath("//*[@id='_cpmt-accept']").click()
#WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, '_cpmt-accept'))).click()
#WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div#_cpmt-buttons button#_cpmt-accept"))).click()
time.sleep(5)
# carefully look at the env, we have an iframe here
cookie_iframe = driver.find_element_by_xpath("//iframe[@id='_cpmt-iframe']")
driver.switch_to.frame(cookie_iframe)
print(cookie_iframe)
#driver.switch_to.frame(driver.find_element(By.XPATH("//iframe[@id='_cpmt-iframe']")))
button = driver.find_element_by_id("_cpmt-accept").click()
# back to the main class
driver.get(HOME)
# elements --> find_all
headlines = driver.find_elements_by_xpath('//h4[@class="title-art-hp is-medium is-line-h-106"]')
# here we get all the headlines from the corriere
# we can get the text
for headline in headlines:
print(headline.text) | 44.863636 | 258 | 0.766971 |
1283922931293c1f0272600761d089b38ea78f4b | 2,033 | py | Python | stolos/tests/test_bin.py | sailthru/stolos | 7b74da527033b2da7f3ccd6d19ed6fb0245ea0fc | [
"Apache-2.0"
] | 121 | 2015-01-20T08:58:35.000Z | 2021-08-08T15:13:11.000Z | stolos/tests/test_bin.py | sailthru/stolos | 7b74da527033b2da7f3ccd6d19ed6fb0245ea0fc | [
"Apache-2.0"
] | 3 | 2015-01-20T22:19:49.000Z | 2016-02-10T10:48:11.000Z | stolos/tests/test_bin.py | sailthru/stolos | 7b74da527033b2da7f3ccd6d19ed6fb0245ea0fc | [
"Apache-2.0"
] | 20 | 2016-02-03T17:08:31.000Z | 2021-04-19T10:43:28.000Z | import os
from subprocess import check_output, CalledProcessError
from nose import tools as nt
from stolos import queue_backend as qb
from stolos.testing_tools import (
with_setup, validate_zero_queued_task, validate_one_queued_task,
validate_n_queued_task
)
| 36.303571 | 75 | 0.713724 |
1283e6ee8cf196eb827ab2c20c8605ca98bca840 | 12,442 | py | Python | senlin/tests/unit/engine/actions/test_create.py | chenyb4/senlin | 8b9ec31566890dc9989fe08e221172d37c0451b4 | [
"Apache-2.0"
] | null | null | null | senlin/tests/unit/engine/actions/test_create.py | chenyb4/senlin | 8b9ec31566890dc9989fe08e221172d37c0451b4 | [
"Apache-2.0"
] | null | null | null | senlin/tests/unit/engine/actions/test_create.py | chenyb4/senlin | 8b9ec31566890dc9989fe08e221172d37c0451b4 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from senlin.common import consts
from senlin.engine.actions import base as ab
from senlin.engine.actions import cluster_action as ca
from senlin.engine import cluster as cm
from senlin.engine import dispatcher
from senlin.engine import node as nm
from senlin.objects import action as ao
from senlin.objects import cluster as co
from senlin.objects import dependency as dobj
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
| 43.201389 | 79 | 0.60987 |
12848f59193336131bb837186f98da6abb8ba010 | 1,665 | py | Python | tests/test_api.py | bh-chaker/wetterdienst | b0d51bb4c7392eb47834e4978e26882d74b22e35 | [
"MIT"
] | 155 | 2020-07-03T05:09:22.000Z | 2022-03-28T06:57:39.000Z | tests/test_api.py | bh-chaker/wetterdienst | b0d51bb4c7392eb47834e4978e26882d74b22e35 | [
"MIT"
] | 453 | 2020-07-02T21:21:52.000Z | 2022-03-31T21:35:36.000Z | tests/test_api.py | bh-chaker/wetterdienst | b0d51bb4c7392eb47834e4978e26882d74b22e35 | [
"MIT"
] | 21 | 2020-09-07T12:13:27.000Z | 2022-03-26T16:26:09.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import pytest
from wetterdienst import Wetterdienst
| 26.015625 | 79 | 0.587988 |
128572fd0692d7bc47b673410cce38c578481632 | 5,803 | py | Python | examples/sentence_embedding/task_sentence_embedding_sbert_unsupervised_TSDAE.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | 49 | 2022-03-15T07:28:16.000Z | 2022-03-31T07:16:15.000Z | examples/sentence_embedding/task_sentence_embedding_sbert_unsupervised_TSDAE.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | null | null | null | examples/sentence_embedding/task_sentence_embedding_sbert_unsupervised_TSDAE.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | null | null | null | #! -*- coding:utf-8 -*-
# -pretrain, devsts-b
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import pearsonr, spearmanr
import copy
import random
import numpy as np
random.seed(2022)
np.random.seed(2002)
maxlen = 256
batch_size = 8
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#
tokenizer = Tokenizer(dict_path, do_lower_case=True)
#
train_data = get_data('F:/Projects/data/corpus/pretrain/film/film.txt')
train_dataloader = DataLoader(ListDataset(data=train_data), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
from task_sentence_embedding_sbert_sts_b__CosineSimilarityLoss import valid_dataloader
# bert
model = Model().to(device)
# lossoptimizer
model.compile(
loss=nn.CrossEntropyLoss(ignore_index=0),
optimizer=optim.Adam(model.parameters(), lr=2e-5), #
)
#
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader,
epochs=20,
steps_per_epoch=100,
callbacks=[evaluator]
)
else:
model.load_weights('best_model.pt')
| 37.681818 | 196 | 0.689988 |
12867ea275e82f412c64f544501dc211d18fb6b3 | 2,761 | py | Python | crowd_anki/export/anki_exporter_wrapper.py | katrinleinweber/CrowdAnki | c78d837e082365d69bde5b1361b1dd4d11cd3d63 | [
"MIT"
] | 391 | 2016-08-31T21:55:07.000Z | 2022-03-30T16:30:12.000Z | crowd_anki/export/anki_exporter_wrapper.py | katrinleinweber/CrowdAnki | c78d837e082365d69bde5b1361b1dd4d11cd3d63 | [
"MIT"
] | 150 | 2016-09-01T00:35:35.000Z | 2022-03-30T23:26:48.000Z | crowd_anki/export/anki_exporter_wrapper.py | katrinleinweber/CrowdAnki | c78d837e082365d69bde5b1361b1dd4d11cd3d63 | [
"MIT"
] | 51 | 2016-09-04T17:02:39.000Z | 2022-02-04T11:49:10.000Z | from pathlib import Path
from .anki_exporter import AnkiJsonExporter
from ..anki.adapters.anki_deck import AnkiDeck
from ..config.config_settings import ConfigSettings
from ..utils import constants
from ..utils.notifier import AnkiModalNotifier, Notifier
from ..utils.disambiguate_uuids import disambiguate_note_model_uuids
EXPORT_FAILED_TITLE = "Export failed"
| 40.014493 | 139 | 0.680913 |
1286fbd5f6c9f344c50efdbd092dd4dcc7eb7bc9 | 1,086 | py | Python | shadow/apis/item.py | f1uzz/shadow | 0c2a1308f8bbe77ce4be005153148aac8ea0b4b2 | [
"MIT"
] | 1 | 2020-09-10T22:31:54.000Z | 2020-09-10T22:31:54.000Z | shadow/apis/item.py | f1uzz/shadow | 0c2a1308f8bbe77ce4be005153148aac8ea0b4b2 | [
"MIT"
] | 1 | 2020-03-12T15:47:14.000Z | 2020-09-11T18:46:44.000Z | shadow/apis/item.py | f1uzz/shadow | 0c2a1308f8bbe77ce4be005153148aac8ea0b4b2 | [
"MIT"
] | null | null | null | from functools import lru_cache
from typing import Optional
import requests
from .patches import Patches
| 25.255814 | 107 | 0.598527 |
128751ef3f270c09dd8bfd854209616c9fbc00a9 | 2,694 | py | Python | tests/test_lmdb_eager.py | rjpower/tensorflow-io | 39aa0b46cfaa403121fdddbd491a03d2f3190a87 | [
"Apache-2.0"
] | null | null | null | tests/test_lmdb_eager.py | rjpower/tensorflow-io | 39aa0b46cfaa403121fdddbd491a03d2f3190a87 | [
"Apache-2.0"
] | null | null | null | tests/test_lmdb_eager.py | rjpower/tensorflow-io | 39aa0b46cfaa403121fdddbd491a03d2f3190a87 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LMDBDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
import tensorflow as tf
if not (hasattr(tf, "version") and tf.version.VERSION.startswith("2.")):
tf.compat.v1.enable_eager_execution()
import tensorflow_io.lmdb as lmdb_io # pylint: disable=wrong-import-position
def test_lmdb_read_from_file():
"""test_read_from_file"""
# Copy database out because we need the path to be writable to use locks.
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_lmdb", "data.mdb")
tmp_path = tempfile.mkdtemp()
filename = os.path.join(tmp_path, "data.mdb")
shutil.copy(path, filename)
num_repeats = 2
lmdb_dataset = lmdb_io.LMDBDataset([filename]).repeat(num_repeats)
ii = 0
for vv in lmdb_dataset:
i = ii % 10
k, v = vv
assert k.numpy() == str(i).encode()
assert v.numpy() == str(chr(ord("a") + i)).encode()
ii += 1
shutil.rmtree(tmp_path)
def test_lmdb_read_from_file_with_batch():
"""test_read_from_file"""
# Copy database out because we need the path to be writable to use locks.
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_lmdb", "data.mdb")
tmp_path = tempfile.mkdtemp()
filename = os.path.join(tmp_path, "data.mdb")
shutil.copy(path, filename)
lmdb_dataset = lmdb_io.LMDBDataset([filename], batch=3)
i = 0
for vv in lmdb_dataset:
k, v = vv
if i < 9:
assert np.alltrue(k.numpy() == [
str(i).encode(),
str(i + 1).encode(),
str(i + 2).encode()])
assert np.alltrue(v.numpy() == [
str(chr(ord("a") + i)).encode(),
str(chr(ord("a") + i + 1)).encode(),
str(chr(ord("a") + i + 2)).encode()])
else:
assert k.numpy() == str(9).encode()
assert v.numpy() == str('j').encode()
i += 3
shutil.rmtree(tmp_path)
if __name__ == "__main__":
test.main()
| 33.259259 | 80 | 0.655902 |
128792253fac3bfe35e8e9d68865a244469d6f80 | 5,211 | py | Python | recbole/quick_start/quick_start.py | RuihongQiu/DuoRec | 4ebc30d8b7d9465f854867887b127a0bbc38bc31 | [
"MIT"
] | 16 | 2021-11-03T02:12:49.000Z | 2022-03-27T05:48:19.000Z | recbole/quick_start/quick_start.py | RuihongQiu/DuoRec | 4ebc30d8b7d9465f854867887b127a0bbc38bc31 | [
"MIT"
] | 2 | 2021-11-21T14:12:25.000Z | 2022-03-11T03:00:04.000Z | recbole/quick_start/quick_start.py | RuihongQiu/DuoRec | 4ebc30d8b7d9465f854867887b127a0bbc38bc31 | [
"MIT"
] | 4 | 2021-11-25T09:23:41.000Z | 2022-03-26T11:23:26.000Z | # @Time : 2020/10/6
# @Author : Shanlei Mu
# @Email : slmu@ruc.edu.cn
"""
recbole.quick_start
########################
"""
import logging
from logging import getLogger
from recbole.config import Config
from recbole.data import create_dataset, data_preparation
from recbole.utils import init_logger, get_model, get_trainer, init_seed
from recbole.utils.utils import set_color
def run_recbole(model=None, dataset=None, config_file_list=None, config_dict=None, saved=True):
r""" A fast running api, which includes the complete process of
training and testing a model on a specified dataset
Args:
model (str): model name
dataset (str): dataset name
config_file_list (list): config files used to modify experiment parameters
config_dict (dict): parameters dictionary used to modify experiment parameters
saved (bool): whether to save the model
"""
# configurations initialization
config = Config(model=model, dataset=dataset, config_file_list=config_file_list, config_dict=config_dict)
# init_seed(config['seed'], config['reproducibility'])
# logger initialization
init_logger(config)
logger = getLogger()
import os
log_dir = os.path.dirname(logger.handlers[0].baseFilename)
config['log_dir'] = log_dir
logger.info(config)
# dataset filtering
dataset = create_dataset(config)
logger.info(dataset)
# dataset splitting
train_data, valid_data, test_data = data_preparation(config, dataset)
# model loading and initialization
model = get_model(config['model'])(config, train_data).to(config['device'])
logger.info(model)
# trainer loading and initialization
trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)
# model training
best_valid_score, best_valid_result = trainer.fit(
train_data, valid_data, saved=saved, show_progress=config['show_progress']
)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
embedding_matrix = model.item_embedding.weight[1:].cpu().detach().numpy()
svd = TruncatedSVD(n_components=2)
svd.fit(embedding_matrix)
comp_tr = np.transpose(svd.components_)
proj = np.dot(embedding_matrix, comp_tr)
cnt = {}
for i in dataset['item_id']:
if i.item() in cnt:
cnt[i.item()] += 1
else:
cnt[i.item()] = 1
freq = np.zeros(embedding_matrix.shape[0])
for i in cnt:
freq[i-1] = cnt[i]
# freq /= freq.max()
sns.set(style='darkgrid')
sns.set_context("notebook", font_scale=1.8, rc={"lines.linewidth": 3, 'lines.markersize': 20})
plt.figure(figsize=(6, 4.5))
plt.scatter(proj[:, 0], proj[:, 1], s=1, c=freq, cmap='viridis_r')
plt.colorbar()
plt.xlim(-2, 2)
plt.ylim(-2, 2)
# plt.axis('square')
# plt.show()
plt.savefig(log_dir + '/' + config['model'] + '-' + config['dataset'] + '.pdf', format='pdf', transparent=False, bbox_inches='tight')
from scipy.linalg import svdvals
svs = svdvals(embedding_matrix)
svs /= svs.max()
np.save(log_dir + '/sv.npy', svs)
sns.set(style='darkgrid')
sns.set_context("notebook", font_scale=1.8, rc={"lines.linewidth": 3, 'lines.markersize': 20})
plt.figure(figsize=(6, 4.5))
plt.plot(svs)
# plt.show()
plt.savefig(log_dir + '/svs.pdf', format='pdf', transparent=False, bbox_inches='tight')
# model evaluation
test_result = trainer.evaluate(test_data, load_best_model=saved, show_progress=config['show_progress'])
logger.info(set_color('best valid ', 'yellow') + f': {best_valid_result}')
logger.info(set_color('test result', 'yellow') + f': {test_result}')
return {
'best_valid_score': best_valid_score,
'valid_score_bigger': config['valid_metric_bigger'],
'best_valid_result': best_valid_result,
'test_result': test_result
}
def objective_function(config_dict=None, config_file_list=None, saved=True):
r""" The default objective_function used in HyperTuning
Args:
config_dict (dict): parameters dictionary used to modify experiment parameters
config_file_list (list): config files used to modify experiment parameters
saved (bool): whether to save the model
"""
config = Config(config_dict=config_dict, config_file_list=config_file_list)
init_seed(config['seed'], config['reproducibility'])
logging.basicConfig(level=logging.ERROR)
dataset = create_dataset(config)
train_data, valid_data, test_data = data_preparation(config, dataset)
model = get_model(config['model'])(config, train_data).to(config['device'])
trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)
best_valid_score, best_valid_result = trainer.fit(train_data, valid_data, verbose=False, saved=saved)
test_result = trainer.evaluate(test_data, load_best_model=saved)
return {
'best_valid_score': best_valid_score,
'valid_score_bigger': config['valid_metric_bigger'],
'best_valid_result': best_valid_result,
'test_result': test_result
}
| 35.209459 | 137 | 0.682978 |
1287e0c57eb8a30f8e6d4ada3266d63abc50f722 | 4,947 | py | Python | inferlo/generic/inference/bucket_renormalization.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 1 | 2022-01-27T18:44:07.000Z | 2022-01-27T18:44:07.000Z | inferlo/generic/inference/bucket_renormalization.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 3 | 2022-01-23T18:02:30.000Z | 2022-01-27T23:10:51.000Z | inferlo/generic/inference/bucket_renormalization.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 1 | 2021-09-03T06:12:57.000Z | 2021-09-03T06:12:57.000Z | # Copyright (c) The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE.
import warnings
import numpy as np
from sklearn.utils.extmath import randomized_svd
from .bucket_elimination import BucketElimination
from .factor import Factor, default_factor_name, product_over_
from .graphical_model import GraphicalModel
from .mini_bucket_elimination import MiniBucketElimination
| 41.571429 | 80 | 0.595512 |
1287eefddb9d27db413d1feaac4d915eb6887055 | 5,519 | py | Python | oldcode/guestbook111013.py | mdreid/dinkylink | 34370633c9361f6625227440d4aca6ed2b57bfab | [
"MIT"
] | 1 | 2015-05-06T20:07:36.000Z | 2015-05-06T20:07:36.000Z | oldcode/guestbook111013.py | mdreid/dinkylink | 34370633c9361f6625227440d4aca6ed2b57bfab | [
"MIT"
] | null | null | null | oldcode/guestbook111013.py | mdreid/dinkylink | 34370633c9361f6625227440d4aca6ed2b57bfab | [
"MIT"
] | null | null | null | import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
from sys import argv
import datetime
import pickle
import sys
sys.path.insert(0, 'libs')
import BeautifulSoup
from bs4 import BeautifulSoup
import requests
import json
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape', 'jinja2.ext.loopcontrols'],
autoescape=True)
url = 'http://www.njtransit.com/sf/sf_servlet.srv?hdnPageAction=TrainSchedulesFrom'
pu_code = "124_PRIN"
ny_code = "105_BNTN"
prs = "Princeton"
nyp = "New York Penn Station"
# get date
today = datetime.date.today()
str_date = today.__format__("%m/%d/%Y")
# trip info
toNY_dict = {'selOrigin': pu_code, 'selDestination': ny_code, 'datepicker': str_date, 'OriginDescription': prs, 'DestDescription': nyp}
toPU_dict = {'selOrigin': ny_code, 'selDestination': pu_code, 'datepicker': str_date, 'OriginDescription': nyp, 'DestDescription': prs}
# get to webpage with data for the day
with requests.Session() as re:
toNY = re.post(url, data=toNY_dict)
toPU = re.post(url, data=toPU_dict)
toPUhtml = toPU.text
toNYhtml = toNY.text
#Reads in html file and name of destination and outputs csv file with comma spliced file of train information
#Create csv files for to Princeton and to New York
toPUDict = scrape(toPUhtml, 'PU')
toNYDict = scrape(toNYhtml, 'NY')
globalPUDict = {}
application = webapp2.WSGIApplication([
('/', MainPage),
('/toNY', ToNY),
('/toPU', ToPU),
('/test', Test123),
], debug=True)
| 31.901734 | 181 | 0.698315 |
1289c37f5bf5c6f565d40cc79d0b3cb7b6862bc0 | 4,482 | py | Python | is_core/tests/crawler.py | zzuzzy/django-is-core | 3f87ec56a814738683c732dce5f07e0328c2300d | [
"BSD-3-Clause"
] | null | null | null | is_core/tests/crawler.py | zzuzzy/django-is-core | 3f87ec56a814738683c732dce5f07e0328c2300d | [
"BSD-3-Clause"
] | null | null | null | is_core/tests/crawler.py | zzuzzy/django-is-core | 3f87ec56a814738683c732dce5f07e0328c2300d | [
"BSD-3-Clause"
] | null | null | null | import json
from django.utils.encoding import force_text
from germanium.tools import assert_true, assert_not_equal
from germanium.test_cases.client import ClientTestCase
from germanium.decorators import login
from germanium.crawler import Crawler, LinkExtractor, HtmlLinkExtractor as OriginalHtmlLinkExtractor
| 34.744186 | 118 | 0.594378 |
1289e9a1e3edba91a08623829d6f72757cbc5c8d | 136 | py | Python | example/geometry/admin.py | emelianovss-yandex-praktikum/07_pyplus_django_2 | 09bda00f9c8e9fd1ff0f3a483ecb210041d19a48 | [
"MIT"
] | null | null | null | example/geometry/admin.py | emelianovss-yandex-praktikum/07_pyplus_django_2 | 09bda00f9c8e9fd1ff0f3a483ecb210041d19a48 | [
"MIT"
] | null | null | null | example/geometry/admin.py | emelianovss-yandex-praktikum/07_pyplus_django_2 | 09bda00f9c8e9fd1ff0f3a483ecb210041d19a48 | [
"MIT"
] | 2 | 2021-11-27T08:06:35.000Z | 2021-11-27T13:52:41.000Z | from django.contrib import admin
from geometry.models import Shape
| 17 | 35 | 0.772059 |
128a2d7a634e13b30d2d38fc5ac9815e890ebcfe | 943 | py | Python | demo2/demo2_consume2.py | YuYanzy/kafka-python-demo | fc01ac29230b41fe1821f6e5a9d7226dea9688fe | [
"Apache-2.0"
] | 3 | 2021-05-07T01:48:37.000Z | 2021-09-24T20:53:51.000Z | demo2/demo2_consume2.py | YuYanzy/kafka-python-demo | fc01ac29230b41fe1821f6e5a9d7226dea9688fe | [
"Apache-2.0"
] | null | null | null | demo2/demo2_consume2.py | YuYanzy/kafka-python-demo | fc01ac29230b41fe1821f6e5a9d7226dea9688fe | [
"Apache-2.0"
] | 1 | 2021-05-08T08:46:01.000Z | 2021-05-08T08:46:01.000Z | # -*- coding: utf-8 -*-
# @Author : Ecohnoch(xcy)
# @File : demo2_consume.py
# @Function : TODO
import kafka
demo2_config = {
'kafka_host': 'localhost:9092',
'kafka_topic': 'demo2',
'kafka_group_id': 'demo2_group1'
}
if __name__ == '__main__':
consume()
| 29.46875 | 118 | 0.604454 |
128a56c54e5b4a6dbabdff93bd337ad93578a5cd | 2,280 | py | Python | autoscalingsim/scaling/scaling_model/scaling_model.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | 6 | 2021-03-10T16:23:10.000Z | 2022-01-14T04:57:46.000Z | autoscalingsim/scaling/scaling_model/scaling_model.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | null | null | null | autoscalingsim/scaling/scaling_model/scaling_model.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | 1 | 2022-01-14T04:57:55.000Z | 2022-01-14T04:57:55.000Z | import json
import pandas as pd
from .application_scaling_model import ApplicationScalingModel
from .platform_scaling_model import PlatformScalingModel
from autoscalingsim.deltarepr.group_of_services_delta import GroupOfServicesDelta
from autoscalingsim.deltarepr.node_group_delta import NodeGroupDelta
from autoscalingsim.utils.error_check import ErrorChecker
| 43.018868 | 124 | 0.744298 |
128b3b5e8ee085ddcb7d0e7d01778d05032f8030 | 1,662 | py | Python | src/zojax/filefield/copy.py | Zojax/zojax.filefield | 36d92242dffbd5a7b4ce3c6886d8d5898067245a | [
"ZPL-2.1"
] | null | null | null | src/zojax/filefield/copy.py | Zojax/zojax.filefield | 36d92242dffbd5a7b4ce3c6886d8d5898067245a | [
"ZPL-2.1"
] | null | null | null | src/zojax/filefield/copy.py | Zojax/zojax.filefield | 36d92242dffbd5a7b4ce3c6886d8d5898067245a | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import component, interface
from zc.copy.interfaces import ICopyHook
from data import File, Image
from interfaces import IFile, IImage
| 29.157895 | 78 | 0.642599 |
128cfb0881a4cb2a09e645ca55b7c92a498aaab7 | 192 | py | Python | verbose.py | lowrey/myjsonstore | 4d47f147fa5d86bea5d4e9b0bcab567583a794af | [
"MIT"
] | 1 | 2018-07-30T14:17:25.000Z | 2018-07-30T14:17:25.000Z | verbose.py | lowrey/myjsonstore | 4d47f147fa5d86bea5d4e9b0bcab567583a794af | [
"MIT"
] | null | null | null | verbose.py | lowrey/myjsonstore | 4d47f147fa5d86bea5d4e9b0bcab567583a794af | [
"MIT"
] | null | null | null | import sys
verbose = False
| 10.105263 | 27 | 0.583333 |
128d0ee6d357971754e6aa9345f8db462e223612 | 1,087 | py | Python | app/component_b/command/services.py | mirevsky/django-grpc-cqrs-kafka-template | 31af0bf5d15e393837f937cace90f82a7de26355 | [
"MIT"
] | 2 | 2022-01-10T19:52:36.000Z | 2022-03-19T07:34:54.000Z | app/component_b/command/services.py | mirevsky/django-grpc-cqrs-kafka-template | 31af0bf5d15e393837f937cace90f82a7de26355 | [
"MIT"
] | null | null | null | app/component_b/command/services.py | mirevsky/django-grpc-cqrs-kafka-template | 31af0bf5d15e393837f937cace90f82a7de26355 | [
"MIT"
] | null | null | null | import grpc
from google.protobuf import empty_pb2
from django_grpc_framework.services import Service
from component_b.common.serializers import PersonProtoSerializer
from component_b.common.models import PersonModel
| 31.970588 | 84 | 0.706532 |
128d2e658f8131c779045c3cbeaae1830ec9ef68 | 485 | py | Python | Lab 5/course_reader.py | kq4hy/CS3240-Lab-Files | 2611c3185a405da95547434825da9052cd4c6cec | [
"MIT"
] | null | null | null | Lab 5/course_reader.py | kq4hy/CS3240-Lab-Files | 2611c3185a405da95547434825da9052cd4c6cec | [
"MIT"
] | null | null | null | Lab 5/course_reader.py | kq4hy/CS3240-Lab-Files | 2611c3185a405da95547434825da9052cd4c6cec | [
"MIT"
] | null | null | null | __author__ = 'kq4hy'
import csv
import sqlite3
load_course_database('course1.db', 'seas-courses-5years.csv') | 28.529412 | 78 | 0.610309 |
128e53da4b600437f498e3a40b34bc75e174bc07 | 117 | py | Python | marshmallow_helpers/__init__.py | hilearn/marsh-enum | 2003ed850b076cd9d29a340ee44abe1c73aadc66 | [
"MIT"
] | null | null | null | marshmallow_helpers/__init__.py | hilearn/marsh-enum | 2003ed850b076cd9d29a340ee44abe1c73aadc66 | [
"MIT"
] | null | null | null | marshmallow_helpers/__init__.py | hilearn/marsh-enum | 2003ed850b076cd9d29a340ee44abe1c73aadc66 | [
"MIT"
] | null | null | null | from .enum_field import EnumField, RegisteredEnum # noqa
from .marsh_schema import attr_with_schema, derive # noqa
| 39 | 58 | 0.811966 |
128e7777e186dad8ff8ca443386abd102aa7f54e | 1,492 | py | Python | Weather Station using DHT Sensor with Raspberry Pi and ThingSpeak Platform/Weather Station - ThingSpeak - Raspberry Pi.py | MeqdadDev/ai-robotics-cv-iot-mini-projects | 0c591bc495c95aa95d436e51f38e55bf510349ac | [
"MIT"
] | null | null | null | Weather Station using DHT Sensor with Raspberry Pi and ThingSpeak Platform/Weather Station - ThingSpeak - Raspberry Pi.py | MeqdadDev/ai-robotics-cv-iot-mini-projects | 0c591bc495c95aa95d436e51f38e55bf510349ac | [
"MIT"
] | null | null | null | Weather Station using DHT Sensor with Raspberry Pi and ThingSpeak Platform/Weather Station - ThingSpeak - Raspberry Pi.py | MeqdadDev/ai-robotics-cv-iot-mini-projects | 0c591bc495c95aa95d436e51f38e55bf510349ac | [
"MIT"
] | 1 | 2022-03-29T07:41:23.000Z | 2022-03-29T07:41:23.000Z | '''
IoT Mini Project
Weather Station using DHT Sensor and Raspberry Pi with ThingSpeak Platform
Code Sample: Interfacing DHT22 with Raspberry Pi and sending the data to an IoT Platform (ThingSpeak Platform)
'''
from time import sleep
# import Adafruit_DHT # Not supported library
import adafruit_dht
from board import *
import requests
# After creating your account on ThingSpeak platform, put your channel id below
channel_id = 12345
write_key = 'WriteYourKeyAsString.......' # Put your write key here
# D4 = GPIO4 / D17 = GPIO17 ...etc.
SENSOR_PIN = D17
params = {'key': write_key, 'field1': temp, 'field2': humidity}
res = requests.get(url, params=params)
if __name__ == "__main__":
    # Main loop: sample the DHT sensor and push each reading to ThingSpeak.
    # get_measurements() / sendData() are expected to be defined earlier in
    # this script (not visible in this excerpt).
    while True:
        # 15 seconds is the minimum time for the free account on ThingSpeak
        sleep(15)
        try:
            temperature, humidity = get_measurements()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still terminate the loop cleanly.
            print("Error: Can't get the sensor values, check out your wiring connection.")
            # Bug fix: without this `continue`, sendData() ran with a stale
            # reading -- or raised NameError on the very first failed read.
            continue
        try:
            sendData(temperature, humidity)
        except Exception:
            print("Error: Can't push the sensor values to ThingSpeak server.")
| 29.84 | 110 | 0.690349 |
128e873ecfed93a46701bf97c5bfb7c6ee49fa55 | 931 | py | Python | Demo2_PageObjectModel/features/steps/PageObject_Registration.py | imademethink/imademethink_python_selenium_demo | cc364bda00e75eb9115c680ddea5e2fbca1d7acb | [
"BSD-4-Clause"
] | 2 | 2019-04-05T05:09:14.000Z | 2020-07-21T16:06:53.000Z | Demo2_PageObjectModel/features/steps/PageObject_Registration.py | imademethink/Python_Selenium_Demo | cc364bda00e75eb9115c680ddea5e2fbca1d7acb | [
"BSD-4-Clause"
] | 1 | 2020-01-08T08:15:42.000Z | 2020-01-08T08:15:42.000Z | Demo2_PageObjectModel/features/steps/PageObject_Registration.py | imademethink/Python_Selenium_Demo | cc364bda00e75eb9115c680ddea5e2fbca1d7acb | [
"BSD-4-Clause"
] | 4 | 2018-04-13T08:28:53.000Z | 2018-12-30T20:35:19.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
from page_objects import PageObject, PageElement
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
delay_min = 3 # sec
delay_medium = 5 # sec
delay_max = 9 # sec
| 35.807692 | 128 | 0.784103 |
128eba5345a78af068fb819342cfe180d8d296fd | 53 | py | Python | Tests/TestData/HOSimulation/HOTrialWavefunction/config.py | McCoyGroup/RynLib | 8d7e119ebbd3da4c8b0efb49facba9ff1cbaa09d | [
"MIT"
] | 1 | 2019-05-04T00:34:11.000Z | 2019-05-04T00:34:11.000Z | Tests/TestData/HOSimulation/HOTrialWavefunction/config.py | McCoyGroup/RynLib | 8d7e119ebbd3da4c8b0efb49facba9ff1cbaa09d | [
"MIT"
] | null | null | null | Tests/TestData/HOSimulation/HOTrialWavefunction/config.py | McCoyGroup/RynLib | 8d7e119ebbd3da4c8b0efb49facba9ff1cbaa09d | [
"MIT"
] | 1 | 2020-03-04T22:47:09.000Z | 2020-03-04T22:47:09.000Z |
config = dict(
module="HOTrialWavefunction.py"
) | 13.25 | 35 | 0.698113 |
128f728bec79cfe03c54bf8f06695117449e7c5a | 5,771 | py | Python | python/ucloud/import_data.py | oldthreefeng/miscellany | 8d3c7a14b53929d752c7356c85ae6681000cd526 | [
"MIT"
] | 1 | 2019-01-04T07:44:08.000Z | 2019-01-04T07:44:08.000Z | python/ucloud/import_data.py | oldthreefeng/miscellany | 8d3c7a14b53929d752c7356c85ae6681000cd526 | [
"MIT"
] | null | null | null | python/ucloud/import_data.py | oldthreefeng/miscellany | 8d3c7a14b53929d752c7356c85ae6681000cd526 | [
"MIT"
] | 2 | 2018-12-10T12:55:38.000Z | 2019-01-04T07:43:55.000Z | #!/usr/bin/python2
import sys
import os
import redis
import time
import datetime
# Per-type key accumulators; read_type_keys() (defined elsewhere in this
# module, not visible in this excerpt) fills these for each source instance.
string_keys = []
hash_keys = []
list_keys = []
set_keys = []
zset_keys = []
if __name__ == '__main__':
    # Migration map: each source Redis host is copied into the matching
    # destination "host:db" entry.
    # NOTE(review): the ':0' suffixes on the sources look like DB numbers but
    # are never parsed below -- only the destination suffix is used. Confirm.
    config = {
        "source": ['10.4.1.91:0', '10.4.13.124:0', '10.4.12.16:0', '10.4.2.250:0'],
        "dest": ['127.0.0.1:11', '127.0.0.1:12', '127.0.0.1:2', '127.0.0.1:1']
    }
    start = datetime.datetime.now()
    for group in zip(config["source"], config["dest"]):
        print group
        # Split "host:db"; the source DB suffix is ignored (defaults used).
        SrcIP = group[0].split(':')[0]
        SrcPort = 6379
        DstIP = group[1].split(':')[0]
        DstPort = 6379
        DstDB = group[1].split(':')[1]
        source = redis.Redis(host=SrcIP, port=SrcPort)
        dest = redis.Redis(host=DstIP, port=DstPort, db=DstDB)
        print "Begin Read Keys"
        # Classify all keys on the source by Redis type, then migrate each
        # type with the import_* helpers (defined elsewhere in this module).
        read_type_keys(source)
        print "String Key Count is:", len(string_keys)
        print "Set Key Count is:", len(set_keys)
        print "ZSet Key Count is:", len(zset_keys)
        print "List Key Count is:", len(list_keys)
        print "Hash Key Count is:", len(hash_keys)
        import_string(source, dest)
        import_hash(source, dest)
        import_list(source, dest)
        import_set(source, dest)
        import_zset(source, dest)
    stop = datetime.datetime.now()
    diff = stop - start
    print "Finish, token time:", str(diff)
| 30.21466 | 83 | 0.562468 |
128ffa30d0305f7d87c64ef11d99dcfb6d3e311f | 5,990 | py | Python | kinlin/core/strategy.py | the-lay/kinlin | ce7c95d46d130049e356104ba77fad51bc59fb3f | [
"MIT"
] | null | null | null | kinlin/core/strategy.py | the-lay/kinlin | ce7c95d46d130049e356104ba77fad51bc59fb3f | [
"MIT"
] | null | null | null | kinlin/core/strategy.py | the-lay/kinlin | ce7c95d46d130049e356104ba77fad51bc59fb3f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
from enum import Enum
from typing import List, Callable, Any
from tqdm import tqdm
from .model import Model
from .dataset import Dataset
from .experiment import Experiment
from .callback import Callback
| 40.748299 | 119 | 0.645576 |
1290da62e7e73de3c4c75ef861a9d5a9bcbe1f4b | 2,924 | py | Python | tests/test_utils.py | jamesmcclain/pystac | 993b54f5a10b0d55db18dbda81c5ad7acc06d921 | [
"Apache-2.0"
] | 1 | 2018-08-04T05:24:58.000Z | 2018-08-04T05:24:58.000Z | tests/test_utils.py | jamesmcclain/pystac | 993b54f5a10b0d55db18dbda81c5ad7acc06d921 | [
"Apache-2.0"
] | 4 | 2017-12-11T22:15:44.000Z | 2018-06-15T15:20:34.000Z | tests/test_utils.py | jamesmcclain/pystac | 993b54f5a10b0d55db18dbda81c5ad7acc06d921 | [
"Apache-2.0"
] | 5 | 2018-06-15T14:51:50.000Z | 2019-08-22T05:33:55.000Z | import unittest
from pystac.utils import (make_relative_href, make_absolute_href,
is_absolute_href)
| 46.412698 | 77 | 0.548906 |
1290db3be5d147e6281013adc1419767bcf94d89 | 1,322 | py | Python | services/web/manage.py | EMBEDDIA/ULR_NER_REST | 520accbced155a43543969f8a0a96a02c0b2d46d | [
"MIT"
] | null | null | null | services/web/manage.py | EMBEDDIA/ULR_NER_REST | 520accbced155a43543969f8a0a96a02c0b2d46d | [
"MIT"
] | 3 | 2020-04-24T11:38:40.000Z | 2021-12-03T09:01:17.000Z | services/web/manage.py | EMBEDDIA/ULR_NER_REST | 520accbced155a43543969f8a0a96a02c0b2d46d | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Michael Herman
# Copyright (c) 2020 Vid Podpean
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from flask.cli import FlaskGroup
from flask_cors import CORS
from project import flask_app
# Enable Cross-Origin Resource Sharing on the Flask app so the REST API can
# be called from browsers on other origins.
CORS(flask_app)
# FlaskGroup wires the app into the Flask command-line interface.
cli = FlaskGroup(flask_app)
if __name__ == "__main__":
    # Dispatch to the Flask CLI (e.g. `python manage.py run`).
    cli()
    #flask_app.run(debug=True)
| 45.586207 | 96 | 0.781392 |
12916103d8a5f146e7baa8906defb115aac95a11 | 5,737 | py | Python | GUI/PopUps/ExportPopUp.py | iagerogiannis/Image_to_plot | 15c01c50dcd23dfd187069145b3f2fdc06ed73a9 | [
"BSD-3-Clause"
] | null | null | null | GUI/PopUps/ExportPopUp.py | iagerogiannis/Image_to_plot | 15c01c50dcd23dfd187069145b3f2fdc06ed73a9 | [
"BSD-3-Clause"
] | null | null | null | GUI/PopUps/ExportPopUp.py | iagerogiannis/Image_to_plot | 15c01c50dcd23dfd187069145b3f2fdc06ed73a9 | [
"BSD-3-Clause"
] | null | null | null | from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton
from GUI.CustomWidgets.PathFileLineEdit import PathFileLineEdit
from GUI.CustomWidgets.InputField import InputField
| 40.401408 | 134 | 0.656092 |
1291ab8aed0db6cb7b1e8e05e5e25b1e6da39aea | 7,993 | py | Python | cwltool/update.py | PlatformedTasks/PLAS-cwl-tes | 5e66a5f9309906d1e8caa0f7148b8517a17f840d | [
"Apache-2.0"
] | null | null | null | cwltool/update.py | PlatformedTasks/PLAS-cwl-tes | 5e66a5f9309906d1e8caa0f7148b8517a17f840d | [
"Apache-2.0"
] | null | null | null | cwltool/update.py | PlatformedTasks/PLAS-cwl-tes | 5e66a5f9309906d1e8caa0f7148b8517a17f840d | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import copy
import re
from typing import (Any, Callable, Dict, List, MutableMapping, MutableSequence,
Optional, Tuple, Union)
from functools import partial
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad import validate
from schema_salad.ref_resolver import Loader # pylint: disable=unused-import
from six import string_types
from six.moves import urllib
from typing_extensions import Text
from schema_salad.sourceline import SourceLine
from .loghandler import _logger
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from .utils import visit_class, visit_field, aslist
def v1_0to1_1(doc, loader, baseuri): # pylint: disable=unused-argument
    # type: (Any, Loader, Text) -> Tuple[Any, Text]
    """Public updater for v1.0 to v1.1."""
    # Work on a deep copy so the caller's document is left untouched.
    doc = copy.deepcopy(doc)
    # Pre-1.1 extension/requirement URIs and the builtin v1.1 requirement
    # class names they became. NOTE(review): `rewrite` is presumably consumed
    # by rewrite_requirements (not visible in this excerpt) -- confirm.
    rewrite = {
        "http://commonwl.org/cwltool#WorkReuse": "WorkReuse",
        "http://arvados.org/cwl#ReuseRequirement": "WorkReuse",
        "http://commonwl.org/cwltool#TimeLimit": "ToolTimeLimit",
        "http://commonwl.org/cwltool#NetworkAccess": "NetworkAccess",
        "http://commonwl.org/cwltool#InplaceUpdateRequirement": "InplaceUpdateRequirement",
        "http://commonwl.org/cwltool#LoadListingRequirement": "LoadListingRequirement"
    }
    # rewrite_requirements / fix_inputBinding / update_secondaryFiles are
    # helper callbacks referenced here but not defined in this excerpt.
    visit_class(doc, ("CommandLineTool","Workflow"), rewrite_requirements)
    visit_class(doc, ("ExpressionTool","Workflow"), fix_inputBinding)
    visit_field(doc, "secondaryFiles", partial(update_secondaryFiles, top=True))
    # A packed document keeps its processes under $graph; update each one.
    upd = doc
    if isinstance(upd, MutableMapping) and "$graph" in upd:
        upd = upd["$graph"]
    for proc in aslist(upd):
        # v1.1 changed the defaults, so pin the old v1.0 behaviour as hints:
        # unrestricted network access and deep directory listings.
        proc.setdefault("hints", CommentedSeq())
        proc["hints"].insert(0, CommentedMap([("class", "NetworkAccess"),( "networkAccess", True)]))
        proc["hints"].insert(0, CommentedMap([("class", "LoadListingRequirement"),("loadListing", "deep_listing")]))
        # Per-process cwlVersion is dropped; the top level carries it now.
        if "cwlVersion" in proc:
            del proc["cwlVersion"]
    return (doc, "v1.1")
# Updaters for stable CWL releases; None marks the current version
# (no further update needed).
UPDATES = {
    u"v1.0": v1_0to1_1,
    u"v1.1": None
} # type: Dict[Text, Optional[Callable[[Any, Loader, Text], Tuple[Any, Text]]]]
# Updaters for development/deprecated versions (only reachable when the
# caller passes enable_dev). NOTE(review): v1_1_0dev1to1_1 is referenced but
# not defined in this excerpt -- verify it exists in the module.
DEVUPDATES = {
    u"v1.0": v1_0to1_1,
    u"v1.1.0-dev1": v1_1_0dev1to1_1,
    u"v1.1": None
} # type: Dict[Text, Optional[Callable[[Any, Loader, Text], Tuple[Any, Text]]]]
# Union of the two tables, with dev entries taking precedence.
ALLUPDATES = UPDATES.copy()
ALLUPDATES.update(DEVUPDATES)
# Version the internal object model targets after updating.
INTERNAL_VERSION = u"v1.1"
def identity(doc, loader, baseuri):  # pylint: disable=unused-argument
    # type: (Any, Loader, Text) -> Tuple[Any, Union[Text, Text]]
    """No-op updater: hand the document back with its declared cwlVersion."""
    declared = doc["cwlVersion"]
    return (doc, declared)
def checkversion(doc,  # type: Union[CommentedSeq, CommentedMap]
                 metadata,  # type: CommentedMap
                 enable_dev  # type: bool
                 ):
    # type: (...) -> Tuple[CommentedMap, Text]
    """Validate the cwlVersion of the given CWL document.

    Returns the document (wrapped in a CommentedMap under ``$graph`` when a
    list of processes was given) together with the validated version string.
    """
    if isinstance(doc, CommentedSeq):
        if not isinstance(metadata, CommentedMap):
            raise Exception("Expected metadata to be CommentedMap")
        # Clone the metadata so the caller's copy keeps its own line/column
        # bookkeeping, then attach the process list under $graph.
        lc = metadata.lc
        metadata = copy.deepcopy(metadata)
        metadata.lc.data = copy.copy(lc.data)
        metadata.lc.filename = lc.filename
        metadata[u"$graph"] = doc
        cdoc = metadata
    elif isinstance(doc, CommentedMap):
        cdoc = doc
    else:
        raise Exception("Expected CommentedMap or CommentedSeq")

    version = metadata[u"cwlVersion"]
    cdoc["cwlVersion"] = version

    # Stable versions pass straight through.
    if version in UPDATES:
        return (cdoc, version)
    # Anything else must at least be a known development version.
    if version not in DEVUPDATES:
        raise validate.ValidationException(
            u"Unrecognized version %s" % version)
    # Known dev/deprecated versions are only allowed with --enable-dev.
    if not enable_dev:
        stable = sorted(UPDATES.keys())
        raise validate.ValidationException(
            u"Version '%s' is a development or deprecated version.\n "
            "Update your document to a stable version (%s) or use "
            "--enable-dev to enable support for development and "
            "deprecated versions." % (version, ", ".join(stable)))
    return (cdoc, version)
| 39.181373 | 116 | 0.600025 |
129258b78096fc56ca7d44ecd92404b8c97448a2 | 2,072 | py | Python | plottify/plottify.py | neutrinoceros/plottify | 21f4858dabe1228559a8beb385f134ccfb25321e | [
"MIT"
] | null | null | null | plottify/plottify.py | neutrinoceros/plottify | 21f4858dabe1228559a8beb385f134ccfb25321e | [
"MIT"
] | null | null | null | plottify/plottify.py | neutrinoceros/plottify | 21f4858dabe1228559a8beb385f134ccfb25321e | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib import collections
from matplotlib.lines import Line2D
if __name__ == "__main__":
    # Demo: compare matplotlib's default styling against plottify's
    # autosize() on the same scatter plot at several figure sizes.
    import numpy as np
    from plottify import autosize
    import matplotlib.pyplot as plt
    # Noisy linear data: y = x + Gaussian noise.
    n = 100
    x = np.random.uniform(low=-5, high=5, size=n)
    y = x + np.random.normal(scale=0.5, size=n)
    for size in [3, 10, 20]:
        # First figure: matplotlib defaults, no autosizing.
        plt.figure(figsize=(size, size))
        plt.scatter(x, y)
        plt.xlabel("X")
        plt.ylabel("Y")
        plt.title("Default")
        plt.show()
        # Second figure: identical plot, then autosize() rescales markers,
        # fonts and line widths to suit the figure dimensions.
        plt.figure(figsize=(size, size))
        plt.scatter(x, y)
        plt.xlabel("X")
        plt.ylabel("Y")
        plt.title("Autosized")
        autosize()
        plt.show()
| 26.227848 | 87 | 0.598456 |
12928ccd7dc4a56b7be40e6eb4668aed89dd266b | 8,546 | py | Python | ocular_algorithm/0x04_BasicRecurrenceAndRecursion.py | DistinctWind/ManimProjects | 6318643afcc24574cbd9a0a45ff0d913d4711b13 | [
"MIT"
] | 2 | 2020-03-15T01:27:09.000Z | 2020-03-20T02:08:09.000Z | ocular_algorithm/0x04_BasicRecurrenceAndRecursion.py | DistinctWind/ManimProjects | 6318643afcc24574cbd9a0a45ff0d913d4711b13 | [
"MIT"
] | null | null | null | ocular_algorithm/0x04_BasicRecurrenceAndRecursion.py | DistinctWind/ManimProjects | 6318643afcc24574cbd9a0a45ff0d913d4711b13 | [
"MIT"
] | null | null | null | from re import S
from manimlib import *
import sys
import os
from tqdm.std import tqdm
sys.path.append(os.getcwd())
from utils.imports import *
| 32.371212 | 121 | 0.589165 |
1292ffb60fd870f5e14b52506ec687c6761bed39 | 299 | py | Python | utility.py | Ming-desu/POKEMING | 2def3b47e7c08b71885f14944bffe105a63cc12a | [
"MIT"
] | null | null | null | utility.py | Ming-desu/POKEMING | 2def3b47e7c08b71885f14944bffe105a63cc12a | [
"MIT"
] | null | null | null | utility.py | Ming-desu/POKEMING | 2def3b47e7c08b71885f14944bffe105a63cc12a | [
"MIT"
] | null | null | null | # POKEMING - GON'NA CATCH 'EM ALL
# -- A simple hack 'n slash game in console
# -- This class handles all utility-related things
| 37.375 | 55 | 0.665552 |
12932a6f23a6e9331d41a53f62dfc3d9f6482d92 | 2,057 | py | Python | gpv2/data/lessons/mil.py | michalsr/gpv2 | 00a22b311dbaeefb04e1df676eb6ae3373d8d4b5 | [
"Apache-2.0"
] | null | null | null | gpv2/data/lessons/mil.py | michalsr/gpv2 | 00a22b311dbaeefb04e1df676eb6ae3373d8d4b5 | [
"Apache-2.0"
] | null | null | null | gpv2/data/lessons/mil.py | michalsr/gpv2 | 00a22b311dbaeefb04e1df676eb6ae3373d8d4b5 | [
"Apache-2.0"
] | null | null | null | import logging
import sys
from typing import Union, Optional, Dict, Any, List
from dataclasses import dataclass, replace
from exp.ours import file_paths
from exp.ours.boosting import MaskSpec
from exp.ours.data.dataset import Dataset, Task
from exp.ours.data.gpv_example import GPVExample
from exp.ours.models.model import PredictionArg
from os.path import join, exists
from exp.ours.util.py_utils import int_to_str
from utils.io import load_json_object, dump_json_object
import numpy as np
ID_LIST = set([0])
LAST_ID = 0
def _intern(x):
if x is None:
return None
return sys.intern(x)
def load_mil(split):
    """Load MIL lesson examples ('small' split or the full training file)."""
    if split == 'small':
        src = '/data/michal5/gpv/lessons/mil_small.json'
    else:
        src = '/data/michal5/gpv/lessons/mil_train.json'
    logging.info(f"Loading mil data from {src}")

    examples = []
    for record in load_json_object(src):
        # "image" is either a dict carrying an image_id, or the id itself.
        image = record["image"]
        image_id = image["image_id"] if isinstance(image, dict) else image
        examples.append(MILExample(
            gpv_id=record['gpv_id'],
            image_id=image_id,
            answer=record['answer'],
            query=record['query'],
            correct_answer=record['correct'],
            rel_query=record['rel_query'],
        ))
    return examples
| 21.206186 | 76 | 0.701507 |
12932d615b9cdc4848ccdf491cf3ec6f30e667d0 | 6,968 | py | Python | creel_portal/api/filters/FN024_Filter.py | AdamCottrill/CreelPortal | 5ec867c4f11b4231c112e8209116b6b96c2830ec | [
"MIT"
] | null | null | null | creel_portal/api/filters/FN024_Filter.py | AdamCottrill/CreelPortal | 5ec867c4f11b4231c112e8209116b6b96c2830ec | [
"MIT"
] | null | null | null | creel_portal/api/filters/FN024_Filter.py | AdamCottrill/CreelPortal | 5ec867c4f11b4231c112e8209116b6b96c2830ec | [
"MIT"
] | null | null | null | import django_filters
from ...models import FN024
from .filter_utils import NumberInFilter, ValueInFilter
| 33.180952 | 87 | 0.695896 |
1295c606d9e77831f602309b8cf0e51374c22061 | 7,148 | py | Python | modules/utils.py | PaulLerner/deep_parkinson_handwriting | 806f34eaa6c5dde2a8230a07615c69e0873c0535 | [
"MIT"
] | 2 | 2021-01-19T02:47:32.000Z | 2021-05-20T08:29:36.000Z | modules/utils.py | PaulLerner/deep_parkinson_handwriting | 806f34eaa6c5dde2a8230a07615c69e0873c0535 | [
"MIT"
] | null | null | null | modules/utils.py | PaulLerner/deep_parkinson_handwriting | 806f34eaa6c5dde2a8230a07615c69e0873c0535 | [
"MIT"
] | 2 | 2021-01-23T18:20:19.000Z | 2021-08-09T03:53:32.000Z | import numpy as np
from time import time
import matplotlib.pyplot as plt
# Column layout of one raw pen sample: measurement name -> index along the
# feature axis.
measure2index={"y-coordinate":0,"x-coordinate":1,"timestamp":2, "button_status":3,"tilt":4, "elevation":5,"pressure":6}
index2measure=list(measure2index.keys())
# The 8 acquisition tasks (a spiral plus written words/sentences).
task2index={"spiral":0,"l":1,"le":2 ,"les":3,"lektorka" :4,"porovnat":5,"nepopadnout":6, "tram":7}
index2task=list(task2index.keys())
# Sequence-length statistics; list positions follow task2index ordering.
max_lengths=[16071, 4226, 6615, 6827, 7993, 5783, 4423, 7676]#max length per task
token_lengths=[16071,1242,1649,1956]#max length per token
stroke_lengths=[16071,752,1104,1476,3568,2057,2267,1231]#max length per stroke (either on paper or in air)
stroke_avg_plus_std=[2904,277,363,411,484,346,324,218]#stroke avg length + stroke avg length std
max_strokes=[25,15,15,21,29,43,35, 67]#max n of strokes per task (in air + on paper)
# Plot panel indices used when rendering training curves.
plot2index={"loss":0,"accuracy":1}
index2plot= list(plot2index.keys())
on_paper_value=1.0#on_paper_stroke iff button_status==1.0
# 8x8 identity matrix used to one-hot encode the task index.
one_hot=np.identity(8)
def get_significance(p):
    """Map a p-value to a star marker: '***' p<.01, '**' p<.05, '*' p<.1, else '_'."""
    levels = ((0.01, "***"), (0.05, "**"), (0.1, "*"))
    for cutoff, stars in levels:
        if p < cutoff:
            return stars
    return "_"
def CorrectPool(out_size, current_pool):
    """Return a pooling kernel that divides `out_size` into whole windows."""
    ratio = out_size / current_pool
    if ratio % 1 == 0:
        # The requested kernel already divides the size evenly: keep it.
        return int(current_pool)
    # Otherwise snap to the nearest whole number of windows (at least one)
    # and derive the kernel from it. round() uses banker's rounding on .5
    # ties, exactly like the original implementation.
    windows = round(ratio)
    if windows == 0:
        windows = 1
    return int(out_size / windows)
def CorrectHyperparameters(input_size,seq_len,hidden_size,conv_kernel,pool_kernel ,padding=0,
    stride=1,dilation=1, dropout=0.0,output_size=1,n_seq=1):
    """makes convolved size divisible by pooling kernel and computes size of sequence after convolutions"""
    # NOTE: conv_kernel and pool_kernel are mutated in place (and also
    # returned) whenever a layer's hyperparameters have to be corrected.
    out_size=seq_len
    print("seq_len :",out_size)
    # Walk the conv/pool stack layer by layer, tracking the sequence length.
    for i, (h,c,p,pad,d) in enumerate(list(zip(hidden_size,conv_kernel,pool_kernel,padding,dilation))):
        print("layer",i+1)
        in_size=out_size
        out_size=get_out_size(out_size,pad,d,c,stride=1)
        print("\tafter conv{} :{}".format(i+1,out_size))
        # If the convolution would shrink the sequence below one element,
        # clamp the kernel to the largest size that still yields output.
        if out_size<1:
            c=(in_size-1)//d+1
            out_size=get_out_size(in_size,pad,d,c,stride=1)
            print("\t\tupdate c. after conv{} :{}".format(i+1,out_size))
            conv_kernel[i]=c
        # Adjust the pooling kernel so it divides the convolved size evenly.
        pool_kernel[i]=CorrectPool(out_size,p)
        out_size=get_out_size(out_size,padding=0,dilation=1,kernel_size=pool_kernel[i],stride=pool_kernel[i])
        print("\tafter pool{} :{}".format(i+1,out_size))
    # Flattened feature size after the last layer: length x channel count.
    out_size*=hidden_size[-1]
    print("after flatting",out_size)
    return input_size,out_size,hidden_size,conv_kernel,pool_kernel ,padding,stride,dilation, dropout,output_size
def get_out_size(in_size, padding, dilation, kernel_size, stride):
    """Output length of a conv or pool layer (standard conv-arithmetic formula)."""
    effective_kernel = dilation * (kernel_size - 1) + 1
    usable = in_size + 2 * padding - effective_kernel
    return usable // stride + 1
def count_params(model):
    """returns (total n of parameters, n of trainable parameters)"""
    total = 0
    trainable = 0
    # Single pass over the parameter tensors; requires_grad marks trainable.
    for param in model.parameters():
        n = param.numel()
        total += n
        if param.requires_grad:
            trainable += n
    return total, trainable
def ReshapeAndVote(model_train_predictions, round_before_voting=True):
    """used to fuse the predictions of n_models models after n_CV CV"""
    n_cv = len(model_train_predictions[0])
    n_models = len(model_train_predictions)
    # Transpose [model][fold] -> [fold][model], optionally snapping each
    # model's raw scores to hard 0/1 votes first.
    per_fold = []
    for fold in range(n_cv):
        votes = [model_train_predictions[m][fold] for m in range(n_models)]
        if round_before_voting:
            votes = [np.around(v) for v in votes]
        per_fold.append(votes)
    # Majority vote: average across models, then round (np.around rounds
    # .5 ties to even, matching the original behaviour).
    return [np.around(np.mean(votes, axis=0)) for votes in per_fold]
| 42.047059 | 133 | 0.663123 |
1296326732d0f3f0616b1b674348b31dbce55859 | 574 | py | Python | Mundo2/Desafio039.py | Marcoakira/Desafios_Python_do_Curso_Guanabara | c49b774148a2232f8f3c21b83e3dc97610480757 | [
"MIT"
] | null | null | null | Mundo2/Desafio039.py | Marcoakira/Desafios_Python_do_Curso_Guanabara | c49b774148a2232f8f3c21b83e3dc97610480757 | [
"MIT"
] | null | null | null | Mundo2/Desafio039.py | Marcoakira/Desafios_Python_do_Curso_Guanabara | c49b774148a2232f8f3c21b83e3dc97610480757 | [
"MIT"
] | null | null | null | import datetime
# Read the user's birth year (prompt text kept verbatim).
datenasc = int(input(f'insert you date of bit '))
# Current year, taken as the first 4 characters of today's ISO date.
atualdate = str(datetime.date.today())[0:4]
datestr = int(atualdate)
# Age in whole years (month and day are ignored).
datefinal = datestr - datenasc
print(datefinal)
# Brazilian military enlistment check: under / exactly / over 18 years old.
if datefinal < 18:
    print(f'voce esta com {datefinal}Faltam {18-datefinal} pra voc se alistar ao exercito hahahah' )
elif datefinal == 18:
    print(f'Voc completa 18 anos agora em {atualdate}'
          f'Chegou a hora ser servir seu pas como bucha de canho otario.\nPegue seus documentos ')
else:
    print(f'Voc escapou sabicho, ja esta com {datefinal}, se livrou n safadenho')
| 41 | 101 | 0.728223 |
1296680de0a376242d8b5859461295d893d5f13c | 4,180 | py | Python | local_test/test_pullparser.py | rmoskal/e-springpad | d2c1dfbae63a29737d9cfdee571704b7a5e85bd5 | [
"MIT"
] | 1 | 2017-01-10T17:12:25.000Z | 2017-01-10T17:12:25.000Z | local_test/test_pullparser.py | rmoskal/e-springpad | d2c1dfbae63a29737d9cfdee571704b7a5e85bd5 | [
"MIT"
] | null | null | null | local_test/test_pullparser.py | rmoskal/e-springpad | d2c1dfbae63a29737d9cfdee571704b7a5e85bd5 | [
"MIT"
] | null | null | null | __author__ = 'rob'
import unittest
import logging
import evernotebookparser
from xml.etree import ElementTree
import re
| 32.403101 | 157 | 0.570335 |
1296f3adb86af7c4bde450922af6cd40c775ef6d | 6,872 | py | Python | test/test_sysroot_compiler.py | prajakta-gokhale/cross_compile | cbdc94ed5b25d6fc336aa5c0faa2838d9ce61db4 | [
"Apache-2.0"
] | null | null | null | test/test_sysroot_compiler.py | prajakta-gokhale/cross_compile | cbdc94ed5b25d6fc336aa5c0faa2838d9ce61db4 | [
"Apache-2.0"
] | null | null | null | test/test_sysroot_compiler.py | prajakta-gokhale/cross_compile | cbdc94ed5b25d6fc336aa5c0faa2838d9ce61db4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the `create_cc_sysroot.py` script."""
import getpass
from pathlib import Path
from typing import Tuple
from cross_compile.sysroot_compiler import DockerConfig
from cross_compile.sysroot_compiler import Platform
from cross_compile.sysroot_compiler import QEMU_DIR_NAME
from cross_compile.sysroot_compiler import ROS_DOCKERFILE_NAME
from cross_compile.sysroot_compiler import SYSROOT_DIR_NAME
from cross_compile.sysroot_compiler import SysrootCompiler
import pytest
def setup_mock_sysroot(path: Path) -> Tuple[Path, Path]:
    """Create mock directories to correctly construct the SysrootCreator."""
    sysroot_dir = path / SYSROOT_DIR_NAME
    ros_workspace_dir = sysroot_dir / 'ros_ws'
    qemu_dir = sysroot_dir / QEMU_DIR_NAME
    # Lay down the directory skeleton first ...
    for directory in (sysroot_dir, ros_workspace_dir, qemu_dir):
        directory.mkdir()
    # ... then the placeholder files the compiler expects to find.
    (qemu_dir / 'qemu').ensure()
    (sysroot_dir / ROS_DOCKERFILE_NAME).ensure()
    return sysroot_dir, ros_workspace_dir
def test_get_workspace_image_tag(platform_config):
    """Make sure the image tag is created correctly."""
    expected = '{}/{}:latest'.format(getpass.getuser(), str(platform_config))
    actual = platform_config.get_workspace_image_tag()
    assert isinstance(actual, str)
    assert actual == expected
def test_docker_config_args(docker_config):
    """Make sure the Docker configuration is setup correctly."""
    args = _default_docker_kwargs()
    expected = 'Base Image: {}\nNetwork Mode: {}\nCaching: {}'.format(
        args['sysroot_base_image'],
        args['docker_network_mode'],
        args['sysroot_nocache'],
    )
    rendered = str(docker_config)
    assert isinstance(rendered, str)
    assert rendered == expected
def test_sysroot_compiler_constructor(
        platform_config, docker_config, tmpdir):
    """Test the SysrootCompiler constructor assuming valid path setup."""
    # Build the mock directory tree; the returned paths are not needed here
    # (previously unpacked into unused locals -- pyflakes F841).
    setup_mock_sysroot(tmpdir)
    sysroot_compiler = SysrootCompiler(
        str(tmpdir), 'ros_ws', platform_config,
        docker_config, None)
    # Both setup-script accessors should yield pathlib.Path objects.
    assert isinstance(sysroot_compiler.get_build_setup_script_path(), Path)
    assert isinstance(sysroot_compiler.get_system_setup_script_path(), Path)
def test_sysroot_compiler_tree_validation(platform_config, docker_config, tmpdir):
    """
    Ensure that the SysrootCompiler constructor validates the workspace.
    Start with empty directory and add one piece at a time, expecting failures until
    all parts are present.
    """
    kwargs = {
        'cc_root_dir': str(tmpdir),
        'ros_workspace_dir': 'ros_ws',
        'platform': platform_config,
        'docker_config': docker_config,
        'custom_setup_script_path': None,
    }
    # There's no 'sysroot' at all yet.
    # (The dead `compiler = ...` assignments inside the raises blocks were
    # dropped -- the value can never be used; pyflakes F841.)
    with pytest.raises(FileNotFoundError):
        SysrootCompiler(**kwargs)
    sysroot_dir = tmpdir / SYSROOT_DIR_NAME
    sysroot_dir.mkdir()
    # The ROS workspace and qemu directories are still missing.
    with pytest.raises(FileNotFoundError):
        SysrootCompiler(**kwargs)
    ros_workspace_dir = sysroot_dir / 'ros_ws'
    ros_workspace_dir.mkdir()
    # The qemu directory is still missing.
    with pytest.raises(FileNotFoundError):
        SysrootCompiler(**kwargs)
    qemu_dir = sysroot_dir / QEMU_DIR_NAME
    qemu_dir.mkdir()
    # The qemu binary is still missing.
    with pytest.raises(FileNotFoundError):
        SysrootCompiler(**kwargs)
    qemu_binary_mock = qemu_dir / 'qemu'
    qemu_binary_mock.ensure()
    # Everything is present now; construction must succeed.
    compiler = SysrootCompiler(**kwargs)
    assert compiler
def verify_base_docker_images(arch, os, rosdistro, image_name):
    """Assert correct base image is generated."""
    config = DockerConfig(
        arch, os, rosdistro,
        None,      # sysroot_base_image: derive it from the other arguments
        'host',    # docker_network_mode
        'False',   # sysroot_nocache
    )
    assert config.base_image == image_name
def test_get_docker_base_image():
    """Test that the correct base docker image is used for all arguments."""
    # (arch, os, rosdistro, expected base image tag) — one row per supported
    # combination; checked in the same order as the original literal calls.
    cases = [
        ('aarch64', 'ubuntu', 'dashing', 'arm64v8/ubuntu:bionic'),
        ('aarch64', 'ubuntu', 'eloquent', 'arm64v8/ubuntu:bionic'),
        ('aarch64', 'ubuntu', 'kinetic', 'arm64v8/ubuntu:xenial'),
        ('aarch64', 'ubuntu', 'melodic', 'arm64v8/ubuntu:bionic'),
        ('aarch64', 'debian', 'dashing', 'arm64v8/debian:stretch'),
        ('aarch64', 'debian', 'eloquent', 'arm64v8/debian:buster'),
        ('aarch64', 'debian', 'kinetic', 'arm64v8/debian:jessie'),
        ('aarch64', 'debian', 'melodic', 'arm64v8/debian:stretch'),
        ('armhf', 'ubuntu', 'dashing', 'arm32v7/ubuntu:bionic'),
        ('armhf', 'ubuntu', 'eloquent', 'arm32v7/ubuntu:bionic'),
        ('armhf', 'ubuntu', 'kinetic', 'arm32v7/ubuntu:xenial'),
        ('armhf', 'ubuntu', 'melodic', 'arm32v7/ubuntu:bionic'),
        ('armhf', 'debian', 'dashing', 'arm32v7/debian:stretch'),
        ('armhf', 'debian', 'eloquent', 'arm32v7/debian:buster'),
        ('armhf', 'debian', 'kinetic', 'arm32v7/debian:jessie'),
        ('armhf', 'debian', 'melodic', 'arm32v7/debian:stretch'),
    ]
    for arch, os_name, rosdistro, image in cases:
        verify_base_docker_images(arch, os_name, rosdistro, image)
| 37.551913 | 88 | 0.721478 |
1297e5fb738245835e074daab17948395423d0ba | 2,083 | py | Python | estimate.py | farr/galmassproxy | f4a1c7acc19d130a6f57030bceef03c993a7170c | [
"MIT"
] | null | null | null | estimate.py | farr/galmassproxy | f4a1c7acc19d130a6f57030bceef03c993a7170c | [
"MIT"
] | null | null | null | estimate.py | farr/galmassproxy | f4a1c7acc19d130a6f57030bceef03c993a7170c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
r"""estimate.py
Use to estimate masses based on observed proxy values (and associated
errors) from a pre-calibrated generative model for the mass-proxy
relationship. The estimates will be returned as samples (fair draws)
from the model's posterior on the mass given the proxy observation.
This program expects the proxy data in a file with at least 'proxy'
and 'dp' column headers, followed by observed proxy values and
relative errors in those columns:
proxy dp
p1 dp1
...
The output will have one row for each proxy measurement, with one
column for each draw from the mass posterior for that system:
m1_draw m1_draw ...
m2_draw m2_draw ...
...
"""
import argparse
import bz2
import numpy as np
import os.path as op
import pickle
import posterior as pos
import plotutils.runner as pr
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--caldir', metavar='DIR', required=True, help='directory with calibration data')
    parser.add_argument('--proxyfile', metavar='FILE', required=True, help='proxy observations')
    parser.add_argument('--output', metavar='FILE', default='masses.dat.bz2', help='mass posterior draws')
    args = parser.parse_args()

    # Restore the calibrated sampler state and the pickled posterior object
    # produced by the calibration run.
    runner = pr.load_runner(args.caldir)
    with bz2.BZ2File(op.join(args.caldir, 'logpost.pkl.bz2'), 'r') as inp:
        logpost = pickle.load(inp)

    # Keep only the last 16 thinned steps of each walker and flatten to a
    # (samples, ndim) array of calibration parameter draws.
    flatchain = runner.thin_chain[:, -16:, :].reshape((-1, runner.chain.shape[2]))

    # Proxy file must carry 'proxy' and 'dp' columns (see module docstring).
    data = np.genfromtxt(args.proxyfile, names=True)

    ms = []
    for log_p, dp in zip(np.log(data['proxy']), data['dp']):
        mdraws = []
        for p in flatchain:
            # mass_proxy_estimate returns ((log_m, log_p_est),
            # (var_log_m, var_log_p)); only the mass mean/variance are used.
            ((log_m, log_p_est), (var_log_m, var_log_p)) = \
                logpost.mass_proxy_estimate(p, log_p, dp)
            # One mass draw per calibration sample: log-normal around log_m.
            mdraws.append(np.exp(np.random.normal(loc=log_m, scale=np.sqrt(var_log_m))))
        ms.append(mdraws)
    ms = np.array(ms)

    # Force a .bz2 suffix so the output is always written compressed.
    fname = args.output
    fext = op.splitext(fname)[1]  # the base name was computed but never used
    if fext != '.bz2':
        fname = fname + '.bz2'
    with bz2.BZ2File(fname, 'w') as out:
        np.savetxt(out, ms)
| 30.632353 | 106 | 0.683629 |
129824738bfae0f0fbd02b667cf74972ac9ca42e | 143 | py | Python | scripts/python/printings.py | samk-ai/cmd-tools-course-materials | fa3615df7ae70bbc701661bdeef588cbbf17be97 | [
"MIT"
] | null | null | null | scripts/python/printings.py | samk-ai/cmd-tools-course-materials | fa3615df7ae70bbc701661bdeef588cbbf17be97 | [
"MIT"
] | null | null | null | scripts/python/printings.py | samk-ai/cmd-tools-course-materials | fa3615df7ae70bbc701661bdeef588cbbf17be97 | [
"MIT"
# Two equal string literals compiled in the same module: CPython folds them,
# so both names typically point at one shared object — the printed addresses
# come out identical.
str1 = "Python"
str2 = "Python"

print("\nMemory location of str1 = {}".format(hex(id(str1))))
print("Memory location of str2 = {}".format(hex(id(str2))))
print()
12990c8712d2523d8e2f0753d7b1faee0bbfa287 | 353 | py | Python | plots_lib/architecture_config.py | cmimprota/ASL-SIFT | e6e489e9cc06746e2ab8cd11193fc9fc0112e5df | [
"Zlib"
] | 1 | 2021-12-30T14:59:43.000Z | 2021-12-30T14:59:43.000Z | plots_lib/architecture_config.py | cmimprota/ASL-SIFT | e6e489e9cc06746e2ab8cd11193fc9fc0112e5df | [
"Zlib"
] | null | null | null | plots_lib/architecture_config.py | cmimprota/ASL-SIFT | e6e489e9cc06746e2ab8cd11193fc9fc0112e5df | [
"Zlib"
# Machine/plot configuration for roofline-style performance plots.
config = {
    'fixed_cpu_frequency': "@ 3700 MHz",  # label used alongside plot titles
    'frequency': 3.7e9,                   # clock frequency in Hz
    'maxflops_sisd': 2,
    'maxflops_sisd_fma': 4,
    'maxflops_simd': 16,
    'maxflops_simd_fma': 32,
    'roofline_beta': 64,                  # According to WikiChip (Skylake)
    'figure_size': (20, 9),
    'save_folder': '../all_plots/',
}
129b2012dab2f92bc6a116945f46ccc5481200f2 | 562 | py | Python | telemetry_f1_2021/generate_dataset.py | jasperan/f1-telemetry-oracle | 5b2d7efac265539931849863655a5f92d86c75a8 | [
"MIT"
] | 4 | 2022-02-21T16:36:09.000Z | 2022-03-28T06:50:54.000Z | telemetry_f1_2021/generate_dataset.py | jasperan/f1-telemetry-oracle | 5b2d7efac265539931849863655a5f92d86c75a8 | [
"MIT"
] | null | null | null | telemetry_f1_2021/generate_dataset.py | jasperan/f1-telemetry-oracle | 5b2d7efac265539931849863655a5f92d86c75a8 | [
"MIT"
] | 2 | 2022-02-17T19:25:04.000Z | 2022-02-23T04:16:16.000Z | import cx_Oracle
from oracledb import OracleJSONDatabaseConnection
import json
# Export every document of the 'f1_2021_weather' SODA collection to weather.json.
jsondb = OracleJSONDatabaseConnection()
connection = jsondb.get_connection()
connection.autocommit = True
soda = connection.getSodaDatabase()

# createCollection() opens the collection when it already exists.
x_collection = soda.createCollection('f1_2021_weather')

# Materialize every document's content; loads the whole collection in memory.
all_data = [doc.getContent() for doc in x_collection.find().getCursor()]

print('Data length: {}'.format(len(all_data)))

# The with-statement already closes the file — the previous explicit
# outfile.close() inside the block was redundant.
with open("weather.json", 'w') as outfile:
    outfile.write(json.dumps(all_data, indent=4))
129b447d8e3a2e21029c717a45661b4dd2311adc | 8,257 | py | Python | UserPage.py | muath22/BookStore | db5b30e540de311931b234e71937ace3db9750c8 | [
"MIT"
] | 9 | 2018-09-13T10:43:34.000Z | 2021-05-05T08:51:52.000Z | UserPage.py | muath22/BookStore | db5b30e540de311931b234e71937ace3db9750c8 | [
"MIT"
] | 4 | 2018-09-13T10:09:32.000Z | 2021-03-20T00:03:10.000Z | UserPage.py | muath22/BookStore | db5b30e540de311931b234e71937ace3db9750c8 | [
"MIT"
] | 5 | 2020-02-26T13:54:03.000Z | 2021-01-06T09:38:56.000Z | from Tkinter import *
import ttk
import BuyBook
import BookInformationPage
import Message
| 32.128405 | 84 | 0.592588 |
129b4ea5990948782bef80ca4f25a0a104636e5b | 775 | py | Python | migrations/versions/1b57e397deea_initial_migration.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | migrations/versions/1b57e397deea_initial_migration.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | migrations/versions/1b57e397deea_initial_migration.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | """initial migration
Revision ID: 1b57e397deea
Revises:
Create Date: 2021-12-20 20:57:14.696646
"""
from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic to order migrations in the chain.
revision = '1b57e397deea'  # this migration's unique id
down_revision = None       # None: this is the first migration (nothing to revert to)
branch_labels = None       # no named branches
depends_on = None          # no cross-branch dependencies
| 25 | 70 | 0.676129 |
129b54403eb231e9102fbf7abe8cda7f3996ce5b | 5,596 | py | Python | app/utility/base_planning_svc.py | scottctaylor12/caldera | 4e81aaaf0ed592232a0474dda36ea2fd505da0de | [
"Apache-2.0"
] | null | null | null | app/utility/base_planning_svc.py | scottctaylor12/caldera | 4e81aaaf0ed592232a0474dda36ea2fd505da0de | [
"Apache-2.0"
] | null | null | null | app/utility/base_planning_svc.py | scottctaylor12/caldera | 4e81aaaf0ed592232a0474dda36ea2fd505da0de | [
"Apache-2.0"
] | null | null | null | import copy
import itertools
import re
from base64 import b64decode
from app.utility.base_service import BaseService
from app.utility.rule import RuleSet
| 38.593103 | 103 | 0.606862 |
129c2cba3840cfd8f3de73d2239ee04d334e5bc9 | 215 | py | Python | pyclid/__init__.py | Kaundur/pyclid | c59865fed9120b76cba6e41a84653256ac3072ee | [
"MIT"
] | 2 | 2019-02-12T11:31:04.000Z | 2021-12-31T10:39:01.000Z | pyclid/__init__.py | Kaundur/pyclid | c59865fed9120b76cba6e41a84653256ac3072ee | [
"MIT"
] | null | null | null | pyclid/__init__.py | Kaundur/pyclid | c59865fed9120b76cba6e41a84653256ac3072ee | [
"MIT"
] | null | null | null | import math
from pyclid.vector import *
from pyclid.matrix import *
from pyclid.quaternion import *
#from pyclid.vector import vector
#from pyclid.quaternion import quaternion
#from pyclid.matrix import matrix
| 16.538462 | 41 | 0.8 |
129c738a3288c017144786e45c751a99bdb4acea | 2,939 | py | Python | tools/gen_histograms.py | mistajuliax/pbrt-v3-IILE | afda605d92517d2396e494d81465ead22d0c25e1 | [
"BSD-2-Clause"
] | 16 | 2018-10-12T15:29:22.000Z | 2022-03-16T11:24:10.000Z | tools/gen_histograms.py | mistajuliax/pbrt-v3-IILE | afda605d92517d2396e494d81465ead22d0c25e1 | [
"BSD-2-Clause"
] | 16 | 2018-02-02T11:49:36.000Z | 2018-04-21T09:07:08.000Z | tools/gen_histograms.py | giuliojiang/pbrt-v3-IISPT | b9be01096293ab0f50b14b9043556c93ff9e07ec | [
"BSD-2-Clause"
] | 2 | 2018-12-12T08:49:43.000Z | 2019-12-03T12:20:04.000Z | import os
rootdir = os.path.abspath(os.path.join(__file__, "..", ".."))
mldir = os.path.join(rootdir, "ml")
import sys
sys.path.append(mldir)
import pfm
import iispt_transforms
import math
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
# =============================================================================
# Conf

NUM_BUCKETS = 100  # histogram bucket count; not referenced in the visible part of this file
INPUTDIR = "/home/gj/git/pbrt-v3-IISPT-dataset-indirect/breakfast"  # dataset scene directory
SELECTOR = "p"  # only files whose name starts with this prefix are loaded
GAMMA_VALUE = 1.8  # gamma used by normalize_log_gamma below
NORMALIZATION_INTENSITY = 3.807115077972  # intensity passed to normalize_log_gamma

# =============================================================================
# Script

# Collect the .pfm images in INPUTDIR matching the selector prefix.
flist = []
for f in os.listdir(INPUTDIR):
    fpath = os.path.join(INPUTDIR, f)
    if f.startswith(SELECTOR) and f.endswith(".pfm"):
        flist.append(fpath)

# Generate histogram for raw data.
# NOTE(review): histogram() is not defined in the visible portion of this
# file — presumably provided elsewhere in the module or its imports; confirm.
standard_imgs = []
for fpath in flist:
    standard_imgs.append(pfm.load(fpath))
histogram(standard_imgs, "Raw intensity")

# Generate histogram after log transform.
log_imgs = []
for fpath in flist:
    img = pfm.load(fpath)
    img.map(iispt_transforms.LogTransform())
    log_imgs.append(img)
histogram(log_imgs, "Log transform")

# Generate histogram after log + gamma transform.
lg_imgs = []
for fpath in flist:
    img = pfm.load(fpath)
    img.normalize_log_gamma(NORMALIZATION_INTENSITY, GAMMA_VALUE)
    lg_imgs.append(img)
histogram(lg_imgs, "Log + Gamma transform")
129ced52ad5bddf6d93136148de2d32cf2de02ec | 4,762 | py | Python | crownstone_uart/core/uart/UartBridge.py | RicArch97/crownstone-lib-python-uart | c0aaf1415936e5e622aa6395fdac4f88ebcf82bf | [
"MIT"
] | null | null | null | crownstone_uart/core/uart/UartBridge.py | RicArch97/crownstone-lib-python-uart | c0aaf1415936e5e622aa6395fdac4f88ebcf82bf | [
"MIT"
] | null | null | null | crownstone_uart/core/uart/UartBridge.py | RicArch97/crownstone-lib-python-uart | c0aaf1415936e5e622aa6395fdac4f88ebcf82bf | [
"MIT"
] | null | null | null | import logging
import sys
import threading
import serial
import serial.tools.list_ports
from crownstone_uart.Constants import UART_READ_TIMEOUT, UART_WRITE_TIMEOUT
from crownstone_uart.core.UartEventBus import UartEventBus
from crownstone_uart.core.uart.UartParser import UartParser
from crownstone_uart.core.uart.UartReadBuffer import UartReadBuffer
from crownstone_uart.topics.SystemTopics import SystemTopics
_LOGGER = logging.getLogger(__name__)
| 40.355932 | 144 | 0.640277 |
129d3359e74cfc680cc1a6d1b0edd803c1383270 | 20,753 | py | Python | data-batch-treatment/test_agg_script/locations.py | coder-baymax/taxi-poc-aws | 4be8021873ee6b58b2dba5a5d41df12cdd3b67fc | [
"MIT"
] | null | null | null | data-batch-treatment/test_agg_script/locations.py | coder-baymax/taxi-poc-aws | 4be8021873ee6b58b2dba5a5d41df12cdd3b67fc | [
"MIT"
] | null | null | null | data-batch-treatment/test_agg_script/locations.py | coder-baymax/taxi-poc-aws | 4be8021873ee6b58b2dba5a5d41df12cdd3b67fc | [
"MIT"
] | null | null | null |
Locations = [
Location(1, "EWR", "Newark Airport", 40.6895314, -74.1744624),
Location(2, "Queens", "Jamaica Bay", 40.6056632, -73.8713099),
Location(3, "Bronx", "Allerton/Pelham Gardens", 40.8627726, -73.84343919999999),
Location(4, "Manhattan", "Alphabet City", 40.7258428, -73.9774916),
Location(5, "Staten Island", "Arden Heights", 40.556413, -74.1735044),
Location(6, "Staten Island", "Arrochar/Fort Wadsworth", 40.6012117, -74.0579185),
Location(7, "Queens", "Astoria", 40.7643574, -73.92346189999999),
Location(8, "Queens", "Astoria Park", 40.7785364, -73.92283359999999),
Location(9, "Queens", "Auburndale", 40.7577672, -73.78339609999999),
Location(10, "Queens", "Baisley Park", 40.6737751, -73.786025),
Location(11, "Brooklyn", "Bath Beach", 40.6038852, -74.0062078),
Location(12, "Manhattan", "Battery Park", 40.703141, -74.0159996),
Location(13, "Manhattan", "Battery Park City", 40.7115786, -74.0158441),
Location(14, "Brooklyn", "Bay Ridge", 40.6263732, -74.0298767),
Location(15, "Queens", "Bay Terrace/Fort Totten", 40.7920899, -73.7760996),
Location(16, "Queens", "Bayside", 40.7585569, -73.7654367),
Location(17, "Brooklyn", "Bedford", 40.6872176, -73.9417735),
Location(18, "Bronx", "Bedford Park", 40.8700999, -73.8856912),
Location(19, "Queens", "Bellerose", 40.7361769, -73.7137365),
Location(20, "Bronx", "Belmont", 40.8534507, -73.88936819999999),
Location(21, "Brooklyn", "Bensonhurst East", 40.6139307, -73.9921833),
Location(22, "Brooklyn", "Bensonhurst West", 40.6139307, -73.9921833),
Location(23, "Staten Island", "Bloomfield/Emerson Hill", 40.6074525, -74.0963115),
Location(24, "Manhattan", "Bloomingdale", 40.7988958, -73.9697795),
Location(25, "Brooklyn", "Boerum Hill", 40.6848689, -73.9844722),
Location(26, "Brooklyn", "Borough Park", 40.6350319, -73.9921028),
Location(27, "Queens", "Breezy Point/Fort Tilden/Riis Beach", 40.5597687, -73.88761509999999),
Location(28, "Queens", "Briarwood/Jamaica Hills", 40.7109315, -73.81356099999999),
Location(29, "Brooklyn", "Brighton Beach", 40.5780706, -73.9596565),
Location(30, "Queens", "Broad Channel", 40.6158335, -73.8213213),
Location(31, "Bronx", "Bronx Park", 40.8608544, -73.8706278),
Location(32, "Bronx", "Bronxdale", 40.8474697, -73.8599132),
Location(33, "Brooklyn", "Brooklyn Heights", 40.6959294, -73.9955523),
Location(34, "Brooklyn", "Brooklyn Navy Yard", 40.7025634, -73.9697795),
Location(35, "Brooklyn", "Brownsville", 40.665214, -73.9125304),
Location(36, "Brooklyn", "Bushwick North", 40.6957755, -73.9170604),
Location(37, "Brooklyn", "Bushwick South", 40.7043655, -73.9383476),
Location(38, "Queens", "Cambria Heights", 40.692158, -73.7330753),
Location(39, "Brooklyn", "Canarsie", 40.6402325, -73.9060579),
Location(40, "Brooklyn", "Carroll Gardens", 40.6795331, -73.9991637),
Location(41, "Manhattan", "Central Harlem", 40.8089419, -73.9482305),
Location(42, "Manhattan", "Central Harlem North", 40.8142585, -73.9426617),
Location(43, "Manhattan", "Central Park", 40.7812199, -73.9665138),
Location(44, "Staten Island", "Charleston/Tottenville", 40.5083408, -74.23554039999999),
Location(45, "Manhattan", "Chinatown", 40.7157509, -73.9970307),
Location(46, "Bronx", "City Island", 40.8468202, -73.7874983),
Location(47, "Bronx", "Claremont/Bathgate", 40.84128339999999, -73.9001573),
Location(48, "Manhattan", "Clinton East", 40.7637581, -73.9918181),
Location(49, "Brooklyn", "Clinton Hill", 40.6896834, -73.9661144),
Location(50, "Manhattan", "Clinton West", 40.7628785, -73.9940134),
Location(51, "Bronx", "Co-Op City", 40.8738889, -73.82944440000001),
Location(52, "Brooklyn", "Cobble Hill", 40.686536, -73.9962255),
Location(53, "Queens", "College Point", 40.786395, -73.8389657),
Location(54, "Brooklyn", "Columbia Street", 40.6775239, -74.00634409999999),
Location(55, "Brooklyn", "Coney Island", 40.5755438, -73.9707016),
Location(56, "Queens", "Corona", 40.7449859, -73.8642613),
Location(57, "Queens", "Corona", 40.7449859, -73.8642613),
Location(58, "Bronx", "Country Club", 40.8391667, -73.8197222),
Location(59, "Bronx", "Crotona Park", 40.8400367, -73.8953489),
Location(60, "Bronx", "Crotona Park East", 40.8365344, -73.8933509),
Location(61, "Brooklyn", "Crown Heights North", 40.6694022, -73.9422324),
Location(62, "Brooklyn", "Crown Heights South", 40.6694022, -73.9422324),
Location(63, "Brooklyn", "Cypress Hills", 40.6836873, -73.87963309999999),
Location(64, "Queens", "Douglaston", 40.76401509999999, -73.7433727),
Location(65, "Brooklyn", "Downtown Brooklyn/MetroTech", 40.6930987, -73.98566339999999),
Location(66, "Brooklyn", "DUMBO/Vinegar Hill", 40.70371859999999, -73.98226830000002),
Location(67, "Brooklyn", "Dyker Heights", 40.6214932, -74.00958399999999),
Location(68, "Manhattan", "East Chelsea", 40.7465004, -74.00137370000002),
Location(69, "Bronx", "East Concourse/Concourse Village", 40.8255863, -73.9184388),
Location(70, "Queens", "East Elmhurst", 40.7737505, -73.8713099),
Location(71, "Brooklyn", "East Flatbush/Farragut", 40.63751329999999, -73.9280797),
Location(72, "Brooklyn", "East Flatbush/Remsen Village", 40.6511399, -73.9181602),
Location(73, "Queens", "East Flushing", 40.7540534, -73.8086418),
Location(74, "Manhattan", "East Harlem North", 40.7957399, -73.93892129999999),
Location(75, "Manhattan", "East Harlem South", 40.7957399, -73.93892129999999),
Location(76, "Brooklyn", "East New York", 40.6590529, -73.8759245),
Location(77, "Brooklyn", "East New York/Pennsylvania Avenue", 40.65845729999999, -73.8904498),
Location(78, "Bronx", "East Tremont", 40.8453781, -73.8909693),
Location(79, "Manhattan", "East Village", 40.7264773, -73.98153370000001),
Location(80, "Brooklyn", "East Williamsburg", 40.7141953, -73.9316461),
Location(81, "Bronx", "Eastchester", 40.8859837, -73.82794710000002),
Location(82, "Queens", "Elmhurst", 40.737975, -73.8801301),
Location(83, "Queens", "Elmhurst/Maspeth", 40.7294018, -73.9065883),
Location(84, "Staten Island", "Eltingville/Annadale/Prince's Bay", 40.52899439999999, -74.197644),
Location(85, "Brooklyn", "Erasmus", 40.649649, -73.95287379999999),
Location(86, "Queens", "Far Rockaway", 40.5998931, -73.74484369999999),
Location(87, "Manhattan", "Financial District North", 40.7077143, -74.00827869999999),
Location(88, "Manhattan", "Financial District South", 40.705123, -74.0049259),
Location(89, "Brooklyn", "Flatbush/Ditmas Park", 40.6414876, -73.9593998),
Location(90, "Manhattan", "Flatiron", 40.740083, -73.9903489),
Location(91, "Brooklyn", "Flatlands", 40.6232714, -73.9321664),
Location(92, "Queens", "Flushing", 40.7674987, -73.833079),
Location(93, "Queens", "Flushing Meadows-Corona Park", 40.7400275, -73.8406953),
Location(94, "Bronx", "Fordham South", 40.8592667, -73.8984694),
Location(95, "Queens", "Forest Hills", 40.718106, -73.8448469),
Location(96, "Queens", "Forest Park/Highland Park", 40.6960418, -73.8663024),
Location(97, "Brooklyn", "Fort Greene", 40.6920638, -73.97418739999999),
Location(98, "Queens", "Fresh Meadows", 40.7335179, -73.7801447),
Location(99, "Staten Island", "Freshkills Park", 40.5772365, -74.1858183),
Location(100, "Manhattan", "Garment District", 40.7547072, -73.9916342),
Location(101, "Queens", "Glen Oaks", 40.7471504, -73.7118223),
Location(102, "Queens", "Glendale", 40.7016662, -73.8842219),
Location(103, "Manhattan", "Governor's Island/Ellis Island/Liberty Island", 40.6892494, -74.04450039999999),
Location(104, "Manhattan", "Governor's Island/Ellis Island/Liberty Island", 40.6892494, -74.04450039999999),
Location(105, "Manhattan", "Governor's Island/Ellis Island/Liberty Island", 40.6892494, -74.04450039999999),
Location(106, "Brooklyn", "Gowanus", 40.6751161, -73.9879753),
Location(107, "Manhattan", "Gramercy", 40.7367783, -73.9844722),
Location(108, "Brooklyn", "Gravesend", 40.5918636, -73.9768653),
Location(109, "Staten Island", "Great Kills", 40.5543273, -74.156292),
Location(110, "Staten Island", "Great Kills Park", 40.5492367, -74.1238486),
Location(111, "Brooklyn", "Green-Wood Cemetery", 40.6579777, -73.9940634),
Location(112, "Brooklyn", "Greenpoint", 40.7304701, -73.95150319999999),
Location(113, "Manhattan", "Greenwich Village North", 40.7335719, -74.0027418),
Location(114, "Manhattan", "Greenwich Village South", 40.7335719, -74.0027418),
Location(115, "Staten Island", "Grymes Hill/Clifton", 40.6189726, -74.0784785),
Location(116, "Manhattan", "Hamilton Heights", 40.8252793, -73.94761390000001),
Location(117, "Queens", "Hammels/Arverne", 40.5880813, -73.81199289999999),
Location(118, "Staten Island", "Heartland Village/Todt Hill", 40.5975007, -74.10189749999999),
Location(119, "Bronx", "Highbridge", 40.836916, -73.9271294),
Location(120, "Manhattan", "Highbridge Park", 40.8537599, -73.9257492),
Location(121, "Queens", "Hillcrest/Pomonok", 40.732341, -73.81077239999999),
Location(122, "Queens", "Hollis", 40.7112203, -73.762495),
Location(123, "Brooklyn", "Homecrest", 40.6004787, -73.9565551),
Location(124, "Queens", "Howard Beach", 40.6571222, -73.8429989),
Location(125, "Manhattan", "Hudson Sq", 40.7265834, -74.0074731),
Location(126, "Bronx", "Hunts Point", 40.8094385, -73.8803315),
Location(127, "Manhattan", "Inwood", 40.8677145, -73.9212019),
Location(128, "Manhattan", "Inwood Hill Park", 40.8722007, -73.9255549),
Location(129, "Queens", "Jackson Heights", 40.7556818, -73.8830701),
Location(130, "Queens", "Jamaica", 40.702677, -73.7889689),
Location(131, "Queens", "Jamaica Estates", 40.7179512, -73.783822),
Location(132, "Queens", "JFK Airport", 40.6413111, -73.77813909999999),
Location(133, "Brooklyn", "Kensington", 40.63852019999999, -73.97318729999999),
Location(134, "Queens", "Kew Gardens", 40.705695, -73.8272029),
Location(135, "Queens", "Kew Gardens Hills", 40.724707, -73.8207618),
Location(136, "Bronx", "Kingsbridge Heights", 40.8711235, -73.8976328),
Location(137, "Manhattan", "Kips Bay", 40.74232920000001, -73.9800645),
Location(138, "Queens", "LaGuardia Airport", 40.7769271, -73.8739659),
Location(139, "Queens", "Laurelton", 40.67764, -73.7447853),
Location(140, "Manhattan", "Lenox Hill East", 40.7662315, -73.9602312),
Location(141, "Manhattan", "Lenox Hill West", 40.7662315, -73.9602312),
Location(142, "Manhattan", "Lincoln Square East", 40.7741769, -73.98491179999999),
Location(143, "Manhattan", "Lincoln Square West", 40.7741769, -73.98491179999999),
Location(144, "Manhattan", "Little Italy/NoLiTa", 40.7230413, -73.99486069999999),
Location(145, "Queens", "Long Island City/Hunters Point", 40.7485587, -73.94964639999999),
Location(146, "Queens", "Long Island City/Queens Plaza", 40.7509846, -73.9402762),
Location(147, "Bronx", "Longwood", 40.8248438, -73.8915875),
Location(148, "Manhattan", "Lower East Side", 40.715033, -73.9842724),
Location(149, "Brooklyn", "Madison", 40.60688529999999, -73.947958),
Location(150, "Brooklyn", "Manhattan Beach", 40.57815799999999, -73.93892129999999),
Location(151, "Manhattan", "Manhattan Valley", 40.7966989, -73.9684247),
Location(152, "Manhattan", "Manhattanville", 40.8169443, -73.9558333),
Location(153, "Manhattan", "Marble Hill", 40.8761173, -73.9102628),
Location(154, "Brooklyn", "Marine Park/Floyd Bennett Field", 40.58816030000001, -73.8969745),
Location(155, "Brooklyn", "Marine Park/Mill Basin", 40.6055157, -73.9348698),
Location(156, "Staten Island", "Mariners Harbor", 40.63677010000001, -74.1587547),
Location(157, "Queens", "Maspeth", 40.7294018, -73.9065883),
Location(158, "Manhattan", "Meatpacking/West Village West", 40.7342331, -74.0100622),
Location(159, "Bronx", "Melrose South", 40.824545, -73.9104143),
Location(160, "Queens", "Middle Village", 40.717372, -73.87425),
Location(161, "Manhattan", "Midtown Center", 40.7314658, -73.9970956),
Location(162, "Manhattan", "Midtown East", 40.7571432, -73.9718815),
Location(163, "Manhattan", "Midtown North", 40.7649516, -73.9851039),
Location(164, "Manhattan", "Midtown South", 40.7521795, -73.9875438),
Location(165, "Brooklyn", "Midwood", 40.6204388, -73.95997779999999),
Location(166, "Manhattan", "Morningside Heights", 40.8105443, -73.9620581),
Location(167, "Bronx", "Morrisania/Melrose", 40.824545, -73.9104143),
Location(168, "Bronx", "Mott Haven/Port Morris", 40.8022025, -73.9166051),
Location(169, "Bronx", "Mount Hope", 40.8488863, -73.9051185),
Location(170, "Manhattan", "Murray Hill", 40.7478792, -73.9756567),
Location(171, "Queens", "Murray Hill-Queens", 40.7634996, -73.8073261),
Location(172, "Staten Island", "New Dorp/Midland Beach", 40.5739937, -74.1159755),
Location(173, "Queens", "North Corona", 40.7543725, -73.8669188),
Location(174, "Bronx", "Norwood", 40.8810341, -73.878486),
Location(175, "Queens", "Oakland Gardens", 40.7408584, -73.758241),
Location(176, "Staten Island", "Oakwood", 40.563994, -74.1159754),
Location(177, "Brooklyn", "Ocean Hill", 40.6782737, -73.9108212),
Location(178, "Brooklyn", "Ocean Parkway South", 40.61287799999999, -73.96838620000001),
Location(179, "Queens", "Old Astoria", 40.7643574, -73.92346189999999),
Location(180, "Queens", "Ozone Park", 40.6794072, -73.8507279),
Location(181, "Brooklyn", "Park Slope", 40.6710672, -73.98142279999999),
Location(182, "Bronx", "Parkchester", 40.8382522, -73.8566087),
Location(183, "Bronx", "Pelham Bay", 40.8505556, -73.83333329999999),
Location(184, "Bronx", "Pelham Bay Park", 40.8670144, -73.81006339999999),
Location(185, "Bronx", "Pelham Parkway", 40.8553279, -73.8639594),
Location(186, "Manhattan", "Penn Station/Madison Sq West", 40.7505045, -73.9934387),
Location(187, "Staten Island", "Port Richmond", 40.63549140000001, -74.1254641),
Location(188, "Brooklyn", "Prospect-Lefferts Gardens", 40.6592355, -73.9533895),
Location(189, "Brooklyn", "Prospect Heights", 40.6774196, -73.9668408),
Location(190, "Brooklyn", "Prospect Park", 40.6602037, -73.9689558),
Location(191, "Queens", "Queens Village", 40.7156628, -73.7419017),
Location(192, "Queens", "Queensboro Hill", 40.7429383, -73.8251741),
Location(193, "Queens", "Queensbridge/Ravenswood", 40.7556711, -73.9456723),
Location(194, "Manhattan", "Randalls Island", 40.7932271, -73.92128579999999),
Location(195, "Brooklyn", "Red Hook", 40.6733676, -74.00831889999999),
Location(196, "Queens", "Rego Park", 40.72557219999999, -73.8624893),
Location(197, "Queens", "Richmond Hill", 40.6958108, -73.8272029),
Location(198, "Queens", "Ridgewood", 40.7043986, -73.9018292),
Location(199, "Bronx", "Rikers Island", 40.79312770000001, -73.88601),
Location(200, "Bronx", "Riverdale/North Riverdale/Fieldston", 40.89961830000001, -73.9088276),
Location(201, "Queens", "Rockaway Park", 40.57978629999999, -73.8372237),
Location(202, "Manhattan", "Roosevelt Island", 40.76050310000001, -73.9509934),
Location(203, "Queens", "Rosedale", 40.6584068, -73.7389596),
Location(204, "Staten Island", "Rossville/Woodrow", 40.5434385, -74.19764409999999),
Location(205, "Queens", "Saint Albans", 40.6895283, -73.76436880000001),
Location(206, "Staten Island", "Saint George/New Brighton", 40.6404369, -74.090226),
Location(207, "Queens", "Saint Michaels Cemetery/Woodside", 40.7646761, -73.89850419999999),
Location(208, "Bronx", "Schuylerville/Edgewater Park", 40.8235967, -73.81029269999999),
Location(209, "Manhattan", "Seaport", 40.70722629999999, -74.0027431),
Location(210, "Brooklyn", "Sheepshead Bay", 40.5953955, -73.94575379999999),
Location(211, "Manhattan", "SoHo", 40.723301, -74.0029883),
Location(212, "Bronx", "Soundview/Bruckner", 40.8247566, -73.8710929),
Location(213, "Bronx", "Soundview/Castle Hill", 40.8176831, -73.8507279),
Location(214, "Staten Island", "South Beach/Dongan Hills", 40.5903824, -74.06680759999999),
Location(215, "Queens", "South Jamaica", 40.6808594, -73.7919103),
Location(216, "Queens", "South Ozone Park", 40.6764003, -73.8124984),
Location(217, "Brooklyn", "South Williamsburg", 40.7043921, -73.9565551),
Location(218, "Queens", "Springfield Gardens North", 40.6715916, -73.779798),
Location(219, "Queens", "Springfield Gardens South", 40.6715916, -73.779798),
Location(220, "Bronx", "Spuyten Duyvil/Kingsbridge", 40.8833912, -73.9051185),
Location(221, "Staten Island", "Stapleton", 40.6264929, -74.07764139999999),
Location(222, "Brooklyn", "Starrett City", 40.6484272, -73.88236119999999),
Location(223, "Queens", "Steinway", 40.7745459, -73.9037477),
Location(224, "Manhattan", "Stuy Town/Peter Cooper Village", 40.7316903, -73.9778494),
Location(225, "Brooklyn", "Stuyvesant Heights", 40.6824166, -73.9319933),
Location(226, "Queens", "Sunnyside", 40.7432759, -73.9196324),
Location(227, "Brooklyn", "Sunset Park East", 40.65272, -74.00933479999999),
Location(228, "Brooklyn", "Sunset Park West", 40.65272, -74.00933479999999),
Location(229, "Manhattan", "Sutton Place/Turtle Bay North", 40.7576281, -73.961698),
Location(230, "Manhattan", "Times Sq/Theatre District", 40.759011, -73.9844722),
Location(231, "Manhattan", "TriBeCa/Civic Center", 40.71625299999999, -74.0122396),
Location(232, "Manhattan", "Two Bridges/Seward Park", 40.7149056, -73.98924699999999),
Location(233, "Manhattan", "UN/Turtle Bay South", 40.7571432, -73.9718815),
Location(234, "Manhattan", "Union Sq", 40.7358633, -73.9910835),
Location(235, "Bronx", "University Heights/Morris Heights", 40.8540855, -73.9198498),
Location(236, "Manhattan", "Upper East Side North", 40.7600931, -73.9598414),
Location(237, "Manhattan", "Upper East Side South", 40.7735649, -73.9565551),
Location(238, "Manhattan", "Upper West Side North", 40.7870106, -73.9753676),
Location(239, "Manhattan", "Upper West Side South", 40.7870106, -73.9753676),
Location(240, "Bronx", "Van Cortlandt Park", 40.8972233, -73.8860668),
Location(241, "Bronx", "Van Cortlandt Village", 40.8837203, -73.89313899999999),
Location(242, "Bronx", "Van Nest/Morris Park", 40.8459682, -73.8625946),
Location(243, "Manhattan", "Washington Heights North", 40.852476, -73.9342996),
Location(244, "Manhattan", "Washington Heights South", 40.8417082, -73.9393554),
Location(245, "Staten Island", "West Brighton", 40.6270298, -74.10931409999999),
Location(246, "Manhattan", "West Chelsea/Hudson Yards", 40.7542535, -74.0023331),
Location(247, "Bronx", "West Concourse", 40.8316761, -73.9227554),
Location(248, "Bronx", "West Farms/Bronx River", 40.8430609, -73.8816001),
Location(249, "Manhattan", "West Village", 40.73468, -74.0047554),
Location(250, "Bronx", "Westchester Village/Unionport", 40.8340447, -73.8531349),
Location(251, "Staten Island", "Westerleigh", 40.616296, -74.1386767),
Location(252, "Queens", "Whitestone", 40.7920449, -73.8095574),
Location(253, "Queens", "Willets Point", 40.7606911, -73.840436),
Location(254, "Bronx", "Williamsbridge/Olinville", 40.8787602, -73.85283559999999),
Location(255, "Brooklyn", "Williamsburg (North Side)", 40.71492, -73.9528472),
Location(256, "Brooklyn", "Williamsburg (South Side)", 40.70824229999999, -73.9571487),
Location(257, "Brooklyn", "Windsor Terrace", 40.6539346, -73.9756567),
Location(258, "Queens", "Woodhaven", 40.6901366, -73.8566087),
Location(259, "Bronx", "Woodlawn/Wakefield", 40.8955885, -73.8627133),
Location(260, "Queens", "Woodside", 40.7532952, -73.9068973),
Location(261, "Manhattan", "World Trade Center", 40.7118011, -74.0131196),
Location(262, "Manhattan", "Yorkville East", 40.7762231, -73.94920789999999),
Location(263, "Manhattan", "Yorkville West", 40.7762231, -73.94920789999999)
]
| 72.562937 | 112 | 0.679131 |
129d53076c9002e63bb6e233e94f66b83a1c9e37 | 114 | py | Python | main.py | viniciuslimafernandes/interpolation | 1aff08cba6026143fd267a0c648bad8975ae5d74 | [
"MIT"
] | null | null | null | main.py | viniciuslimafernandes/interpolation | 1aff08cba6026143fd267a0c648bad8975ae5d74 | [
"MIT"
] | null | null | null | main.py | viniciuslimafernandes/interpolation | 1aff08cba6026143fd267a0c648bad8975ae5d74 | [
"MIT"
] | null | null | null | import math
from utils import *
main() | 12.666667 | 25 | 0.701754 |
129e3285af4caf68d1f91b717a406d9814f4383d | 222 | py | Python | tests/helper.py | blehers/PyViCare | e74b854afe6678f30c05bdef5e642ab66d1c0b6a | [
"Apache-2.0"
] | null | null | null | tests/helper.py | blehers/PyViCare | e74b854afe6678f30c05bdef5e642ab66d1c0b6a | [
"Apache-2.0"
] | null | null | null | tests/helper.py | blehers/PyViCare | e74b854afe6678f30c05bdef5e642ab66d1c0b6a | [
"Apache-2.0"
] | null | null | null | import os
import simplejson as json
| 24.666667 | 69 | 0.72973 |
129f44f6dc7578a9b45f3abd7e3b50f1fe3a4274 | 1,999 | py | Python | examples/client-example.py | pkalemba/python-warp10client | 25a9b446a217066a7d6c39aeb7d19d1be93a7688 | [
"BSD-3-Clause"
] | 8 | 2017-11-20T13:31:58.000Z | 2021-07-13T08:34:52.000Z | examples/client-example.py | pkalemba/python-warp10client | 25a9b446a217066a7d6c39aeb7d19d1be93a7688 | [
"BSD-3-Clause"
] | 2 | 2017-11-20T21:16:16.000Z | 2017-12-11T13:56:44.000Z | examples/client-example.py | regel/python-warp10client | bee380513d899ae7c55a26e43a8914f8c29b5279 | [
"BSD-3-Clause"
] | 4 | 2017-11-21T07:51:01.000Z | 2020-04-07T12:03:23.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import daiquiri
from time import time
import warp10client
LOG = daiquiri.getLogger(__name__)
warp10_api_url = '' # Add here backend url where metrics are stored
read_token = '' # Add here your metrics read token
write_token = '' # Add here your metrics write token
# To get metrics:
metric_get = {
'name': 'cpu_util',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
},
'aggregate': {
'type': 'mean',
'span': 1000000 * 3600,
},
'timestamp': {
'start': "2017-01-01T00:00:00.000Z",
'end': "2018-01-01T00:00:00.000Z"
}
# 'timestamp': { 'end': "2018-01-01T00:00:00.000Z" }
# 'timestamp': { 'start': None, 'end': None }
}
# To write metrics:
metric_write = {
'name': 'cpu_util_mjozefcz',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
'unit': '%',
},
'position': {
'longitude': None,
'latitude': None,
'elevation': None,
'timestamp': time() * 1000 * 1000,
},
'value': 11,
}
# To check metrics
metric_check = {
'name': 'cpu_util',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
},
}
# arguments need to authorize in metrics backend
kwargs = {
'write_token': write_token,
'read_token': read_token,
'warp10_api_url': warp10_api_url,
}
client = warp10client.Warp10Client(**kwargs)
# Consider to create timeseries, new object with included metrics as each point
# Thats goooood idea.
metric_get_test = client.get(metric_get)
metric_exists = client.exists(metric_check)
metric_obj = warp10client.Metric(**metric_write)
metric_send = client.set(metric_write)
# delete method is not yet implemented
# metric_send = client.delete(metric_write)
| 24.9875 | 79 | 0.64032 |
12a0170295fb80e383d69995765e135510da8362 | 3,094 | py | Python | ports/stm32/boards/NUCLEO_WB55/rfcore_makefirmware.py | H-Grobben/micropython | fce96b11f3ff444c1ac24501db465dbe9e5902bf | [
"MIT"
] | null | null | null | ports/stm32/boards/NUCLEO_WB55/rfcore_makefirmware.py | H-Grobben/micropython | fce96b11f3ff444c1ac24501db465dbe9e5902bf | [
"MIT"
] | null | null | null | ports/stm32/boards/NUCLEO_WB55/rfcore_makefirmware.py | H-Grobben/micropython | fce96b11f3ff444c1ac24501db465dbe9e5902bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This script obfuscates the ST wireless binaries so they can be safely copied
# to the flash filesystem and not be accidentally discovered by the FUS during
# an update. See more information (and the corresponding de-obfuscation) in
# rfcore_firmware.py as well as instructions on how to use.
import os
import struct
import sys
# Must match rfcore_firmware.py.
_OBFUSCATION_KEY = 0x0573B55AA
_FIRMWARE_FILES = {
"stm32wb5x_FUS_fw_1_0_2.bin": "fus_102.bin",
"stm32wb5x_FUS_fw.bin": "fus_112.bin",
"stm32wb5x_BLE_HCILayer_fw.bin": "ws_ble_hci.bin",
}
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: {} src_path dest_path".format(sys.argv[0]))
print()
print(
'"src_path" should be the location of the ST binaries from https://github.com/STMicroelectronics/STM32CubeWB/tree/master/Projects/STM32WB_Copro_Wireless_Binaries/STM32WB5x'
)
print(
'"dest_path" will be where fus_102.bin, fus_110.bin, and ws_ble_hci.bin will be written to.'
)
sys.exit(1)
main(sys.argv[1], sys.argv[2])
| 38.675 | 184 | 0.671946 |
12a080db56a168dea64d817c232a427dfdd87858 | 1,081 | py | Python | universal/spiders/universalSpider.py | universalscraper/universal-spider | 0b6d82ee0c749cf32dcf501e6d84f518ee2e8437 | [
"MIT"
] | 2 | 2017-01-14T20:09:24.000Z | 2019-09-23T09:26:23.000Z | universal/spiders/universalSpider.py | scraperize/universal-spider | 0b6d82ee0c749cf32dcf501e6d84f518ee2e8437 | [
"MIT"
] | null | null | null | universal/spiders/universalSpider.py | scraperize/universal-spider | 0b6d82ee0c749cf32dcf501e6d84f518ee2e8437 | [
"MIT"
] | null | null | null | import scrapy
import yaml
| 29.216216 | 130 | 0.60592 |
12a0f3a1d45fe59fa067cf5c06c3bffbb58f6bd1 | 11,715 | py | Python | environments/IPP_BO_Ypacarai.py | FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs | 23b9b181499a4b06f2ca2951c002359c1959e727 | [
"MIT"
] | null | null | null | environments/IPP_BO_Ypacarai.py | FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs | 23b9b181499a4b06f2ca2951c002359c1959e727 | [
"MIT"
] | null | null | null | environments/IPP_BO_Ypacarai.py | FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs | 23b9b181499a4b06f2ca2951c002359c1959e727 | [
"MIT"
] | null | null | null | import warnings
import gym
import matplotlib.pyplot as plt
import numpy as np
from skopt.acquisition import gaussian_ei
from environments.groundtruthgenerator import GroundTruth
warnings.simplefilter("ignore", UserWarning)
from skopt.learning.gaussian_process import gpr, kernels
if __name__ == "__main__":
""" Test to check the wall-time for an episode to run and the average number of steps per episode """
my_map = np.genfromtxt('YpacaraiMap_big.csv', delimiter=',').astype(int) / 255
env = ContinuousBO(scenario_map=my_map, resolution=1)
# env.render()
import time
t0 = time.time()
for i in range(100):
env.reset()
d = False
print('Episode ', i)
avg_r_ep = 0
while not d:
a = get_action_using_bo(env)
s, r_, d, _ = env.step(a)
avg_r_ep += r_
if r_ == -10:
print("collision")
# env.render()
print('Number of steps: ', env.step_count)
print((time.time() - t0) / 100, ' segundos la iteracion')
| 38.284314 | 120 | 0.626376 |
12a151b9a4e765ed24ceecf3aa9bec0771ac3589 | 5,281 | py | Python | utils/metrics.py | 0b3d/Image-Map-Embeddings | a9fc65ac92094bcfcd0f19a3604f0b9d8bd3174f | [
"MIT"
] | 2 | 2022-02-11T06:05:35.000Z | 2022-03-14T02:10:31.000Z | utils/metrics.py | 0b3d/Image-Map-Embeddings | a9fc65ac92094bcfcd0f19a3604f0b9d8bd3174f | [
"MIT"
] | null | null | null | utils/metrics.py | 0b3d/Image-Map-Embeddings | a9fc65ac92094bcfcd0f19a3604f0b9d8bd3174f | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
| 51.271845 | 166 | 0.667487 |
12a1ccdc2c994161fe55e1738031ece8631b2305 | 693 | py | Python | tests/bugs/test-200908181430.py | eLBati/pyxb | 14737c23a125fd12c954823ad64fc4497816fae3 | [
"Apache-2.0"
] | 123 | 2015-01-12T06:43:22.000Z | 2022-03-20T18:06:46.000Z | tests/bugs/test-200908181430.py | eLBati/pyxb | 14737c23a125fd12c954823ad64fc4497816fae3 | [
"Apache-2.0"
] | 103 | 2015-01-08T18:35:57.000Z | 2022-01-18T01:44:14.000Z | tests/bugs/test-200908181430.py | eLBati/pyxb | 14737c23a125fd12c954823ad64fc4497816fae3 | [
"Apache-2.0"
] | 54 | 2015-02-15T17:12:00.000Z | 2022-03-07T23:02:32.000Z | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="foo"/>
</xs:schema>'''
from pyxb.exceptions_ import *
import unittest
if __name__ == '__main__':
unittest.main()
| 25.666667 | 108 | 0.735931 |
12a26d1b84cfd62fa98cec13a5aa4a115ddadb78 | 779 | py | Python | bin/print_data_structure.py | JohanComparat/pyEmerge | 9b5bfa01959d48ea41221609b8f375f27e3e39ff | [
"Unlicense"
] | null | null | null | bin/print_data_structure.py | JohanComparat/pyEmerge | 9b5bfa01959d48ea41221609b8f375f27e3e39ff | [
"Unlicense"
] | null | null | null | bin/print_data_structure.py | JohanComparat/pyEmerge | 9b5bfa01959d48ea41221609b8f375f27e3e39ff | [
"Unlicense"
] | null | null | null | import sys
ii = int(sys.argv[1])
env = sys.argv[2]
# python3 print_data_structure.py 22 MD10
import glob
import os
import numpy as n
import EmergeIterate
iterate = EmergeIterate.EmergeIterate(ii, env)
iterate.open_snapshots()
print_data_structure(iterate.f0)
| 23.606061 | 55 | 0.56611 |
12a383eaf645019cefa1dc9f3842290ed2752e23 | 1,999 | py | Python | setup.py | ljdursi/mergevcf | b400385936417c6e517d3c7daec8b9ca6389c51f | [
"MIT"
] | 25 | 2015-06-22T15:30:32.000Z | 2021-05-13T14:59:18.000Z | setup.py | ljdursi/mergevcf | b400385936417c6e517d3c7daec8b9ca6389c51f | [
"MIT"
] | 7 | 2015-08-14T11:20:35.000Z | 2021-05-18T17:48:38.000Z | setup.py | ljdursi/mergevcf | b400385936417c6e517d3c7daec8b9ca6389c51f | [
"MIT"
] | 6 | 2017-04-17T18:35:43.000Z | 2018-05-15T21:47:13.000Z | # based on https://github.com/pypa/sampleproject
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='mergevcf',
version='1.0.1',
description='Merge VCF calls',
long_description=long_description,
# The project's main homepage.
url='https://github.com/ljdursi/mergevcf',
# Author details
author='Jonathan Dursi',
author_email='Jonathan.Dursi@oicr.on.ca',
# Choose your license
license='GPL',
classifiers=[
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2.8',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
keywords='merge vcfs',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['pyvcf'],
test_suite='tests',
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
entry_points={
'console_scripts': [
'mergevcf=mergevcf:main',
],
},
)
| 26.653333 | 85 | 0.617309 |
12a4188c00b7c8a1abdb2f2f512a6ed7085ea497 | 1,291 | py | Python | tests/test_coders.py | GlobalFishingWatch/pipe-tools | 34dff591997bb2c25e018df86d13a9d42972032b | [
"Apache-2.0"
] | 1 | 2018-05-26T20:10:51.000Z | 2018-05-26T20:10:51.000Z | tests/test_coders.py | GlobalFishingWatch/pipe-tools | 34dff591997bb2c25e018df86d13a9d42972032b | [
"Apache-2.0"
] | 37 | 2017-10-22T12:00:59.000Z | 2022-02-08T19:17:58.000Z | tests/test_coders.py | GlobalFishingWatch/pipe-tools | 34dff591997bb2c25e018df86d13a9d42972032b | [
"Apache-2.0"
] | null | null | null | import pytest
import six
import ujson
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline as _TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.coders import typecoders
from apache_beam.typehints import Dict, Union
from pipe_tools.coders import JSONDictCoder
from pipe_tools.coders import JSONDict
from pipe_tools.generator import MessageGenerator
| 26.346939 | 118 | 0.676995 |
12a668f147490b052289202d9372f523023dc419 | 3,820 | py | Python | yeti/core/model/stix/sro.py | yeti-platform/TibetanBrownBear | 8ab520bd199a63e404b3a6a5b49a29f277384e8e | [
"Apache-2.0"
] | 9 | 2018-01-15T22:44:24.000Z | 2021-05-28T11:13:03.000Z | yeti/core/model/stix/sro.py | yeti-platform/TibetanBrownBear | 8ab520bd199a63e404b3a6a5b49a29f277384e8e | [
"Apache-2.0"
] | 140 | 2018-01-12T10:07:47.000Z | 2021-08-02T23:03:49.000Z | yeti/core/model/stix/sro.py | yeti-platform/TibetanBrownBear | 8ab520bd199a63e404b3a6a5b49a29f277384e8e | [
"Apache-2.0"
] | 11 | 2018-01-16T19:49:35.000Z | 2022-01-18T16:30:34.000Z | """Detail Yeti's Entity object structure."""
import json
from yeti.core.errors import ValidationError
from .base import StixObject
| 28.296296 | 80 | 0.606545 |
12a754908091d00ea075e8ffe5d6a23ed6d1b3e0 | 4,761 | py | Python | netforce_mfg/netforce_mfg/models/barcode_qc.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 27 | 2015-09-30T23:53:30.000Z | 2021-06-07T04:56:25.000Z | netforce_mfg/netforce_mfg/models/barcode_qc.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 191 | 2015-10-08T11:46:30.000Z | 2019-11-14T02:24:36.000Z | netforce_mfg/netforce_mfg/models/barcode_qc.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 32 | 2015-10-01T03:59:43.000Z | 2022-01-13T07:31:05.000Z | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.utils import get_data_path
BarcodeQC.register()
| 40.008403 | 122 | 0.603025 |
12a82679ea427e2384e89df55cbadd443f41af9e | 4,739 | py | Python | src/data/domain.py | AlexMoreo/pydci | 44f8fe1ce95da45709061cbe19fa6f462c1f2164 | [
"BSD-3-Clause"
] | 7 | 2018-10-21T17:34:08.000Z | 2021-05-17T11:37:56.000Z | src/data/domain.py | AlexMoreo/pydci | 44f8fe1ce95da45709061cbe19fa6f462c1f2164 | [
"BSD-3-Clause"
] | null | null | null | src/data/domain.py | AlexMoreo/pydci | 44f8fe1ce95da45709061cbe19fa6f462c1f2164 | [
"BSD-3-Clause"
] | 4 | 2018-11-22T10:30:07.000Z | 2021-03-20T10:07:57.000Z | import pickle
from scipy.sparse import lil_matrix
import numpy as np
def _preproc(analyzer, str):
return analyzer(str)[0] if analyzer(str) else 'null__'
def pack_domains(source, target, pivots_source, pivots_target):
    """Bundle two domains into name-keyed dictionaries.

    Returns four dicts, each mapping a domain's name to one of its
    attributes: the X matrices, the U matrices, the given pivot lists,
    and the vocabularies V, in that order.
    """
    source_name = source.name()
    target_name = target.name()
    dX = {source_name: source.X, target_name: target.X}
    dU = {source_name: source.U, target_name: target.U}
    dP = {source_name: pivots_source, target_name: pivots_target}
    dV = {source_name: source.V, target_name: target.V}
    return dX, dU, dP, dV
def unify_feat_space(source, target):
    """Re-map both domains onto a single shared feature space.

    The shared vocabulary is the plain union of the two domains'
    vocabularies; each term receives an arbitrary (enumeration-order)
    index.

    :param source: the source domain
    :param target: the target domain
    :return: re-indexed versions of (source, target) over the shared vocabulary
    """
    all_terms = source.V.term_set().union(target.V.term_set())
    term_index = {term: position for position, term in enumerate(all_terms)}
    shared_vocab = Vocabulary(term_index)
    return reindexDomain(source, shared_vocab), reindexDomain(target, shared_vocab)
12a832b1e6427f5514100a7f00be3d2042f2ed0f | 207 | py | Python | LeetCode_1304.py | xulu199705/LeetCode | 9a654a10117a93f9ad9728d6b86eb3713185545e | [
"MIT"
] | null | null | null | LeetCode_1304.py | xulu199705/LeetCode | 9a654a10117a93f9ad9728d6b86eb3713185545e | [
"MIT"
] | null | null | null | LeetCode_1304.py | xulu199705/LeetCode | 9a654a10117a93f9ad9728d6b86eb3713185545e | [
"MIT"
] | null | null | null | from typing import List
| 18.818182 | 51 | 0.492754 |
12a8abd596e75426da116460419af8dc9c55b01d | 1,506 | py | Python | models/universal_sentence_encoder_multilingual_qa/v1/utils.py | rhangelxs/russian_embeddings | 64821cdff03ff97752b6c80621bedf9e2227a0ba | [
"MIT"
] | null | null | null | models/universal_sentence_encoder_multilingual_qa/v1/utils.py | rhangelxs/russian_embeddings | 64821cdff03ff97752b6c80621bedf9e2227a0ba | [
"MIT"
] | 5 | 2020-09-26T00:18:44.000Z | 2022-02-10T00:22:42.000Z | models/universal_sentence_encoder_multilingual_qa/v1/utils.py | rhangelxs/russian_embeddings | 64821cdff03ff97752b6c80621bedf9e2227a0ba | [
"MIT"
] | null | null | null | import numpy
import tensorflow as tf
import tensorflow_hub as hub
import tf_sentencepiece
| 36.731707 | 109 | 0.592961 |
12a970b715888d87283271740bd7a109a0ea7f3e | 921 | py | Python | jade/extensions/demo/create_merge_pred_gdp.py | jgu2/jade | e643830be89a7df74a82065400b2e82f6b181ec8 | [
"BSD-3-Clause"
] | 15 | 2021-05-15T21:58:26.000Z | 2022-03-17T08:26:48.000Z | jade/extensions/demo/create_merge_pred_gdp.py | jgu2/jade | e643830be89a7df74a82065400b2e82f6b181ec8 | [
"BSD-3-Clause"
] | 22 | 2021-02-04T20:02:33.000Z | 2021-09-14T13:29:30.000Z | jade/extensions/demo/create_merge_pred_gdp.py | jgu2/jade | e643830be89a7df74a82065400b2e82f6b181ec8 | [
"BSD-3-Clause"
] | 3 | 2021-01-11T15:11:31.000Z | 2021-06-07T17:36:51.000Z | #!/usr/bin/env python
"""Creates the JADE configuration for stage 2 of the demo pipeline."""
import os
import sys
from jade.models import PipelineConfig
from jade.utils.subprocess_manager import run_command
from jade.utils.utils import load_data
PRED_GDP_COMMANDS_FILE = "pred_gdp_commands.txt"
if __name__ == "__main__":
main()
| 27.909091 | 81 | 0.733985 |
12aa4d4698103b11546cfe0e6f724650c7f1a730 | 3,165 | py | Python | hamal/hamal/conf/identity.py | JackDan9/hamal | 965be9db066209300c52f0cf17d251290d8901b7 | [
"MIT"
] | 3 | 2020-06-12T13:03:46.000Z | 2020-08-06T11:25:46.000Z | hamal/hamal/conf/identity.py | JackDan9/hamal | 965be9db066209300c52f0cf17d251290d8901b7 | [
"MIT"
] | null | null | null | hamal/hamal/conf/identity.py | JackDan9/hamal | 965be9db066209300c52f0cf17d251290d8901b7 | [
"MIT"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import passlib.utils
from hamal.conf import utils
max_password_length = cfg.IntOpt(
'max_password_length',
default=4096,
max=passlib.utils.MAX_PASSWORD_SIZE,
help=utils.fmt("""
Maximum allowed length for user passwords. Decrease this value to improve
performance. Changing this value does not effect existing passwords.
"""))
password_hash_algorithm = cfg.StrOpt(
'password_hash_algorithm',
choices=['bcrypt', 'scrypt', 'pbkdf2_sha512'],
default='bcrypt',
help=utils.fmt("""
The password hashing algorithm to use for passwords stored within hamal.
"""))
password_hash_rounds = cfg.IntOpt(
'password_hash_rounds',
help=utils.fmt("""
This option represents a trade off between security and performance. Higher
values lead to slower performance, but higher security. Changing this option
will only affect newly created passwords as existing password hashes already
have a fixed number of rounds applied, so it is safe to tune this option in a
running cluster.
The default for bcrypt is 12, must be between 4 and 31, inclusive.
The default for scrypt is 16, must be within `range(1,32)`.
The default for pbkdf_sha512 is 60000, must be within `range(1,1<32)`
WARNING: If using scrypt, increasing this value increases BOTH time AND
memory requirements to hash a password.
"""))
salt_bytesize = cfg.IntOpt(
'salt_bytesize',
min=0,
max=96,
help=utils.fmt("""
Number of bytes to use in scrypt and pbkfd2_sha512 hashing salt.
Default for scrypt is 16 bytes.
Default for pbkfd2_sha512 is 16 bytes.
Limited to a maximum of 96 bytes due to the size of the column used to store
password hashes.
"""))
scrypt_block_size = cfg.IntOpt(
'scrypt_block_size',
help=utils.fmt("""
Optional block size to pass to scrypt hash function (the `r` parameter).
Useful for tuning scrypt to optimal performance for your CPU architecture.
This option is only used when the `password_hash_algorithm` option is set
to `scrypt`. Defaults to 8.
"""))
scrypt_paralellism = cfg.IntOpt(
'scrypt_parallelism',
help=utils.fmt("""
Optional parallelism to pass to scrypt hash function (the `p` parameter).
This option is only used when the `password_hash_algorithm` option is set
to `scrypt`. Defaults to 1.
"""))
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
max_password_length,
password_hash_algorithm,
password_hash_rounds,
scrypt_block_size,
scrypt_paralellism,
salt_bytesize
]
| 30.728155 | 77 | 0.749447 |
12aab253143e67156c54f44e65c0b36caa2ab283 | 2,631 | py | Python | fact/time.py | mackaiver/slowREST | 8ae07d8657164abe83f071216b6e9d00a57ae705 | [
"MIT"
] | 1 | 2015-03-03T08:07:52.000Z | 2015-03-03T08:07:52.000Z | fact/time.py | mackaiver/slowREST | 8ae07d8657164abe83f071216b6e9d00a57ae705 | [
"MIT"
] | null | null | null | fact/time.py | mackaiver/slowREST | 8ae07d8657164abe83f071216b6e9d00a57ae705 | [
"MIT"
] | null | null | null | from __future__ import print_function
__author__ = 'dneise, mnoethe'
""" This file contains some functions to deal with FACT modified modified julian date
The time used most of the time in FACT is the number of days since 01.01.1970
So this time is related to unix time, since it has the same offset
(unix time is the number of seconds since 01.01.1970 00:00:00)
but it is also related to "the" Modified Julian Date (MJD),
which is used by astronomers
in the sense, that it also counts days.
According to http://en.wikipedia.org/wiki/Julian_day,
there is quite a large number of
somehow modified julian dates, of which the MJD is only one.
So it might be okay, to introduce a new modification,
going by the name of FACT Julian Date (FJD).
"""
import time
import calendar
from datetime import datetime
import logging
import dateutil
import dateutil.parser
OFFSET = (datetime(1970, 1, 1) - datetime(1, 1, 1)).days
def fjd(datetime_inst):
    """Convert a datetime instance to FACT Julian Date (days since 1970-01-01).

    A timezone-naive input is interpreted as UTC by ``utctimetuple``; a
    warning is logged in that case because the result may be wrong.
    """
    if datetime_inst.tzinfo is None:
        logging.warning(
            "datetime instance is not aware of its timezone."
            " Result possibly wrong!")
    unix_seconds = calendar.timegm(datetime_inst.utctimetuple())
    seconds_per_day = 24. * 3600.
    return unix_seconds / seconds_per_day
def iso2dt(iso_time_string):
    """Parse an ISO time string to a timezone-aware datetime instance.

    Example input: ``2015-01-23T08:08+01:00``.  When the string carries no
    timezone information, UTC is assumed and a warning is logged --
    consistent with how :func:`fjd` reports naive datetimes.
    """
    datetime_inst = dateutil.parser.parse(iso_time_string)
    # make aware at any cost!
    if datetime_inst.tzinfo is None:
        # Use the module logger like fjd() does, instead of a bare print.
        logging.warning(
            "ISO time string did not contain timezone info. I assume UTC!")
        datetime_inst = datetime_inst.replace(tzinfo=dateutil.tz.tzutc())
    return datetime_inst
def run2dt(run_string):
    """Parse a FACT run-date string ("YYYYMMDD") to a UTC-aware datetime.

    The caller is expected to extract the date part from the run file
    path first, e.g.::

        "/path/to/file/20141231.more_text" --> "20141231"
        run2dt("20141231")
    """
    format_ = "%Y%m%d"
    datetime_inst = datetime.strptime(run_string, format_)
    # strptime yields a naive instance; tag it as UTC explicitly.
    datetime_inst = datetime_inst.replace(tzinfo=dateutil.tz.tzutc())
    return datetime_inst
def facttime(time_string):
    """Convert a "%Y%m%d %H:%M" time string (interpreted as UTC) to FACT time."""
    parsed = time.strptime(time_string, "%Y%m%d %H:%M")
    unix_seconds = calendar.timegm(parsed)
    return unix_seconds / (24. * 3600.)
def to_datetime(fact_julian_date):
    """Convert a FACT Julian Date (days since 1970-01-01) to a naive UTC datetime."""
    seconds_since_epoch = fact_julian_date * 24 * 3600
    return datetime.utcfromtimestamp(seconds_since_epoch)
def datestr(datetime_inst):
    """Return the ISO-8601 string for *datetime_inst*, using "T" as separator."""
    separator = "T"
    return datetime_inst.isoformat(separator)
12aabf7a6ed3903e5b3fb7b076bf621fe0068180 | 1,318 | py | Python | nipype/interfaces/ants/tests/test_auto_ImageMath.py | TRO-HIT/nipype | c453eac5d7efdd4e19a9bcc8a7f3d800026cc125 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/ants/tests/test_auto_ImageMath.py | TRO-HIT/nipype | c453eac5d7efdd4e19a9bcc8a7f3d800026cc125 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/ants/tests/test_auto_ImageMath.py | TRO-HIT/nipype | c453eac5d7efdd4e19a9bcc8a7f3d800026cc125 | [
"Apache-2.0"
] | 1 | 2020-12-16T16:36:48.000Z | 2020-12-16T16:36:48.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..utils import ImageMath
| 34.684211 | 77 | 0.618361 |
12b0b747a8e429f2bfcdc96202c017eb8b47dbba | 72,049 | py | Python | tests/chainerx_tests/unit_tests/routines_tests/test_math.py | tkerola/chainer | 572f6eef2c3f1470911ac08332c2b5c3440edf44 | [
"MIT"
] | null | null | null | tests/chainerx_tests/unit_tests/routines_tests/test_math.py | tkerola/chainer | 572f6eef2c3f1470911ac08332c2b5c3440edf44 | [
"MIT"
] | null | null | null | tests/chainerx_tests/unit_tests/routines_tests/test_math.py | tkerola/chainer | 572f6eef2c3f1470911ac08332c2b5c3440edf44 | [
"MIT"
] | null | null | null | import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
_in_out_dtypes_arithmetic_invalid = [
(('bool_', 'bool_'), 'bool_'),
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'int32'), 'int32'),
(('bool_', 'int64'), 'int64'),
(('bool_', 'uint8'), 'uint8'),
(('bool_', 'float16'), 'float16'),
(('bool_', 'float32'), 'float32'),
(('bool_', 'float64'), 'float64'),
(('int8', 'bool_'), 'int8'),
(('int16', 'bool_'), 'int16'),
(('int32', 'bool_'), 'int32'),
(('int64', 'bool_'), 'int64'),
(('uint8', 'bool_'), 'uint8'),
(('float16', 'bool_'), 'float16'),
(('float32', 'bool_'), 'float32'),
(('float64', 'bool_'), 'float64'),
]
_in_out_dtypes_arithmetic = [
dtypes for dtypes in dtype_utils.result_dtypes_two_arrays
if dtypes not in _in_out_dtypes_arithmetic_invalid
]
_in_out_dtypes_inplace_arithmetic_invalid = [
((t1, t2), t3) for (t1, t2), t3 in _in_out_dtypes_arithmetic
if (numpy.dtype(t1).kind != 'f' and numpy.dtype(t2).kind == 'f')
] + _in_out_dtypes_arithmetic_invalid
_in_out_dtypes_inplace_arithmetic = [
dtypes for dtypes in dtype_utils.result_dtypes_two_arrays
if dtypes not in _in_out_dtypes_inplace_arithmetic_invalid
]
_in_out_dtypes_array_int_scalar = [
# Int scalar.
(('int8',), int, 'int8'),
(('int16',), int, 'int16'),
(('int32',), int, 'int32'),
(('int64',), int, 'int64'),
(('uint8',), int, 'uint8'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('int16',), numpy.int16, 'int16'),
(('uint8',), numpy.int8, 'uint8'),
(('float64',), numpy.int8, 'float64'),
(('float16',), numpy.int64, 'float16'),
]
_in_out_dtypes_int_array_float_scalar = [
# Int arrays and float scalars.
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
(('int8',), numpy.float32, 'float32'),
(('int64',), numpy.float16, 'float32'),
(('uint8',), numpy.float64, 'float32'),
]
_in_out_dtypes_float_array_float_scalar = [
# Float arrays and flaot scalars.
(('float16',), float, 'float16'),
(('float32',), float, 'float32'),
(('float64',), float, 'float64'),
(('float64',), float, 'float64'),
(('float16',), numpy.float64, 'float16'),
(('float64',), numpy.float16, 'float64'),
]
_in_out_dtypes_arithmetic_scalar = (
_in_out_dtypes_array_int_scalar
+ _in_out_dtypes_int_array_float_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_inplace_arithmetic_scalar = (
_in_out_dtypes_array_int_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_float_arithmetic_scalar = (
_in_out_dtypes_int_array_float_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_inplace_float_arithmetic_scalar = (
_in_out_dtypes_float_array_float_scalar)
# TODO(imanishi): Support and test zero division and mixed dtypes.
# TODO(imanishi): Support and test chainerx.Scalar // chainerx.ndarray.
# TODO(imanishi): Support and test bool dtype.
_in_out_dtypes_inplace_truediv = [
(('float32', 'int16'), 'float32'),
(('float64', 'uint8'), 'float64'),
(('float16', 'float16'), 'float16'),
(('float32', 'float32'), 'float32'),
(('float64', 'float64'), 'float64'),
(('float32', 'float16'), 'float32'),
(('float16', 'float64'), 'float64'),
]
_in_out_dtypes_truediv = _in_out_dtypes_inplace_truediv + [
(('int8', 'int8'), 'float32'),
(('int16', 'int16'), 'float32'),
(('int32', 'int32'), 'float32'),
(('int64', 'int64'), 'float32'),
(('uint8', 'uint8'), 'float32'),
(('int8', 'int32'), 'float32'),
(('uint8', 'int64'), 'float32'),
(('int8', 'uint8'), 'float32'),
(('int32', 'float16'), 'float16'),
(('uint8', 'float32'), 'float32'),
]
_in_out_dtypes_inplace_truediv_scalar = [
(('int8',), int, 'float32'),
(('int16',), int, 'float32'),
(('int32',), int, 'float32'),
(('int64',), int, 'float32'),
(('uint8',), int, 'float32'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('float16',), float, 'float16'),
(('float32',), float, 'float32'),
(('float64',), float, 'float64'),
]
_in_out_dtypes_truediv_scalar = _in_out_dtypes_inplace_truediv_scalar + [
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
]
# TODO(hvy): Support and test zero division and mixed dtypes (dtype kinds).
def _create_dummy_array_for_dot(xp, shape, dtype):
x = numpy.arange(numpy.prod(shape)).reshape(shape)
if dtype == 'bool_':
x = numpy.asarray(x % 2 == 0)
else:
x = x.astype(dtype)
return xp.array(x)
# (shape, axis) pairs accepted by the logsumexp tests; ``axis`` may be an
# int, a tuple of ints, or None (reduce over all axes).
_logsumexp_params = [
    ((2,), 0),
    ((2,), -1),
    ((2, 3), None),
    ((2, 3), 0),
    ((2, 3), 1),
    ((2, 3), -2),
    ((2, 3), (0, 1)),
    ((2, 3), (-2, 1)),
    ((1, 2, 3), None),
    ((1, 2, 3), (1)),
    ((1, 2, 3), (1, 0)),
    ((1, 2, 3), (0, 1, 2)),
]
# (shape, axis) pairs that logsumexp is expected to reject with an error.
_invalid_logsumexp_params = [
    # Axis out of bounds
    ((2,), 1),
    ((2,), -2),
    ((2,), (0, 1)),
    ((2, 3), (0, 1, 2)),
    # Duplicate axes
    ((2,), (0, 0)),
    ((2, 3), (0, 0)),
]
| 32.914116 | 79 | 0.579106 |
12b0f94ae97150323ed0af8a6fe2aba3cc7d3f40 | 445 | py | Python | 7.py | flpcan/project_euler | 2cabb0a51c70b0b6e145328f3e3c55de41ac2854 | [
"CC0-1.0"
] | null | null | null | 7.py | flpcan/project_euler | 2cabb0a51c70b0b6e145328f3e3c55de41ac2854 | [
"CC0-1.0"
] | null | null | null | 7.py | flpcan/project_euler | 2cabb0a51c70b0b6e145328f3e3c55de41ac2854 | [
"CC0-1.0"
] | null | null | null |
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can
# see that the 6th prime is 13.
#
# What is the 10 001st prime number?
#
# Bug fix: the original loop scanned range(2, 100), which can never yield
# 10001 primes (only the 25 primes below 100 are found), so the printed
# "answer" was wrong.  Trial division with sum(map(...)) over the whole
# range was also quadratic.  A Sieve of Eratosthenes over a bound that
# provably contains the 10001st prime replaces both.

TARGET = 10001
# By the prime number theorem p_n ~ n*(ln n + ln ln n), so the 10001st
# prime is comfortably below 120000.
LIMIT = 120000

is_prime = [True] * (LIMIT + 1)
is_prime[0] = is_prime[1] = False  # 0 and 1 are not prime
for i in range(2, int(LIMIT ** 0.5) + 1):
    if is_prime[i]:
        # Cross off multiples starting at i*i (smaller ones already done).
        for multiple in range(i * i, LIMIT + 1, i):
            is_prime[multiple] = False

primes = [n for n, flag in enumerate(is_prime) if flag][:TARGET]
print(primes[-1], "Len: ", len(primes))
| 18.541667 | 102 | 0.546067 |
12b14a676fba1294e88631fcf085323cedbf845c | 5,707 | py | Python | src/plot_scripts/plot_sigcomm_bars_cellular.py | zxxia/RL-CC | d3d3be0097d69ee07b06363ad531cf2479029d74 | [
"Apache-2.0"
] | null | null | null | src/plot_scripts/plot_sigcomm_bars_cellular.py | zxxia/RL-CC | d3d3be0097d69ee07b06363ad531cf2479029d74 | [
"Apache-2.0"
] | null | null | null | src/plot_scripts/plot_sigcomm_bars_cellular.py | zxxia/RL-CC | d3d3be0097d69ee07b06363ad531cf2479029d74 | [
"Apache-2.0"
] | null | null | null | import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
SAVE_ROOT = '../../figs_sigcomm22'
plt.style.use('seaborn-deep')
plt.rcParams['font.family'] = 'Arial'
# plt.rcParams['font.size'] = 42
# plt.rcParams['axes.labelsize'] = 42
# plt.rcParams['legend.fontsize'] = 42
# plt.rcParams['figure.figsize'] = (11, 9)
plt.rcParams['font.size'] = 36
plt.rcParams['axes.labelsize'] = 36
plt.rcParams['axes.titlesize'] = 36
plt.rcParams['legend.fontsize'] = 36
plt.rcParams['svg.fonttype'] = 'none'
HATCHES = ['/', '\\', 'x', 'o', '.', 'O', '-', '*', '+']
WIDTH = 0.3
bbr_reward, bbr_tput, bbr_tail_lat, bbr_loss = 118.07, 5.23, 517.02, 0.05
copa_reward, copa_tput, copa_tail_lat, copa_loss = 255.84, 4.58, 333.47, 0.01
cubic_reward, cubic_tput, cubic_tail_lat, cubic_loss = 69.75, 5.40, 858.46, 0.02
vivace_reward, vivace_tput, vivace_tail_lat, vivace_loss = -404.59, 4.04, 864.41, 0.21
vivace_latency_reward, vivace_latency_tput, vivace_latency_tail_lat, vivace_latency_loss = -422.16, 4.40, 888.76, 0.22
vivace_loss_reward = -616.31 #5.04 941.72 0.32
genet_reward = 252.28
genet_reward_err = 6.46
genet_tput, genet_tail_lat, genet_loss = 5.02, 251.02, 0.02
udr1_reward = 142.31
udr1_reward_err = 23.78 #
udr1_tput, udr1_tail_lat, udr1_loss = 4.59, 418.87, 0.03
udr2_reward = 187.61
udr2_reward_err = 5.03 #
udr2_tput, udr2_tail_lat, udr2_loss = 4.74, 408.95, 0.01
udr3_reward = 203.96
udr3_reward_err = 4.05 # 4.74 386.01 0.01
udr3_tput, udr3_tail_lat, udr3_loss = 4.74, 386.01, 0.01
real_reward = 171.61
real_reward_err = 3.18 # 5.01 459.23 0.02
cl1_reward = 206.56
cl1_reward_err = 3.07 # 4.88 413.40 0.01
cl2_reward = 211.89
cl2_reward_err = 4.05 # 4.82 419.74 0.00
column_wid = 0.7
capsize_wid = 8
eline_wid = 2
if __name__ == '__main__':
cellular_bars()
# cc_scatter()
| 41.963235 | 118 | 0.65884 |
12b1527e01e27cdb3f79857b70a9797275320e0e | 1,372 | py | Python | spacy/lang/th/__init__.py | snosrap/spaCy | 3f68bbcfec44ef55d101e6db742d353b72652129 | [
"MIT"
] | 1 | 2019-05-17T02:43:33.000Z | 2019-05-17T02:43:33.000Z | spacy/lang/th/__init__.py | snosrap/spaCy | 3f68bbcfec44ef55d101e6db742d353b72652129 | [
"MIT"
] | 49 | 2021-10-01T10:15:30.000Z | 2021-12-27T14:36:05.000Z | spacy/lang/th/__init__.py | snosrap/spaCy | 3f68bbcfec44ef55d101e6db742d353b72652129 | [
"MIT"
] | 1 | 2019-10-01T08:27:20.000Z | 2019-10-01T08:27:20.000Z | from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ...language import Language, BaseDefaults
from ...tokens import Doc
from ...util import DummyTokenizer, registry, load_config_from_str
from ...vocab import Vocab
DEFAULT_CONFIG = """
[nlp]
[nlp.tokenizer]
@tokenizers = "spacy.th.ThaiTokenizer"
"""
__all__ = ["Thai"]
| 24.945455 | 69 | 0.68586 |
12b18047e85021cd05074093d60424bfe744046f | 167 | py | Python | src/setup/__init__.py | ScottDay/DFN-Maintenance-GUI-Backend | bfb05c75747fa9c334224b99609baef7321860a4 | [
"MIT"
] | 2 | 2017-03-31T00:57:35.000Z | 2017-08-04T10:38:28.000Z | src/setup/__init__.py | CPedersen3245/Desert-Fireball-Maintainence-GUI | bfb05c75747fa9c334224b99609baef7321860a4 | [
"MIT"
] | 10 | 2017-03-29T04:13:14.000Z | 2017-08-14T06:14:52.000Z | src/setup/__init__.py | ScottDay/DFN-Maintenance-GUI-Backend | bfb05c75747fa9c334224b99609baef7321860a4 | [
"MIT"
] | 4 | 2017-12-23T03:16:00.000Z | 2018-06-20T07:15:50.000Z | from .args import args
from .extensions import extensions
from .logger import logger
from .routes import routes
__all__ = ['args', 'extensions', 'logger', 'routes']
| 20.875 | 52 | 0.748503 |
12b22d55acd96929800d8872484a4576f43f6f08 | 6,223 | py | Python | cloudrunner_server/plugins/clouds/docker_host.py | ttrifonov/cloudrunner-server | 3b2426c8d9987e78425899010b534afc7734d8d4 | [
"Apache-2.0"
] | 2 | 2016-03-31T08:45:29.000Z | 2021-04-28T15:18:45.000Z | cloudrunner_server/plugins/clouds/docker_host.py | ttrifonov/cloudrunner-server | 3b2426c8d9987e78425899010b534afc7734d8d4 | [
"Apache-2.0"
] | null | null | null | cloudrunner_server/plugins/clouds/docker_host.py | ttrifonov/cloudrunner-server | 3b2426c8d9987e78425899010b534afc7734d8d4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# /*******************************************************
# * Copyright (C) 2013-2014 CloudRunner.io <info@cloudrunner.io>
# *
# * Proprietary and confidential
# * This file is part of CloudRunner Server.
# *
# * CloudRunner Server can not be copied and/or distributed
# * without the express permission of CloudRunner.io
# *******************************************************/
import json
import os
import requests
import tempfile
from cloudrunner import VAR_DIR
from .base import BaseCloudProvider, CR_SERVER
HEADERS = {'Content-Type': 'application/json'}
| 39.138365 | 78 | 0.472602 |
12b2fe22c669ef8f586778fb7af3dd29059295d7 | 4,702 | py | Python | scope/client_util/job_runner_check.py | drew-sinha/rpc-scope | 268864097b5b7d123a842f216adc446ec6b32d01 | [
"MIT"
] | 1 | 2017-11-10T17:23:11.000Z | 2017-11-10T17:23:11.000Z | scope/client_util/job_runner_check.py | drew-sinha/rpc-scope | 268864097b5b7d123a842f216adc446ec6b32d01 | [
"MIT"
] | 5 | 2018-08-01T03:05:35.000Z | 2018-11-29T22:11:25.000Z | scope/client_util/job_runner_check.py | drew-sinha/rpc-scope | 268864097b5b7d123a842f216adc446ec6b32d01 | [
"MIT"
] | 3 | 2016-05-25T18:58:35.000Z | 2018-11-29T23:40:45.000Z | # -*- coding: utf-8 -*-
# This code is licensed under the MIT License (see LICENSE file for details)
import platform
import datetime
import sys
import pathlib
import subprocess
import time
from .. import scope_job_runner
from ..config import scope_configuration
TIMER_UNIT = '''[Unit]
Description=Check that scope_job_runner is active if jobs are queued
[Timer]
OnBootSec=15min
OnUnitActiveSec=45min
[Install]
WantedBy=timers.target
'''
SERVICE_UNIT = '''[Unit]
Description=Check that scope_job_runner is active if jobs are queued
[Service]
ExecStart={executable}
'''
ERROR_SUBJECT = '{host}: scope job pending but scope_job_runner is inactive.'
ERROR_MESSAGE = '''One or more of your jobs is overdue on {host}, but the scope job runner daemon is not running.
These jobs will not be run until the command `scope_job_runner start` is executed on that machine.
Time: {time}
Queued Jobs:
{jobs}
'''
ALL_CLEAR_SUBJECT = '{host}: scope_job_runner was reactivated.'
ALL_CLEAR_MESSAGE = '''One or more of your jobs on {host} was stalled due to an inactive job runner.
The job runner has now been restarted and your jobs will be run as planned.
Time: {time}
Queued Jobs:
{jobs}
'''
| 38.227642 | 113 | 0.701616 |
12b402f977b10f55535c5a3654e5fda7b7dcf072 | 2,222 | py | Python | toffy/json_utils.py | angelolab/toffy | 4d6c50fe0dfbf1568ee3f9db2182a04dc9ac85c6 | [
"Apache-2.0"
] | null | null | null | toffy/json_utils.py | angelolab/toffy | 4d6c50fe0dfbf1568ee3f9db2182a04dc9ac85c6 | [
"Apache-2.0"
] | 46 | 2022-01-26T18:21:21.000Z | 2022-03-30T19:19:12.000Z | toffy/json_utils.py | angelolab/creed-helper | 4d6c50fe0dfbf1568ee3f9db2182a04dc9ac85c6 | [
"Apache-2.0"
] | null | null | null | import copy
import json
import os
from ark.utils import io_utils
def rename_missing_fovs(fov_data):
    """Identify FOVs that are missing the 'name' key and create one with value placeholder_{n}

    Args:
        fov_data (dict): the FOV run JSON data

    Returns:
        dict: a copy of the run JSON data with placeholder names for FOVs that lack one
    """
    # work on a deep copy so the caller's run data is left untouched
    renamed_data = copy.deepcopy(fov_data)

    # running index used to build each placeholder name
    placeholder_index = 0

    for fov in renamed_data['fovs']:
        if 'name' in fov:
            continue
        placeholder_index += 1
        fov['name'] = 'placeholder_%d' % placeholder_index

    return renamed_data
def rename_duplicate_fovs(tma_fovs):
    """Identify and rename duplicate FOV names in `fov_list`

    For a given FOV name, the subsequent duplicates get renamed `{FOV}_duplicate{n}`

    Args:
        tma_fovs (dict):
            The TMA run JSON, should contain a `'fovs'` key defining the list of FOVs

    Returns:
        dict:
            The same run JSON with the FOVs renamed to account for duplicates
    """
    # occurrences seen so far, keyed by the original FOV name
    seen_counts = {}

    for fov in tma_fovs['fovs']:
        original_name = fov['name']
        occurrence = seen_counts.get(original_name, 0) + 1
        seen_counts[original_name] = occurrence

        # first occurrence keeps its name; later ones get a suffix
        if occurrence > 1:
            fov['name'] = '%s_duplicate%d' % (original_name, occurrence - 1)

    return tma_fovs
def list_moly_fovs(bin_file_dir):
    """Lists all of the FOVs in a directory which are moly FOVs

    Args:
        bin_file_dir (str): path to bin files

    Returns:
        list: list of FOVs which are moly FOVs"""
    moly_fovs = []

    for json_name in io_utils.list_files(bin_file_dir, '.json'):
        # a FOV is a moly FOV when its run JSON targets the moly standard
        with open(os.path.join(bin_file_dir, json_name), 'r') as handle:
            contents = json.load(handle)

        if contents.get('standardTarget', "") == "Molybdenum Foil":
            moly_fovs.append(json_name.split('.json')[0])

    return moly_fovs
| 26.771084 | 94 | 0.629613 |
12b6971b8aff245d6004cadaa44e2d26223997e6 | 545 | py | Python | app/plugins/task/upload.py | venturiscm/hcp | 74ad18180822301274daa9218d7bd9fbdb7807f7 | [
"Apache-2.0"
] | 1 | 2020-06-22T21:25:52.000Z | 2020-06-22T21:25:52.000Z | app/plugins/task/upload.py | venturiscm/hcp | 74ad18180822301274daa9218d7bd9fbdb7807f7 | [
"Apache-2.0"
] | 1 | 2020-05-21T02:46:24.000Z | 2020-05-25T07:19:23.000Z | app/plugins/task/upload.py | venturiscm/hcp | 74ad18180822301274daa9218d7bd9fbdb7807f7 | [
"Apache-2.0"
] | null | null | null | from systems.plugins.index import BaseProvider
import os
| 27.25 | 95 | 0.640367 |
12b73e722a7a33f56b3403eba3f5dbfb5e5538e6 | 2,955 | py | Python | win_dein_deoplete/.vim/.cache/.vimrc/.dein/rplugin/python3/denite/source/outline.py | takkii/dotfile | 7daf848c718ee10603a68a6e37a1002a827ec72f | [
"MIT"
] | 1 | 2018-10-11T21:31:43.000Z | 2018-10-11T21:31:43.000Z | win_dein_deoplete/.vim/.cache/.vimrc/.dein/rplugin/python3/denite/source/outline.py | takkii/dotfile | 7daf848c718ee10603a68a6e37a1002a827ec72f | [
"MIT"
] | null | null | null | win_dein_deoplete/.vim/.cache/.vimrc/.dein/rplugin/python3/denite/source/outline.py | takkii/dotfile | 7daf848c718ee10603a68a6e37a1002a827ec72f | [
"MIT"
] | null | null | null | # ============================================================================
# FILE: outline.py
# AUTHOR: Yasumasa Tamura (tamura.yasumasa _at_ gmail.com)
# License: MIT license
# ============================================================================
from .base import Base
from subprocess import check_output, CalledProcessError
from denite.util import parse_tagline
import re
import tempfile
OUTLINE_HIGHLIGHT_SYNTAX = [
{'name': 'Name', 'link': 'Identifier', 're': '\S\+\%(\s\+\[\)\@='},
{'name': 'Type', 'link': 'Type', 're': '\[.\{-}\]'},
{'name': 'Ref', 'link': 'Comment', 're': '\s\s.\+'}
]
| 35.60241 | 78 | 0.457868 |
12b887c446ea424a4bd8fd55a07bceb06b1c0206 | 1,656 | py | Python | test.py | Tweetsched/tweetsched-publisher | c639670fc9658251a02b8946b34dfae3f3145a72 | [
"MIT"
] | 1 | 2018-08-28T14:04:15.000Z | 2018-08-28T14:04:15.000Z | test.py | Tweetsched/tweetsched-publisher | c639670fc9658251a02b8946b34dfae3f3145a72 | [
"MIT"
] | null | null | null | test.py | Tweetsched/tweetsched-publisher | c639670fc9658251a02b8946b34dfae3f3145a72 | [
"MIT"
] | null | null | null | from base64 import b64encode
from app import app
import unittest
from mock import patch
import os
import json
from twython import Twython
if __name__ == '__main__':
unittest.main()
| 35.234043 | 100 | 0.607488 |
12b904baad9cd10c3b5e703a970ce798e635e1b7 | 372 | py | Python | Python/01. Fundamentals/01. Simple Calculators/08. Temperature Converter/tempCoverter.py | darioGerussi/exercises | 414a3867d4db9449e402c58efd993153f55b91eb | [
"MIT"
] | 1 | 2022-03-31T01:57:55.000Z | 2022-03-31T01:57:55.000Z | Python/01. Fundamentals/01. Simple Calculators/08. Temperature Converter/tempCoverter.py | darioGerussi/exercises | 414a3867d4db9449e402c58efd993153f55b91eb | [
"MIT"
] | null | null | null | Python/01. Fundamentals/01. Simple Calculators/08. Temperature Converter/tempCoverter.py | darioGerussi/exercises | 414a3867d4db9449e402c58efd993153f55b91eb | [
"MIT"
] | null | null | null | # Converts a given temperature from Celsius to Fahrenheit
# Prompt user for Celsius temperature
degreesCelsius = float(input('\nEnter the temperature in Celsius: '))
# Calculate and display the converted
# temperature in Fahrenheit
degreesFahrenheit = ((9.0 / 5.0) * degreesCelsius) + 32
print('Fahrenheit equivalent: ', format(degreesFahrenheit, ',.1f'), '\n', sep='') | 37.2 | 81 | 0.744624 |
12b9be88a391697f2894a2c7dcc4147754edbf99 | 1,227 | py | Python | website/models/post.py | LKKTGB/lkkpomia | 0a814ed6d28757e07d6392ca27c914e68f0b3bda | [
"MIT"
] | null | null | null | website/models/post.py | LKKTGB/lkkpomia | 0a814ed6d28757e07d6392ca27c914e68f0b3bda | [
"MIT"
] | 5 | 2020-04-26T09:03:33.000Z | 2022-02-02T13:00:39.000Z | website/models/post.py | LKKTGB/lkkpomia | 0a814ed6d28757e07d6392ca27c914e68f0b3bda | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
| 32.289474 | 80 | 0.667482 |
12ba24dffd7a4983b46d43a9846f2ca9b1d6059e | 4,214 | py | Python | tests/sentry/api/serializers/test_alert_rule.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | tests/sentry/api/serializers/test_alert_rule.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/serializers/test_alert_rule.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.alert_rule import DetailedAlertRuleSerializer
from sentry.incidents.logic import create_alert_rule, create_alert_rule_trigger
from sentry.incidents.models import AlertRuleThresholdType
from sentry.snuba.models import QueryAggregations
from sentry.testutils import TestCase
| 45.311828 | 100 | 0.701709 |
12bae8e939e905a92184b3c60e3fd70c58c999c2 | 1,003 | py | Python | mys/cli/subparsers/test.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | mys/cli/subparsers/test.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | mys/cli/subparsers/test.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | import os
from ..utils import add_jobs_argument
from ..utils import add_no_ccache_argument
from ..utils import add_optimize_argument
from ..utils import add_verbose_argument
from ..utils import build_prepare
from ..utils import run
| 27.108108 | 62 | 0.698903 |
12bc9ffc8a5d1fd39d7381b5bb5f4a16fad4749b | 14,579 | py | Python | plugins/modules/nsxt_transport_node_collections.py | madhukark/ansible-for-nsxt | f75c698e24073305a968ce2f70739fee77a14bb2 | [
"BSD-2-Clause"
] | null | null | null | plugins/modules/nsxt_transport_node_collections.py | madhukark/ansible-for-nsxt | f75c698e24073305a968ce2f70739fee77a14bb2 | [
"BSD-2-Clause"
] | null | null | null | plugins/modules/nsxt_transport_node_collections.py | madhukark/ansible-for-nsxt | f75c698e24073305a968ce2f70739fee77a14bb2 | [
"BSD-2-Clause"
] | 1 | 2021-12-03T08:26:09.000Z | 2021-12-03T08:26:09.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_transport_node_collections
short_description: Create transport node collection by attaching Transport Node Profile to cluster.
description: When transport node collection is created the hosts which are part
of compute collection will be prepared automatically i.e. NSX Manager
attempts to install the NSX components on hosts. Transport nodes for these
hosts are created using the configuration specified in transport node
profile.
version_added: "2.7"
author: Rahul Raghuvanshi
options:
hostname:
description: Deployed NSX manager hostname.
required: true
type: str
username:
description: The username to authenticate with the NSX manager.
required: true
type: str
password:
description: The password to authenticate with the NSX manager.
required: true
type: str
cluster_name:
description: CLuster Name
required: false
type: str
compute_manager_name:
description: Cluster Manager Name
required: false
type: str
description:
description: Description
required: true
type: str
display_name:
description: Display name
required: true
type: str
resource_type:
description: "Must be set to the value TransportNodeCollection"
required: true
type: str
state:
choices:
- present
- absent
description: "State can be either 'present' or 'absent'.
'present' is used to create or update resource.
'absent' is used to delete resource."
required: true
transport_node_profile_name:
description: Transport Node Profile Names
required: true
type: str
'''
EXAMPLES = '''
- name: Create transport node collection
nsxt_transport_node_collections:
hostname: "{{hostname}}"
username: "{{username}}"
password: "{{password}}"
validate_certs: False
display_name: "TNC1"
resource_type: "TransportNodeCollection"
description: "Transport Node Collections 1"
compute_manager_name: "VC1"
cluster_name: "cl1"
transport_node_profile_name: "TNP1"
state: present
'''
RETURN = '''# '''
import json, time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible.module_utils._text import to_native
import ssl
import socket
import hashlib
if __name__ == '__main__':
main()
| 52.442446 | 183 | 0.721243 |
12bedc5672fe578c7205936e96d0685f45374da0 | 16,945 | py | Python | training/loss.py | drboog/Lafite | 10e109b9f46646ab793e0a5f38386af3012e9636 | [
"MIT"
] | 45 | 2022-03-10T23:49:44.000Z | 2022-03-31T21:47:45.000Z | training/loss.py | drboog/Lafite | 10e109b9f46646ab793e0a5f38386af3012e9636 | [
"MIT"
] | 7 | 2022-03-13T15:13:18.000Z | 2022-03-31T16:57:38.000Z | training/loss.py | drboog/Lafite | 10e109b9f46646ab793e0a5f38386af3012e9636 | [
"MIT"
] | 8 | 2022-03-10T23:49:29.000Z | 2022-03-31T18:20:17.000Z |
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
import torch.nn.functional as F
import torchvision.transforms as T
import clip
import dnnlib
import random
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
| 50.281899 | 190 | 0.5928 |
12bfd9fea84125596f1417fe60855b47416a33a6 | 4,203 | py | Python | lib/oitool/fetchoi.py | stockalgo/oichart | 962c373b34fcef09cc58abcf6e252dd746d413a1 | [
"MIT"
] | 8 | 2021-02-05T21:54:26.000Z | 2022-03-26T19:44:42.000Z | lib/oitool/fetchoi.py | stockalgo/oichart | 962c373b34fcef09cc58abcf6e252dd746d413a1 | [
"MIT"
] | 3 | 2021-03-15T18:41:12.000Z | 2021-12-18T09:23:47.000Z | lib/oitool/fetchoi.py | stockalgo/oichart | 962c373b34fcef09cc58abcf6e252dd746d413a1 | [
"MIT"
] | 5 | 2021-03-16T12:28:37.000Z | 2021-12-17T17:35:16.000Z | import time
import logging
from bandl.nse_data import NseData
from influxdb import InfluxDBClient
| 39.280374 | 106 | 0.602665 |
12c0367fe0f1278ce33a6a9b512ae1509254147d | 1,667 | py | Python | notebooks/HelperFunctions/RunModel.py | hh2110/continual-ml-stocks | 2a2baa330cd418b3cfb7eda8464c6b5b67bc608f | [
"CC0-1.0"
] | null | null | null | notebooks/HelperFunctions/RunModel.py | hh2110/continual-ml-stocks | 2a2baa330cd418b3cfb7eda8464c6b5b67bc608f | [
"CC0-1.0"
] | null | null | null | notebooks/HelperFunctions/RunModel.py | hh2110/continual-ml-stocks | 2a2baa330cd418b3cfb7eda8464c6b5b67bc608f | [
"CC0-1.0"
] | null | null | null | from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
| 27.783333 | 83 | 0.652669 |
12c19863b8bc11caf71dfdd9f3bff254268754da | 7,299 | py | Python | tools/build_defs/pkg/make_rpm.py | jpieper-tri/bazel | eef80048e2c59e3be974144ce9cd90b9f90294fb | [
"Apache-2.0"
] | 1 | 2018-03-27T17:18:20.000Z | 2018-03-27T17:18:20.000Z | tools/build_defs/pkg/make_rpm.py | Corroler/bazel | 073ea095a6c6a826ccdbbce1b213de47115e701a | [
"Apache-2.0"
] | 2 | 2018-11-06T01:01:16.000Z | 2019-04-10T02:25:49.000Z | tools/build_defs/pkg/make_rpm.py | Corroler/bazel | 073ea095a6c6a826ccdbbce1b213de47115e701a | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple cross-platform helper to create an RPM package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import fileinput
import os
import re
import shutil
import subprocess
import sys
from tempfile import mkdtemp
# pylint: disable=g-direct-third-party-import
from third_party.py import gflags
gflags.DEFINE_string('name', '', 'The name of the software being packaged.')
gflags.DEFINE_string('version', '',
'The version of the software being packaged.')
gflags.DEFINE_string('release', '',
'The release of the software being packaged.')
gflags.DEFINE_string('arch', '',
'The CPU architecture of the software being packaged.')
gflags.DEFINE_string('spec_file', '',
'The file containing the RPM specification.')
gflags.DEFINE_string('out_file', '',
'The destination to save the resulting RPM file to.')
# Setup to safely create a temporary directory and clean it up when done.
def GetFlagValue(flagvalue, strip=True):
  """Return a flag's value, dereferencing an ``@file`` indirection.

  A value beginning with ``@`` names a file whose contents become the
  value.  Unless ``strip`` is False, surrounding whitespace is removed
  from the result.
  """
  if not flagvalue:
    return flagvalue
  if flagvalue.startswith('@'):
    with open(flagvalue[1:], 'r') as flag_file:
      flagvalue = flag_file.read()
  return flagvalue.strip() if strip else flagvalue
# Matches rpmbuild's "Wrote: <path>" status line anywhere in the log.
WROTE_FILE_RE = re.compile(r'Wrote: (?P<rpm_path>.+)', re.MULTILINE)


def FindOutputFile(log):
  """Find the written file from the log information.

  Returns the path reported on the first "Wrote:" line, or None when the
  log contains no such line.
  """
  match = WROTE_FILE_RE.search(log)
  return match.group('rpm_path') if match else None
def CopyAndRewrite(input_file, output_file, replacements=None):
  """Copies the given file and optionally rewrites with replacements.

  Args:
    input_file: The file to copy.
    output_file: The file to write to.
    replacements: A dictionary of replacements.
      Keys are prefixes scan for, values are the replacements to write after
      the prefix.
  """
  with open(input_file) as source, open(output_file, 'w') as output:
    for line in source:
      rewritten = line
      # The first matching prefix wins; the rest of the line is dropped.
      for prefix, text in (replacements or {}).items():
        if line.startswith(prefix):
          rewritten = prefix + ' ' + text + '\n'
          break
      output.write(rewritten)
if __name__ == '__main__':
FLAGS = gflags.FLAGS
main(FLAGS(sys.argv))
| 27.43985 | 80 | 0.68023 |
12c1f75f883cd400635b90784e88c06bdf2c4be4 | 2,739 | py | Python | data/datasets/gb_100.py | CharleyZhao123/graceful-few-shot | fae8170158a7a39ead7da40fecd787fea4abcf1a | [
"MIT"
] | 1 | 2021-08-11T12:56:29.000Z | 2021-08-11T12:56:29.000Z | data/datasets/gb_100.py | CharleyZhao123/graceful-few-shot | fae8170158a7a39ead7da40fecd787fea4abcf1a | [
"MIT"
] | null | null | null | data/datasets/gb_100.py | CharleyZhao123/graceful-few-shot | fae8170158a7a39ead7da40fecd787fea4abcf1a | [
"MIT"
] | null | null | null | import os
import pickle
import random
from torch.utils.data import Dataset
from .datasets import dataset_register
default_split = {
'train': 0.7,
'val': 0.3,
}
if __name__ == '__main__':
gb_100 = GB100(
root_path='/space1/zhaoqing/dataset/fsl/gb-100', split='val', split_method='novel')
print(len(gb_100))
# random
# val 3840
# train 8960
# novel
# val 4000
# train 8800
| 28.831579 | 91 | 0.588536 |
12c2d9d6cce98782d3ab5c1e821708313828e9f6 | 594 | py | Python | examples/analyze-outdated.py | duzvik/project-freta | 6c96b5d9af98380d695f0ad1c1636021793f30d2 | [
"CC-BY-4.0",
"MIT"
] | 67 | 2020-07-06T20:18:05.000Z | 2022-03-27T15:00:16.000Z | examples/analyze-outdated.py | hhfdserth/project-freta | b552267f87a4f5e4796ece6865232853d62f227c | [
"CC-BY-4.0",
"MIT"
] | 2 | 2020-07-06T23:35:47.000Z | 2020-07-14T15:22:47.000Z | examples/analyze-outdated.py | hhfdserth/project-freta | b552267f87a4f5e4796ece6865232853d62f227c | [
"CC-BY-4.0",
"MIT"
] | 21 | 2020-04-07T22:37:52.000Z | 2021-11-10T08:27:38.000Z | #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
# Re-analyze all images that don't have latest version of the analysis available
from freta.api import Freta
if __name__ == "__main__":
main()
| 23.76 | 80 | 0.616162 |
12c342b7aef5ffeb0a48559a00dc029a6ad70253 | 4,041 | py | Python | utils/utils_fit.py | bubbliiiing/faster-rcnn-keras | aa1eb5e974785646b9fd86bfd269f2b6c12ec0e6 | [
"MIT"
] | 282 | 2020-02-25T00:19:28.000Z | 2022-03-20T08:14:20.000Z | utils/utils_fit.py | codertcm/faster-rcnn-keras | aa1eb5e974785646b9fd86bfd269f2b6c12ec0e6 | [
"MIT"
] | 46 | 2020-02-24T13:17:40.000Z | 2022-03-12T00:59:15.000Z | utils/utils_fit.py | codertcm/faster-rcnn-keras | aa1eb5e974785646b9fd86bfd269f2b6c12ec0e6 | [
"MIT"
] | 123 | 2020-02-23T09:28:36.000Z | 2022-03-16T01:43:46.000Z | import numpy as np
import tensorflow as tf
from keras import backend as K
from tqdm import tqdm
| 44.406593 | 153 | 0.554318 |
12c35e34c837e4d87b7e6155a3d32986c86a463f | 88 | py | Python | __init__.py | sbalen/TrafficSignsDataset | 39ae40a0d307ee83af57f70eed43c38bc5d25233 | [
"Apache-2.0"
] | 1 | 2021-05-05T14:23:34.000Z | 2021-05-05T14:23:34.000Z | __init__.py | sbalen/TrafficSignsDataset | 39ae40a0d307ee83af57f70eed43c38bc5d25233 | [
"Apache-2.0"
] | null | null | null | __init__.py | sbalen/TrafficSignsDataset | 39ae40a0d307ee83af57f70eed43c38bc5d25233 | [
"Apache-2.0"
] | null | null | null | """TrafficSignDataset dataset."""
from .TrafficSignsDataset import Trafficsignsdataset
| 22 | 52 | 0.829545 |
12c3f8688909dadef43a9224619f1323d1d373b9 | 972 | py | Python | exercicios-Python/ex042.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | exercicios-Python/ex042.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | exercicios-Python/ex042.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | #Refaa o DESAFIO 035 dos tringulos, acrescentando o recurso de mostrar que tipo de tringulo ser formado:
#- EQUILTERO: todos os lados iguais
#- ISSCELES: dois lados iguais, um diferente
#- ESCALENO: todos os lados diferentes
print('-' * 20, 'Programa Analisador de Tringulos', '-' * 20)
seg1 = float(input('Digite o valor do primeiro segmento: '))
seg2 = float(input('Digite o valor do segundo segmento: '))
seg3 = float(input('Digite o valor do terceiro segmento: '))
if seg1 < seg2 + seg3 and seg2 < seg1 + seg3 and seg3 < seg1 + seg2:
if seg1 == seg2 and seg3: # outra possibilidade --> seg1 == seg2 == seg3:
print('Os segmentos PODEM formar um tringulo do tipo EQUILTERO!')
elif seg1 != seg2 != seg3 != seg1:
print('Os segmentos acima PODEM formar um tringulo do tipo ESCALENO!')
else:
print('Os segmentos acima PODEM formar um tringulo do tipo ISSCELES!')
else:
print('Os segmentos NO PODEM formar um tringulo!')
| 54 | 108 | 0.700617 |
12c5579947927013c8506c4aecdbaabf5a5bd1d2 | 319 | py | Python | tests/test_extension.py | PeterWurmsdobler/mopidy-vfd | 8ae067d37b8670da2a0b9e876257c09ceb222be7 | [
"Apache-2.0"
] | null | null | null | tests/test_extension.py | PeterWurmsdobler/mopidy-vfd | 8ae067d37b8670da2a0b9e876257c09ceb222be7 | [
"Apache-2.0"
] | null | null | null | tests/test_extension.py | PeterWurmsdobler/mopidy-vfd | 8ae067d37b8670da2a0b9e876257c09ceb222be7 | [
"Apache-2.0"
] | null | null | null | from mopidy_vfd import Extension
| 16.789474 | 37 | 0.689655 |