hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e1aafb58eef941757b45eb7221687eebd5c2e5ea | 799 | py | Python | app.py | mwinel/python-cicd-assignement | c4c08772094f983f8105090f0d91dfef5be042aa | [
"MIT"
] | null | null | null | app.py | mwinel/python-cicd-assignement | c4c08772094f983f8105090f0d91dfef5be042aa | [
"MIT"
] | null | null | null | app.py | mwinel/python-cicd-assignement | c4c08772094f983f8105090f0d91dfef5be042aa | [
"MIT"
] | null | null | null | import os
import logging
from flask import Flask
app = Flask(__name__)
if __name__ == "__main__":
## stream logs to a file
logging.basicConfig(filename='app.log', level=logging.DEBUG)
app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
| 22.828571 | 79 | 0.673342 |
e1ad6793329afb999758e7af4b085f4de8b95b33 | 93 | py | Python | Configuration/StandardSequences/python/L1Reco_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Configuration/StandardSequences/python/L1Reco_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Configuration/StandardSequences/python/L1Reco_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from L1Trigger.Configuration.L1TReco_cff import *
| 18.6 | 49 | 0.83871 |
e1af67620fb53577d386b7ebbfdc3bab9dc25e7c | 2,347 | py | Python | pysrc/common/dataset_mod.py | Hibiki1020/classification_attitude_estimator | 7f7435c2ed5b5b5c8d0219df2da0426b108ff3aa | [
"MIT"
] | null | null | null | pysrc/common/dataset_mod.py | Hibiki1020/classification_attitude_estimator | 7f7435c2ed5b5b5c8d0219df2da0426b108ff3aa | [
"MIT"
] | null | null | null | pysrc/common/dataset_mod.py | Hibiki1020/classification_attitude_estimator | 7f7435c2ed5b5b5c8d0219df2da0426b108ff3aa | [
"MIT"
] | null | null | null |
import torch.utils.data as data
from PIL import Image
import numpy as np
import math
import csv | 28.621951 | 111 | 0.611845 |
e1b1063d345266bc6d42ef8301d5659cc8f0a43d | 1,846 | py | Python | scripts/darias_energy_control/moveit_traj_baseline/repulsive_potential_field.py | hjw-1014/Multi-Objective-Reactive-Motion-Planning-in-Mobile-Manipulators | 9a8801e9c663174b753c4852b2313c5a3f302434 | [
"MIT"
] | null | null | null | scripts/darias_energy_control/moveit_traj_baseline/repulsive_potential_field.py | hjw-1014/Multi-Objective-Reactive-Motion-Planning-in-Mobile-Manipulators | 9a8801e9c663174b753c4852b2313c5a3f302434 | [
"MIT"
] | null | null | null | scripts/darias_energy_control/moveit_traj_baseline/repulsive_potential_field.py | hjw-1014/Multi-Objective-Reactive-Motion-Planning-in-Mobile-Manipulators | 9a8801e9c663174b753c4852b2313c5a3f302434 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import matplotlib.pyplot as plt
from icecream import ic
w = 5
Y, X = np.mgrid[-w:w:5j, -w:w:5j]
ic(Y)
ic(X)
import numpy as np
import matplotlib.pyplot as plt
# # Creating dataset
# x = np.arange(0, 10)
# y = np.arange(0, 10)
#
# # Creating grids
# X, Y = np.meshgrid(x, y)
# # ic(X)
# # ic(Y)
#
# # x-component to the right
# u = np.ones((15, 10))
#
# # y-component zero
# v = -np.ones((10, 10))
#
# fig = plt.figure(figsize=(12, 7))
#
# # Plotting stream plot
# plt.streamplot(X, Y, u, v, density=0.5)
#
# # show plot
# # plt.show()
import numpy as np
import matplotlib.pyplot as plt
# Creating data set
w = 3
Y, X = np.mgrid[-w:w:100j, -w:w:100j]
U1 = -1 - X ** 2 + Y
ic(type(U1))
ic(np.shape(U1))
V1 = 1 + X - Y ** 2
ic(np.shape(V1))
U2 = -1.1 - X ** 2 + Y
ic(np.shape(U1))
V2 = 2.1 + X - Y ** 2
# speed = np.sqrt(U ** 2 + V ** 2)
# Creating plot
fig = plt.figure(figsize=(12, 7))
plt.streamplot(X, Y, U1, V1, density=1)
plt.streamplot(X, Y, U2, V2, density=0.8)
# show plot
plt.show() | 20.977273 | 84 | 0.594258 |
e1b18b46651e9f66ff6958a9025b0bc1b9f9aca5 | 3,793 | py | Python | capa/features/extractors/ida/extractor.py | pombredanne/capa | b41d23930189c269608d4b705533fa45cf3c064c | [
"Apache-2.0"
] | null | null | null | capa/features/extractors/ida/extractor.py | pombredanne/capa | b41d23930189c269608d4b705533fa45cf3c064c | [
"Apache-2.0"
] | null | null | null | capa/features/extractors/ida/extractor.py | pombredanne/capa | b41d23930189c269608d4b705533fa45cf3c064c | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import idaapi
import capa.ida.helpers
import capa.features.extractors.elf
import capa.features.extractors.ida.file
import capa.features.extractors.ida.insn
import capa.features.extractors.ida.global_
import capa.features.extractors.ida.function
import capa.features.extractors.ida.basicblock
from capa.features.extractors.base_extractor import FeatureExtractor
| 33.566372 | 111 | 0.714474 |
e1b1b1bf75362e9f77713c3b8bcaddbf1477de81 | 55 | py | Python | Tests/playground.py | mbtaPredict/Main | e1c3320ff08b61355ac96f51be9e20c57372f13b | [
"MIT"
] | null | null | null | Tests/playground.py | mbtaPredict/Main | e1c3320ff08b61355ac96f51be9e20c57372f13b | [
"MIT"
] | null | null | null | Tests/playground.py | mbtaPredict/Main | e1c3320ff08b61355ac96f51be9e20c57372f13b | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
plt.plot()
plt.show() | 11 | 31 | 0.745455 |
e1b370db317e8d54c9c290cf01cbebc1cac20378 | 1,373 | py | Python | flex/extensions/jsondata.py | AWehrhahn/flex-format | 7fcc985559cd90e54d3ebde7946455aedc7293d7 | [
"MIT"
] | null | null | null | flex/extensions/jsondata.py | AWehrhahn/flex-format | 7fcc985559cd90e54d3ebde7946455aedc7293d7 | [
"MIT"
] | null | null | null | flex/extensions/jsondata.py | AWehrhahn/flex-format | 7fcc985559cd90e54d3ebde7946455aedc7293d7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import annotations
from tarfile import TarInfo
from typing import BinaryIO, Tuple
from ..base import FlexExtension
| 31.930233 | 78 | 0.640932 |
e1b37b3b7be2be9f06bdec60a631822373a8b7f7 | 185 | py | Python | awards/forms.py | danalvin/Django-IP3 | 6df0adaddf998fd4195b23ee97f81938e741215a | [
"MIT"
] | null | null | null | awards/forms.py | danalvin/Django-IP3 | 6df0adaddf998fd4195b23ee97f81938e741215a | [
"MIT"
] | 4 | 2020-06-05T19:20:59.000Z | 2021-09-08T00:32:49.000Z | awards/forms.py | danalvin/Django-IP3 | 6df0adaddf998fd4195b23ee97f81938e741215a | [
"MIT"
] | null | null | null | from django import forms
from .models import Project
| 18.5 | 52 | 0.67027 |
e1b43999f4dbbac898da1e996502f381b7896fa5 | 72,341 | py | Python | src/tale/syntax/grammar/TaleParser.py | tale-lang/tale | 1779f94aa13545e58a1d5a8819b85ad02ada4144 | [
"MIT"
] | 17 | 2020-02-11T10:38:19.000Z | 2020-09-22T16:36:25.000Z | src/tale/syntax/grammar/TaleParser.py | tale-lang/tale | 1779f94aa13545e58a1d5a8819b85ad02ada4144 | [
"MIT"
] | 18 | 2020-02-14T20:36:25.000Z | 2020-05-26T21:52:46.000Z | src/tale/syntax/grammar/TaleParser.py | tale-lang/tale | 1779f94aa13545e58a1d5a8819b85ad02ada4144 | [
"MIT"
] | 1 | 2020-02-16T12:04:07.000Z | 2020-02-16T12:04:07.000Z | # Generated from tale/syntax/grammar/Tale.g4 by ANTLR 4.8
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
| 34.317362 | 239 | 0.590868 |
e1b490b033e953f1585ccd81fdcb489a598e5706 | 353 | py | Python | 004.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | 004.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | 004.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | print('Ol, Mundo!')
print(7+4)
print('7'+'4')
print('Ol', 5)
# Toda varivel um objeto
# Um objeto mais do que uma varivel
nome = 'Gabriel'
idade = 30
peso = 79
print(nome,idade,peso)
nome = input('>>> Nome ')
idade = input('>>> Idade ')
peso = input('>>> Peso ')
print(nome,idade,peso)
print(f'Nome:{nome} ,Idade:{idade} ,Peso:{peso}')
| 14.12 | 49 | 0.620397 |
e1b5d39efe358fd9f5a0abeb927321f0eef6f285 | 680 | py | Python | examples/create_mac_table_entry.py | open-switch/opx-docs | f448f3f3dc0de38822bbf16c1e173eb108925a40 | [
"CC-BY-4.0"
] | 122 | 2017-02-10T01:47:04.000Z | 2022-03-23T20:11:11.000Z | examples/create_mac_table_entry.py | open-switch/opx-docs | f448f3f3dc0de38822bbf16c1e173eb108925a40 | [
"CC-BY-4.0"
] | 37 | 2017-03-01T07:07:22.000Z | 2021-11-11T16:47:42.000Z | examples/create_mac_table_entry.py | open-switch/opx-docs | f448f3f3dc0de38822bbf16c1e173eb108925a40 | [
"CC-BY-4.0"
] | 39 | 2017-01-18T16:22:58.000Z | 2020-11-18T13:23:43.000Z | #Python code block to configure MAC address table entry
import cps_utils
#Register the attribute type
cps_utils.add_attr_type('base-mac/table/mac-address', 'mac')
#Define the MAC address, interface index and VLAN attributes
d = {'mac-address': '00:0a:0b:cc:0d:0e', 'ifindex': 18, 'vlan': '100'}
#Create a CPS object
obj = cps_utils.CPSObject('base-mac/table', data=d)
#Associate the operation to the CPS object
tr_obj = ('create', obj.get())
#Create a transaction object
transaction = cps_utils.CPSTransaction([tr_obj])
#Check for failure
ret = transaction.commit()
if not ret:
raise RuntimeError('Error creating MAC Table Entry')
print 'Successfully created'
| 27.2 | 70 | 0.738235 |
e1b62639aea1ec0a6c6d66e1d90f5f610c374034 | 4,397 | py | Python | win/GUI/Configuration.py | WeAreAVP/mdqc | 3130a713c70017bc54d8e5da6bb5766ba9d97423 | [
"Apache-2.0"
] | 8 | 2019-01-18T08:58:02.000Z | 2021-05-20T16:51:14.000Z | osx/GUI/Configuration.py | WeAreAVP/mdqc | 3130a713c70017bc54d8e5da6bb5766ba9d97423 | [
"Apache-2.0"
] | 7 | 2016-02-25T21:50:03.000Z | 2017-12-13T14:27:29.000Z | osx/GUI/Configuration.py | WeAreAVP/mdqc | 3130a713c70017bc54d8e5da6bb5766ba9d97423 | [
"Apache-2.0"
] | 2 | 2020-02-13T16:00:07.000Z | 2020-08-12T16:31:49.000Z | # -*- coding: UTF-8 -*-
'''
Created on May 14, 2014
@author: Furqan Wasi <furqan@avpreserve.com>
'''
import os, datetime, sys, platform, base64
| 31.407143 | 139 | 0.596771 |
e1b62abc8e468748316b85f828dfc8de03775be8 | 17,306 | py | Python | MainController.py | samuelvp360/Microbiological-Assay-Calculator | 36317e266bf499f24f7e7d3f59328864a8723aa4 | [
"FSFAP"
] | null | null | null | MainController.py | samuelvp360/Microbiological-Assay-Calculator | 36317e266bf499f24f7e7d3f59328864a8723aa4 | [
"FSFAP"
] | null | null | null | MainController.py | samuelvp360/Microbiological-Assay-Calculator | 36317e266bf499f24f7e7d3f59328864a8723aa4 | [
"FSFAP"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
from pathlib import Path
from datetime import datetime
from PyQt5 import QtCore as qtc
from PyQt5 import QtWidgets as qtw
from PyQt5 import uic
import numpy as np
from Models import AssaysModel, SamplesModel
from DB.AssaysDB import MyZODB
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar
from WellProcessor import WellProcessor
from Assay import Assay
matplotlib.use('Qt5Agg')
p = Path(__file__)
print(p)
isLink = os.path.islink(p)
if isLink:
theLink = os.readlink(p)
path = Path(theLink).resolve().parent
path = f'{path}/'
print('linked')
else:
path = ''
print('unlinked')
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
window = MainWindow(path)
window.show()
sys.exit(app.exec_())
| 42.836634 | 108 | 0.596729 |
e1b6ebd37b97bc9b109f511037c684ea5fa2de9b | 225 | py | Python | events/defaults.py | bozbalci/cython-experiments | a675571e09297e3cda9154e8b611562bb8b14f7e | [
"Unlicense"
] | 1 | 2018-06-23T17:52:20.000Z | 2018-06-23T17:52:20.000Z | events/defaults.py | bozbalci/cython-experiments | a675571e09297e3cda9154e8b611562bb8b14f7e | [
"Unlicense"
] | null | null | null | events/defaults.py | bozbalci/cython-experiments | a675571e09297e3cda9154e8b611562bb8b14f7e | [
"Unlicense"
] | null | null | null | # defaults.py: contains the built-in variables, events and methods
# used for scripting the C program
import event
events = {}
_event_names = ["on_start", "on_exit"]
for evt in _event_names:
events[evt] = event.Event()
| 22.5 | 66 | 0.724444 |
e1b7011c0667fc12e337dc4c85e26236aa831c39 | 15,526 | py | Python | src/parse_text.py | rflieshman/BOLSTM | be7551957001a9bdaab6051145f96ad9d7415209 | [
"Apache-2.0"
] | 13 | 2019-04-11T02:20:49.000Z | 2021-03-26T11:00:10.000Z | src/parse_text.py | rflieshman/BOLSTM | be7551957001a9bdaab6051145f96ad9d7415209 | [
"Apache-2.0"
] | 5 | 2018-12-05T14:38:40.000Z | 2021-12-13T19:46:53.000Z | src/parse_text.py | lasigeBioTM/BOLSTM | c33a8b2a7722acb5e3ff55c3735591aea4f76f49 | [
"Apache-2.0"
] | 2 | 2019-10-13T13:47:19.000Z | 2021-01-16T10:43:33.000Z | from itertools import combinations
import numpy as np
import spacy
import sys
from spacy.tokenizer import Tokenizer
import re
from subprocess import PIPE, Popen
import os
import logging
import networkx as nx
import en_core_web_sm
import string
from neg_gv import neg_gv_list
SSTDIR = "sst-light-0.4/"
TEMP_DIR = "temp/"
nlp = en_core_web_sm.load(disable=["ner"])
nlp.add_pipe(prevent_sentence_segmentation, name="prevent-sbd", before="parser")
# https://stackoverflow.com/a/41817795/3605086
def get_network_graph_spacy(document):
"""
Convert the dependencies of the spacy document object to a networkX graph
:param document: spacy parsed document object
:return: networkX graph object and nodes list
"""
edges = []
nodes = []
# ensure that every token is connected
# edges.append(("ROOT", '{0}-{1}'.format(list(document)[0].lower_, list(document)[0].i)))
for s in document.sents:
edges.append(("ROOT", "{0}-{1}".format(s.root.lower_, s.root.i)))
for token in document:
nodes.append("{0}-{1}".format(token.lower_, token.i))
# edges.append(("ROOT", '{0}-{1}'.format(token.lower_, token.i)))
# print('{0}-{1}'.format(token.lower_, token.i))
# FYI https://spacy.io/docs/api/token
for child in token.children:
# print("----", '{0}-{1}'.format(child.lower_, child.i))
edges.append(
(
"{0}-{1}".format(token.lower_, token.i),
"{0}-{1}".format(child.lower_, child.i),
)
)
return nx.Graph(edges), nodes
def get_head_tokens(entities, sentence):
"""
:param entities: dictionary mapping entity IDs to (offset, text)
:param sentence: sentence parsed by spacy
:return: dictionary mapping head tokens word-idx to entity IDs
"""
sentence_head_tokens = {}
for eid in entities:
offset = (entities[eid][0][0], entities[eid][0][-1])
# starts = {tok.i: tok.idx for tok in doc}
# entity_tokens = sentence.char_span(offset[0], offset[1])
entity_tokens = [
(t, i) for i, t in enumerate(sentence.token) if t.beginChar == offset[0]
]
# if not entity_tokens:
# try to include the next char
# entity_tokens = sentence.char_span(offset[0], offset[1] + 1)
# entity_tokens = [t for t in sentence.token if t.beginChar == offset[0]]
if not entity_tokens:
logging.warning(
(
"no tokens found:",
entities[eid],
sentence.text,
"|".join(
[
"{}({}-{})".format(t.word, t.beginChar, t.endChar)
for t in sentence.token
]
),
)
)
# sys.exit()
else:
head_token = "{0}-{1}".format(
entity_tokens[0][0].word.lower(), entity_tokens[0][1]
)
if head_token in sentence_head_tokens:
logging.warning(
(
"head token conflict:",
sentence_head_tokens[head_token],
entities[eid],
)
)
sentence_head_tokens[head_token] = eid
return sentence_head_tokens
def get_head_tokens_spacy(entities, sentence, positive_entities):
"""
:param entities: dictionary mapping entity IDs to (offset, text)
:param sentence: sentence parsed by spacy
:return: dictionary mapping head tokens word-idx to entity IDs
"""
sentence_head_tokens = {}
pos_gv = set()
neg_gv = set()
for eid in entities:
offset = (entities[eid][0][0], entities[eid][0][-1])
# starts = {tok.i: tok.idx for tok in doc}
entity_tokens = sentence.char_span(offset[0], offset[1])
# if not entity_tokens:
# try to include the next char
# entity_tokens = sentence.char_span(offset[0], offset[1] + 1)
i = 1
while not entity_tokens and i + offset[1] < len(sentence.text) + 1:
entity_tokens = sentence.char_span(offset[0], offset[1] + i)
i += 1
i = 0
while not entity_tokens and offset[0] - i > 0:
entity_tokens = sentence.char_span(offset[0] - i, offset[1])
i += 1
if not entity_tokens:
logging.warning(
(
"no tokens found:",
entities[eid],
sentence.text,
"|".join([t.text for t in sentence]),
)
)
else:
head_token = "{0}-{1}".format(
entity_tokens.root.lower_, entity_tokens.root.i
)
if eid in positive_entities:
pos_gv.add(entity_tokens.root.head.lower_)
else:
neg_gv.add(entity_tokens.root.head.lower_)
if head_token in sentence_head_tokens:
logging.warning(
(
"head token conflict:",
sentence_head_tokens[head_token],
entities[eid],
)
)
sentence_head_tokens[head_token] = eid
return sentence_head_tokens, pos_gv, neg_gv
def process_sentence_spacy(
sentence,
sentence_entities,
sentence_pairs,
positive_entities,
wordnet_tags=None,
mask_entities=True,
min_sdp_len=0,
max_sdp_len=15,
):
"""
Process sentence to obtain labels, instances and classes for a ML classifier
:param sentence: sentence processed by spacy
:param sentence_entities: dictionary mapping entity ID to ((e_start, e_end), text, paths_to_root)
:param sentence_pairs: dictionary mapping pairs of known entities in this sentence to pair types
:return: labels of each pair (according to sentence_entities,
word vectors and classes (pair types according to sentence_pairs)
"""
left_word_vectors = []
right_word_vectors = []
left_wordnets = []
right_wordnets = []
classes = []
labels = []
graph, nodes_list = get_network_graph_spacy(sentence)
sentence_head_tokens, pos_gv, neg_gv = get_head_tokens_spacy(
sentence_entities, sentence, positive_entities
)
# print(neg_gv - pos_gv)
entity_offsets = [sentence_entities[x][0][0] for x in sentence_entities]
# print(sentence_head_tokens)
for (e1, e2) in combinations(sentence_head_tokens, 2):
# print()
# print(sentence_head_tokens[e1], e1, sentence_head_tokens[e2], e2)
# reorder according to entity ID
if int(sentence_head_tokens[e1].split("e")[-1]) > int(
sentence_head_tokens[e2].split("e")[-1]
):
e1, e2 = e2, e1
e1_text = sentence_entities[sentence_head_tokens[e1]]
e2_text = sentence_entities[sentence_head_tokens[e2]]
if e1_text[1].lower() == e2_text[1].lower():
# logging.debug("skipped same text: {} {}".format(e1_text, e2_text))
continue
middle_text = sentence.text[e1_text[0][-1] : e2_text[0][0]]
# if middle_text.strip() == "or" or middle_text.strip() == "and":
# logging.debug("skipped entity list: {} {} {}".format(e1_text, middle_text, e2_text))
# continue
if middle_text.strip() in string.punctuation:
# logging.debug("skipped punctuation: {} {} {}".format(e1_text, middle_text, e2_text))
continue
# if len(middle_text) < 3:
# logging.debug("skipped entity list: {} {} {}".format(e1_text, middle_text, e2_text))
# continue
head_token1_idx = int(e1.split("-")[-1])
head_token2_idx = int(e2.split("-")[-1])
try:
sdp = nx.shortest_path(graph, source=e1, target=e2)
if len(sdp) < min_sdp_len or len(sdp) > max_sdp_len:
# logging.debug("skipped short sdp: {} {} {}".format(e1_text, str(sdp), e2_text))
continue
neg = False
is_neg_gv = False
for i, element in enumerate(sdp):
token_idx = int(element.split("-")[-1]) # get the index of the token
token_text = element.split("-")[0]
if (i == 1 or i == len(sdp) - 2) and token_text in neg_gv_list:
logging.info("skipped gv {} {}:".format(token_text, str(sdp)))
# is_neg_gv = True
sdp_token = sentence[token_idx] # get the token obj
# if any(c.dep_ == 'neg' for c in sdp_token.children):
# neg = True
if neg or is_neg_gv:
continue
# if len(sdp) < 3: # len=2, just entities
# sdp = [sdp[0]] + nodes_list[head_token1_idx-2:head_token1_idx]
# sdp += nodes_list[head_token2_idx+1:head_token2_idx+3] + [sdp[-1]]
# print(e1_text[1:], e2_text[1:], sdp)
# if len(sdp) == 2:
# add context words
vector = []
wordnet_vector = []
negations = 0
head_token_position = None
for i, element in enumerate(sdp):
if element != "ROOT":
token_idx = int(
element.split("-")[-1]
) # get the index of the token
sdp_token = sentence[token_idx] # get the token obj
# if any(c.dep_ == 'neg' for c in sdp_token.children):
# token is negated!
# vector.append("not")
# negations += 1
# logging.info("negated!: {}<->{} {}: {}".format(e1_text, e2_text, sdp_token.text, sentence.text))
if mask_entities and sdp_token.idx in entity_offsets:
vector.append("drug")
else:
vector.append(sdp_token.text)
if wordnet_tags:
wordnet_vector.append(wordnet_tags[token_idx])
# print(element, sdp_token.text, head_token, sdp)
head_token = "{}-{}".format(
sdp_token.head.lower_, sdp_token.head.i
) # get the key of head token
# head token must not have its head in the path, otherwise that would be the head token
# in some cases the token is its own head
if head_token not in sdp or head_token == element:
# print("found head token of:", e1_text, e2_text, sdp_token.text, sdp)
head_token_position = i + negations
# vector.append(parsed[token_idx].text)
# print(vector)
if head_token_position is None:
print("head token not found:", e1_text, e2_text, sdp)
sys.exit()
else:
left_vector = vector[: head_token_position + 1]
right_vector = vector[head_token_position:]
left_wordnet = wordnet_vector[: head_token_position + 1]
right_wordnet = wordnet_vector[head_token_position:]
# word_vectors.append(vector)
left_word_vectors.append(left_vector)
right_word_vectors.append(right_vector)
left_wordnets.append(left_wordnet)
right_wordnets.append(right_wordnet)
# if (sentence_head_tokens[e1], sentence_head_tokens[e2]) in sentence_pairs:
# print(sdp, e1, e2, sentence_text)
# print(e1_text, e2_text, sdp, sentence_text)
# instances.append(sdp)
except nx.exception.NetworkXNoPath:
# pass
logging.warning("no path:", e1_text, e2_text, graph.nodes())
left_word_vectors.append([])
right_word_vectors.append([])
left_wordnets.append([])
right_wordnets.append([])
# print("no path:", e1_text, e2_text, sentence_text, parsed.print_tree(light=True))
# sys.exit()
except nx.NodeNotFound:
logging.warning(
(
"node not found:",
e1_text,
e2_text,
e1,
e2,
list(sentence),
graph.nodes(),
)
)
left_word_vectors.append([])
right_word_vectors.append([])
left_wordnets.append([])
right_wordnets.append([])
labels.append((sentence_head_tokens[e1], sentence_head_tokens[e2]))
# print(sentence_head_tokens[e1], sentence_head_tokens[e2])
if (sentence_head_tokens[e1], sentence_head_tokens[e2]) in sentence_pairs:
classes.append(
sentence_pairs[(sentence_head_tokens[e1], sentence_head_tokens[e2])]
)
else:
classes.append(0)
return (
labels,
(left_word_vectors, right_word_vectors),
(left_wordnets, right_wordnets),
classes,
pos_gv,
neg_gv,
)
| 37.502415 | 119 | 0.550818 |
e1b73e252109287a68039d70bc02eba7d5e821da | 1,049 | py | Python | metadata-ingestion/examples/library/dataset_set_tag.py | cuong-pham/datahub | cb4eb001758f55622add0f4dc3650cf483609cba | [
"Apache-2.0"
] | 1,603 | 2016-03-03T17:21:03.000Z | 2020-01-22T22:12:02.000Z | metadata-ingestion/examples/library/dataset_set_tag.py | cuong-pham/datahub | cb4eb001758f55622add0f4dc3650cf483609cba | [
"Apache-2.0"
] | 1,157 | 2016-03-03T19:29:22.000Z | 2020-01-20T14:41:59.000Z | metadata-ingestion/examples/library/dataset_set_tag.py | cuong-pham/datahub | cb4eb001758f55622add0f4dc3650cf483609cba | [
"Apache-2.0"
] | 570 | 2016-03-03T17:21:05.000Z | 2020-01-21T06:54:10.000Z | # Imports for urn construction utility methods
import logging
from datahub.emitter.mce_builder import make_dataset_urn, make_tag_urn
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
# Imports for metadata model classes
from datahub.metadata.schema_classes import (
ChangeTypeClass,
GlobalTagsClass,
TagAssociationClass,
)
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
dataset_urn = make_dataset_urn(platform="hive", name="realestate_db.sales", env="PROD")
tag_urn = make_tag_urn("purchase")
event: MetadataChangeProposalWrapper = MetadataChangeProposalWrapper(
entityType="dataset",
changeType=ChangeTypeClass.UPSERT,
entityUrn=dataset_urn,
aspectName="globalTags",
aspect=GlobalTagsClass(tags=[TagAssociationClass(tag=tag_urn)]),
)
# Create rest emitter
rest_emitter = DatahubRestEmitter(gms_server="http://localhost:8080")
rest_emitter.emit(event)
log.info(f"Set tags to {tag_urn} for dataset {dataset_urn}")
| 32.78125 | 87 | 0.804576 |
e1b7f693b03922194b579f49635c8089ae32b745 | 517 | py | Python | examples/Old Format/matrix_latex.py | waldyrious/galgebra | b5eb070340434d030dd737a5656fbf709538b0b1 | [
"BSD-3-Clause"
] | 151 | 2018-09-18T12:30:14.000Z | 2022-03-16T08:02:48.000Z | examples/Old Format/matrix_latex.py | abrombo/galgebra | 5ae058c9ba2c17b1baf46c58f77124e82eaf428a | [
"BSD-3-Clause"
] | 454 | 2018-09-19T01:42:30.000Z | 2022-01-18T14:02:00.000Z | examples/Old Format/matrix_latex.py | abrombo/galgebra | 5ae058c9ba2c17b1baf46c58f77124e82eaf428a | [
"BSD-3-Clause"
] | 30 | 2019-02-22T08:25:50.000Z | 2022-01-15T05:20:22.000Z | from __future__ import print_function
from sympy import symbols, Matrix
from galgebra.printer import xpdf, Format
if __name__ == "__main__":
main()
| 19.884615 | 65 | 0.475822 |
e1b88db11881c00abc4ca3f31868a0861378a947 | 780 | py | Python | hopsapp/__init__.py | mrahman013/Hope4Hops-web-applcation | d5bde1463c6fbc1ea5424cb656504119393c6ce2 | [
"MIT"
] | null | null | null | hopsapp/__init__.py | mrahman013/Hope4Hops-web-applcation | d5bde1463c6fbc1ea5424cb656504119393c6ce2 | [
"MIT"
] | null | null | null | hopsapp/__init__.py | mrahman013/Hope4Hops-web-applcation | d5bde1463c6fbc1ea5424cb656504119393c6ce2 | [
"MIT"
] | null | null | null | """Implements a basic flask app that provides hashes of text."""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import flask_login
#pylint: disable=invalid-name
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://yjjuylsytqewni:d0d63322c6abd33e2dadeafd7ef2501f73af54cf2d39596e464ea2c18b0234a3@ec2-23-23-78-213.compute-1.amazonaws.com:5432/d3gdnt7fkmonn1' #pylint: disable=line-too-long
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.secret_key = 'HGTYNVK123LOL908973'
db = SQLAlchemy(app)
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# This import need to be here that's why disabling pylint
#pylint: disable=wrong-import-position
import hopsapp.models
import hopsapp.routes
| 35.454545 | 224 | 0.815385 |
e1b8fdfc631946eef5fedb38c2e25e5e6c2e1add | 800 | py | Python | npytoImage.py | x35yao/camera | 0ee77f5de72d785ba68bef44a557470ec425d702 | [
"MIT"
] | null | null | null | npytoImage.py | x35yao/camera | 0ee77f5de72d785ba68bef44a557470ec425d702 | [
"MIT"
] | null | null | null | npytoImage.py | x35yao/camera | 0ee77f5de72d785ba68bef44a557470ec425d702 | [
"MIT"
] | null | null | null | import numpy as np;
import cv2;
n = 428671
img_RS_color = np.load('/home/p4bhattachan/gripper/3DCameraServer/testImages/npyFiles/{}_RS_color.npy'.format(n))
cv2.imshow('RS Color Image {}'.format(n), img_RS_color)
#
# # img_RS_depth = np.load('/home/p4bhattachan/gripper/3DCameraServer/testImages/npyFiles/{}_RS_depth.npy'.format(n))
# # cv2.imshow('RS Depth Image {}'.format(n), img_RS_depth)
#
# img_ZED_color = np.load('/home/p4bhattachan/gripper/3DCameraServer/testImages/npyFiles/{}_ZED_color.npy'.format(n))
# cv2.imshow('ZED Color Image {}'.format(n), img_ZED_color)
#
# # img_ZED_depth = np.load('/home/p4bhattachan/gripper/3DCameraServer/testImages/npyFiles/{}_ZED_depth.npy'.format(n))
# # cv2.imshow('ZED Depth Image {}'.format(n), img_ZED_depth)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 38.095238 | 119 | 0.7475 |
e1b94f246fa698d25573d863b176f320113a2877 | 11,217 | py | Python | magenta/music/sequences_lib.py | jellysquider/magenta | 0fc8188870f5d1c988b76dae434b21e58362516c | [
"Apache-2.0"
] | null | null | null | magenta/music/sequences_lib.py | jellysquider/magenta | 0fc8188870f5d1c988b76dae434b21e58362516c | [
"Apache-2.0"
] | null | null | null | magenta/music/sequences_lib.py | jellysquider/magenta | 0fc8188870f5d1c988b76dae434b21e58362516c | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines sequence of notes objects for creating datasets."""
import collections
import copy
# internal imports
from magenta.music import constants
from magenta.protobuf import music_pb2
# Set the quantization cutoff.
# Note events before this cutoff are rounded down to nearest step. Notes
# above this cutoff are rounded up to nearest step. The cutoff is given as a
# fraction of a step.
# For example, with quantize_cutoff = 0.75 using 0-based indexing,
# if .75 < event <= 1.75, it will be quantized to step 1.
# If 1.75 < event <= 2.75 it will be quantized to step 2.
# A number close to 1.0 gives less wiggle room for notes that start early,
# and they will be snapped to the previous step.
QUANTIZE_CUTOFF = 0.5
# Shortcut to chord symbol text annotation type.
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
def extract_subsequence(sequence, start_time, end_time):
    """Return a copy of `sequence` restricted to the window [start_time, end_time).

    Notes that begin before `start_time` are dropped entirely; notes that
    begin inside the window but ring past `end_time` are truncated at the
    window's end.

    Args:
        sequence: The NoteSequence to extract a subsequence from.
        start_time: The float time in seconds to start the subsequence.
        end_time: The float time in seconds to end the subsequence.

    Returns:
        A new NoteSequence that is a subsequence of `sequence` in the
        specified time range.
    """
    subsequence = music_pb2.NoteSequence()
    subsequence.CopyFrom(sequence)
    del subsequence.notes[:]

    kept_notes = [note for note in sequence.notes
                  if start_time <= note.start_time < end_time]
    for note in kept_notes:
        new_note = subsequence.notes.add()
        new_note.CopyFrom(note)
        # Truncate notes that extend beyond the end of the window.
        new_note.end_time = min(note.end_time, end_time)

    subsequence.total_time = min(sequence.total_time, end_time)
    return subsequence
def is_power_of_2(x):
    """Return True if `x` is a positive power of two (1, 2, 4, 8, ...).

    A power of two has exactly one bit set, so `x & (x - 1)` clears the
    lowest set bit and is zero only for powers of two. The leading
    `bool(x)` guard rejects zero (and keeps the short-circuit rejecting
    negatives via the bit test) while normalizing the return value to a
    proper bool; the previous version returned the falsy int `x` itself
    when `x` was 0.
    """
    return bool(x) and not x & (x - 1)
| 38.67931 | 80 | 0.692788 |
e1ba5c72da56a9dbf7ee8bd79a41429f11457824 | 8,404 | py | Python | tests/index_jsonurl_test.py | Stidsty/dismantle | 26fb8fe7ba97349a67498715bb47a19329b1a4c7 | [
"Apache-2.0"
] | null | null | null | tests/index_jsonurl_test.py | Stidsty/dismantle | 26fb8fe7ba97349a67498715bb47a19329b1a4c7 | [
"Apache-2.0"
] | null | null | null | tests/index_jsonurl_test.py | Stidsty/dismantle | 26fb8fe7ba97349a67498715bb47a19329b1a4c7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Gary Stidston-Broadbent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from hashlib import md5
from json import JSONDecodeError
from shutil import copy2
import pytest
from pytest_httpserver import HTTPServer
from pytest_httpserver.httpserver import HandlerType
from requests import ConnectionError
from dismantle.index import IndexHandler, JsonUrlIndexHandler
| 42.02 | 79 | 0.707996 |
e1ba723285119341020fa35acb08aec8be4bb131 | 200 | py | Python | src/resdk/__init__.py | AGregorc/resolwe-bio-py | 62304e5d4c54c917575421701c6977dc63fc3a8f | [
"Apache-2.0"
] | 4 | 2016-09-28T16:00:05.000Z | 2018-08-16T16:14:10.000Z | src/resdk/__init__.py | AGregorc/resolwe-bio-py | 62304e5d4c54c917575421701c6977dc63fc3a8f | [
"Apache-2.0"
] | 229 | 2016-03-28T19:41:00.000Z | 2022-03-16T15:02:15.000Z | src/resdk/__init__.py | AGregorc/resolwe-bio-py | 62304e5d4c54c917575421701c6977dc63fc3a8f | [
"Apache-2.0"
] | 18 | 2016-03-10T16:11:57.000Z | 2021-06-01T10:01:49.000Z | """Resolwe SDK for Python."""
from .collection_tables import CollectionTables # noqa
from .resdk_logger import log_to_stdout, start_logging # noqa
from .resolwe import Resolwe, ResolweQuery # noqa
| 40 | 62 | 0.79 |
e1bade04e7403e544b5faa2f08e7005733a09b95 | 4,842 | py | Python | helper/validation_scripts/launch-lm-profile.py | NanoMembers/DeepFlow | 0235fe460d15a95f90202a1fdb3d3405d774511a | [
"Apache-2.0"
] | 3 | 2020-10-29T19:00:29.000Z | 2020-12-21T12:24:28.000Z | helper/validation_scripts/launch-lm-profile.py | NanoMembers/DeepFlow | 0235fe460d15a95f90202a1fdb3d3405d774511a | [
"Apache-2.0"
] | null | null | null | helper/validation_scripts/launch-lm-profile.py | NanoMembers/DeepFlow | 0235fe460d15a95f90202a1fdb3d3405d774511a | [
"Apache-2.0"
] | null | null | null | #!/tools/python/python3.8.3/bin/python
import os
import shutil
import subprocess
import numpy as np
# Sweep dimensions for the GEMM profiling runs: batch size, sequence length,
# hidden size, vocabulary size, layer count, batches-per-epoch and epochs.
batch_list=[i*1024 for i in range(2,7)]
seq_list=[10]
hidden_list=[i*1024 for i in range(2,7)]
vocab_list=[2048] #[int(i) for i in (2**np.linspace(10,13,20)//2*2)]
layer_list=[1]
bpe_list=[10]
epoch_list=[3]

output_dir="/mnt/home/newsha/baidu/developement/MechaFlow/validation/benchmarks/rnnlm/profile_gemm"
result_file="{}/result.csv".format(output_dir)

# Start every sweep from an empty output directory.
if os.path.exists(output_dir):
    shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir)
print("Created {}".format(output_dir))

# Write the (space-separated) header row for the results file.
with open(result_file, "w") as f:
    f.write("Batch Seq Hidden Vocab Layers Epoch BPE core_util dram_util l2_util dram_read dram_write l2_access fp16_inst fma_inst\n\n")
print("Batch Seq Hidden Vocab Layers Epoch BPE core_util dram_util l2_util dram_read dram_write l2_access fp16_inst fma_inst\n\n")

for b in batch_list:
    for s in seq_list:
        for d in hidden_list:
            for v in vocab_list:
                for l in layer_list:
                    for bpe in bpe_list:
                        for e in epoch_list:
                            # Cap batches-per-epoch so b * bpe stays within the
                            # 25000-sample training set.
                            bpe = min(bpe, 25000//b)
                            fname = "B{}-S{}-D{}-V{}-L{}-E{}-P{}".format(b,s,d,v,l,e,bpe)
                            output_file = "{}/{}.out".format(output_dir, fname)
                            # Profile one fp16 GEMM kernel instance of a training
                            # run under Nsight Compute, capturing all metrics.
                            command1="/tools/cuda/cuda-11.0.1/bin/ncu --metrics \"regex:.*\" -k volta_fp16_s884gemm_fp16_... -s 0 -c 1 '/tools/venvs/tensorflow/tensorflow-2.2.0/bin/python' lm-fp16.py -m train -train data/test-index.txt -test data/test-index.txt -valid data/test-index.txt -b{} -s{} -d{} -v{} -l{} -p{} -e{} > {} 2>&1".format(b, s, d, v, l, bpe, e, output_file)
                            #command1 = "/tools/cuda/cuda-11.0.1/bin/nsys profile -t cuda,osrt,cudnn,cublas,nvtx,mpi -o profile/{} --stats=true -f true python lm-fp16.py -b{} -s{} -d{} -v{} -l{} -p{} -e{} -m train -train data/test-index.txt -test data/test-index.txt -valid data/test-index.txt > {} 2>&1".format(fname, b, s, d, v, l, bpe, e, output_file)
                            # Grep individual profiler counters back out of the
                            # captured ncu report text.
                            command2 = "cat {} | grep \"sm__pipe_tensor_op_hmma_cycles_active.avg.pct_of_peak_sustained_active\"| awk {{'print $3'}}".format(output_file) #unit
                            command3 = "cat {} | grep \"dram__throughput.avg.pct_of_peak_sustained_active\"| awk {{'print $3'}}".format(output_file) #unit
                            command4 = "cat {} | grep lts__t_sectors.avg.pct_of_peak_sustained_active | awk {{'print $3'}}".format(output_file) #unit
                            command5 = "cat {} | grep dram_read_bytes | grep sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
                            command6 = "cat {} | grep dram_write_bytes | grep sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
                            command7 = "cat {} | grep lts__t_bytes.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
                            command8 = "cat {} | grep sm__sass_thread_inst_executed_op_fp16_pred_on.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
                            command9 = "cat {} | grep sm__sass_thread_inst_executed_ops_fadd_fmul_ffma_pred_on.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
                            # -1 marks a metric that could not be parsed.
                            result = {'ncu':-1, 'core_util':-1, 'dram_util':-1,
                                      'l2_util':-1, 'dram_read':-1, 'dram_write':-1,
                                      'l2_access':-1, 'fp16_inst':-1, 'fma_inst':-1}
                            # NOTE(review): run_command is not defined in this file
                            # as shown -- presumably a helper that executes the
                            # shell command and stores the value under the given
                            # key in `result`; confirm against the full source.
                            run_command(command1, 'ncu', result)
                            run_command(command2, 'core_util', result)
                            run_command(command3, 'dram_util', result)
                            run_command(command4, 'l2_util', result)
                            run_command(command5, 'dram_read', result)
                            run_command(command6, 'dram_write', result)
                            run_command(command7, 'l2_access', result)
                            run_command(command8, 'fp16_inst', result)
                            run_command(command9, 'fma_inst', result)
                            # Append one row per configuration to the CSV and echo
                            # the same row to stdout.
                            with open(result_file, "a+") as f:
                                f.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d} {6:d} {7:.2f} {8:.2f} {9:.2f} {10:,} {11:,} {12:,} {13:,} {14:,}\n".format(b, s, d, v, l, e, bpe, result['core_util'], result['dram_util'], result['l2_util'], result['dram_read'], result['dram_write'], result['l2_access'], int(result['fp16_inst']), int(result['fma_inst'])))
                            print("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d} {6:d} {7:.2f} {8:.2f} {9:.2f} {10:,} {11:,} {12:,} {13:,} {14:,}\n".format(b, s, d, v, l, e, bpe, result['core_util'], result['dram_util'], result['l2_util'], result['dram_read'], result['dram_write'], result['l2_access'], int(result['fp16_inst']), int(result['fma_inst'])))
| 63.710526 | 363 | 0.621437 |
e1bb9b6b3739ef931135cf1fd4f2fa3e0d1cab30 | 8,171 | py | Python | src/planet_box_extractor/extractor.py | V-AI-S/planet-box-extractor | 5404bc97e7a2e1f6d90c7503d9106973038e4387 | [
"MIT"
] | 6 | 2021-05-31T14:51:55.000Z | 2022-01-27T14:44:04.000Z | src/planet_box_extractor/extractor.py | V-AI-S/planet-box-extractor | 5404bc97e7a2e1f6d90c7503d9106973038e4387 | [
"MIT"
] | null | null | null | src/planet_box_extractor/extractor.py | V-AI-S/planet-box-extractor | 5404bc97e7a2e1f6d90c7503d9106973038e4387 | [
"MIT"
] | null | null | null | from .geo_utils import boundingBox
import time
import PIL.Image
import urllib.request
import mercantile
import numpy as np
if __name__ == '__main__':
latitude, longitude = 5, 20
zoom = 15
radius = 0.2
API_KEY = ''
map_id = ''
extractor = PlanetBoxExtractor(radius, zoom, map_id, API_KEY)
image = extractor.Process(latitude, longitude)
| 41.060302 | 153 | 0.598947 |
e1bbdc48371fed473f16ae3afb93373be31ead4e | 21,365 | py | Python | Kai/python/modules/mctruth.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | 1 | 2022-01-17T17:29:38.000Z | 2022-01-17T17:29:38.000Z | Kai/python/modules/mctruth.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | null | null | null | Kai/python/modules/mctruth.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | 1 | 2021-12-15T10:56:50.000Z | 2021-12-15T10:56:50.000Z | from __future__ import (division, print_function)
import os
import ROOT
#import numpy as np
#import itertools
#from collections import OrderedDict
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from PhysicsTools.NanoAODTools.postprocessing.tools import * #deltaR, deltaPhi, etc.
# Maps ROOT leaf type names (e.g. 'Float_t') to the single-character branch
# type codes used when declaring TTree branches.
_rootLeafType2rootBranchType = { 'UChar_t':'b', 'Char_t':'B', 'UInt_t':'i', 'Int_t':'I', 'Float_t':'F', 'Double_t':'D', 'ULong64_t':'l', 'Long64_t':'L', 'Bool_t':'O' }
| 54.088608 | 192 | 0.539246 |
e1bced98ae2a678cded5046d18c92e44944d6925 | 1,214 | py | Python | skimpy/utils/namespace.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 13 | 2020-11-05T10:59:13.000Z | 2022-03-21T01:38:31.000Z | skimpy/utils/namespace.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 4 | 2022-01-27T10:23:40.000Z | 2022-03-10T18:16:06.000Z | skimpy/utils/namespace.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 6 | 2020-08-04T17:01:33.000Z | 2022-03-21T01:38:32.000Z | # -*- coding: utf-8 -*-
"""
.. module:: pytfa
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Simulation types """
QSSA = 'qssa'
TQSSA = 'tqssa'
MCA = 'mca'
ODE = 'ode'
ELEMENTARY = 'elementary'
""" Jacobian Types"""
NUMERICAL = 'numerical'
SYMBOLIC = 'symbolic'
""" MCA Types """
NET = 'net'
SPLIT = 'split'
""" Item types """
PARAMETER = 'parameter'
VARIABLE = 'variable'
""" Units """
KCAL = 'kcal'
KJ = 'kJ'
JOULE = 'JOULE'
""" OTHER """
WATER_FORMULA = 'H2O'
| 21.298246 | 72 | 0.702636 |
e1be426bfe54febaaf2747236ba29b8bea95325e | 2,897 | py | Python | designate/storage/impl_sqlalchemy/migrate_repo/versions/051_scoped_tsig.py | cneill/designate-testing | 7bf320062d85a12bff2aee8d26c133941a289fc4 | [
"Apache-2.0"
] | null | null | null | designate/storage/impl_sqlalchemy/migrate_repo/versions/051_scoped_tsig.py | cneill/designate-testing | 7bf320062d85a12bff2aee8d26c133941a289fc4 | [
"Apache-2.0"
] | null | null | null | designate/storage/impl_sqlalchemy/migrate_repo/versions/051_scoped_tsig.py | cneill/designate-testing | 7bf320062d85a12bff2aee8d26c133941a289fc4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from sqlalchemy import Enum
from sqlalchemy.schema import Table, MetaData, Column
from migrate.changeset.constraint import UniqueConstraint
from designate.sqlalchemy.types import UUID
# Shared SQLAlchemy metadata object for the tables touched by this migration.
meta = MetaData()

# Get the default pool_id from the config file (stored without dashes).
default_pool_id = cfg.CONF['service:central'].default_pool_id.replace('-', '')

# Allowed values for the TSIG key scope enum introduced by this migration.
TSIG_SCOPES = ['POOL', 'ZONE']
| 36.2125 | 78 | 0.713497 |
e1bf68076ea2cc2d9234c0759575b80d167f8b2e | 680 | py | Python | geomat/stein/migrations/0060_remove_mineraltype_mohs_scale.py | mimischi/django-geomat | 8c5bc4c9ba9759b58b52ddf339ccaec40ec5f6ea | [
"BSD-3-Clause"
] | 3 | 2017-01-13T15:53:39.000Z | 2017-05-05T11:57:55.000Z | geomat/stein/migrations/0060_remove_mineraltype_mohs_scale.py | mimischi/django-geomat | 8c5bc4c9ba9759b58b52ddf339ccaec40ec5f6ea | [
"BSD-3-Clause"
] | 233 | 2016-11-05T15:19:48.000Z | 2021-09-07T23:33:47.000Z | geomat/stein/migrations/0060_remove_mineraltype_mohs_scale.py | GeoMatDigital/django-geomat | 8c5bc4c9ba9759b58b52ddf339ccaec40ec5f6ea | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.2 on 2018-05-04 07:33
from django.db import migrations
from django.db import models
| 26.153846 | 106 | 0.632353 |
e1c057aa5119875fed8dba5a07e37ff673709a2b | 14,420 | py | Python | bamboo/core/calculator.py | pld/bamboo | a0fc77aebd6ff6b1087ba46896b0ce705fbb25a3 | [
"BSD-3-Clause"
] | 27 | 2015-01-14T15:57:54.000Z | 2020-12-27T19:34:41.000Z | bamboo/core/calculator.py | biswapanda/bamboo | 72fc260822a27ce52cbe65de178f8fa1b60311f3 | [
"BSD-3-Clause"
] | 2 | 2015-08-06T15:23:28.000Z | 2016-01-28T00:05:25.000Z | bamboo/core/calculator.py | biswapanda/bamboo | 72fc260822a27ce52cbe65de178f8fa1b60311f3 | [
"BSD-3-Clause"
] | 10 | 2015-08-07T01:50:39.000Z | 2019-05-15T21:41:18.000Z | from collections import defaultdict
from celery.task import task
from pandas import concat, DataFrame
from bamboo.core.aggregator import Aggregator
from bamboo.core.frame import add_parent_column, join_dataset
from bamboo.core.parser import Parser
from bamboo.lib.datetools import recognize_dates
from bamboo.lib.jsontools import df_to_jsondict
from bamboo.lib.mongo import MONGO_ID
from bamboo.lib.parsing import parse_columns
from bamboo.lib.query_args import QueryArgs
from bamboo.lib.utils import combine_dicts, flatten, to_list
def calculate_columns(dataset, calculations):
    """Calculate and store new columns for `calculations`.

    Aggregation calculations are saved into their own linked datasets via an
    Aggregator; non-aggregation calculations are parsed into columns, joined
    together, and written back to this dataset's observations. Finally the
    new columns are propagated into any merged child datasets.

    .. note::

        This can result in race-conditions when:

        - deleting ``controllers.Datasets.DELETE``
        - updating ``controllers.Datasets.POST([dataset_id])``

        Therefore, perform these actions asychronously.

    :param dataset: The dataset to calculate for.
    :param calculations: A list of calculations.
    """
    column_frames = []

    for calc in calculations:
        if calc.aggregation:
            # Aggregations live in their own aggregated dataset.
            aggregator = __create_aggregator(
                dataset, calc.formula, calc.name, calc.groups_as_list)
            aggregator.save(dataset)
        else:
            parsed = parse_columns(dataset, calc.formula, calc.name)
            column_frames.append(DataFrame(parsed[0]))

    if column_frames:
        joined = column_frames[0]
        for frame in column_frames[1:]:
            joined = joined.join(frame)
        dataset.update_observations(joined)

    # Propagate the new column(s) to any merged child datasets.
    for merged in dataset.merged_datasets:
        __propagate_column(merged, dataset)
def dframe_from_update(dataset, new_data):
    """Make a DataFrame for the `new_data`.

    Each incoming row is filtered down to the dataset's known columns:
    reserved keys (e.g. ``_id``) pass through unchanged, labels are mapped
    to their slugs, and values are converted to the schema's types. The
    resulting frame is indexed to append after the dataset's current rows.

    :param new_data: Data to add to dframe.
    :type new_data: List.
    """
    columns = dataset.columns
    labels_to_slugs = dataset.schema.labels_to_slugs
    num_columns = len(columns)
    num_rows = dataset.num_rows
    dframe_empty = not num_columns

    if dframe_empty:
        columns = dataset.schema.keys()

    filtered_data = []
    for row in new_data:
        filtered_row = dict()
        for col, val in row.iteritems():
            # Special case for reserved keys (e.g. _id): keep verbatim.
            if col == MONGO_ID:
                if (not num_columns or col in columns) and \
                        col not in filtered_row.keys():
                    filtered_row[col] = val
                continue

            # If col is a label take its slug; if it is already a slug,
            # take col itself.
            slug = labels_to_slugs.get(
                col, col if col in labels_to_slugs.values() else None)

            # Keep it if the slug is valid (or the dframe is empty).
            if (slug or col in labels_to_slugs.keys()) and (
                    dframe_empty or slug in columns):
                filtered_row[slug] = dataset.schema.convert_type(slug, val)

        filtered_data.append(filtered_row)

    start = num_rows
    new_index = range(start, start + len(filtered_data))
    return DataFrame(filtered_data, index=new_index)
def __calculation_data(dataset):
    """Create a list of aggregate calculation information.

    Builds a list of calculation information from the current dataset's
    aggregated datasets and aggregate calculations. Each element is a tuple
    of ``(formula, slug, group, aggregated_dataset)``.
    """
    formulas_by_name = {
        c.name: c.formula
        for c in dataset.calculations(only_aggs=True)}
    agg_names = set(formulas_by_name.keys())

    grouped = defaultdict(list)
    for group, agg_dataset in dataset.aggregated_datasets:
        labels_to_slugs = agg_dataset.schema.labels_to_slugs

        # Only calculations whose labels appear in this aggregated dataset.
        for name in set(labels_to_slugs.keys()) & agg_names:
            grouped[name].append((
                formulas_by_name[name], labels_to_slugs[name], group,
                agg_dataset))

    return flatten(grouped.values())
def __update_is_valid(dataset, new_dframe):
    """Check if the update is valid.

    Check whether this dataset is the right-hand side of any joins and deny
    the update if applying it would introduce duplicate values in a join
    column (which would make the join invalid).

    :param dataset: The dataset to check if update valid for.
    :param new_dframe: The update dframe to check.

    :returns: True if the update is valid, False otherwise.
    """
    join_cols = [col for col in dataset.on_columns_for_rhs_of_joins
                 if col in new_dframe.columns and col in dataset.columns]
    select = dict.fromkeys(join_cols, 1)
    existing = dataset.dframe(query_args=QueryArgs(select=select))

    for col in join_cols:
        combined = concat([new_dframe[col], existing[col]])
        # Any duplicate join-key value invalidates the join.
        if combined.nunique() != len(combined):
            return False

    return True
def __propagate_column(dataset, parent_dataset):
    """Propagate columns in `parent_dataset` to `dataset`.

    When a new calculation is added to a dataset this will propagate the
    new column to all child (merged) datasets by replacing the stale rows
    that originated from the parent with the parent's current rows.

    :param dataset: The child dataset.
    :param parent_dataset: The dataset to propagate.
    """
    parent_id = parent_dataset.dataset_id

    # Drop the now out-of-date rows that came from the parent...
    dataset.remove_parent_observations(parent_id)
    remaining = dataset.dframe(keep_parent_ids=True)

    # ...and splice in the parent's current rows, tagged with its id.
    refreshed_parent_rows = add_parent_column(parent_dataset.dframe(),
                                              parent_id)
    dataset.replace_observations(concat([remaining, refreshed_parent_rows]))
    dataset.clear_summary_stats()

    # Recurse into datasets merged from this one.
    for child in dataset.merged_datasets:
        __propagate_column(child, dataset)
def __update_aggregate_dataset(dataset, formula, new_dframe, name, groups,
                               a_dataset, reducible):
    """Update the aggregated dataset built for `dataset` with `calculation`.

    Recomputes the aggregation over `new_dframe`, stores the result in
    `a_dataset`, then pushes the refreshed aggregate rows down into every
    dataset merged from `a_dataset`.

    :param formula: The formula to execute.
    :param new_dframe: The DataFrame to aggregate on.
    :param name: The name of the aggregation.
    :param groups: A column or columns to group on.
    :type groups: String, list of strings, or None.
    :param a_dataset: The DataSet to store the aggregation in.
    """
    aggregator = __create_aggregator(
        dataset, formula, name, groups, dframe=new_dframe)
    updated_agg_dframe = aggregator.update(dataset, a_dataset, formula,
                                           reducible)

    # Cascade the refreshed aggregate rows into merged descendants.
    updated_rows = df_to_jsondict(updated_agg_dframe)
    parent_id = a_dataset.dataset_id
    for merged_dataset in a_dataset.merged_datasets:
        # Remove the stale rows this aggregate contributed, then recompute.
        merged_dataset.remove_parent_observations(parent_id)
        calculate_updates(merged_dataset, updated_rows,
                          parent_dataset_id=parent_id)
def __update_joined_datasets(dataset, update):
    """Update any joined datasets.

    Applies a single-key `update` dict to every dataset joined against
    `dataset`:

    - 'add': a DataFrame of new rows; re-join or cascade them depending on
      whether this dataset was the left- or right-hand side of the join.
    - 'delete': remove the given observation from each joined dataset.
    - 'edit': apply an observation edit to each joined dataset.
    """
    if 'add' in update:
        new_dframe = update['add']

    for direction, other_dataset, on, j_dataset in dataset.joined_datasets:
        if 'add' in update:
            if direction == 'left':
                # This dataset was the RHS: only rebuild the join if the
                # update touches the join column...
                if on in new_dframe.columns:
                    left_dframe = other_dataset.dframe(padded=True)

                    # ...and only if some new `on` value actually occurs in
                    # the LHS join column (otherwise no joined row changes).
                    if len(set(new_dframe[on]).intersection(
                            set(left_dframe[on]))):
                        merged_dframe = join_dataset(left_dframe, dataset, on)
                        j_dataset.replace_observations(merged_dframe)

                        # TODO is it OK not to propagate the join here?
            else:
                # This dataset was the LHS: if the join column is present,
                # enrich the new rows with the RHS columns before cascading.
                if on in new_dframe:
                    new_dframe = join_dataset(new_dframe, other_dataset, on)

                # NOTE(review): new_dframe is rebound above, so subsequent
                # joined datasets in this loop see the enriched rows --
                # confirm this accumulation is intended.
                calculate_updates(j_dataset, df_to_jsondict(new_dframe),
                                  parent_dataset_id=dataset.dataset_id)
        elif 'delete' in update:
            j_dataset.delete_observation(update['delete'])
        elif 'edit' in update:
            j_dataset.update_observation(*update['edit'])
| 35.343137 | 78 | 0.676283 |
e1c0ee31e7c392fd9a301672456d03f86541b8f3 | 265 | py | Python | modules/python3/tests/unittests/scripts/glm.py | ImagiaViz/inviwo | a00bb6b0551bc1cf26dc0366c827c1a557a9603d | [
"BSD-2-Clause"
] | 349 | 2015-01-30T09:21:52.000Z | 2022-03-25T03:10:02.000Z | modules/python3/tests/unittests/scripts/glm.py | ImagiaViz/inviwo | a00bb6b0551bc1cf26dc0366c827c1a557a9603d | [
"BSD-2-Clause"
] | 641 | 2015-09-23T08:54:06.000Z | 2022-03-23T09:50:55.000Z | modules/python3/tests/unittests/scripts/glm.py | ImagiaViz/inviwo | a00bb6b0551bc1cf26dc0366c827c1a557a9603d | [
"BSD-2-Clause"
] | 124 | 2015-02-27T23:45:02.000Z | 2022-02-21T09:37:14.000Z | import inviwopy
from inviwopy.glm import *
# Exercise the inviwopy glm bindings: vector/matrix construction,
# matrix-vector multiplication, and every vec4 swizzle alias.
v1 = vec3(1, 2, 3)
v2 = size2_t(4, 5)
m1 = mat4(1)
m2 = mat3(0, 1, 0, -1, 0, 0, 0, 0, 2)
v3 = m2 * v1

v4 = vec4(1, 2, 3, 4)
# The same four components exposed under their xyzw / rgba / stpq names.
w, a, q = v4.w, v4.a, v4.q
z, b, p = v4.z, v4.b, v4.p
y, g, t = v4.y, v4.g, v4.t
x, r, s = v4.x, v4.r, v4.s
| 10.6 | 29 | 0.532075 |
e1c1c2a4c0c8d49965261747c7efaf839f60298c | 7,538 | py | Python | djangoexample/thumbs/views.py | arneb/sorethumb | 5b224fbf30eaeb83640510d11a0dea40592e76ad | [
"BSD-3-Clause"
] | null | null | null | djangoexample/thumbs/views.py | arneb/sorethumb | 5b224fbf30eaeb83640510d11a0dea40592e76ad | [
"BSD-3-Clause"
] | null | null | null | djangoexample/thumbs/views.py | arneb/sorethumb | 5b224fbf30eaeb83640510d11a0dea40592e76ad | [
"BSD-3-Clause"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.conf import settings
from models import ThumbTest
from sorethumb.filters.defaultfilters import *
from sorethumb.filters.drawfilters import *
from sorethumb.djangothumbnail import DjangoThumbnail
# Metadata driving the thumbnail demo page: each entry names a thumbnail
# style ('thumb'), gives a display title and description, and optionally
# includes the source snippet ('code') shown beside the rendered example.
# Fixes user-facing typos: "in a defined dimensions", "backround",
# "gadient", "overlayed".
thumb_examples = [
    {
        'thumb': 'small_thumb',
        'title': 'Basic thumbnail',
        'description': """Here we have a basic thumbnail that uses PIL's thumbnail operation to reduce an image to fit in defined dimensions.""",
        'code': '''class SmallThumb(DjangoThumbnail):
    filters = [ThumbnailFilter(120, 100)]'''
    },
    {
        'thumb': 'square',
        'title': 'Square',
        'description': 'As above, but cropped to be square. Since uploaded images can be any old size, they can tend to look ragged when presented in rows. Square thumbs look better in rows, at the expense of a little cropping',
        'code': """class Square(DjangoThumbnail):
    filters = [ThumbnailFilter(100, 100),
               SquareFilter()] """
    },
    {
        'thumb': 'rounded_corners5',
        'title': '5 pixels rounded corner',
        'description': """Rounded corners without CSS3, on a transparent background. What is it with designers and rounded corners anyway?""",
        'code': """class RoundedCorners5(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               RoundedCornerFilter(5)] """,
    },
    {
        'thumb': 'rounded_corners10',
        'title': '10 pixels rounded corner',
        'description': 'As above, but 10 pixels.',
    },
    {
        'thumb': 'rounded_corners20',
        'title': '20 pixels rounded corner',
        'description': 'Even more rounded corners',
    },
    {
        'thumb': 'rounded_corners_edged',
        'title': 'Rounded corners with a border',
        'description': 'The rounded corner filter also supports a coloured border',
        'code': """class RoundedCornersEdged(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               RoundedCornerFilter(10, border='#333')]""",
    },
    {
        'thumb': 'rounded_corners_background',
        'title': "Rounded corners on an opaque background",
        'description': "Rounded corners on an opaque background for browsers with poor support for per-pixel transparency — IE6 I'm looking at you!",
        'code': """class RoundedCornersBackground(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               RoundedCornerFilter(10, border='#333'),
               ResizeCanvasFilter(130, 110, '#fff'),
               OpaqueFilter('#fff')] """
    },
    {
        'thumb': 'rounded_corners_background_gradient',
        'title': "Rounded corners on a gradient",
        'description': "As above, but on a gradient background. The vertical gradient filter replaces transparent areas with a smooth gradient between two colours.",
        'code': """class RoundedCornersBackgroundGradient(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               RoundedCornerFilter(10, border='#000'),
               ResizeCanvasFilter(130, 110, '#e2e2ff', background_opacity=0),
               VerticalGradientFilter('#fff', '#88e')] """
    },
    {
        'thumb': 'mask_thumb',
        'title': 'Masked thumbnail',
        'description': 'This thumbnail uses MaskFilter which replaces the alpha channel with another image, to create some interesting effects.',
        'code': """class MaskThumb(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               ResizeCanvasFilter(120, 100, '#000', background_opacity=0),
               MaskFilter(settings.MEDIA_ROOT+'/alpha.png')]
"""
    },
    {
        'thumb': 'gray_thumb',
        'title': 'Grayscale',
        'description': 'A grayscale thumb, could be used as a hover state.',
        'code': """class GrayThumb(DjangoThumbnail):
    filters = [ThumbnailFilter(120, 100),
               GrayscaleFilter()]"""
    },
    {
        'thumb': 'faded_thumb',
        'title': '50% opacity',
        'description': 'The OpacityFilter sets the opacity of the thumbnail.',
        'code': """class FadedThumb(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               OpacityFilter(.5)] """
    },
    {
        'thumb': 'overlay_thumb',
        'title': 'Thumbnail with overlay',
        'description': """A thumbnail with an overlaid transparent png. Could be used to indicate online status.""",
        'code': '''class OverlayThumb(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               OverlayFilter(settings.MEDIA_ROOT+'/user.png')]'''
    },
]
| 31.805907 | 223 | 0.602282 |
e1c3efdf6d1bcb608ddb86a4384fd1aed1e4458f | 117 | py | Python | hello_world.py | michaeljamieson/Python01 | 96777e5252aaf58e5b424dd5b39186b395d9d859 | [
"Apache-2.0"
] | null | null | null | hello_world.py | michaeljamieson/Python01 | 96777e5252aaf58e5b424dd5b39186b395d9d859 | [
"Apache-2.0"
] | null | null | null | hello_world.py | michaeljamieson/Python01 | 96777e5252aaf58e5b424dd5b39186b395d9d859 | [
"Apache-2.0"
] | null | null | null | print ('hello world')
print ('hey i did something')
print ('what happens if i do a ;');
print ('apparently nothing')
| 23.4 | 35 | 0.683761 |
e1c41ef577f4a05e6ab921f39322d64330728ff4 | 1,688 | py | Python | start.py | AndrewJanuary/PM-Monitor | 43548c805d2ee11ac54f8df874cc06be458454a8 | [
"MIT"
] | null | null | null | start.py | AndrewJanuary/PM-Monitor | 43548c805d2ee11ac54f8df874cc06be458454a8 | [
"MIT"
] | 6 | 2021-01-28T22:04:45.000Z | 2021-12-20T20:59:03.000Z | start.py | AndrewJanuary/PM-Monitor | 43548c805d2ee11ac54f8df874cc06be458454a8 | [
"MIT"
] | null | null | null | from app.sensor import Sensor
from app.uploader import Uploader
from app.offline import Offline
import time, logging, argparse, sys, random, datetime
logging.basicConfig(filename='airquality.log', level=logging.DEBUG, filemode='a',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sen = Sensor('PM Sensor 1', '/dev/ttyUSB0', b'\xaa', b'0xAB', b'\xc0')
| 30.690909 | 82 | 0.64455 |
e1c4600d073fba00b0a31f0113ee9536694f12a6 | 3,364 | py | Python | py_trees_ros/visitors.py | geoc1234/py_trees_ros | 65a055624f9261d67f0168ef419aa650302f96d0 | [
"BSD-3-Clause"
] | 65 | 2019-05-01T08:21:42.000Z | 2022-03-23T15:49:55.000Z | py_trees_ros/visitors.py | geoc1234/py_trees_ros | 65a055624f9261d67f0168ef419aa650302f96d0 | [
"BSD-3-Clause"
] | 62 | 2019-02-27T14:27:42.000Z | 2022-02-08T03:54:30.000Z | py_trees_ros/visitors.py | geoc1234/py_trees_ros | 65a055624f9261d67f0168ef419aa650302f96d0 | [
"BSD-3-Clause"
] | 23 | 2019-03-03T17:09:59.000Z | 2022-01-06T03:07:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# License: BSD
# https://raw.githubusercontent.com/splintered-reality/py_trees_ros/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
ROS Visitors are entities that can be passed to a ROS tree implementation
(e.g. :class:`~py_trees_ros.trees.BehaviourTree`) and used to either visit
each and every behaviour in the tree, or visit behaviours as the tree is
traversed in an executing tick. At each behaviour, the visitor
runs its own method on the behaviour to do as it wishes - logging, introspecting).
.. warning:: Visitors should not modify the behaviours they visit.
.. seealso:: The base interface and core visitors in :mod:`py_trees.visitors`
"""
##############################################################################
# Imports
##############################################################################
import py_trees.visitors
import py_trees_ros_interfaces.msg as py_trees_msgs
import rclpy
import time
from . import conversions
##############################################################################
# Visitors
##############################################################################
| 31.735849 | 92 | 0.568668 |
e1c8c4baec324f5e5f8e13e03541f29a1a32842d | 11,394 | py | Python | Jarvis/features/Friday_Blueprint.py | faizeraza/Jarvis-Virtual-Assistant- | da88fc0124e6020aff1030317dc3dc918f7aa017 | [
"MIT"
] | 1 | 2021-12-14T00:18:10.000Z | 2021-12-14T00:18:10.000Z | Jarvis/features/Friday_Blueprint.py | faizeraza/Jarvis-Virtual-Assistant- | da88fc0124e6020aff1030317dc3dc918f7aa017 | [
"MIT"
] | null | null | null | Jarvis/features/Friday_Blueprint.py | faizeraza/Jarvis-Virtual-Assistant- | da88fc0124e6020aff1030317dc3dc918f7aa017 | [
"MIT"
] | 1 | 2021-12-29T05:01:02.000Z | 2021-12-29T05:01:02.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Friday_Blueprint.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
if __name__ == "__main__":
    import sys
    # Standard PyQt bootstrap: create the application, attach the generated
    # Ui_MainWindow (defined above in the full module, elided in this
    # excerpt) to a fresh QMainWindow, show it, and block in the Qt event
    # loop until the window is closed.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # exec_() returns the event-loop exit code; forward it as the process
    # exit status.
    sys.exit(app.exec_())
| 49.755459 | 121 | 0.722924 |
e1c8d5b0e59bc3cff42a51e6c70986bae9cb73c9 | 3,201 | py | Python | pints/toy/_logistic_model.py | iamleeg/pints | bd1c11472ff3ec0990f3d55f0b2f20d92397926d | [
"BSD-3-Clause"
] | null | null | null | pints/toy/_logistic_model.py | iamleeg/pints | bd1c11472ff3ec0990f3d55f0b2f20d92397926d | [
"BSD-3-Clause"
] | null | null | null | pints/toy/_logistic_model.py | iamleeg/pints | bd1c11472ff3ec0990f3d55f0b2f20d92397926d | [
"BSD-3-Clause"
] | null | null | null | #
# Logistic toy model.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import numpy as np
import pints
from . import ToyModel
| 34.419355 | 79 | 0.555764 |
e1cc98556e1e617de9737efeaed139473d56ebaf | 426 | py | Python | homeworks/vecutil.py | JediKoder/coursera-CodeMatrix | 1ac461d22ebaf2777eabdcf31d76d709c33f472a | [
"MIT"
] | 3 | 2018-01-11T07:48:06.000Z | 2020-04-27T20:49:02.000Z | homeworks/vecutil.py | JediKoder/coursera-CodeMatrix | 1ac461d22ebaf2777eabdcf31d76d709c33f472a | [
"MIT"
] | null | null | null | homeworks/vecutil.py | JediKoder/coursera-CodeMatrix | 1ac461d22ebaf2777eabdcf31d76d709c33f472a | [
"MIT"
] | 1 | 2021-01-26T07:25:48.000Z | 2021-01-26T07:25:48.000Z | # Copyright 2013 Philip N. Klein
from vec import Vec
def list2vec(L):
    """Build a Vec from the list L: the domain is {0, ..., len(L)-1} and
    the entry at index i is L[i].
    >>> list2vec([10, 20, 30])
    Vec({0, 1, 2},{0: 10, 1: 20, 2: 30})
    """
    domain = set(range(len(L)))
    return Vec(domain, {index: entry for index, entry in enumerate(L)})
def zero_vec(D):
    """Returns a zero vector with the given domain
    """
    # Sparse representation: no entries are stored, which Vec reads as an
    # all-zero vector (consistent with the docstring above).
    return Vec(D, {})
| 25.058824 | 80 | 0.577465 |
e1ccdf7c98befd87dd0fafb60f1dfb6a4f453f5f | 59,998 | py | Python | squidpy/im/_container.py | Emberwhirl/squidpy | 456c49ac9149e16562617a8a4236a9faa2c0480d | [
"BSD-3-Clause"
] | 1 | 2022-02-02T13:41:36.000Z | 2022-02-02T13:41:36.000Z | squidpy/im/_container.py | Emberwhirl/squidpy | 456c49ac9149e16562617a8a4236a9faa2c0480d | [
"BSD-3-Clause"
] | null | null | null | squidpy/im/_container.py | Emberwhirl/squidpy | 456c49ac9149e16562617a8a4236a9faa2c0480d | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
from copy import copy, deepcopy
from types import MappingProxyType
from typing import (
Any,
Union,
Mapping,
TypeVar,
Callable,
Iterable,
Iterator,
Sequence,
TYPE_CHECKING,
)
from pathlib import Path
from functools import partial
from itertools import chain
from typing_extensions import Literal
import re
import validators
from scanpy import logging as logg
from anndata import AnnData
from scanpy.plotting.palettes import default_102 as default_palette
from dask import delayed
import numpy as np
import xarray as xr
import dask.array as da
from matplotlib.colors import ListedColormap
import matplotlib as mpl
import matplotlib.pyplot as plt
from skimage.util import img_as_float
from skimage.transform import rescale
from squidpy._docs import d, inject_docs
from squidpy._utils import NDArrayA, singledispatchmethod
from squidpy.im._io import _lazy_load_image, _infer_dimensions, _assert_dims_present
from squidpy.gr._utils import (
_assert_in_range,
_assert_positive,
_assert_non_negative,
_assert_spatial_basis,
_assert_non_empty_sequence,
)
from squidpy.im._coords import (
CropCoords,
CropPadding,
_NULL_COORDS,
_NULL_PADDING,
TupleSerializer,
_update_attrs_scale,
_update_attrs_coords,
)
from squidpy.im._feature_mixin import FeatureMixin
from squidpy._constants._constants import InferDimensions
from squidpy._constants._pkg_constants import Key
FoI_t = Union[int, float]
Pathlike_t = Union[str, Path]
Arraylike_t = Union[NDArrayA, xr.DataArray]
InferDims_t = Union[Literal["default", "prefer_channels", "prefer_z"], Sequence[str]]
Input_t = Union[Pathlike_t, Arraylike_t, "ImageContainer"]
Interactive = TypeVar("Interactive") # cannot import because of cyclic dependencies
_ERROR_NOTIMPLEMENTED_LIBID = f"It seems there are multiple `library_id` in `adata.uns[{Key.uns.spatial!r}]`.\n \
Loading multiple images is not implemented (yet), please specify a `library_id`."
__all__ = ["ImageContainer"]
| 37.173482 | 120 | 0.561252 |
e1cd563f597751eb051e125f9959363e2f96050c | 397 | py | Python | users/forms.py | kurosh-wss/Personal-Finance-Management | 9c7c467b95999974492df19a0f0286809f877c87 | [
"MIT"
] | null | null | null | users/forms.py | kurosh-wss/Personal-Finance-Management | 9c7c467b95999974492df19a0f0286809f877c87 | [
"MIT"
] | null | null | null | users/forms.py | kurosh-wss/Personal-Finance-Management | 9c7c467b95999974492df19a0f0286809f877c87 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm
from crispy_bootstrap5.bootstrap5 import FloatingField
from crispy_forms.layout import Layout
from crispy_forms.helper import FormHelper
| 30.538462 | 58 | 0.806045 |
e1cdbef2e0091f3d12ceeeebb7a6739477ce69ea | 1,214 | py | Python | tutorzzz_reminder/reminder.py | xiebei1108/tools | 8a3141e5d97305f4438e828c62eb7be512767aa9 | [
"Apache-2.0"
] | null | null | null | tutorzzz_reminder/reminder.py | xiebei1108/tools | 8a3141e5d97305f4438e828c62eb7be512767aa9 | [
"Apache-2.0"
] | null | null | null | tutorzzz_reminder/reminder.py | xiebei1108/tools | 8a3141e5d97305f4438e828c62eb7be512767aa9 | [
"Apache-2.0"
] | null | null | null | import json
import requests
import config
# IDs of assignments that have already been handled, so duplicate
# notifications are not sent.  NOTE(review): purpose inferred from the
# name — the functions that populate it are elided from this excerpt.
assignedIdList = []  # list literal: idiomatic and avoids a needless call
| 25.829787 | 93 | 0.528007 |
e1ce81097515faf4dfa5b55142281e9cb5ff0a2c | 3,315 | py | Python | invoicing/crud/invoice_crud.py | dnegreira/Invoicing | 0bc8133e989f095c10151f67482e249416274947 | [
"MIT"
] | null | null | null | invoicing/crud/invoice_crud.py | dnegreira/Invoicing | 0bc8133e989f095c10151f67482e249416274947 | [
"MIT"
] | null | null | null | invoicing/crud/invoice_crud.py | dnegreira/Invoicing | 0bc8133e989f095c10151f67482e249416274947 | [
"MIT"
] | null | null | null | from invoicing.crud.base_crud import BaseCrud
from invoicing.latex.latex_invoice import LatexInvoice
from invoicing.models.invoice_model import InvoiceModel
from invoicing.repository.invoice_repository import InvoiceRepository
from invoicing.repository.job_repository import JobRepository
from invoicing.ui.date import Date
from invoicing.ui.menu import Menu
from invoicing.ui.style import Style
from invoicing.value_validation.value_validation import Validation
| 44.2 | 103 | 0.650075 |
e1cfe37a6f7f6e565038ee9ac5851b8cdd75207b | 946 | py | Python | ds3225_client.py | kim-tom/dbus_server | b16d1b47dfe4d699ef0177592ba528ba988f17be | [
"MIT"
] | null | null | null | ds3225_client.py | kim-tom/dbus_server | b16d1b47dfe4d699ef0177592ba528ba988f17be | [
"MIT"
] | null | null | null | ds3225_client.py | kim-tom/dbus_server | b16d1b47dfe4d699ef0177592ba528ba988f17be | [
"MIT"
] | null | null | null | from ds3225 import DS3225
import dbus
import dbus.mainloop.glib
import dbus.service
from gi.repository import GObject, GLib
# Servo position (degrees) used as the "unlocked" pose.
UNLOCKED_DEG = 175
# Install the GLib main loop as the default for dbus-python before any bus
# connections are made.
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
# Well-known D-Bus addressing for the DS3225 servo service.
BUS_NAME = 'jp.kimura.DS3225Service'
OBJECT_PATH = '/jp/kimura/DS3225Server'
INTERFACE = 'jp.kimura.DS3225'
if __name__ == '__main__':
    import time
    # Demo loop: swing the servo between the unlocked angle and 90 degrees
    # below it, pausing 2 s at each end.  DS3225Client is defined above in
    # the full module (elided in this excerpt).
    ds3225_client = DS3225Client()
    while True:
        ds3225_client.set_pos(UNLOCKED_DEG)
        time.sleep(2)
        ds3225_client.set_pos(UNLOCKED_DEG-90)
        time.sleep(2)
| 27.823529 | 65 | 0.701903 |
e1d05a453d3d0e33ff80baf493eec26c3cbe59f9 | 437 | py | Python | Extra kunskap/Kod/Farmen.py | abbindustrigymnasium/Programmering-1-Slutuppgift | 679069ebb632ee59f6b4ee3035c18ae204cde145 | [
"Apache-2.0"
] | null | null | null | Extra kunskap/Kod/Farmen.py | abbindustrigymnasium/Programmering-1-Slutuppgift | 679069ebb632ee59f6b4ee3035c18ae204cde145 | [
"Apache-2.0"
] | null | null | null | Extra kunskap/Kod/Farmen.py | abbindustrigymnasium/Programmering-1-Slutuppgift | 679069ebb632ee59f6b4ee3035c18ae204cde145 | [
"Apache-2.0"
] | 1 | 2020-03-09T12:04:31.000Z | 2020-03-09T12:04:31.000Z | import openpyxl
# Open the spreadsheet; raises if 'Farmen.xlsx' is not in the working
# directory.
wb= openpyxl.load_workbook('Farmen.xlsx')
# Commented-out exploration code (reads the 'Deltagare' sheet into a list
# of dicts), kept for reference.  NOTE(review): it uses the deprecated
# openpyxl APIs get_sheet_names()/get_sheet_by_name().
# sheet= wb.active
# print(wb.get_sheet_names())
# Deltagar_sheet= wb.get_sheet_by_name('Deltagare')
# artists=[{"Namn":sheet.cell(row=i, column=2).value,
# "Sng":sheet.cell(row=i, column=3).value,
# "Pong":sheet.cell(row=i, column=6).value,
# "Rst":sheet.cell(row=i, column=5).value
# } for i in range(2,sheet.max_row) ]
# print(artists) | 33.615385 | 54 | 0.649886 |
e1d12741ffdd5338667faa2590522d4debf232f5 | 3,437 | py | Python | archive/simple_nn.py | petebond/MarketPlaceML | 347ea5eab84673b846c85c58ce6c525e3f1dd0ff | [
"CC0-1.0"
] | null | null | null | archive/simple_nn.py | petebond/MarketPlaceML | 347ea5eab84673b846c85c58ce6c525e3f1dd0ff | [
"CC0-1.0"
] | null | null | null | archive/simple_nn.py | petebond/MarketPlaceML | 347ea5eab84673b846c85c58ce6c525e3f1dd0ff | [
"CC0-1.0"
] | null | null | null | from os import access
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
# Create fully connected neural network
# Smoke test: push one random batch through the CNN wrapper to check its
# output shape.  NOTE(review): `model` is rebound to NN(...) below, so
# this instance is discarded.
model = CNN(784, 10)
x = torch.randn(64, 784)
print(model(x).shape)
# Set device: prefer GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyperparameters
batch_size = 64
learning_rate = 1e-3
num_epochs = 10
input_size = 784  # flattened image length (matches the reshape below)
num_classes = 10
# Load data: MNIST train/test splits, downloaded on first run.
train_dataset = datasets.MNIST(root='dataset/', train=True, download=True, transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='dataset/', train=False, download=True, transform=transforms.ToTensor())
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
# Initialize model
model = NN(input_size=input_size, num_classes=num_classes).to(device)
# Define loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train model
for epoch in range(num_epochs):
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        # Correct shape: flatten each batch entry to a 784-length vector
        # (input_size above) for the fully connected network.
        data = data.reshape(data.shape[0], -1)
        # Forward pass
        scores = model(data)
        loss = criterion(scores, target)
        # Backward pass — zero stale gradients first, since autograd
        # accumulates them across iterations.
        optimizer.zero_grad()
        loss.backward()
        # Gradient descent step
        optimizer.step()
# Check accuracy (check_accuracy is defined above in the full module,
# elided in this excerpt).
check_accuracy(train_loader, model)
check_accuracy(test_loader, model)
| 30.6875 | 112 | 0.647949 |
becbc66bb4d180935eed7f6a49ea9b7ed75ae703 | 998 | py | Python | plot_loss.py | ngachago/tabular_comp | 799a1e0dbf7a51bb04454f1f14a57f883dbd2da7 | [
"MIT"
] | null | null | null | plot_loss.py | ngachago/tabular_comp | 799a1e0dbf7a51bb04454f1f14a57f883dbd2da7 | [
"MIT"
] | null | null | null | plot_loss.py | ngachago/tabular_comp | 799a1e0dbf7a51bb04454f1f14a57f883dbd2da7 | [
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
| 26.972973 | 51 | 0.635271 |
becc2f00075040cb4b100c7b8d0736d719593862 | 18,695 | py | Python | hotkey.py | RMPiria/painelContreole | bd07def485981456a7e7390f2b18db71740ce8da | [
"Unlicense"
] | null | null | null | hotkey.py | RMPiria/painelContreole | bd07def485981456a7e7390f2b18db71740ce8da | [
"Unlicense"
] | null | null | null | hotkey.py | RMPiria/painelContreole | bd07def485981456a7e7390f2b18db71740ce8da | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from os.path import dirname, join, normpath
import sys
from sys import platform
from config import config
# Platform-specific setup: each branch pulls in the native keyboard/hotkey
# facilities for the current OS.
if platform == 'darwin':
    # macOS: hotkey handling goes through PyObjC and the AppKit event APIs.
    import objc
    from AppKit import NSApplication, NSWorkspace, NSBeep, NSSound, NSEvent, NSKeyDown, NSKeyUp, NSFlagsChanged, NSKeyDownMask, NSFlagsChangedMask, NSShiftKeyMask, NSControlKeyMask, NSAlternateKeyMask, NSCommandKeyMask, NSNumericPadKeyMask, NSDeviceIndependentModifierFlagsMask, NSF1FunctionKey, NSF35FunctionKey, NSDeleteFunctionKey, NSClearLineFunctionKey
elif platform == 'win32':
    # Windows: hotkeys are handled directly against user32.dll via ctypes.
    import atexit
    import ctypes
    from ctypes.wintypes import *
    import threading
    import winsound
    # user32 entry points for global hotkey (de)registration.
    RegisterHotKey = ctypes.windll.user32.RegisterHotKey
    UnregisterHotKey = ctypes.windll.user32.UnregisterHotKey
    # Modifier flags for RegisterHotKey (winuser.h MOD_* values).
    MOD_ALT = 0x0001
    MOD_CONTROL = 0x0002
    MOD_SHIFT = 0x0004
    MOD_WIN = 0x0008
    MOD_NOREPEAT = 0x4000
    # Message-loop primitives used by the hotkey listener.
    GetMessage = ctypes.windll.user32.GetMessageW
    TranslateMessage = ctypes.windll.user32.TranslateMessage
    DispatchMessage = ctypes.windll.user32.DispatchMessageW
    PostThreadMessage = ctypes.windll.user32.PostThreadMessageW
    # Window-message ids (winuser.h).  WM_APP is the start of the
    # application-private range; the two WM_SND_* ids are custom messages.
    WM_QUIT = 0x0012
    WM_HOTKEY = 0x0312
    WM_APP = 0x8000
    WM_SND_GOOD = WM_APP + 1
    WM_SND_BAD = WM_APP + 2
    GetKeyState = ctypes.windll.user32.GetKeyState
    MapVirtualKey = ctypes.windll.user32.MapVirtualKeyW
    # Virtual-key codes (winuser.h VK_* values).
    VK_BACK = 0x08
    VK_CLEAR = 0x0c
    VK_RETURN = 0x0d
    VK_SHIFT = 0x10
    VK_CONTROL = 0x11
    VK_MENU = 0x12
    VK_CAPITAL = 0x14
    VK_MODECHANGE= 0x1f
    VK_ESCAPE = 0x1b
    VK_SPACE = 0x20
    VK_DELETE = 0x2e
    VK_LWIN = 0x5b
    VK_RWIN = 0x5c
    VK_NUMPAD0 = 0x60
    VK_DIVIDE = 0x6f
    VK_F1 = 0x70
    VK_F24 = 0x87
    VK_OEM_MINUS = 0xbd
    VK_NUMLOCK = 0x90
    VK_SCROLL = 0x91
    VK_PROCESSKEY= 0xe5
    VK_OEM_CLEAR = 0xfe
    # Helpers for identifying the current foreground window.
    GetForegroundWindow = ctypes.windll.user32.GetForegroundWindow
    GetWindowText = ctypes.windll.user32.GetWindowTextW
    GetWindowText.argtypes = [HWND, LPWSTR, ctypes.c_int]
    GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
    # Synthetic-input support; INPUT_* are the INPUT.type discriminators.
    SendInput = ctypes.windll.user32.SendInput
    SendInput.argtypes = [ctypes.c_uint, ctypes.POINTER(INPUT), ctypes.c_int]
    INPUT_MOUSE = 0
    INPUT_KEYBOARD = 1
    INPUT_HARDWARE = 2
else: # Linux
# singleton
# NOTE(review): HotkeyMgr is declared per-platform in the branches above
# (its class bodies, including the Linux stub, are elided from this
# excerpt) — confirm against the full file.
hotkeymgr = HotkeyMgr()
| 44.300948 | 424 | 0.589462 |
becc66adc74c550995995f2d2b08dfaa9d6845d1 | 3,530 | py | Python | Chapter10/ch10_r1_grover_aqua.py | georgekorpas/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience | 938123d051c5bab72110011b3a05e515bb69ca09 | [
"MIT"
] | 24 | 2020-11-21T20:33:51.000Z | 2022-03-26T06:41:27.000Z | Chapter10/ch10_r1_grover_aqua.py | videomover/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience | 938123d051c5bab72110011b3a05e515bb69ca09 | [
"MIT"
] | 2 | 2021-02-07T14:32:12.000Z | 2022-03-25T07:23:46.000Z | Chapter10/ch10_r1_grover_aqua.py | videomover/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience | 938123d051c5bab72110011b3a05e515bb69ca09 | [
"MIT"
] | 16 | 2020-11-03T07:49:11.000Z | 2022-03-26T06:41:29.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Nov 2020
@author: hassi
"""
from qiskit import Aer, IBMQ
# Do the necessary import for our program
from qiskit.aqua.algorithms import Grover
from qiskit.aqua.components.oracles import LogicalExpressionOracle, TruthTableOracle
# Import basic plot tools
from qiskit.tools.visualization import plot_histogram
from IPython.core.display import display
global oracle_method, oracle_type
# Main loop
if __name__ == '__main__':
main() | 34.950495 | 120 | 0.703116 |
becc7d9f66f89922de66fb46f1bec24640debbb3 | 1,428 | py | Python | setup_pre-commit.py | fierte-product-development/Setups | d6151888ecc82a94e894b5d926c85eb193a5b97c | [
"MIT"
] | 1 | 2021-07-06T09:26:48.000Z | 2021-07-06T09:26:48.000Z | setup_pre-commit.py | fierte-product-development/Setups | d6151888ecc82a94e894b5d926c85eb193a5b97c | [
"MIT"
] | 3 | 2022-01-26T05:29:04.000Z | 2022-02-16T10:16:02.000Z | setup_pre-commit.py | fierte-product-development/Setups | d6151888ecc82a94e894b5d926c85eb193a5b97c | [
"MIT"
] | 1 | 2020-11-11T01:23:01.000Z | 2020-11-11T01:23:01.000Z | import sys
import os
import subprocess
from subprocess import PIPE, STDOUT
from pathlib import Path
# Common keyword arguments for subprocess calls: run the command line
# through the shell, capture stdout as a pipe, and decode output to str
# (text=True) rather than returning bytes.
proc_arg = {
    'shell': True,
    'stdout': PIPE,
    'text': True
}
if __name__ == "__main__":
    # Entry point; main() is defined earlier in the full file (elided here).
    main()
| 21.969231 | 65 | 0.579832 |
bece2e0f8b2de4615e59523ec76f70e91828684b | 889 | py | Python | 5-buscar.py | gustavodp22/TP_DE_-CRUD-create-read-update-delete- | 8edfba57cbbad37f8fb1af2d42d1d601301e8dd6 | [
"MIT"
] | null | null | null | 5-buscar.py | gustavodp22/TP_DE_-CRUD-create-read-update-delete- | 8edfba57cbbad37f8fb1af2d42d1d601301e8dd6 | [
"MIT"
] | null | null | null | 5-buscar.py | gustavodp22/TP_DE_-CRUD-create-read-update-delete- | 8edfba57cbbad37f8fb1af2d42d1d601301e8dd6 | [
"MIT"
] | null | null | null | """
CRUD de SQLite3 con Python 3
"""
import sqlite3
# Search books by partial title match and print the results as an ASCII
# table.  Fixes over the original: restore the accented Spanish strings
# that were garbled in extraction ("bsqueda" -> "búsqueda", etc.), close
# the database connection on every path, and build the repeated table
# border once.
bd = None
try:
    bd = sqlite3.connect("libros.db")
    cursor = bd.cursor()
    busqueda = input("Escribe tu búsqueda: ")
    if not busqueda:
        print("Búsqueda inválida")
        exit()
    # Parameterized LIKE pattern: the user text is bound with `?`, never
    # concatenated into the SQL, which prevents SQL injection.
    sentencia = "SELECT * FROM libros WHERE titulo LIKE ?;"
    cursor.execute(sentencia, ["%{}%".format(busqueda)])
    libros = cursor.fetchall()
    # Table layout: border, header row, border, one row per book, border.
    borde = "+{:-<20}+{:-<20}+{:-<10}+{:-<50}+".format("", "", "", "")
    print(borde)
    print("|{:^20}|{:^20}|{:^10}|{:^50}|".format("Autor", "Género", "Precio", "Título"))
    print(borde)
    for autor, genero, precio, titulo in libros:
        print("|{:^20}|{:^20}|{:^10}|{:^50}|".format(autor, genero, precio, titulo))
    print(borde)
except sqlite3.OperationalError as error:
    print("Error al abrir:", error)
finally:
    # Always release the connection, even after an error or the early
    # exit() on empty input.
    if bd is not None:
        bd.close()
| 27.78125 | 86 | 0.533183 |
becf153d29dc0bf9abcbf500a0d578ce48e9150a | 4,686 | py | Python | tests/advanced_tests/regressors.py | amlanbanerjee/auto_ml | db8e1d2cfa93f13a21e55739acfc8d99837e91b0 | [
"MIT"
] | 1,671 | 2016-08-09T04:44:48.000Z | 2022-03-27T01:29:23.000Z | tests/advanced_tests/regressors.py | amlanbanerjee/auto_ml | db8e1d2cfa93f13a21e55739acfc8d99837e91b0 | [
"MIT"
] | 428 | 2016-08-08T00:13:04.000Z | 2022-01-19T10:09:05.000Z | tests/advanced_tests/regressors.py | amlanbanerjee/auto_ml | db8e1d2cfa93f13a21e55739acfc8d99837e91b0 | [
"MIT"
] | 334 | 2016-08-29T12:34:18.000Z | 2022-01-31T09:14:30.000Z | import datetime
import os
import random
import sys
# Put this file's directory at the front of sys.path so the sibling helper
# module `utils_testing` (imported below) resolves no matter which
# directory the test runner was started from.
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
from sklearn.model_selection import train_test_split
import utils_testing as utils
| 31.877551 | 146 | 0.712761 |
bed0237d9ebc522d5a4384033d2b57c729cc7ede | 39 | py | Python | __init__.py | amueller/information-theoretic-mst | 178fd4396bc9a9a499ec3d18d5047b320a5c32f2 | [
"Unlicense"
] | 20 | 2016-05-03T13:29:09.000Z | 2021-10-06T20:41:36.000Z | __init__.py | amueller/information-theoretic-mst | 178fd4396bc9a9a499ec3d18d5047b320a5c32f2 | [
"Unlicense"
] | 1 | 2018-04-21T15:32:07.000Z | 2020-05-19T00:28:52.000Z | __init__.py | amueller/information-theoretic-mst | 178fd4396bc9a9a499ec3d18d5047b320a5c32f2 | [
"Unlicense"
] | 5 | 2015-04-21T00:27:49.000Z | 2019-02-23T20:46:33.000Z | from itm import ITM
__all__ = ['ITM']
| 9.75 | 19 | 0.666667 |
bed3d451e4686403bc8880395163b3b1b1569208 | 2,827 | py | Python | scripts/left_right_hemisphere_data/hemisphere_asymmetry.py | mwinding/connectome_analysis | dbc747290891805863c9481921d8080dc2043d21 | [
"MIT"
] | 1 | 2021-06-10T05:48:16.000Z | 2021-06-10T05:48:16.000Z | left_right_hemisphere_data/hemisphere_asymmetry.py | mwinding/connectome_tools | 0392f6b1e924194299ea7760d8386eb01f3371a3 | [
"MIT"
] | 2 | 2022-01-21T11:48:45.000Z | 2022-01-21T11:48:45.000Z | scripts/left_right_hemisphere_data/hemisphere_asymmetry.py | mwinding/connectome_analysis | dbc747290891805863c9481921d8080dc2043d21 | [
"MIT"
] | 1 | 2022-02-02T15:39:52.000Z | 2022-02-02T15:39:52.000Z | import pandas as pd
import numpy as np
import csv
# import synapses divided across hemispheres
# Per-neuron synapse/treenode counts split by hemisphere; header = 0 uses
# the first CSV row as the column names.
hemisphere_data = pd.read_csv('left_right_hemisphere_data/brain_hemisphere_membership.csv', header = 0)
# Skeleton ids of neurons whose arbor crosses the commissure.
commissure_neurons = pd.read_json('left_right_hemisphere_data/cross_commissure-2020-3-2.json')['skeleton_id'].values
# Ipsilateral neurons = every skeleton NOT in the commissure-crossing set.
ipsi_neurons = np.setdiff1d(hemisphere_data['skeleton'], commissure_neurons)
ipsi_neurons_bool = pd.Series(hemisphere_data['skeleton'].values).isin(ipsi_neurons)
contra_neurons_bool = ~pd.Series(hemisphere_data['skeleton'].values).isin(ipsi_neurons)


def _report_asymmetry(title, mask):
    """Print left/right totals and the left:right ratio for one neuron group.

    One section per measurement (postsynaptic sites, presynaptic sites,
    treenodes); each section prints the left sum, right sum, ratio and a
    trailing blank line.
    """
    print(title)
    for label, column in (("Postsynaptic Sites", "n_inputs"),
                          ("Presynaptic Sites", "n_outputs"),
                          ("Treenodes", "n_treenodes")):
        print(label)
        left = sum(hemisphere_data[mask]['{}_left'.format(column)].values)
        right = sum(hemisphere_data[mask]['{}_right'.format(column)].values)
        print(left)
        print(right)
        print(left/right)
        print("")


# Same report for both groups; the extra blank lines separate the two.
_report_asymmetry("IPSI", ipsi_neurons_bool)
print("")
print("")
_report_asymmetry("CONTRA", contra_neurons_bool)
| 45.596774 | 145 | 0.812168 |
bed4073b9e79a28ea38d1cc06f6e14cb5d4efcb7 | 777 | py | Python | __determineTripplesSumToZeroFromList.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __determineTripplesSumToZeroFromList.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __determineTripplesSumToZeroFromList.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | '''
Python program to determine which triples sum to zero from a given list of lists.
Input: [[1343532, -2920635, 332], [-27, 18, 9], [4, 0, -4], [2, 2, 2], [-20, 16, 4]]
Output:
[False, True, True, False, True]
Input: [[1, 2, -3], [-4, 0, 4], [0, 1, -5], [1, 1, 1], [-2, 4, -1]]
Output:
[True, True, False, False, False]
'''
#License: https://bit.ly/3oLErEI

def triples_sum_to_zero(groups):
    """Return a list of booleans, one per inner list: True when that
    triple of numbers sums to zero.

    NOTE(review): the original script called an undefined `test()` —
    the function was missing from this excerpt and the generic name
    would collide with test-runner conventions, so it is restored here
    under a descriptive name.
    """
    return [sum(triple) == 0 for triple in groups]

nums = [[1343532, -2920635, 332], [-27, 18, 9], [4, 0, -4], [2, 2, 2], [-20, 16, 4]]
print("Original list of lists:",nums)
print("Determine which triples sum to zero:")
print(triples_sum_to_zero(nums))
nums = [[1, 2, -3], [-4, 0, 4], [0, 1, -5], [1, 1, 1], [-2, 4, -1]]
print("\nOriginal list of lists:",nums)
print("Determine which triples sum to zero:")
print(triples_sum_to_zero(nums))
| 32.375 | 84 | 0.574003 |
bed6c276e3757d89c0d4a20b188e77bced930a94 | 701 | py | Python | fastNLP/modules/encoder/lstm.py | h00Jiang/fastNLP | 79ddb469d81946c87a3d066122a8a3aba6e40f3a | [
"Apache-2.0"
] | null | null | null | fastNLP/modules/encoder/lstm.py | h00Jiang/fastNLP | 79ddb469d81946c87a3d066122a8a3aba6e40f3a | [
"Apache-2.0"
] | null | null | null | fastNLP/modules/encoder/lstm.py | h00Jiang/fastNLP | 79ddb469d81946c87a3d066122a8a3aba6e40f3a | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
| 29.208333 | 98 | 0.636234 |
bed723c002fdd1ab37526c62f785025bbbd1fbd1 | 838 | py | Python | geoLocApp/signals.py | KKWaxy/geoLoc | 23e33b9fd7cb3b1031bd11475612dcc324680975 | [
"Apache-2.0"
] | null | null | null | geoLocApp/signals.py | KKWaxy/geoLoc | 23e33b9fd7cb3b1031bd11475612dcc324680975 | [
"Apache-2.0"
] | null | null | null | geoLocApp/signals.py | KKWaxy/geoLoc | 23e33b9fd7cb3b1031bd11475612dcc324680975 | [
"Apache-2.0"
] | null | null | null | from django.db.models.signals import pre_save,post_save
from django.dispatch import receiver
import geoLocApp.models
import geoLocApp.distance
# @receiver(post_save,sender=geoLocApp.models.Position,dispatch_uid="only_before_registered")
# def setDistance(sender, **kwargs):
# position = kwargs["instance"]
# coordonnees = position.coordonnees.all()
# print(coordonnees)
# for coordonnee in coordonnees:
# coordonnee.distance = geoLocApp.distance.distance(coordonnee.latitude,position.latitude,coordonnee.longitude,position.longitude)
# print(coordonnee.distance)
# @receiver(post_save,sender=geoLocApp.models.Position,dispatch_uid="new_position_added")
# def new_position(sender,**kwargs):
# if kwargs['created']==True:
# return ['intance']
# else:
# return 0 | 39.904762 | 139 | 0.72673 |
bed7a7f211ac4ca2170057d5dae27d3248efc33a | 2,198 | py | Python | src/main/python/shabda/data/iterators/internal/data_iterator_base.py | dhiraa/sabdha | f428418962dcc76f49e0a451ffc0545fda9b6b59 | [
"Apache-2.0"
] | 4 | 2018-10-26T07:00:34.000Z | 2020-10-07T01:03:08.000Z | src/main/python/shabda/data/iterators/internal/data_iterator_base.py | dhiraa/sabdha | f428418962dcc76f49e0a451ffc0545fda9b6b59 | [
"Apache-2.0"
] | null | null | null | src/main/python/shabda/data/iterators/internal/data_iterator_base.py | dhiraa/sabdha | f428418962dcc76f49e0a451ffc0545fda9b6b59 | [
"Apache-2.0"
] | 1 | 2018-10-26T07:00:38.000Z | 2018-10-26T07:00:38.000Z | # Copyright 2018 The Shabda Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Iterator that creates features for LSTM based models
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from shabda.hyperparams.hyperparams import HParams
| 28.179487 | 83 | 0.670155 |
bed7c4898d58c738f63aa212ff888514735a6694 | 441 | py | Python | app/filters.py | dakhnovskaya/organization | 72fb6f0a2daf03e7f7ce4dfb2cb3c3eaf1c40851 | [
"Apache-2.0"
] | null | null | null | app/filters.py | dakhnovskaya/organization | 72fb6f0a2daf03e7f7ce4dfb2cb3c3eaf1c40851 | [
"Apache-2.0"
] | null | null | null | app/filters.py | dakhnovskaya/organization | 72fb6f0a2daf03e7f7ce4dfb2cb3c3eaf1c40851 | [
"Apache-2.0"
] | null | null | null | import django_filters
from app.models import Company
| 33.923077 | 102 | 0.750567 |
bed83cf2a356ae3011eb19bade5063e46f89b28c | 6,598 | py | Python | reaction/rpc/rabbitmq.py | Inkln/reaction | 7a57bd642ac3db15012717130a5f2655c3b7b177 | [
"Apache-2.0"
] | 73 | 2019-10-01T15:59:57.000Z | 2021-06-29T11:59:16.000Z | reaction/rpc/rabbitmq.py | Inkln/reaction | 7a57bd642ac3db15012717130a5f2655c3b7b177 | [
"Apache-2.0"
] | 2 | 2020-06-25T10:26:38.000Z | 2022-02-21T06:20:47.000Z | reaction/rpc/rabbitmq.py | Inkln/reaction | 7a57bd642ac3db15012717130a5f2655c3b7b177 | [
"Apache-2.0"
] | 7 | 2019-10-08T05:46:22.000Z | 2020-07-27T12:58:18.000Z | from typing import List
import asyncio
import inspect
import logging
import uuid
import aio_pika
import aio_pika.exceptions
from .base import BaseRPC
from .common import RPCError, RPCHandler, RPCRequest, RPCResponse
| 34.910053 | 78 | 0.543346 |
bed8ffa1e73ffa405bfc1005a04f4f722ab41812 | 2,069 | py | Python | api/migrations/0005_auto_20200906_1951.py | sh2MAN/yamdb_final | 17f84bacd832237d88d3389605cf2acdf2a590f5 | [
"BSD-3-Clause"
] | null | null | null | api/migrations/0005_auto_20200906_1951.py | sh2MAN/yamdb_final | 17f84bacd832237d88d3389605cf2acdf2a590f5 | [
"BSD-3-Clause"
] | null | null | null | api/migrations/0005_auto_20200906_1951.py | sh2MAN/yamdb_final | 17f84bacd832237d88d3389605cf2acdf2a590f5 | [
"BSD-3-Clause"
] | 12 | 2021-02-11T16:39:00.000Z | 2022-03-30T19:18:24.000Z | # Generated by Django 3.0.5 on 2020-09-06 19:51
from django.db import migrations, models
import django.db.models.deletion
| 34.483333 | 177 | 0.585307 |
bed9a7c33d1cf837bf05eedf9e2389f71612ac64 | 1,104 | py | Python | user_activity/models.py | adithya-bhat-b/user-activity | d2577bbb295ac381e08a31e296e3d681da7ab036 | [
"MIT"
] | null | null | null | user_activity/models.py | adithya-bhat-b/user-activity | d2577bbb295ac381e08a31e296e3d681da7ab036 | [
"MIT"
] | 3 | 2021-04-08T22:04:18.000Z | 2021-06-09T19:14:16.000Z | user_activity/models.py | adithya-bhat-b/user-activity | d2577bbb295ac381e08a31e296e3d681da7ab036 | [
"MIT"
] | null | null | null | import pytz
from django.db import models
# Create your models here.
def _get_time_zones():
    """
    Return (name, name) tuples for every time zone pytz knows about.
    """
    return [(zone, zone) for zone in pytz.all_timezones]
# Model for user
# Model for user | 25.674419 | 74 | 0.646739 |
bedad5c3db81102c82212833e871a369686befc7 | 1,992 | py | Python | Learning/python_data_analysis1.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | Learning/python_data_analysis1.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | Learning/python_data_analysis1.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | import pandas as pd
# pandas provides us lots of data frame and functions that we can quickly use
# to analyze data.
"""
output
"""
# This file contains notes of basic data analyzing strategies using Python.
# I will introduce two ways to read a csv file: pathway and URL.
# Also, I will introduce how to output data and save them into a csv file.
# ------------------------------------------------------------------------------
# read a csv file using the pathway
# This is based on the pathway that we saved the file.
# In python, to represent a pathway, we should either use / or //.
# NOTE(review): hard-coded local Windows path — adjust for your machine.
df = pd.read_csv('E:\\tips.csv')
# ------------------------------------------------------------------------------
# read data online using a URL
data_url = "https://raw.githubusercontent.com/mwaskom/seaborn-data/master/tips.csv"
# Rebinding df discards the frame read from disk above; both reads are
# kept only to demonstrate the two loading approaches.
df = pd.read_csv(data_url)
# same output for the above two methods
# output is shown below
"""
total_bill tip sex smoker day time size
0 16.99 1.01 Female No Sun Dinner 2
1 10.34 1.66 Male No Sun Dinner 3
2 21.01 3.50 Male No Sun Dinner 3
3 23.68 3.31 Male No Sun Dinner 2
4 24.59 3.61 Female No Sun Dinner 4
5 25.29 4.71 Male No Sun Dinner 4
.. ... ... ... ... ... ... ...
240 27.18 2.00 Female Yes Sat Dinner 2
241 22.67 2.00 Male Yes Sat Dinner 2
242 17.82 1.75 Male No Sat Dinner 2
243 18.78 3.00 Female No Thur Dinner 2
[244 rows x 7 columns]
"""
# ------------------------------------------------------------------------------
# output data and save them into a csv file
df.to_csv('E:\\demo.csv', encoding='utf-8', index=False)
# When index = False, when output as a csv file, the name of each line will be
# removed.
# If we contain some special characters in data, encoding will treat it as
# utf-8.
| 36.888889 | 84 | 0.533133 |
bedb078b9701b035a6b7ffcad6706445ac8dade2 | 768 | py | Python | InvoiceItemModel.py | kevinyjiang/cpa-generator | c21cd1f898cf068daff8a6937d6cefc591b16ab1 | [
"MIT"
] | 2 | 2018-09-26T19:18:45.000Z | 2018-11-14T00:38:28.000Z | InvoiceItemModel.py | kevinyjiang/cpa-generator | c21cd1f898cf068daff8a6937d6cefc591b16ab1 | [
"MIT"
] | null | null | null | InvoiceItemModel.py | kevinyjiang/cpa-generator | c21cd1f898cf068daff8a6937d6cefc591b16ab1 | [
"MIT"
] | null | null | null | import config | 30.72 | 68 | 0.453125 |
bedb1dc2f3fdaeceb37c80ae1a87e69944c3c668 | 1,725 | py | Python | lambda/populateDB/lambda_function.py | aws-samples/amazon-connect-dynamic-ivr-menus | 911f5d04cf78d3097cfe7e169bd0062459d61ec4 | [
"MIT-0"
] | 4 | 2021-06-24T14:42:42.000Z | 2021-12-13T07:08:48.000Z | lambda/populateDB/lambda_function.py | aws-samples/amazon-connect-dynamic-ivr-menus | 911f5d04cf78d3097cfe7e169bd0062459d61ec4 | [
"MIT-0"
] | 1 | 2021-12-13T06:53:39.000Z | 2021-12-13T06:53:39.000Z | lambda/populateDB/lambda_function.py | aws-samples/amazon-connect-dynamic-ivr-menus | 911f5d04cf78d3097cfe7e169bd0062459d61ec4 | [
"MIT-0"
] | 2 | 2021-06-10T18:54:03.000Z | 2021-12-13T08:07:05.000Z | import json
import boto3
import os
| 28.278689 | 176 | 0.584348 |
bedcd44ac29b275e927dc09d0e22f32d04f7138a | 59 | py | Python | pyds/heap/__init__.py | nitinkatyal1314/data-structures | 2e7f5b99a6b09cea48f729682d9431b72afbfd7a | [
"MIT"
] | 6 | 2021-04-06T18:14:59.000Z | 2021-07-18T03:26:03.000Z | pyds/heap/__init__.py | nitinkatyal1314/data-structures | 2e7f5b99a6b09cea48f729682d9431b72afbfd7a | [
"MIT"
] | null | null | null | pyds/heap/__init__.py | nitinkatyal1314/data-structures | 2e7f5b99a6b09cea48f729682d9431b72afbfd7a | [
"MIT"
] | null | null | null | from .api import HeapAPI as Heap
from .api import HeapType
| 19.666667 | 32 | 0.79661 |
bedeaa04e3aa523fae916c1f3ad83805bf94106f | 2,849 | py | Python | examples/s5b_transfer/s5b_receiver.py | isabella232/slixmpp | e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a | [
"BSD-3-Clause"
] | null | null | null | examples/s5b_transfer/s5b_receiver.py | isabella232/slixmpp | e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a | [
"BSD-3-Clause"
] | 1 | 2021-02-24T07:58:40.000Z | 2021-02-24T07:58:40.000Z | examples/s5b_transfer/s5b_receiver.py | isabella232/slixmpp | e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2015 Emmanuel Gil Peyrot
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import asyncio
import logging
from getpass import getpass
from argparse import ArgumentParser
import slixmpp
if __name__ == '__main__':
    # Command-line interface: verbosity flags, credentials, and output path.
    parser = ArgumentParser()

    # Verbosity options (store_const into the same dest; default INFO).
    parser.add_argument("-q", "--quiet", help="set logging to ERROR",
                        action="store_const", dest="loglevel",
                        const=logging.ERROR, default=logging.INFO)
    parser.add_argument("-d", "--debug", help="set logging to DEBUG",
                        action="store_const", dest="loglevel",
                        const=logging.DEBUG, default=logging.INFO)

    # Account credentials and destination file for the transfer.
    parser.add_argument("-j", "--jid", dest="jid",
                        help="JID to use")
    parser.add_argument("-p", "--password", dest="password",
                        help="password to use")
    parser.add_argument("-o", "--out", dest="filename",
                        help="file to save to")

    opts = parser.parse_args()

    # Configure logging at the requested level.
    logging.basicConfig(level=opts.loglevel,
                        format='%(levelname)-8s %(message)s')

    # Prompt interactively for any value not given on the command line.
    if opts.jid is None:
        opts.jid = input("Username: ")
    if opts.password is None:
        opts.password = getpass("Password: ")
    if opts.filename is None:
        opts.filename = input("File path: ")

    # Build the S5BReceiver client and register the plugins it relies on.
    # Plugin registration order does not matter even when plugins have
    # interdependencies.
    xmpp = S5BReceiver(opts.jid, opts.password, opts.filename)
    xmpp.register_plugin('xep_0030')  # Service Discovery
    xmpp.register_plugin('xep_0065', {
        'auto_accept': True
    })  # SOCKS5 Bytestreams

    # Connect to the XMPP server and process stanzas until done.
    xmpp.connect()
    xmpp.process(forever=False)
bedfb697a1311d179da9b0d371384f0a26973131 | 2,170 | py | Python | api/app/routers/weather_models.py | bcgov/wps | 71df0de72de9cd656dc9ebf8461ffe47cfb155f6 | [
"Apache-2.0"
] | 19 | 2020-01-31T21:51:31.000Z | 2022-01-07T14:40:03.000Z | api/app/routers/weather_models.py | bcgov/wps | 71df0de72de9cd656dc9ebf8461ffe47cfb155f6 | [
"Apache-2.0"
] | 1,680 | 2020-01-24T23:25:08.000Z | 2022-03-31T23:50:27.000Z | api/app/routers/weather_models.py | bcgov/wps | 71df0de72de9cd656dc9ebf8461ffe47cfb155f6 | [
"Apache-2.0"
] | 6 | 2020-04-28T22:41:08.000Z | 2021-05-05T18:16:06.000Z | """ Routers for weather_models.
"""
import logging
from fastapi import APIRouter, Depends
from app.auth import authentication_required, audit
from app.weather_models import ModelEnum
from app.schemas.weather_models import (
WeatherModelPredictionSummaryResponse,
WeatherStationsModelRunsPredictionsResponse)
from app.schemas.shared import WeatherDataRequest
from app.weather_models.fetch.summaries import fetch_model_prediction_summaries
from app.weather_models.fetch.predictions import (
fetch_model_run_predictions_by_station_code)
# Module-level logger for this router.
logger = logging.getLogger(__name__)

# Every route under /weather_models is audited and requires authentication.
router = APIRouter(
    prefix="/weather_models",
    dependencies=[Depends(audit), Depends(authentication_required)],
)
| 38.070175 | 109 | 0.765438 |
bee066a8fc595636f1ed42106327e650d743c5d7 | 1,529 | py | Python | 155.min-stack.py | elfgzp/leetCode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
] | 3 | 2019-04-12T06:22:56.000Z | 2019-05-04T04:25:01.000Z | 155.min-stack.py | elfgzp/Leetcode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
] | null | null | null | 155.min-stack.py | elfgzp/Leetcode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=155 lang=python3
#
# [155] Min Stack
#
# https://leetcode-cn.com/problems/min-stack/description/
#
# algorithms
# Easy (47.45%)
# Total Accepted: 19.4K
# Total Submissions: 40.3K
# Testcase Example: '["MinStack","push","push","push","getMin","pop","top","getMin"]\n[[],[-2],[0],[-3],[],[],[],[]]'
#
# Design a stack that supports push, pop, top, and retrieving the
# minimum element in constant time.
#
# push(x) -- Push element x onto stack.
# pop() -- Removes the element on top of the stack.
# top() -- Get the top element.
# getMin() -- Retrieve the minimum element in the stack.
#
#
# :
#
# MinStack minStack = new MinStack();
# minStack.push(-2);
# minStack.push(0);
# minStack.push(-3);
# minStack.getMin(); --> -3.
# minStack.pop();
# minStack.top(); --> 0.
# minStack.getMin(); --> -2.
#
#
#
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| 19.602564 | 118 | 0.530412 |
bee082fd43d018efe615e1efde05a3a482204b84 | 48,845 | py | Python | demisto_client/demisto_api/models/investigation_playbook_task.py | guytest/demisto-py | 8ca4f56a6177668151b5656cbe675a377003c0e9 | [
"Apache-2.0"
] | 1 | 2020-04-08T14:36:06.000Z | 2020-04-08T14:36:06.000Z | demisto_client/demisto_api/models/investigation_playbook_task.py | guytest/demisto-py | 8ca4f56a6177668151b5656cbe675a377003c0e9 | [
"Apache-2.0"
] | null | null | null | demisto_client/demisto_api/models/investigation_playbook_task.py | guytest/demisto-py | 8ca4f56a6177668151b5656cbe675a377003c0e9 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Demisto API
This is the public REST API to integrate with the demisto server. HTTP request can be sent using any HTTP-client. For an example dedicated client take a look at: https://github.com/demisto/demisto-py. Requests must include API-key that can be generated in the Demisto web client under 'Settings' -> 'Integrations' -> 'API keys' Optimistic Locking and Versioning\\: When using Demisto REST API, you will need to make sure to work on the latest version of the item (incident, entry, etc.), otherwise, you will get a DB version error (which not allow you to override a newer item). In addition, you can pass 'version\\: -1' to force data override (make sure that other users data might be lost). Assume that Alice and Bob both read the same data from Demisto server, then they both changed the data, and then both tried to write the new versions back to the server. Whose changes should be saved? Alices? Bobs? To solve this, each data item in Demisto has a numeric incremental version. If Alice saved an item with version 4 and Bob trying to save the same item with version 3, Demisto will rollback Bob request and returns a DB version conflict error. Bob will need to get the latest item and work on it so Alice work will not get lost. Example request using 'curl'\\: ``` curl 'https://hostname:443/incidents/search' -H 'content-type: application/json' -H 'accept: application/json' -H 'Authorization: <API Key goes here>' --data-binary '{\"filter\":{\"query\":\"-status:closed -category:job\",\"period\":{\"by\":\"day\",\"fromValue\":7}}}' --compressed ``` # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from demisto_client.demisto_api.models.advance_arg import AdvanceArg # noqa: F401,E501
from demisto_client.demisto_api.models.data_collection_form import DataCollectionForm # noqa: F401,E501
from demisto_client.demisto_api.models.evidence_data import EvidenceData # noqa: F401,E501
from demisto_client.demisto_api.models.field_mapping import FieldMapping # noqa: F401,E501
from demisto_client.demisto_api.models.inv_playbook_task_complete_data import InvPlaybookTaskCompleteData # noqa: F401,E501
# from demisto_client.demisto_api.models.investigation_playbook import InvestigationPlaybook # noqa: F401,E501
from demisto_client.demisto_api.models.notifiable_item import NotifiableItem # noqa: F401,E501
from demisto_client.demisto_api.models.reputation_calc_alg import ReputationCalcAlg # noqa: F401,E501
from demisto_client.demisto_api.models.sla import SLA # noqa: F401,E501
from demisto_client.demisto_api.models.task import Task # noqa: F401,E501
from demisto_client.demisto_api.models.task_condition import TaskCondition # noqa: F401,E501
from demisto_client.demisto_api.models.task_loop import TaskLoop # noqa: F401,E501
from demisto_client.demisto_api.models.task_state import TaskState # noqa: F401,E501
from demisto_client.demisto_api.models.task_type import TaskType # noqa: F401,E501
from demisto_client.demisto_api.models.task_view import TaskView # noqa: F401,E501
from demisto_client.demisto_api.models.timer_trigger import TimerTrigger # noqa: F401,E501
def to_str(self):
    """Return a pretty-printed string of this model's dictionary form."""
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
def __repr__(self):
    """Representation used by `print` and `pprint`; delegates to to_str()."""
    text = self.to_str()
    return text
def __eq__(self, other):
    """Equal when `other` is the same model type and all attributes match."""
    return (isinstance(other, InvestigationPlaybookTask)
            and self.__dict__ == other.__dict__)
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.498337 | 1,584 | 0.649381 |
bee216da141df68c9f7b41ae5761fcf6bc3e34f4 | 36,274 | py | Python | make.py | beefoo/subway-inequality | a93bdbf81ea3753b0d2ec85d028f816adcc2f6f9 | [
"MIT"
] | 1 | 2021-03-08T01:44:10.000Z | 2021-03-08T01:44:10.000Z | make.py | beefoo/subway-inequality | a93bdbf81ea3753b0d2ec85d028f816adcc2f6f9 | [
"MIT"
] | null | null | null | make.py | beefoo/subway-inequality | a93bdbf81ea3753b0d2ec85d028f816adcc2f6f9 | [
"MIT"
] | 1 | 2021-11-02T21:55:39.000Z | 2021-11-02T21:55:39.000Z | # -*- coding: utf-8 -*-
# python3 make.py -loc "data/lines/1.csv" -width 3840 -height 2160 -overwrite
# python3 make.py -loc "data/lines/1.csv" -width 3840 -height 2160 -rtl -overwrite
# python3 combine.py
# python3 make.py -data "data/lines/A_LEF.csv" -width 3840 -height 2160 -loc "data/lines/C.csv" -img "img/A.png" -sw 0.1405 -tw 0.145 -overwrite
# python3 make.py -data "data/lines/A_LEF.csv" -width 3840 -height 2160 -loc "data/lines/C.csv" -img "img/A.png" -sw 0.1405 -tw 0.145 -rtl -overwrite
# python3 combine.py -in "output/subway_line_A.mp4,output/subway_line_A_rtl.mp4" -out "output/subway_line_A_loop.mp4"
# python3 make.py -data "data/lines/7.csv" -width 3840 -height 2160 -img "img/7.png" -sw 0.11725 -tw 0.135625 -reverse -overwrite
# python3 make.py -data "data/lines/7.csv" -width 3840 -height 2160 -img "img/7.png" -sw 0.11725 -tw 0.135625 -reverse -rtl -overwrite
# python3 combine.py -in "output/subway_line_7.mp4,output/subway_line_7_rtl.mp4" -out "output/subway_line_7_loop.mp4"
import argparse
import numpy as np
import os
from pprint import pprint
import sys
from lib import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-data', dest="DATA_FILE", default="data/lines/2.csv", help="Input csv file with preprocessed data")
parser.add_argument('-loc', dest="DATA_LOCAL_FILE", default="", help="Input csv file with preprocessed data of a local train that should 'fill in' stations in-between express trains")
parser.add_argument('-img', dest="IMAGE_FILE", default="img/2.png", help="Subway bullet image")
parser.add_argument('-instruments', dest="INSTRUMENTS_FILE", default="data/instruments.csv", help="Input csv file with instruments config")
parser.add_argument('-dir', dest="MEDIA_DIRECTORY", default="audio/", help="Input media directory")
parser.add_argument('-width', dest="WIDTH", default=1920, type=int, help="Output video width")
parser.add_argument('-height', dest="HEIGHT", default=1080, type=int, help="Output video height")
parser.add_argument('-pad0', dest="PAD_START", default=2000, type=int, help="Pad start in ms")
parser.add_argument('-pad1', dest="PAD_END", default=2000, type=int, help="Pad end in ms")
parser.add_argument('-fps', dest="FPS", default=30, type=int, help="Output video frames per second")
parser.add_argument('-outframe', dest="OUTPUT_FRAME", default="tmp/line_%s/frame.%s.png", help="Output frames pattern")
parser.add_argument('-aout', dest="AUDIO_OUTPUT_FILE", default="output/subway_line_%s.mp3", help="Output audio file")
parser.add_argument('-dout', dest="DATA_OUTPUT_FILE", default="output/subway_line_%s.csv", help="Output data file")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/subway_line_%s.mp4", help="Output media file")
parser.add_argument('-overwrite', dest="OVERWRITE", action="store_true", help="Overwrite existing files?")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just view statistics?")
parser.add_argument('-reverse', dest="REVERSE", action="store_true", help="Reverse the line?")
parser.add_argument('-rtl', dest="RIGHT_TO_LEFT", action="store_true", help="Play from right to left?")
parser.add_argument('-ao', dest="AUDIO_ONLY", action="store_true", help="Only output audio?")
parser.add_argument('-vo', dest="VIDEO_ONLY", action="store_true", help="Only output video?")
parser.add_argument('-do', dest="DATA_ONLY", action="store_true", help="Only output data?")
parser.add_argument('-viz', dest="VISUALIZE_SEQUENCE", action="store_true", help="Output a visualization of the sequence")
parser.add_argument('-plot', dest="PLOT_SEQUENCE", action="store_true", help="Display a plot chart of the sequence")
parser.add_argument('-frame', dest="SINGLE_FRAME", default=-1, type=int, help="Output just a single frame")
# Music config
parser.add_argument('-db', dest="MASTER_DB", type=float, default=-2.4, help="Master +/- decibels to be applied to final audio")
parser.add_argument('-bpm', dest="BPM", type=int, default=120, help="Beats per minute, e.g. 60, 75, 100, 120, 150")
parser.add_argument('-mpb', dest="METERS_PER_BEAT", type=int, default=75, help="Higher numbers creates shorter songs")
parser.add_argument('-dpb', dest="DIVISIONS_PER_BEAT", type=int, default=4, help="e.g. 4 = quarter notes, 8 = eighth notes")
parser.add_argument('-pm', dest="PRICE_MULTIPLIER", type=float, default=1.3, help="Makes instruments more expensive; higher numbers = less instruments playing")
parser.add_argument('-vdur', dest="VARIANCE_MS", type=int, default=20, help="+/- milliseconds an instrument note should be off by to give it a little more 'natural' feel")
# Visual design config
parser.add_argument('-sw', dest="STATION_WIDTH", type=float, default=0.125, help="Minimum station width as a percent of the screen width; adjust this to change the overall visual speed")
parser.add_argument('-tw', dest="TEXT_WIDTH", type=float, default=0.15, help="Station text width as a percent of the screen width")
parser.add_argument('-cy', dest="CENTER_Y", type=float, default=0.475, help="Center y as a percent of screen height")
parser.add_argument('-bty', dest="BOROUGH_TEXT_Y", type=float, default=0.55, help="Borough text center y as a percent of screen height")
parser.add_argument('-sty', dest="STATION_TEXT_Y", type=float, default=0.375, help="Station text center y as a percent of screen height")
parser.add_argument('-cw', dest="CIRCLE_WIDTH", type=int, default=60, help="Circle radius in pixels assuming 1920x1080")
parser.add_argument('-lh', dest="LINE_HEIGHT", type=int, default=24, help="Height of horizontal line in pixels assuming 1920x1080")
parser.add_argument('-bh', dest="BOUNDARY_HEIGHT", type=int, default=166, help="Height of boundary line in pixels assuming 1920x1080")
parser.add_argument('-bw', dest="BOUNDARY_WIDTH", type=int, default=3, help="Width of boundary line in pixels assuming 1920x1080")
parser.add_argument('-bm', dest="BOUNDARY_MARGIN", type=int, default=48, help="Horizontal margin of boundary line in pixels assuming 1920x1080")
parser.add_argument('-mw', dest="MARKER_WIDTH", type=int, default=8, help="Height of horizontal line in pixels assuming 1920x1080")
parser.add_argument('-sts', dest="STATION_TEXT_SIZE", type=int, default=30, help="Station text size in pixels assuming 1920x1080")
parser.add_argument('-stm', dest="STATION_TEXT_MARGIN", type=int, default=20, help="Station text bottom margin in pixels assuming 1920x1080")
parser.add_argument('-slm', dest="STATION_LETTER_MARGIN", type=int, default=1, help="Space after each station text letter in pixels assuming 1920x1080")
parser.add_argument('-bts', dest="BOROUGH_TEXT_SIZE", type=int, default=24, help="Borough text size in pixels assuming 1920x1080")
parser.add_argument('-blm', dest="BOROUGH_LETTER_MARGIN", type=int, default=1, help="Space after each borough text letter in pixels assuming 1920x1080")
parser.add_argument('-bthresh', dest="BOROUGH_THRESHOLD", type=float, default=0.375, help="Minimum width available for displaying borough dividers")
parser.add_argument('-dw', dest="DIVIDER_WIDTH", type=int, default=28, help="Line divider in pixels assuming 1920x1080")
parser.add_argument('-dd', dest="DIVIDER_DISTANCE", type=float, default=0.333, help="Distance between dividers as a percent of screen width")
parser.add_argument('-dc', dest="DIVIDER_COLOR", default="#666666", help="Distance between dividers as a percent of screen width")
parser.add_argument('-bg', dest="BG_COLOR", default="#000000", help="Background color")
parser.add_argument('-tc', dest="TEXT_COLOR", default="#eeeeee", help="Text color")
parser.add_argument('-atc', dest="ALT_TEXT_COLOR", default="#aaaaaa", help="Secondary text color")
parser.add_argument('-mc', dest="MARKER_COLOR", default="#dddddd", help="Marker color")
parser.add_argument('-sfont', dest="STATION_FONT", default="fonts/OpenSans-Bold.ttf", help="Station font")
parser.add_argument('-bfont', dest="BOROUGH_FONT", default="fonts/OpenSans-SemiBold.ttf", help="Borough font")
parser.add_argument('-map', dest="MAP_IMAGE", default="img/nyc.png", help="Station font")
parser.add_argument('-mcoord', dest="MAP_COORDS", default=" -74.1261,40.9087,-73.7066,40.5743", help="Top left, bottom right point")
parser.add_argument('-mapm', dest="MAP_MARGIN", type=int, default=30, help="Margin of map in pixels assuming 1920x1080")
parser.add_argument('-mapw', dest="MAP_W", type=int, default=260, help="Map width in pixels assuming 1920x1080")
parser.add_argument('-mlw', dest="MAP_LINE_WIDTH", type=int, default=4, help="Map line in pixels assuming 1920x1080")
parser.add_argument('-mlc', dest="MAP_LINE_COLOR", default="#eeeeee", help="Secondary text color")
a = parser.parse_args()
if not a.AUDIO_ONLY:
import gizeh
from PIL import Image, ImageDraw, ImageFont
startTime = logTime()
# Calculations
BEAT_MS = roundInt(60.0 / a.BPM * 1000)
ROUND_TO_NEAREST = roundInt(1.0 * BEAT_MS / a.DIVISIONS_PER_BEAT)
basename = getBasename(a.DATA_FILE)
if "_" in basename:
basename, _ = tuple(basename.split("_"))
lineName = basename
if a.RIGHT_TO_LEFT:
basename += "_rtl"
# Read data
_, stations = readCsv(a.DATA_FILE)
_, instruments = readCsv(a.INSTRUMENTS_FILE)
lstations = []
if len(a.DATA_LOCAL_FILE):
_, lstations = readCsv(a.DATA_LOCAL_FILE)
# Parse instruments
instruments = prependAll(instruments, ("file", a.MEDIA_DIRECTORY))
instruments = [i for i in instruments if i["active"] > 0]
instruments = addIndices(instruments, "index")
for i, instrument in enumerate(instruments):
instruments[i]["from_beat_ms"] = roundInt(1.0 * BEAT_MS / instrument["from_tempo"])
instruments[i]["to_beat_ms"] = roundInt(1.0 * BEAT_MS / instrument["to_tempo"])
instruments[i]["interval_ms"] = roundInt(instrument["interval_phase"] * BEAT_MS)
instruments[i]["price"] = instrument["price"] * a.PRICE_MULTIPLIER
# Buy instruments based on a specified budget
# Add local stations in-between express ones
if len(lstations) > 0:
lbasename = getBasename(a.DATA_LOCAL_FILE)
estations = {}
addStations = []
for i, s in enumerate(stations):
lines = str(s["Daytime Routes"]).split(" ")
if lbasename in lines:
estations[s["Station ID"]] = s.copy()
sortByStart = None
currentLStations = []
for i, s in enumerate(lstations):
if s["Station ID"] in estations:
if sortByStart is not None and len(currentLStations) > 0:
step = 1.0 / (len(currentLStations) + 1)
for j, ls in enumerate(currentLStations):
currentLStations[j]["sortBy"] = sortByStart + (j+1) * step
currentLStations[j]["isLocal"] = 1
addStations += currentLStations
currentLStations = []
sortByStart = estations[s["Station ID"]]["sortBy"]
elif sortByStart is not None:
currentLStations.append(s)
stations += addStations
# stations = sorted(stations, key=lambda d: d["sortBy"])
# for s in stations:
# if "isLocal" in s:
# print(" --"+s["Stop Name"])
# else:
# print(s["Stop Name"])
# sys.exit()
# Parse stations
stations = sorted(stations, key=lambda d: d["income"])
stations = addNormalizedValues(stations, "income", "nIncome")
stations = addIndices(stations, "incomeIndex")
isReverse = a.REVERSE
if a.RIGHT_TO_LEFT:
isReverse = (not isReverse)
stations = sorted(stations, key=lambda d: d["sortBy"], reverse=isReverse)
stations = addIndices(stations, "index")
stationCount = len(stations)
ms = a.PAD_START
for i, station in enumerate(stations):
stations[i]["percentile"] = 1.0 * station["incomeIndex"] / stationCount * 100
# stations[i]["percentile"] = min(99.999, 1.0 * station["nIncome"] * 100)
stations[i]["instruments"] = buyInstruments(stations[i], instruments)
# print(len(stations[i]["instruments"]))
distance = beats = duration = 0
if i < stationCount-1:
distance = earthDistance(stations[i+1]['GTFS Latitude'], stations[i+1]['GTFS Longitude'], station['GTFS Latitude'], station['GTFS Longitude'])
beats = roundInt(1.0 * distance / a.METERS_PER_BEAT)
duration = beats * BEAT_MS
boroughNext = stations[i+1]["Borough"]
stations[i]["distance"] = distance
stations[i]["beats"] = beats
stations[i]["duration"] = duration
stations[i]["vduration"] = duration
stations[i]["BoroughNext"] = boroughNext
stations[i]["ms"] = ms
stations[i]["lineName"] = lineName
ms += duration
if a.PROBE:
print("===========================")
for s in stations:
if "isLocal" in s:
print(formatSeconds(roundInt(s["ms"]/1000.0)) + " --- " + s["Stop Name"] + " (LOCAL) - $" + formatNumber(s["income"]))
else:
print(formatSeconds(roundInt(s["ms"]/1000.0)) + " - " + s["Stop Name"] + " - $" + formatNumber(s["income"]))
print("===========================")
else:
dataFilename = a.DATA_OUTPUT_FILE % basename
makeDirectories([dataFilename])
writeCsv(dataFilename, stations, headings=["ms", "Stop Name", "isLocal", "income", "Borough", "lineName"])
textFilename = replaceFileExtension(dataFilename, ".txt")
text = f'Subway Inequality: {basename} train ({stations[-1]["Stop Name"]} Bound)\n\n'
text += f'This song above mimics a ride along a subway line (the {basename} train), where the quantity and power of the instruments at any given moment in the song corresponds to the median household income of the neighborhood that you are passing through. The goal is to have the dramatic contrasts of the song echo the dramatic contrast of income in the city.\n\n'
for s in stations:
if "isLocal" not in s:
text += f'{formatSeconds(roundInt(s["ms"]/1000.0))} - {s["Stop Name"]} - ${formatNumber(s["income"])} household income\n'
writeTextFile(textFilename, text)
if a.DATA_ONLY:
sys.exit()
# Calculate ranges
distances = [s["distance"] for s in stations if s["distance"] > 0]
totalDistance = sum(distances)
minDistance, maxDistance = (min(distances), max(distances))
durations = [s["duration"] for s in stations if s["duration"] > 0]
totalMs = sum(durations)
minDuration, maxDuration = (min(durations), max(durations))
totalBeats = sum([s["beats"] for s in stations])
totalSeconds = roundInt(totalMs / 1000.0)
secondsPerStation = roundInt(1.0*totalSeconds/stationCount)
print('Total distance in meters: %s' % roundInt(totalDistance))
print('Distance range in meters: [%s, %s]' % (roundInt(minDistance), roundInt(maxDistance)))
print('Average beats per station: %s' % roundInt(1.0*totalBeats/stationCount))
print('Average time per station: %s' % formatSeconds(secondsPerStation))
print('Main sequence beats: %s' % totalBeats)
# Retrieve gain based on current beat
# Get beat duration in ms based on current point in time
# Return if the instrument should be played in the given interval
# Add beats to sequence
# Build main sequence
sequence = []
for i, instrument in enumerate(instruments):
ms = 0
stationQueueDur = 0
# Each station in stations
for station in stations:
# Check if instrument is in this station
instrumentIndex = findInList(station['instruments'], 'index', instrument['index'])
# Instrument not here, just add the station duration and continue
if instrumentIndex < 0 and stationQueueDur > 0:
sequence = addBeatsToSequence(sequence, instrument, stationQueueDur, ms, BEAT_MS, ROUND_TO_NEAREST, a.PAD_START)
ms += stationQueueDur + station['duration']
stationQueueDur = 0
elif instrumentIndex < 0:
ms += station['duration']
else:
stationQueueDur += station['duration']
if stationQueueDur > 0:
sequence = addBeatsToSequence(sequence, instrument, stationQueueDur, ms, BEAT_MS, ROUND_TO_NEAREST, a.PAD_START)
sequenceDuration = max([s["ms"] for s in sequence]) + a.PAD_END
# Now start the video frame logic
# Calculations
aa = vars(a)
aa["STATION_WIDTH"] = roundInt(1.0 * a.WIDTH * a.STATION_WIDTH)
aa["TEXT_WIDTH"] = roundInt(1.0 * a.WIDTH * a.TEXT_WIDTH)
aa["CENTER_Y"] = roundInt(1.0 * a.HEIGHT * a.CENTER_Y)
aa["BOROUGH_TEXT_Y"] = roundInt(1.0 * a.HEIGHT * a.BOROUGH_TEXT_Y)
aa["STATION_TEXT_Y"] = roundInt(1.0 * a.HEIGHT * a.STATION_TEXT_Y)
RESOLUTION = a.WIDTH / 1920.0
aa["CIRCLE_WIDTH"] = roundInt(a.CIRCLE_WIDTH * RESOLUTION)
aa["LINE_HEIGHT"] = roundInt(a.LINE_HEIGHT * RESOLUTION)
aa["BOUNDARY_MARGIN"] = roundInt(a.BOUNDARY_MARGIN * RESOLUTION)
aa["BOUNDARY_HEIGHT"] = roundInt(a.BOUNDARY_HEIGHT * RESOLUTION)
aa["BOUNDARY_WIDTH"] = roundInt(a.BOUNDARY_WIDTH * RESOLUTION)
aa["BOROUGH_THRESHOLD"] = roundInt(1.0 * a.WIDTH * a.BOROUGH_THRESHOLD)
aa["MARKER_WIDTH"] = roundInt(a.MARKER_WIDTH * RESOLUTION)
aa["STATION_TEXT_SIZE"] = roundInt(a.STATION_TEXT_SIZE * RESOLUTION)
aa["STATION_TEXT_MARGIN"] = roundInt(a.STATION_TEXT_MARGIN * RESOLUTION)
aa["STATION_LETTER_MARGIN"] = roundInt(a.STATION_LETTER_MARGIN * RESOLUTION)
aa["BOROUGH_TEXT_SIZE"] = roundInt(a.BOROUGH_TEXT_SIZE * RESOLUTION)
aa["BOROUGH_LETTER_MARGIN"] = roundInt(a.BOROUGH_LETTER_MARGIN * RESOLUTION)
aa["MAP_COORDS"] = tuple([float(c) for c in a.MAP_COORDS.strip().split(",")])
aa["MAP_MARGIN"] = roundInt(a.MAP_MARGIN * RESOLUTION)
aa["MAP_W"] = roundInt(a.MAP_W * RESOLUTION)
aa["MAP_LINE_WIDTH"] = roundInt(a.MAP_LINE_WIDTH * RESOLUTION)
aa["DIVIDER_WIDTH"] = roundInt(a.DIVIDER_WIDTH * RESOLUTION)
aa["DIVIDER_DISTANCE"] = roundInt(1.0 * a.WIDTH * a.DIVIDER_DISTANCE)
# Add borough names
boroughNames = {
"Q": "Queens",
"M": "Manhattan",
"Bk": "Brooklyn",
"Bx": "Bronx",
"SI": "Staten Island"
}
for i, station in enumerate(stations):
stations[i]["borough"] = boroughNames[station["Borough"]]
x = 0
mlon0, mlat0, mlon1, mlat1 = a.MAP_COORDS
vstations = stations[:]
# If going right to left, reverse the stations visually
if a.RIGHT_TO_LEFT:
vstations = list(reversed(vstations))
for i, station in enumerate(vstations):
if i < stationCount-1:
vstations[i]["vduration"] = vstations[i+1]["duration"]
else:
vstations[i]["vduration"] = 0
for i, station in enumerate(vstations):
boroughNext = station["borough"]
if i < stationCount-1:
boroughNext = vstations[i+1]["borough"]
vstations[i]["boroughNext"] = boroughNext
vstations[i]["width"] = roundInt(1.0 * station["vduration"] / minDuration * a.STATION_WIDTH)
vstations[i]["x"] = x
vstations[i]["x0"] = x - a.TEXT_WIDTH / 2
vstations[i]["x1"] = x + a.TEXT_WIDTH / 2
vstations[i]["mapNx"] = norm(station["GTFS Longitude"], (mlon0, mlon1))
vstations[i]["mapNy"] = norm(station["GTFS Latitude"], (mlat0, mlat1))
x += vstations[i]["width"]
totalW = x
pxPerMs = 1.0 * totalW / totalMs
pxPerS = pxPerMs * 1000.0
pxPerFrame = pxPerS / a.FPS
print("Total width: %s px" % totalW)
print("Pixels per second: %s" % pxPerS)
print("Pixels per frame: %s" % pxPerFrame)
totalFrames = msToFrame(sequenceDuration, a.FPS)
totalFrames = int(ceilToNearest(totalFrames, a.FPS))
print("Total frames: %s" % totalFrames)
sequenceDuration = frameToMs(totalFrames, a.FPS)
audioFilename = a.AUDIO_OUTPUT_FILE % basename
print("%s steps in sequence" % len(sequence))
print('Total sequence time: %s' % formatSeconds(sequenceDuration/1000.0))
if a.VISUALIZE_SEQUENCE:
instrumentsCount = len(instruments)
labelW = 200
unitH = 10
unitW = 10
marginH = 2
imgH = (unitH+marginH) * instrumentsCount
imgW = totalSeconds * unitW + labelW
dfont = ImageFont.truetype(font="fonts/OpenSans-Regular.ttf", size=10)
print("Making viz %s x %s" % (imgW, imgH))
im = Image.new('RGB', (imgW, imgH), "#000000")
draw = ImageDraw.Draw(im, 'RGB')
for i, ins in enumerate(instruments):
y = i * (unitH + marginH)
draw.text((2, y), ins["name"], fill="#FFFFFF", font=dfont)
steps = [step for step in sequence if step["instrumentIndex"]==ins["index"]]
for step in steps:
sx = roundInt((step["ms"] - a.PAD_START) / 1000.0 / totalSeconds * (imgW-labelW) + labelW)
draw.rectangle([(sx, y), (sx+3, y+unitH)], fill=(roundInt(255*step["volume"]),0,0))
if i > 0:
draw.line([(0, y-1), (imgW, y-1)], fill="#cccccc", width=1)
printProgress(i+1, instrumentsCount)
im.save("output/viz.png")
sys.exit()
# Optional debug plot: station income over sequence time, then exit.
if a.PLOT_SEQUENCE:
    import matplotlib.pyplot as plt
    xs = [s['ms']/1000.0 for s in stations]   # station times in seconds
    ys = [s['income'] for s in stations]
    plt.plot(xs, ys)
    plt.show()
    sys.exit()
# Probe mode: all diagnostics above have run; stop before any rendering.
if a.PROBE:
    sys.exit()
# Create output folders and, unless we are rendering audio only,
# pre-render every video frame of the scrolling animation.
makeDirectories([a.AUDIO_OUTPUT_FILE, a.OUTPUT_FILE])
if not a.AUDIO_ONLY:
    # Marker image drawn at the fixed screen center ("bullet").
    bulletImg = Image.open(a.IMAGE_FILE)
    bulletImg = bulletImg.resize((a.CIRCLE_WIDTH, a.CIRCLE_WIDTH), resample=Image.LANCZOS)
    # Inset map, resized to a.MAP_W while preserving aspect ratio.
    mapImg = Image.open(a.MAP_IMAGE)
    mapH = roundInt((1.0 * mapImg.size[1] / mapImg.size[0]) * a.MAP_W)
    mapImg = mapImg.resize((a.MAP_W, mapH), resample=Image.LANCZOS)
    fontStation = ImageFont.truetype(font=a.STATION_FONT, size=a.STATION_TEXT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM)
    fontBorough = ImageFont.truetype(font=a.BOROUGH_FONT, size=a.BOROUGH_TEXT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM)
    makeDirectories([a.OUTPUT_FRAME % (basename, "*")])
    if a.OVERWRITE and a.SINGLE_FRAME < 1:
        removeFiles(a.OUTPUT_FRAME % (basename, "*"))
    # calculations for easing in/out
    padFrameInCount = msToFrame(a.PAD_START, a.FPS)
    station0FrameCount = msToFrame(stations[0]["duration"], a.FPS)
    easeInFrames = getEasedFrames(padFrameInCount, station0FrameCount, pxPerFrame)
    easeInFrameCount = len(easeInFrames)
    padFrameOutCount = msToFrame(a.PAD_END, a.FPS)
    station1FrameCount = msToFrame(stations[-2]["duration"], a.FPS)
    easeOutFrames = getEasedFrames(padFrameOutCount, station1FrameCount, pxPerFrame)
    # easeOutFrames = list(reversed(easeOutFrames))
    easeOutFrameCount = len(easeOutFrames)
    easeOutPixels = roundInt(sum(easeOutFrames))
    print("Making video frame sequence...")
    videoFrames = []
    # xOffset scrolls the station strip past the fixed screen center;
    # direction depends on the configured reading direction.
    centerX = roundInt(a.WIDTH * 0.5)
    xOffset = centerX
    direction = -1
    if a.RIGHT_TO_LEFT:
        direction = 1
        xOffset -= totalW
    xOffsetF = 1.0 * xOffsetF if False else 1.0 * xOffset   # float accumulator to avoid per-frame rounding drift
    target = centerX-totalW if direction < 0 else centerX
    for f in range(totalFrames):
        frame = f + 1
        ms = frameToMs(frame, a.FPS)
        frameFilename = a.OUTPUT_FRAME % (basename, zeroPad(frame, totalFrames))
        # SINGLE_FRAME > 0 renders just that one frame (debug aid).
        if a.SINGLE_FRAME < 1 or a.SINGLE_FRAME == frame:
            if a.SINGLE_FRAME > 0:
                frameFilename = "output/frame.png"
            drawFrame(frameFilename, ms, xOffset, vstations, totalW, bulletImg, mapImg, fontStation, fontBorough, a)
            if a.SINGLE_FRAME > 0:
                sys.exit()
        pixelsLeft = abs(target - xOffset)
        # ease in start
        if frame < easeInFrameCount:
            xOffsetF += (direction * easeInFrames[frame-1])
            xOffset = roundInt(xOffsetF)
            # print(abs(xOffset-centerX))
        # # correct any discrepancy after ease in
        # elif frame <= easeInFrameCount:
        #     xOffset = (frame - padFrameInCount) * pxPerFrame
        #     xOffsetF = 1.0 * xOffset
        # ease out end
        elif pixelsLeft <= easeOutPixels:
            pxStep = easeOutFrames.pop() if len(easeOutFrames) > 0 else 1
            xOffsetF += (direction * pxStep)
            xOffset = roundInt(xOffsetF)
            # print("%s > %s" % (xOffset, centerX-totalW))
        else:
            # cruise at constant speed between the eased ends
            xOffset += (direction * pxPerFrame)
            xOffsetF = 1.0 * xOffset
        xOffset = lim(xOffset, (centerX-totalW, centerX))
        printProgress(frame, totalFrames)
        # break
    stepTime = logTime(startTime, "Finished frames")
    padZeros = len(str(totalFrames))
    outfile = a.OUTPUT_FILE % basename
    frameInfile = a.OUTPUT_FRAME % (basename, '%s')
    if a.VIDEO_ONLY:
        compileFrames(frameInfile, a.FPS, outfile, padZeros)
        sys.exit()
# Mix the audio track (skipped if the file already exists and
# OVERWRITE is not set).
if a.OVERWRITE or not os.path.isfile(audioFilename):
    mixAudio(sequence, sequenceDuration, audioFilename, masterDb=a.MASTER_DB)
else:
    print("%s already exists" % audioFilename)
stepTime = logTime(stepTime, "Finished Audio")
# Mux the pre-rendered frames and the audio into the final video.
if not a.AUDIO_ONLY:
    # NOTE(review): VIDEO_ONLY appears to compile and exit earlier, so
    # this None assignment looks like a safety guard -- confirm.
    if a.VIDEO_ONLY:
        audioFilename = None
    if a.OVERWRITE or not os.path.isfile(outfile):
        compileFrames(frameInfile, a.FPS, outfile, padZeros, audioFile=audioFilename)
    else:
        print("%s already exists" % outfile)
logTime(startTime, "Total execution time")
| 48.559572 | 371 | 0.668688 |
bee4367acb7e986f0d0dbc48437bdbc33f87cdab | 15,487 | py | Python | release/scripts/startup/bl_ui/space_text.py | vic3t3chn0/Bforartists | 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-01-18T22:13:24.000Z | 2020-01-18T22:13:24.000Z | release/scripts/startup/bl_ui/space_text.py | vic3t3chn0/Bforartists | 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/startup/bl_ui/space_text.py | vic3t3chn0/Bforartists | 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
import bpy
from bpy.types import Header, Menu, Panel
from bpy.app.translations import pgettext_iface as iface_
# bfa - show hide the editormenu
#Redraw timer sub menu - Debug stuff
# move_select submenu
classes = (
ALL_MT_editormenu,
TEXT_HT_header,
TEXT_HT_footer,
TEXT_MT_editor_menus,
TEXT_PT_properties,
TEXT_PT_find,
TEXT_MT_view,
TEXT_MT_redraw_timer,
TEXT_MT_text,
TEXT_MT_templates,
TEXT_MT_templates_py,
TEXT_MT_templates_osl,
TEXT_MT_format,
TEXT_MT_edit_to3d,
TEXT_MT_context_menu,
TEXT_MT_edit,
TEXT_MT_edit_move_select,
TEXT_MT_edit_delete,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
| 31.735656 | 125 | 0.612966 |
bee4d2aa2b67b36999556e3fe3dbdddbb08d368e | 6,151 | py | Python | wb/main/jobs/create_setup_bundle/create_setup_bundle_job.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 23 | 2022-03-17T12:24:09.000Z | 2022-03-31T09:13:30.000Z | wb/main/jobs/create_setup_bundle/create_setup_bundle_job.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 18 | 2022-03-21T08:17:44.000Z | 2022-03-30T12:42:30.000Z | wb/main/jobs/create_setup_bundle/create_setup_bundle_job.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 16 | 2022-03-17T12:24:14.000Z | 2022-03-31T12:15:12.000Z | """
OpenVINO DL Workbench
Class for create setup bundle job
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
import tempfile
from contextlib import closing
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.enumerates import JobTypesEnum, StatusEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.jobs.utils.database_functions import set_status_in_db
from wb.main.models import CreateSetupBundleJobModel, SharedArtifactModel
from wb.main.scripts.job_scripts_generators.setup_script_generator import SetupScriptGenerator
from wb.main.utils.bundle_creator.setup_bundle_creator import SetupBundleCreator, SetupComponentsParams
from wb.main.utils.utils import find_by_ext
| 55.918182 | 115 | 0.676313 |
bee53cb77bca7c1ce3a8035bc7f1d877d50fc52d | 2,365 | py | Python | ros_awsiot_agent/src/ros_awsiot_agent/mqtt2ros.py | whill-labs/ros_awsiot | 4c15be53c0643fb81fd5a261a1af5be2652c4166 | [
"MIT"
] | 4 | 2021-10-06T10:19:07.000Z | 2022-03-02T02:13:09.000Z | ros_awsiot_agent/src/ros_awsiot_agent/mqtt2ros.py | whill-labs/ros_awsiot | 4c15be53c0643fb81fd5a261a1af5be2652c4166 | [
"MIT"
] | 1 | 2021-10-02T15:13:48.000Z | 2021-10-02T15:13:48.000Z | ros_awsiot_agent/src/ros_awsiot_agent/mqtt2ros.py | whill-labs/ros_awsiot | 4c15be53c0643fb81fd5a261a1af5be2652c4166 | [
"MIT"
] | 1 | 2021-10-07T02:11:27.000Z | 2021-10-07T02:11:27.000Z | #!/usr/bin/env python3
import logging
from os.path import expanduser
from typing import Any, Dict
from uuid import uuid4
import rospy
from awsiotclient import mqtt, pubsub
from ros_awsiot_agent import set_module_logger
from rosbridge_library.internal.message_conversion import populate_instance
from rosbridge_library.internal.ros_loader import get_message_class
set_module_logger(modname="awsiotclient", level=logging.WARN)
if __name__ == "__main__":
main()
| 30.320513 | 80 | 0.689641 |
bee627678ed010aec77f469faec38fc6e41f1465 | 5,031 | py | Python | poly/app.py | thdb-theo/Polynomial | 9943ee5eb175ef01720954c6a95c685bd7fd5f6c | [
"MIT"
] | null | null | null | poly/app.py | thdb-theo/Polynomial | 9943ee5eb175ef01720954c6a95c685bd7fd5f6c | [
"MIT"
] | null | null | null | poly/app.py | thdb-theo/Polynomial | 9943ee5eb175ef01720954c6a95c685bd7fd5f6c | [
"MIT"
] | null | null | null | import sys
import re
from PyQt4 import QtGui, QtCore
from polynomial import Polynomial
from rational import Rational
if __name__ == '__main__':
main()
| 33.098684 | 93 | 0.603459 |
bee68e7de68c03f76e1ccae51e5aa678663d50fa | 493 | py | Python | ariadne_server/tests/fixtures/fake_context.py | seanaye/FeatherLight-API | 4d42a424762311ee35b3fd4f689883aa4197eb2e | [
"MIT"
] | 3 | 2020-06-28T17:30:57.000Z | 2022-01-25T18:03:38.000Z | ariadne_server/tests/fixtures/fake_context.py | seanaye/FeatherLight-API | 4d42a424762311ee35b3fd4f689883aa4197eb2e | [
"MIT"
] | null | null | null | ariadne_server/tests/fixtures/fake_context.py | seanaye/FeatherLight-API | 4d42a424762311ee35b3fd4f689883aa4197eb2e | [
"MIT"
] | 1 | 2021-02-04T07:14:08.000Z | 2021-02-04T07:14:08.000Z | from secrets import token_hex
import pytest
| 18.259259 | 47 | 0.584178 |
bee850ee36621b995a6de029e878f2bcfff1b23e | 4,552 | py | Python | libs/evaluation/training_benchmark_database.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | libs/evaluation/training_benchmark_database.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | libs/evaluation/training_benchmark_database.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Julian Bernhard,
# Klemens Esterle, Patrick Hart, Tobias Kessler
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from bark.benchmark.benchmark_result import BenchmarkConfig
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.agent import TrainingBenchmark
from bark.benchmark.benchmark_runner import BenchmarkRunner, BehaviorConfig
| 47.915789 | 133 | 0.65312 |
bee859bef7a37ff661836407bce80f2d3470ddd9 | 27,023 | py | Python | goldstone/tenants/tests_cloud.py | Solinea/goldstone-server | 91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5 | [
"Apache-2.0"
] | 14 | 2015-05-18T22:11:11.000Z | 2020-08-14T06:50:09.000Z | goldstone/tenants/tests_cloud.py | lexjacobs/goldstone-server | 91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5 | [
"Apache-2.0"
] | 568 | 2015-05-17T01:26:36.000Z | 2021-06-10T20:36:47.000Z | goldstone/tenants/tests_cloud.py | lexjacobs/goldstone-server | 91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5 | [
"Apache-2.0"
] | 22 | 2015-05-25T20:16:06.000Z | 2021-08-08T20:25:24.000Z | """Unit tests for /tenants/<id>/cloud endpoints."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.contrib.auth import get_user_model
from rest_framework.status import HTTP_200_OK, HTTP_401_UNAUTHORIZED, \
HTTP_400_BAD_REQUEST, HTTP_201_CREATED, HTTP_403_FORBIDDEN, \
HTTP_204_NO_CONTENT
from goldstone.test_utils import Setup, create_and_login, \
AUTHORIZATION_PAYLOAD, CONTENT_BAD_TOKEN, CONTENT_NO_CREDENTIALS, \
check_response_without_uuid, TEST_USER_1, CONTENT_PERMISSION_DENIED, \
BAD_TOKEN, BAD_UUID
from .models import Tenant, Cloud
from .tests_tenants import TENANTS_ID_URL
# HTTP response content.
CONTENT_MISSING_OS_USERNAME = '"username":["This field is required."]'
CONTENT_MISSING_OS_NAME = '"tenant_name":["This field is required."]'
CONTENT_MISSING_OS_PASSWORD = '"password":["This field is required."]'
CONTENT_MISSING_OS_URL = '"auth_url":["This field is required."]'
# URLs used by this module.
TENANTS_ID_CLOUD_URL = TENANTS_ID_URL + "cloud/"
TENANTS_ID_CLOUD_ID_URL = TENANTS_ID_CLOUD_URL + "%s/"
| 41.130898 | 79 | 0.531029 |
bee920effbd17d10746b345bbf080e1ea4ae7a4f | 4,750 | py | Python | onnxruntime/test/server/integration_tests/model_zoo_data_prep.py | PhaniShekhar/onnxruntime | 2663b9c44381b30525ae6234e13ed25c69206d07 | [
"MIT"
] | null | null | null | onnxruntime/test/server/integration_tests/model_zoo_data_prep.py | PhaniShekhar/onnxruntime | 2663b9c44381b30525ae6234e13ed25c69206d07 | [
"MIT"
] | null | null | null | onnxruntime/test/server/integration_tests/model_zoo_data_prep.py | PhaniShekhar/onnxruntime | 2663b9c44381b30525ae6234e13ed25c69206d07 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
import shutil
import onnx
import onnxruntime
import json
from google.protobuf.json_format import MessageToJson
import predict_pb2
import onnx_ml_pb2
# Current models only have one input and one output
if __name__ == '__main__':
model_zoo = os.path.realpath(sys.argv[1])
test_data = os.path.realpath(sys.argv[2])
os.makedirs(test_data, exist_ok=True)
gen_req_resp(model_zoo, test_data)
| 32.758621 | 123 | 0.708842 |
beea57272100654c7600d64caab6b4c5cdc2179e | 2,484 | py | Python | articlequality/feature_lists/tests/test_enwiki.py | mariushoch/articlequality | 57edf786636548bed466aa4e9d9e213fe8d1093b | [
"MIT"
] | null | null | null | articlequality/feature_lists/tests/test_enwiki.py | mariushoch/articlequality | 57edf786636548bed466aa4e9d9e213fe8d1093b | [
"MIT"
] | null | null | null | articlequality/feature_lists/tests/test_enwiki.py | mariushoch/articlequality | 57edf786636548bed466aa4e9d9e213fe8d1093b | [
"MIT"
] | null | null | null | from revscoring.datasources.revision_oriented import revision
from revscoring.dependencies import solve
from .. import enwiki
revision_text = revision.text
| 27 | 76 | 0.654589 |
beead51f0acfbaced2396459fa3fc222015aa766 | 6,984 | py | Python | GPU_compare_CPU.py | kvmu/SFU-workterm | 91c976b094097912e71dd7e0d6207ad8ce7a7e93 | [
"MIT"
] | null | null | null | GPU_compare_CPU.py | kvmu/SFU-workterm | 91c976b094097912e71dd7e0d6207ad8ce7a7e93 | [
"MIT"
] | null | null | null | GPU_compare_CPU.py | kvmu/SFU-workterm | 91c976b094097912e71dd7e0d6207ad8ce7a7e93 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 14:31:17 2015
@author: Kevin M.
Description:
This script does CPU and GPU matrix element time complexity
profiling. It has a function which applies the matrix element
analysis for a given set of parameters, profiles the code and
plots the time complexity results (with fit) and plots the matrix
elements from each case.
"""
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from my_timer import timer
from math import log
from scipy.optimize import curve_fit
def flinear(N, mode):
    """
    O(n) function

    Benchmark workload for the timing harness: builds three length-N
    arrays so the amount of work grows linearly with N.  ``mode`` is
    accepted only to keep a uniform signature with the other workloads
    and is unused here.  Returns a (result, error) pair.
    """
    y = np.asarray([i for i in range(N)])
    # NOTE(review): the two constructions below are discarded --
    # presumably deliberate padding of the timed workload; confirm
    # before removing.
    np.asarray([i for i in range(N)])
    np.asarray([i for i in range(N)])
    return y ,1
def fsquare(N, mode):
    """
    O(n^2) function

    Benchmark workload for the timing harness: runs an N x N double
    loop so the amount of work grows quadratically with N.  ``mode``
    is accepted only to keep a uniform signature with the other
    workloads and is unused here.  Returns a (result, error) pair,
    matching its sibling ``flinear``.
    """
    # Bug fix: the original returned from inside the inner loop, so it
    # performed O(1) work (always returning (0, 1)) and raised
    # NameError for N == 0.  The return now happens after both loops.
    y = 0  # also covers N <= 0, where the loops never run
    for i in range(N):
        for j in range(N):
            y = i*j
    return y, 1
def algoAnalysis(fn, nMin, nMax, mode):
    """
    Time ``fn`` over a range of input sizes.

    Calls ``fn(size, mode)`` for size = 32*nMin, 32*(nMin+1), ...,
    32*nMax, timing each call with the ``timer`` context manager.

    Returns a 4-tuple of parallel lists:
    (sizes, elapsed times in ms, function results, function errors).
    """
    sizes = []
    times_ms = []
    results = []
    errors = []
    for size in range(32 * nMin, 32 * nMax + 1, 32):
        with timer() as clock:
            value, err = fn(size, mode)
        times_ms.append(clock.msecs)
        results.append(value)
        errors.append(err)
        sizes.append(size)
    return sizes, times_ms, results, errors
# main() function
# call main
if __name__ == '__main__':
# matplotlib.rcParams.update({'font.family': 'Zapf Chancery'})
main()
| 29.719149 | 88 | 0.583477 |
beeae2374b9c6c4f75bcbbf6e4da5b6372460220 | 7,714 | py | Python | applications/plugins/Flexible/Flexible_test/scenes/python/reInitMapping.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | applications/plugins/Flexible/Flexible_test/scenes/python/reInitMapping.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | applications/plugins/Flexible/Flexible_test/scenes/python/reInitMapping.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | import Sofa
import SofaTest
import SofaPython.Tools
OBJ = SofaPython.Tools.localPath( __file__, "beam.obj" )
RAW = SofaPython.Tools.localPath( __file__, "beam.raw" )
##Check if calling Mapping::init() change anything
#
#The trick is to know that if the option evaluateShapeFunction is activated
#in the ImageGaussPointSampler then a sampler::bwdInit() must be called
#to update weights using gauss points.
| 47.913043 | 298 | 0.704693 |
beeda21a5090a064572591c96a86d43fd6daf247 | 1,688 | py | Python | mindspore/ops/_register_for_op.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | 1 | 2021-07-16T12:05:53.000Z | 2021-07-16T12:05:53.000Z | mindspore/ops/_register_for_op.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | mindspore/ops/_register_for_op.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Registry the relation."""
from collections import UserDict
from .primitive import Primitive
| 33.76 | 82 | 0.593009 |
beee49868a956aa3196803cdf539676b921996ae | 11,496 | py | Python | senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import webob
from senlin.api.common import version_request as vr
from senlin.api.common import wsgi
from senlin.api.middleware import version_negotiation as vn
from senlin.common import exception
from senlin.tests.unit.common import base
| 40.336842 | 79 | 0.673626 |
bef071b99c5638f1355cc9be272ba0f93a6cb31f | 493 | py | Python | iot/models.py | kkishans/IOT_DJANGO | 12a19858f002a8c684e4dbb93868a8859d57615f | [
"MIT"
] | null | null | null | iot/models.py | kkishans/IOT_DJANGO | 12a19858f002a8c684e4dbb93868a8859d57615f | [
"MIT"
] | null | null | null | iot/models.py | kkishans/IOT_DJANGO | 12a19858f002a8c684e4dbb93868a8859d57615f | [
"MIT"
] | null | null | null | from django.db import models
from django import forms
# Create your models here.
| 25.947368 | 60 | 0.699797 |
bef0b40fe98f05288d080226293e4d439b57a362 | 2,970 | py | Python | Homework files/Think_Python_Book_Homework_1.py | SillyHatsOnly/Python-Education-Experiments | 22244defc47b4e3ba41af07957a782013afe12b0 | [
"MIT"
] | null | null | null | Homework files/Think_Python_Book_Homework_1.py | SillyHatsOnly/Python-Education-Experiments | 22244defc47b4e3ba41af07957a782013afe12b0 | [
"MIT"
] | null | null | null | Homework files/Think_Python_Book_Homework_1.py | SillyHatsOnly/Python-Education-Experiments | 22244defc47b4e3ba41af07957a782013afe12b0 | [
"MIT"
] | null | null | null |
do_twice(print_spam)
do_twice(print_spam, 'spamm')
do_four(print_spam, "SPAM")
hor_line
hor_line()
hor_line()
hor_line()
square_print()
square_print()
square_print()
double_square
double_square()
double_square()
square_print()
square_print()
| 18.679245 | 47 | 0.476431 |
bef16a350cb321f3059e524b8af8bbcaac507956 | 123 | py | Python | email_log/apps.py | bernd-wechner/django-email-log | dbbe0ef6cee8b8067d6420dccc7a8f2061662a68 | [
"MIT"
] | 26 | 2015-04-14T18:24:54.000Z | 2022-03-07T13:01:34.000Z | email_log/apps.py | bernd-wechner/django-email-log | dbbe0ef6cee8b8067d6420dccc7a8f2061662a68 | [
"MIT"
] | 23 | 2015-06-23T02:40:39.000Z | 2022-02-08T05:07:42.000Z | email_log/apps.py | bernd-wechner/django-email-log | dbbe0ef6cee8b8067d6420dccc7a8f2061662a68 | [
"MIT"
] | 25 | 2015-02-04T16:16:05.000Z | 2021-09-28T10:53:00.000Z | from django.apps import AppConfig
| 17.571429 | 33 | 0.731707 |
bef17e7d48e784a47058c04dd63db533f851c334 | 83 | py | Python | gawain/tests/test_numerics.py | henrywatkins/gawain | c556be20242249504fc0e04a5d3b7168a8369043 | [
"MIT"
] | 1 | 2021-11-20T06:16:13.000Z | 2021-11-20T06:16:13.000Z | gawain/tests/test_numerics.py | henrywatkins/gawain | c556be20242249504fc0e04a5d3b7168a8369043 | [
"MIT"
] | null | null | null | gawain/tests/test_numerics.py | henrywatkins/gawain | c556be20242249504fc0e04a5d3b7168a8369043 | [
"MIT"
] | null | null | null | import pytest
from gawain.numerics import Clock, SolutionVector, MHDSolutionVector
| 27.666667 | 68 | 0.86747 |
bef2574ded37985d33b872832104339ea2dcbc78 | 384 | py | Python | project_9/util.py | sople1/project_9 | 7d91d786533d508572feae1ffbd1b4a6a80208ab | [
"CC0-1.0"
] | null | null | null | project_9/util.py | sople1/project_9 | 7d91d786533d508572feae1ffbd1b4a6a80208ab | [
"CC0-1.0"
] | null | null | null | project_9/util.py | sople1/project_9 | 7d91d786533d508572feae1ffbd1b4a6a80208ab | [
"CC0-1.0"
] | null | null | null | """
utility for project 9
:author: Seongsu Yoon <sople1@snooey.net>
:license: CC0
"""
def clear():
    """Clear the terminal screen (``cls`` on Windows, ``clear`` elsewhere)."""
    import os
    import sys
    screen_cmd = 'cls' if sys.platform == 'win32' else 'clear'
    os.system(screen_cmd)


if __name__ == '__main__':
    raise Exception("please run main py")
| 14.769231 | 45 | 0.570313 |
bef2add5e5d23f2bc354f97f806087052f88a9fd | 2,383 | py | Python | api/views.py | HigoOliveira/DesafioFinalServer | 284d3cea59f28f6da229345496896106e3d2048a | [
"MIT"
] | null | null | null | api/views.py | HigoOliveira/DesafioFinalServer | 284d3cea59f28f6da229345496896106e3d2048a | [
"MIT"
] | null | null | null | api/views.py | HigoOliveira/DesafioFinalServer | 284d3cea59f28f6da229345496896106e3d2048a | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from .models import User, Event
from .serializer import UserSerializer, EventSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework import status, mixins, generics
from rest_framework.response import Response | 36.661538 | 85 | 0.768779 |
bef317cba640175d733bcaa55e5644bbab2602a1 | 1,582 | py | Python | Searching_Sorting/MinimumMovesToEqualAllArrayElements2.py | PK-100/Competitive_Programming | d0863feaaa99462b2999e85dcf115f7a6c08bb8d | [
"MIT"
] | 70 | 2018-06-25T21:20:15.000Z | 2022-03-24T03:55:17.000Z | Searching_Sorting/MinimumMovesToEqualAllArrayElements2.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 4 | 2018-09-04T13:12:20.000Z | 2021-06-20T08:29:12.000Z | Searching_Sorting/MinimumMovesToEqualAllArrayElements2.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 24 | 2018-12-26T05:15:32.000Z | 2022-01-23T23:04:54.000Z | # LC 462
#from statistics import median
from random import randrange
from math import floor
| 28.763636 | 76 | 0.474083 |
bef32dc0efa2656e8a84216ea747c7b952e1b452 | 43 | py | Python | moban/_version.py | CLiu13/moban | 5deada1af7ff24a6adf698de6a8b589a258d4dc2 | [
"MIT"
] | 1 | 2018-12-16T01:16:22.000Z | 2018-12-16T01:16:22.000Z | moban/_version.py | CLiu13/moban | 5deada1af7ff24a6adf698de6a8b589a258d4dc2 | [
"MIT"
] | null | null | null | moban/_version.py | CLiu13/moban | 5deada1af7ff24a6adf698de6a8b589a258d4dc2 | [
"MIT"
] | null | null | null | __version__ = "0.3.9"
__author__ = "C. W."
| 14.333333 | 21 | 0.604651 |
bef3d9edefde231b494e39df17d4acf13c5e8797 | 666 | py | Python | Gif_Rick/rick_random_gif.py | BiceCold/Citadel_of_Ricks | 72f1a447accc2c11d1fa1cbf3c3342913913e50e | [
"Apache-2.0"
] | 2 | 2018-04-13T17:41:08.000Z | 2018-09-20T22:19:52.000Z | Gif_Rick/rick_random_gif.py | BiceCold/Citadel_of_Ricks | 72f1a447accc2c11d1fa1cbf3c3342913913e50e | [
"Apache-2.0"
] | null | null | null | Gif_Rick/rick_random_gif.py | BiceCold/Citadel_of_Ricks | 72f1a447accc2c11d1fa1cbf3c3342913913e50e | [
"Apache-2.0"
] | null | null | null | import imgurpython
from Environment_Handlers.configs import get_config
import random
client_id = get_config("client_id")
client_secret = get_config("client_secret")
client_refresh_token = get_config("client_refresh")
client_access_token = get_config("client_access_token")
username = 'antipoliticsrick'
client = imgurpython.ImgurClient(client_id, client_secret, client_access_token, client_refresh_token)
# album_ids = client.get_account_album_ids(username, page=0)
img_lst = client.get_album_images('GebVe10')
giflink = []
| 26.64 | 101 | 0.792793 |
bef57e6edf7a67698588bda9e271df4b1e689682 | 131 | py | Python | catalyst/dl/experiment/__init__.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | 3 | 2019-11-02T05:37:06.000Z | 2020-01-13T02:26:07.000Z | catalyst/dl/experiment/__init__.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | null | null | null | catalyst/dl/experiment/__init__.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | 1 | 2021-12-20T07:32:25.000Z | 2021-12-20T07:32:25.000Z | # flake8: noqa
from .base import BaseExperiment
from .config import ConfigExperiment
from .supervised import SupervisedExperiment
| 21.833333 | 44 | 0.839695 |
bef59cacc937701b313af0467ffa47a4f4a7a929 | 3,217 | py | Python | home_directories/Library/Application Support/Alfred 3/Alfred.alfredpreferences/workflows/user.workflow.BC9837BA-F60E-475C-B524-E761FBD0E1EB/common.py | joekallen/dotfiles | 9318c168fdb9ce0b1066b032e93a7e465d0ffcee | [
"Apache-2.0"
] | null | null | null | home_directories/Library/Application Support/Alfred 3/Alfred.alfredpreferences/workflows/user.workflow.BC9837BA-F60E-475C-B524-E761FBD0E1EB/common.py | joekallen/dotfiles | 9318c168fdb9ce0b1066b032e93a7e465d0ffcee | [
"Apache-2.0"
] | null | null | null | home_directories/Library/Application Support/Alfred 3/Alfred.alfredpreferences/workflows/user.workflow.BC9837BA-F60E-475C-B524-E761FBD0E1EB/common.py | joekallen/dotfiles | 9318c168fdb9ce0b1066b032e93a7e465d0ffcee | [
"Apache-2.0"
] | null | null | null | import os
import argparse
import subprocess
from workflow import Workflow
| 27.732759 | 126 | 0.608952 |
bef59fb3dbc590e868cbbe9ba87904ee2be92c5d | 528 | py | Python | dependencies/generate maps/pythongis/app/tk2/__init__.py | karimbahgat/AutoMap | eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f | [
"MIT"
] | 4 | 2015-12-05T14:31:55.000Z | 2018-02-09T05:54:36.000Z | dependencies/generate maps/pythongis/app/tk2/__init__.py | karimbahgat/AutoMap | eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f | [
"MIT"
] | 1 | 2022-01-13T02:52:09.000Z | 2022-01-13T02:52:09.000Z | dependencies/generate maps/pythongis/app/tk2/__init__.py | karimbahgat/AutoMap | eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f | [
"MIT"
] | 1 | 2018-10-24T01:08:11.000Z | 2018-10-24T01:08:11.000Z | """
Tk2
Tk2 is a convenience library for extending the functionality of Tkinter,
to make it easier and more flexible to create GUI applications.
"""
from .basics import *
from .scrollwidgets import *
from .texteditor import Text, MultiTextSearch
from .variables import *
# Later
from .multiwidgets import *
from .progbar import *
from .ribbon import *
#from orderedlist import *
#from calendar import *
from web import *
from . import filedialog
from . import messagebox
from . import colorchooser
from . import dispatch
| 20.307692 | 73 | 0.767045 |
bef5e1efda3dd7f954a3c06d34cbd2bd9106ec13 | 803 | py | Python | tools/deepke/relation_extraction/standard/models/BasicModule.py | dfface/DoctorKG | 6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8 | [
"MIT"
] | 1 | 2022-03-26T16:08:08.000Z | 2022-03-26T16:08:08.000Z | tools/deepke/relation_extraction/standard/models/BasicModule.py | dfface/DoctorKG | 6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8 | [
"MIT"
] | null | null | null | tools/deepke/relation_extraction/standard/models/BasicModule.py | dfface/DoctorKG | 6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8 | [
"MIT"
] | null | null | null | import os
import time
import torch
import torch.nn as nn
| 22.942857 | 85 | 0.555417 |
bef786a72fbb29131b60f5c806a5c2a1d2c1e463 | 3,135 | py | Python | software/nuke/init.py | kei-iketani/plex | cf09c8ef93984e5a69b23bf56248b87e4cfd98b0 | [
"MIT"
] | 153 | 2018-03-22T18:29:17.000Z | 2022-03-07T03:43:09.000Z | software/nuke/init.py | kei-iketani/plex | cf09c8ef93984e5a69b23bf56248b87e4cfd98b0 | [
"MIT"
] | 30 | 2018-08-16T16:27:42.000Z | 2021-02-24T05:37:25.000Z | software/nuke/init.py | alexanderrichter/arPipeline | 3466f70a79e4d32c0647ba21d9689157a0f7772e | [
"MIT"
] | 34 | 2018-03-24T03:54:05.000Z | 2022-03-10T11:36:52.000Z | #*********************************************************************
# content = init Nuke
# version = 0.1.0
# date = 2019-12-01
#
# license = MIT <https://github.com/alexanderrichtertd>
# author = Alexander Richter <alexanderrichtertd.com>
#*********************************************************************
# Nuke startup script: registers pipeline plugin paths and applies project
# defaults (resolution format, fps, pre-render folder hook). It runs at Nuke
# launch, so every setup step is wrapped in try/except and failures are only
# reported (log + console) instead of aborting the session.
import os
import errno
import nuke
import pipefunc
from tank import Tank
#*********************************************************************
# VARIABLE
TITLE = os.path.splitext(os.path.basename(__file__))[0]  # script name without extension, used as log id
LOG = Tank().log.init(script=TITLE)
PROJECT_DATA = Tank().data_project
# Nuke format definition string: "<width> <height> <name>" (name must contain no spaces).
RESOLUTION = (' ').join([str(PROJECT_DATA['resolution'][0]),
                         str(PROJECT_DATA['resolution'][1]),
                         PROJECT_DATA['name'].replace(' ', '')])
#*********************************************************************
# FOLDER CREATION
#*********************************************************************
# PIPELINE
Tank().init_software()
# NOTE(review): add_plugin_paths() is not defined in this visible chunk -
# presumably declared earlier in this file; verify.
add_plugin_paths()
try: from scripts import write_node
except: LOG.warning('FAILED loading write_node')
# LOAD paths
# SOFTWARE_SUB_PATH is a ';'-separated list of extra Nuke plugin directories.
# NOTE(review): os.getenv(...) returns None when the variable is unset, so the
# bare except below also swallows that AttributeError - best-effort startup.
try:
    for paths in os.getenv('SOFTWARE_SUB_PATH').split(';'):
        nuke.pluginAddPath(paths)
except:
    LOG.warning('FAILED loading SOFTWARE_SUB_PATH')
print('SETTINGS')
# RESOLUTION *********************************************************************
# Register the project format and make it the default for new scripts.
try:
    nuke.addFormat(RESOLUTION)
    nuke.knobDefault('Root.format', PROJECT_DATA['name'].replace(' ', ''))
    print(' {} ON - {}'.format(chr(254), RESOLUTION))  # chr(254) is a console bullet glyph
except:
    LOG.error(' OFF - {}'.format(RESOLUTION), exc_info=True)
    print(' {} OFF - {}'.format(chr(254), RESOLUTION))
# FPS *********************************************************************
# Default frame rate for new scripts.
try:
    nuke.knobDefault("Root.fps", str(PROJECT_DATA['fps']))
    print(' {} ON - {} fps'.format(chr(254), PROJECT_DATA['fps']))
except:
    LOG.error(' OFF - {} fps'.format(PROJECT_DATA['fps']), exc_info=True)
    print(' {} OFF - {} fps'.format(chr(254), PROJECT_DATA['fps']))
# createFolder *********************************************************************
# Ensure the write-node output directory exists before every render.
# NOTE(review): create_write_dir is not defined in this visible chunk -
# presumably declared earlier in this file; verify.
try:
    nuke.addBeforeRender(create_write_dir)
    print(' {} ON - create_write_dir (before render)'.format(chr(254)))
except:
    # NOTE(review): the .format(chr(254)) on the next line has no '{}'
    # placeholder in the string, so it is a no-op - likely a copy/paste slip.
    LOG.error(' OFF - create_write_dir (before render)'.format(chr(254)), exc_info=True)
    print(' {} OFF - create_write_dir (before render)'.format(chr(254)))
print('')
| 30.436893 | 89 | 0.536204 |
bef8b3cc0e57e7d1cb77a57410c13eec81de3df9 | 589 | py | Python | main.py | gaoshanyu/web_ui_test_sample | 8a6cc9b54b5f728af7ef0725dea42d759bd115d0 | [
"MIT"
] | null | null | null | main.py | gaoshanyu/web_ui_test_sample | 8a6cc9b54b5f728af7ef0725dea42d759bd115d0 | [
"MIT"
] | null | null | null | main.py | gaoshanyu/web_ui_test_sample | 8a6cc9b54b5f728af7ef0725dea42d759bd115d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Created at 03/10/2020
__author__ = 'raniys'
import pytest
if __name__ == '__main__':
    # Run the "search"-marked suite and write an HTML report.
    # Flag reference: -v verbose output; -s disable output capture;
    # -m select tests by mark expression; --html=path report location.
    cli_args = ["-v", "-s", "-m", "search", "--html=./reports/search_tests_report.html"]
    pytest.main(cli_args)
| 39.266667 | 92 | 0.614601 |
bef9a72ceb82bbb48832da89c306ea29b20a4752 | 863 | py | Python | rnd/HaskellRSLCompiler/test/parse/test.py | syoyo/lucille | ff81b332ae78181dbbdc1ec3c3b0f59992e7c0fa | [
"BSD-3-Clause"
] | 77 | 2015-01-29T21:02:10.000Z | 2022-03-04T11:23:12.000Z | rnd/HaskellRSLCompiler/test/parse/test.py | syoyo/lucille | ff81b332ae78181dbbdc1ec3c3b0f59992e7c0fa | [
"BSD-3-Clause"
] | 1 | 2018-11-08T02:11:24.000Z | 2018-11-08T04:31:17.000Z | rnd/HaskellRSLCompiler/test/parse/test.py | syoyo/lucille | ff81b332ae78181dbbdc1ec3c3b0f59992e7c0fa | [
"BSD-3-Clause"
] | 13 | 2015-04-20T08:17:29.000Z | 2020-06-17T18:35:06.000Z | #!/usr/bin/env python
import os, sys
import subprocess
import re
import glob
# Module-level accumulator for error messages collected during the run.
errlog = []
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this visible chunk - presumably
    # declared elsewhere in the file; verify before running standalone.
    main()
| 17.979167 | 98 | 0.499421 |
befb15db729fb7dcc8145ec21e9b327a9461b95c | 657 | py | Python | glasses/models/classification/base/__init__.py | rentainhe/glasses | 34300a76985c7fc643094fa8d617114926a0ee75 | [
"MIT"
] | 271 | 2020-10-20T12:30:23.000Z | 2022-03-17T03:02:38.000Z | glasses/models/classification/base/__init__.py | rentainhe/glasses | 34300a76985c7fc643094fa8d617114926a0ee75 | [
"MIT"
] | 212 | 2020-07-25T13:02:23.000Z | 2022-02-20T10:33:32.000Z | glasses/models/classification/base/__init__.py | rentainhe/glasses | 34300a76985c7fc643094fa8d617114926a0ee75 | [
"MIT"
] | 23 | 2021-01-03T13:53:36.000Z | 2022-03-17T05:40:34.000Z | from torch import Tensor, nn
from ...base import VisionModule
| 21.9 | 65 | 0.584475 |
befc1052790c2cb39af3f31238e68ac4213b7a50 | 3,202 | py | Python | lib/data.py | PEDIA-Charite/classifier | 13e9d6108f9691b089aac59c7392f7940033b8af | [
"MIT"
] | 2 | 2019-04-04T03:44:25.000Z | 2019-12-23T17:08:51.000Z | lib/data.py | PEDIA-Charite/classifier | 13e9d6108f9691b089aac59c7392f7940033b8af | [
"MIT"
] | 9 | 2017-05-23T09:55:15.000Z | 2019-11-22T11:24:20.000Z | lib/data.py | PEDIA-Charite/classifier | 13e9d6108f9691b089aac59c7392f7940033b8af | [
"MIT"
] | 2 | 2017-05-24T12:23:13.000Z | 2019-09-03T08:36:18.000Z | # -*- coding: utf-8 -*-
import os
import numpy as np
import sys
import logging
import csv
# Setup logging: attach a console (stderr) handler to this module's logger,
# printing records as "MM-DD HH:MM: message".
logger = logging.getLogger(__name__)
# Fix: the logger itself must be lowered to INFO as well. Without this it
# inherits the root logger's default WARNING level, so logger.info(...)
# records were discarded before they ever reached the INFO-level handler.
logger.setLevel(logging.INFO)
console_handle = logging.StreamHandler()  # StreamHandler defaults to stderr
console_handle.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s: %(message)s', datefmt='%m-%d %H:%M')
console_handle.setFormatter(formatter)
logger.addHandler(console_handle)
| 37.232558 | 135 | 0.553716 |