hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b8e1956c9e02704f82448e09bd95db729640c5f1 | 18,721 | py | Python | python/temp/yolo_main.py | plasticanne/unity-object-detection-zoo | a436aec8fd6b9b4067aafc20706e7d1896223d64 | [
"MIT"
] | null | null | null | python/temp/yolo_main.py | plasticanne/unity-object-detection-zoo | a436aec8fd6b9b4067aafc20706e7d1896223d64 | [
"MIT"
] | null | null | null | python/temp/yolo_main.py | plasticanne/unity-object-detection-zoo | a436aec8fd6b9b4067aafc20706e7d1896223d64 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import os
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
import cv2
from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model, Sequential, load_model
from keras.utils import multi_gpu_model
from tensorflow.image import ResizeMethod
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
import colorsys
from timeit import default_timer as timer
from yolo3 import utils
from yolo3.model import tiny_yolo_body, yolo_body, yolo_eval,yolo_eval2
def tf_letterbox_image(size, image):
    """Letterbox `image` to the target `size`: scale it preserving the
    aspect ratio, then pad evenly to the exact target dimensions.

    Thin wrapper around ``tf.image.resize_image_with_pad`` with bicubic
    interpolation.  Note the indexing: ``size[1]`` is used as the target
    height and ``size[0]`` as the target width (only visible for
    non-square sizes — TODO confirm against callers).
    """
    target_h = size[1]
    target_w = size[0]
    return tf.image.resize_image_with_pad(
        image,
        target_height=target_h,
        target_width=target_w,
        method=ResizeMethod.BICUBIC,
    )
def cv2_letterbox_image(img_path, size):
    """Load the image at `img_path` and letterbox it to `size` (height, width).

    The image is scaled by the largest factor that still fits inside
    `size` without distorting the aspect ratio, then centred on a black
    canvas of exactly `size` pixels.
    """
    img = cv2.imread(img_path)
    src_h, src_w = img.shape[:2]  # OpenCV images are (height, width, channels)
    dst_h, dst_w = size[0], size[1]
    # Uniform scale: the tighter of the two axis ratios wins.
    scale = min(float(dst_h) / src_h, float(dst_w) / src_w)
    scaled_h = int(src_h * scale)
    scaled_w = int(src_w * scale)
    img = cv2.resize(img, (scaled_w, scaled_h))  # cv2.resize wants (w, h)
    # Split the leftover space as evenly as possible on each side.
    pad_h = dst_h - scaled_h
    pad_w = dst_w - scaled_w
    top, bottom = pad_h // 2, pad_h - pad_h // 2
    left, right = pad_w // 2, pad_w - pad_w // 2
    black = [0, 0, 0]
    return cv2.copyMakeBorder(img, top, bottom, left, right,
                              cv2.BORDER_CONSTANT, value=black)
if __name__ == '__main__':
    # ------------------------------------------------------------------
    # Model-loading configuration
    # ------------------------------------------------------------------
    # loading model from:
    # 0: h5
    # 1: freezed unity interface pb
    # 2: unity interface meta
    # 3: blider & h5 weights
    model_load_from = 0
    # args for the chosen loading mode
    MODEL_h5_path = 'model_data/yolo.h5'
    MODEL_pb_path = 'model_data/freezed_coco_yolo.pb'
    ANCHORS_path = 'model_data/yolo_anchors.txt'
    CLASSES_path = 'model_data/coco_classes.txt'
    CLASSES_num = 80
    MODEL_meta_folder = ""
    MODEL_weight_h5_path = ""
    # classify score threshold, value will be fixed into the frozen output
    MODEL_score_threshold = 0.1
    IOU_threshold = 0.1  # yolo iou box filter, value will be fixed into the frozen output
    GPU_num = 1  # video card count; cpu/gpu choice is fixed after conversion to pb graph
    # ------------------------------------------------------------------
    # Detection configuration
    # ------------------------------------------------------------------
    # doing detection:
    # 0: no action
    # 1: img
    # 2: video
    do_detect = 1
    # args
    IMG_path = 'demo/car_cat.jpg'
    VIDEO_path = 'demo/Raccoon.mp4'
    OUTPUT_video = ""
    DRAW_score_threshold = 0.1  # score filter for drawing boxes
    # (height,width) 'Multiples of 32 required' , resize input to model
    FORCE_image_resize = (416, 416)
    # ------------------------------------------------------------------
    # Export configuration (keras h5 -> frozen graph)
    # ------------------------------------------------------------------
    # 0: no action
    # 1: h5-->freezed pb
    # 2: h5-->meta
    do_output_freezed_unity_interface = 0
    # args
    OUTPUT_pb_path = "./model_data"
    OUTPUT_pb_file = "freezed_coco_yolo.pb"
    OUTPUT_meta_folder = ""
    OUTPUT_meta_file_name = ""
    K.clear_session()
    with K.get_session() as sess:
        # NOTE(review): YOLO, detect_image and detect_video are defined
        # elsewhere in this module (not visible in this excerpt).
        yolo = YOLO(CLASSES_num, ANCHORS_path, sess)
        if model_load_from == 0:
            yolo.load_model_by_h5(
                MODEL_h5_path, MODEL_score_threshold, IOU_threshold, GPU_num)
        elif model_load_from == 1:
            yolo.load_model_by_pb(MODEL_pb_path)
        elif model_load_from == 2:
            yolo.load_model_by_meta(MODEL_meta_folder)
        elif model_load_from == 3:
            yolo.load_model_by_buider(MODEL_weight_h5_path)
        yolo.get_nodes()
        # Export is only possible when the model was loaded from .h5.
        if model_load_from == 0:
            if do_output_freezed_unity_interface == 1:
                yolo.write_pb(OUTPUT_pb_path, OUTPUT_pb_file)
            elif do_output_freezed_unity_interface == 2:
                yolo.write_meta(OUTPUT_meta_folder, OUTPUT_meta_file_name)
        else:
            if do_output_freezed_unity_interface != 0:
                print("for output, model must loading from .h5")
        # Run the requested detection mode, if any.
        if do_detect == 1:
            detect_image(yolo, IMG_path, CLASSES_path, DRAW_score_threshold,
                         FORCE_image_resize)
        elif do_detect == 2:
            detect_video(yolo, VIDEO_path, CLASSES_path, DRAW_score_threshold,
                         FORCE_image_resize, OUTPUT_video)
| 41.418142 | 116 | 0.633834 |
b8e2aaafc2b4702776593b03b7fea1abb7e1b4d0 | 3,262 | py | Python | src/extractor-lib/tests/csv_generation/test_normalized_directory_template.py | stephenfuqua/Ed-Fi-X-Fizz | 94597eda585d4f62f69c12e2a58fa8e8846db11b | [
"Apache-2.0"
] | 3 | 2020-10-15T10:29:59.000Z | 2020-12-01T21:40:55.000Z | src/extractor-lib/tests/csv_generation/test_normalized_directory_template.py | stephenfuqua/Ed-Fi-X-Fizz | 94597eda585d4f62f69c12e2a58fa8e8846db11b | [
"Apache-2.0"
] | 40 | 2020-08-17T21:08:33.000Z | 2021-02-02T19:56:09.000Z | src/extractor-lib/tests/csv_generation/test_normalized_directory_template.py | stephenfuqua/Ed-Fi-X-Fizz | 94597eda585d4f62f69c12e2a58fa8e8846db11b | [
"Apache-2.0"
] | 10 | 2021-06-10T16:27:27.000Z | 2021-12-27T12:31:57.000Z | # SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from os import path
from sys import platform
from edfi_lms_extractor_lib.csv_generation.write import (
_normalized_directory_template,
USERS_ROOT_DIRECTORY,
ASSIGNMENT_ROOT_DIRECTORY,
SUBMISSION_ROOT_DIRECTORY,
)
OUTPUT_DIRECTORY = "output_directory"
OUTPUT_DIRECTORY_WITH_SLASH = "output_directory/"
OUTPUT_DIRECTORY_WITH_BACKSLASH = "output_directory\\"
| 30.773585 | 87 | 0.701104 |
b8e2f0eed3c941ac36abbbe75adbed48e0a9d358 | 425 | py | Python | python3-tutorial/02 Advanced/1216 UpdateMany.py | CoderDream/python-best-practice | 40e6b5315daefb37c59daa1a1990ac1ae10f8cca | [
"MIT"
] | null | null | null | python3-tutorial/02 Advanced/1216 UpdateMany.py | CoderDream/python-best-practice | 40e6b5315daefb37c59daa1a1990ac1ae10f8cca | [
"MIT"
] | null | null | null | python3-tutorial/02 Advanced/1216 UpdateMany.py | CoderDream/python-best-practice | 40e6b5315daefb37c59daa1a1990ac1ae10f8cca | [
"MIT"
] | null | null | null | # update_one() update_many()
# Demo of pymongo update_many(): bulk-update every document matching a
# filter (update_one() would change only the first match).
import pymongo

# NOTE(review): assumes a MongoDB server is listening on localhost:27017.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["runoobdb"]   # database "runoobdb"
mycol = mydb["sites"]         # collection "sites"
# Filter: documents whose "name" starts with the letter "F".
myquery = {"name": {"$regex": "^F"}}
# Update: set their "alexa" field to the string "123".
newvalues = {"$set": {"alexa": "123"}}
x = mycol.update_many(myquery, newvalues)
print(x.modified_count, "")  # number of documents actually modified
# Expected output when exactly one document matches:
# 1
| 20.238095 | 63 | 0.694118 |
b8e31fa93df9ea85fa09d4f2fd6acdf91de443e9 | 789 | py | Python | search/linear/linear_search.py | alfiejsmith/algorithms | c1d816aba932a1ae0664ff2a5b7784e2a01e1de2 | [
"MIT"
] | null | null | null | search/linear/linear_search.py | alfiejsmith/algorithms | c1d816aba932a1ae0664ff2a5b7784e2a01e1de2 | [
"MIT"
] | null | null | null | search/linear/linear_search.py | alfiejsmith/algorithms | c1d816aba932a1ae0664ff2a5b7784e2a01e1de2 | [
"MIT"
] | null | null | null | from random import shuffle
"""
Will search a list of integers for a value using a linear search algorithm.
Does not require a sorted list to be passed in.
Returns -1 if item is not found
Linear Search:
Best - O(1)
Worst - O(n)
Average - O(n)
Space Complexity - O(1)
"""
| 22.542857 | 86 | 0.636248 |
b8e38e1d075d3a7559a30980f5c79e4ab5617467 | 3,657 | py | Python | gitScrabber/scrabTasks/git/projectDates.py | Eyenseo/gitScrabber | e3f5ce1a7b034fa3e40a54577268228a3be2b141 | [
"MIT"
] | null | null | null | gitScrabber/scrabTasks/git/projectDates.py | Eyenseo/gitScrabber | e3f5ce1a7b034fa3e40a54577268228a3be2b141 | [
"MIT"
] | null | null | null | gitScrabber/scrabTasks/git/projectDates.py | Eyenseo/gitScrabber | e3f5ce1a7b034fa3e40a54577268228a3be2b141 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2017 Roland Jaeger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ..scrabTask import GitTask
import utils
name = "ProjectDates"
version = "1.1.0"
| 37.316327 | 80 | 0.623462 |
b8e396ee442faafcbc18f8f10aa0618271fca39e | 3,526 | py | Python | demo_maecce_for_pls.py | hkaneko1985/dcek | 13d9228b2dc2fd87c2e08a01721e1b1b220f2e19 | [
"MIT"
] | 25 | 2019-08-23T12:39:14.000Z | 2022-03-30T08:58:15.000Z | demo_maecce_for_pls.py | hkaneko1985/dcek | 13d9228b2dc2fd87c2e08a01721e1b1b220f2e19 | [
"MIT"
] | 2 | 2022-01-06T11:21:21.000Z | 2022-01-18T22:11:12.000Z | demo_maecce_for_pls.py | hkaneko1985/dcek | 13d9228b2dc2fd87c2e08a01721e1b1b220f2e19 | [
"MIT"
] | 16 | 2019-12-12T08:20:48.000Z | 2022-01-26T00:34:31.000Z | # -*- coding: utf-8 -*-
# %reset -f
"""
@author: Hiromasa Kaneko
"""
# Demonstration of MAEcce in PLS modeling
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
from dcekit.validation import mae_cce
from sklearn import datasets
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import GridSearchCV, train_test_split
# settings
number_of_training_samples = 50  # 30, 50, 100, 300, 500, 1000, 3000, for example
number_of_test_samples = 10000
number_of_x_variables = 30  # 10, 30, 50, 100, 300, 500, 1000, 3000, for example
number_of_y_randomization = 50   # y-scrambling repetitions used by MAEcce
max_pls_component_number = 20    # PLS components tried in cross-validation
fold_number = 5                  # cross-validation folds
# generate sample dataset; random_state is tied to the settings so each
# (samples, variables) combination is reproducible but distinct
x, y = datasets.make_regression(n_samples=number_of_training_samples + number_of_test_samples,
                                n_features=number_of_x_variables, n_informative=10, noise=30,
                                random_state=number_of_training_samples + number_of_x_variables)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=number_of_test_samples, random_state=0)
# autoscaling: centre/scale using the *training* statistics only
autoscaled_x_train = (x_train - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)
autoscaled_y_train = (y_train - y_train.mean()) / y_train.std(ddof=1)
autoscaled_x_test = (x_test - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)
# cross-validation: grid-search the number of PLS components
pls_components = np.arange(1, max_pls_component_number + 1)
cv_model = GridSearchCV(PLSRegression(), {'n_components': pls_components}, cv=fold_number)
cv_model.fit(autoscaled_x_train, autoscaled_y_train)
# modeling and prediction: refit a fresh estimator with the best
# hyper-parameters, then rescale predictions back to original y units
model = getattr(cv_model, 'estimator')
hyperparameters = list(cv_model.best_params_.keys())
for hyperparameter in hyperparameters:
    setattr(model, hyperparameter, cv_model.best_params_[hyperparameter])
model.fit(autoscaled_x_train, autoscaled_y_train)
estimated_y_train = np.ndarray.flatten(model.predict(autoscaled_x_train))
estimated_y_train = estimated_y_train * y_train.std(ddof=1) + y_train.mean()
predicted_y_test = np.ndarray.flatten(model.predict(autoscaled_x_test))
predicted_y_test = predicted_y_test * y_train.std(ddof=1) + y_train.mean()
# MAEcce: distribution of chance-level MAE from y-randomized models
mae_cce_train = mae_cce(cv_model, x_train, y_train, number_of_y_randomization=number_of_y_randomization, do_autoscaling=True, random_state=0)
# yy-plot for test data (actual vs. estimated on square axes with a 5% margin)
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_test, predicted_y_test)
y_max = np.max(np.array([np.array(y_test), predicted_y_test]))
y_min = np.min(np.array([np.array(y_test), predicted_y_test]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
         [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Estimated Y')
plt.show()
# r2p, RMSEp, MAEp for test data
print('r2p: {0}'.format(float(1 - sum((y_test - predicted_y_test) ** 2) / sum((y_test - y_test.mean()) ** 2))))
print('RMSEp: {0}'.format(float((sum((y_test - predicted_y_test) ** 2) / len(y_test)) ** 0.5)))
mae_test = float(sum(abs(y_test - predicted_y_test)) / len(y_test))
print('MAEp: {0}'.format(mae_test))
# histogram of the MAEcce distribution, with the test-set MAE marked as a
# red point for comparison against chance level
plt.rcParams["font.size"] = 18
plt.hist(mae_cce_train, bins=30)
plt.plot(mae_test, 0.2, 'r.', markersize=30)
plt.xlabel('MAEcce(histgram), MAEp(red point)')
plt.ylabel('frequency')
plt.show()
b8e5c7f7a18f5689f0dfad89a71f45469022396b | 151,828 | py | Python | bot.py | admica/evediscobot | 3ece4cd65718ba5d62ef0beab80f1793ac96aa3a | [
"MIT"
] | null | null | null | bot.py | admica/evediscobot | 3ece4cd65718ba5d62ef0beab80f1793ac96aa3a | [
"MIT"
] | null | null | null | bot.py | admica/evediscobot | 3ece4cd65718ba5d62ef0beab80f1793ac96aa3a | [
"MIT"
] | null | null | null | #!/home/admica/python3/bin/python3
#Discord eve bot by admica
import asyncio, discord, time, threading, websocket, json
from discord.ext import commands
from discord.ext.commands import Bot
import aiohttp
import re
from queue import Queue
from datetime import timedelta
from datetime import datetime
import os, sys
import requests
from chatterbot import ChatBot
from ctypes.util import find_library
from random import randint
import pickle
from tensorflow.python.keras.layers import Dense, Reshape, Flatten, Dropout, Input, concatenate
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose, Activation
from keras.layers import Input, Embedding, LSTM, Dense, RepeatVector, Dropout, merge,concatenate
from keras.optimizers import Adam
from keras.models import Model, Sequential
from keras.layers import Activation, Dense
from keras.preprocessing import sequence
from six.moves import input
import numpy as np
# NOTE(review): appears to be a command keyword recognized by the bot — confirm usage.
REDO = 'redo'
# Path to a word list used as a vocabulary source (cracklib small dictionary).
VOCAB = '/usr/share/dict/cracklib-small'
# Maps a count of 3-digit groups to its English magnitude name
# (e.g. 2 -> 'Million'); 0 is the sub-thousand 'Hundred' bucket.
NUMBERWORD = {1: 'Thousand', 2: 'Million', 3: 'Billion', 4: 'Trillion', 0: 'Hundred', 5: 'Quadrillion', 6: 'Quintillion', 7: 'Sextillion', 8: 'Septillion', 9: 'Octillion'}
#############################################################
#############################################################
# --- entry point -----------------------------------------------------------
import time

time.sleep(1)  # brief pause before connecting (e.g. lets a prior instance wind down)
# NOTE(review): Zbot is defined elsewhere in this module (not shown here).
bot = Zbot()
try:
    bot.start()
    # Periodic server status update with pilots online and total kills.
    bot.start_timer()
    bot.run()
except Exception as e:
    # Bug fix: corrected typo in the log message ("FATAILITY" -> "FATALITY").
    print("FATALITY IN MAIN: {}".format(e))
    bot.do_restart()
| 44.892963 | 671 | 0.42726 |
b8e66118386395c82079c492edb8b95513d242cf | 18,796 | py | Python | tests/help_text_test.py | equinor/osdu-cli | 579922556925ea7ad759a6230498378cf724b445 | [
"MIT"
] | 3 | 2021-08-19T05:59:39.000Z | 2021-11-10T08:02:58.000Z | tests/help_text_test.py | equinor/osdu-cli | 579922556925ea7ad759a6230498378cf724b445 | [
"MIT"
] | 2 | 2021-09-13T11:10:15.000Z | 2021-11-25T13:21:54.000Z | tests/help_text_test.py | equinor/osdu-cli | 579922556925ea7ad759a6230498378cf724b445 | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Copyright (c) Equinor ASA. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""Tests that -h does not return error and has all required text.
This only tests for commands/subgroups which are specified in this file.
This does not test the correctness of help text content."""
import unittest
from subprocess import PIPE, Popen
if __name__ == "__main__":
import nose2
nose2.main()
| 40.508621 | 114 | 0.546606 |
b8e81060803693ffd42ace6d2aecd7a9dd90f046 | 417 | py | Python | testing/resources/test_g.py | tongni1975/processing.py | 0b9ad68a1dc289d5042d1d3b132c13cc157d3f88 | [
"Apache-2.0"
] | null | null | null | testing/resources/test_g.py | tongni1975/processing.py | 0b9ad68a1dc289d5042d1d3b132c13cc157d3f88 | [
"Apache-2.0"
] | 1 | 2021-06-25T15:36:38.000Z | 2021-06-25T15:36:38.000Z | testing/resources/test_g.py | tongni1975/processing.py | 0b9ad68a1dc289d5042d1d3b132c13cc157d3f88 | [
"Apache-2.0"
] | null | null | null | import processing.opengl.PGraphics3D
| 26.0625 | 70 | 0.654676 |
b8e9a8b69a6237c573c52a972df1c7ef664eba25 | 4,811 | py | Python | scripts/experiments/intrinsic_evaluations/exbert/server/data/processing/create_faiss.py | antoilouis/netbert | ccd37ef8a1727557de74498132eea24db2135940 | [
"MIT"
] | 2 | 2021-01-29T01:30:51.000Z | 2021-07-14T16:47:15.000Z | server/data/processing/create_faiss.py | CharlotteSean/exbert | 75e6bb146ab799e3652a887562490d5f31357223 | [
"Apache-2.0"
] | null | null | null | server/data/processing/create_faiss.py | CharlotteSean/exbert | 75e6bb146ab799e3652a887562490d5f31357223 | [
"Apache-2.0"
] | 1 | 2020-03-04T14:02:28.000Z | 2020-03-04T14:02:28.000Z | import faiss
import numpy as np
import utils.path_fixes as pf
from pathlib import Path
from data.processing.corpus_embeddings import CorpusEmbeddings
from functools import partial
import argparse
# Glob pattern matching one saved FAISS index file per transformer layer.
FAISS_LAYER_PATTERN = 'layer_*.faiss'
# Filename template for a single layer's index, e.g. 'layer_03.faiss'.
LAYER_TEMPLATE = 'layer_{:02d}.faiss'
# BERT-base dimensions: 12 layers, 12 attention heads.
NLAYERS = 12
NHEADS = 12
def train_indexes(ce: CorpusEmbeddings, stepsize=100):
    """Build one inner-product FAISS index per layer of the corpus embeddings.

    Parameters:
    ===========
    - ce: Wrapper around HDF5 file for easy access to data
    - stepsize: How many sentences to train with at once

    Returns a list of ``faiss.IndexFlatIP`` (one per layer), each populated
    with every embedding in `ce`.
    """
    per_layer = [faiss.IndexFlatIP(ce.embedding_dim) for _ in range(ce.n_layers)]
    total = len(ce)
    start = 0
    # Feed the embeddings in batches of `stepsize` sentences.
    while start < total:
        batch = ce[start:start + stepsize]
        for layer, index in enumerate(per_layer):
            index.add(batch[layer])
        start += stepsize
    return per_layer
def save_indexes(idxs, outdir, base_name=LAYER_TEMPLATE):
    """Write every FAISS index in `idxs` to a file inside `outdir`.

    File names come from formatting `base_name` with the index's position
    in the list (e.g. ``layer_00.faiss``, ``layer_01.faiss`` ...).
    """
    template = str(Path(outdir) / base_name)
    for position, index in enumerate(idxs):
        faiss.write_index(index, template.format(position))
def create_mask(head_size, n_heads, selected_heads):
    """Build a flat 0/1 mask over the concatenated attention-head dimension.

    The result has ``n_heads * head_size`` entries: the `head_size` entries
    belonging to each head listed in `selected_heads` are 1, all others 0.
    This allows easy masking of heads you don't want to search for.
    """
    head_flags = np.zeros(n_heads)
    chosen = [int(h) for h in selected_heads]
    head_flags[chosen] = 1  # fancy indexing; harmless on an empty selection
    return np.repeat(head_flags, head_size)
# Default head-masking helpers per model; partial(...) pins head size 64 and
# 12 heads for bert-base-uncased, leaving only `selected_heads` to supply.
default_masks = {
    'bert-base-uncased': partial(create_mask, 64, 12)
}
base_mask = default_masks['bert-base-uncased']

if __name__ == "__main__":
    # Creating the indices for both the context and embeddings.
    # NOTE(review): parse_args and main are defined elsewhere in this
    # module (not visible in this excerpt).
    args = parse_args()
    main(args.directory)
b8e9db6f289a79604e54db518d87b8a53a1a0672 | 504 | py | Python | weasyl/test/test_http.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | 111 | 2016-05-18T04:18:18.000Z | 2021-11-03T02:05:19.000Z | weasyl/test/test_http.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | 1,103 | 2016-05-29T05:17:53.000Z | 2022-03-31T18:12:40.000Z | weasyl/test/test_http.py | TheWug/weasyl | a568a542cc58c11e30621fb672c701531d4306a8 | [
"Apache-2.0"
] | 47 | 2016-05-29T20:48:37.000Z | 2021-11-12T09:40:40.000Z | import pytest
from weasyl import http
| 29.647059 | 102 | 0.603175 |
b8ea0aefe02a0ac8e734a613a8836ee2fbeec6cf | 421 | py | Python | chords/neural_network/classifier.py | fernando-figueredo/ChordsWebApp | 9bf983ab5579c36c75447c74eec0400d78ab49f9 | [
"MIT"
] | 2 | 2021-03-30T01:09:51.000Z | 2022-03-10T21:17:15.000Z | chords/neural_network/classifier.py | fernando-figueredo/ChordsWebApp | 9bf983ab5579c36c75447c74eec0400d78ab49f9 | [
"MIT"
] | null | null | null | chords/neural_network/classifier.py | fernando-figueredo/ChordsWebApp | 9bf983ab5579c36c75447c74eec0400d78ab49f9 | [
"MIT"
] | null | null | null | from neural_network.train import Trainer | 28.066667 | 59 | 0.643705 |
b8ea2be5c0eee4133b1b628fc992cd2fbe84768f | 556 | py | Python | cybox/common/metadata.py | tirkarthi/python-cybox | a378deb68b3ac56360c5cc35ff5aad1cd3dcab83 | [
"BSD-3-Clause"
] | 40 | 2015-03-05T18:22:51.000Z | 2022-03-06T07:29:25.000Z | cybox/common/metadata.py | tirkarthi/python-cybox | a378deb68b3ac56360c5cc35ff5aad1cd3dcab83 | [
"BSD-3-Clause"
] | 106 | 2015-01-12T18:52:20.000Z | 2021-04-25T22:57:52.000Z | cybox/common/metadata.py | tirkarthi/python-cybox | a378deb68b3ac56360c5cc35ff5aad1cd3dcab83 | [
"BSD-3-Clause"
] | 30 | 2015-03-25T07:24:40.000Z | 2021-07-23T17:10:11.000Z | # Copyright (c) 2020, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities, fields
import cybox.bindings.cybox_common as common_binding
| 32.705882 | 99 | 0.753597 |
b8ecff777a101fecf5e77b7561d2d3b4b1ad0ea3 | 972 | py | Python | src/app/main/routes.py | Abh4git/PythonMongoService | f64fcb7c4db0db41adb8b74736c82e8de5f6dbec | [
"MIT"
] | null | null | null | src/app/main/routes.py | Abh4git/PythonMongoService | f64fcb7c4db0db41adb8b74736c82e8de5f6dbec | [
"MIT"
] | null | null | null | src/app/main/routes.py | Abh4git/PythonMongoService | f64fcb7c4db0db41adb8b74736c82e8de5f6dbec | [
"MIT"
] | null | null | null | #All Routes are defined here
from flask_cors import CORS, cross_origin
from app.main.controller.products import ProductController
from flask import request, jsonify
import json
#Test route without any connections
api_v2_cors_config = {
"origins": [
'http://localhost:3000' # React
# React
],
"methods": ["OPTIONS", "GET", "POST"],
"allow_headers": ["Authorization", "Content-Type"]
}
#route returning Products list
#route for products list filtered by product types
| 29.454545 | 92 | 0.737654 |
b8ed5ea88b3e1f4c3f96f668efbaca32325efa0f | 6,850 | py | Python | tests/test_user.py | ccfiel/fbchat-asyncio | 4ba39a835c7374c2cbf2a34e4e4fbf5c60ce6891 | [
"BSD-3-Clause"
] | 1 | 2019-11-02T14:44:05.000Z | 2019-11-02T14:44:05.000Z | tests/test_user.py | ccfiel/fbchat-asyncio | 4ba39a835c7374c2cbf2a34e4e4fbf5c60ce6891 | [
"BSD-3-Clause"
] | null | null | null | tests/test_user.py | ccfiel/fbchat-asyncio | 4ba39a835c7374c2cbf2a34e4e4fbf5c60ce6891 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import datetime
from fbchat._user import User, ActiveStatus
| 35.128205 | 88 | 0.489927 |
b8ed8469a90e01bd0b314d93c23d97aa1b93965d | 143 | py | Python | (3)Algorithms/operator_boolean.py | mass9/Python | 66499164e36a4fe9630029d34b292ab06f849b2f | [
"MIT"
] | null | null | null | (3)Algorithms/operator_boolean.py | mass9/Python | 66499164e36a4fe9630029d34b292ab06f849b2f | [
"MIT"
] | null | null | null | (3)Algorithms/operator_boolean.py | mass9/Python | 66499164e36a4fe9630029d34b292ab06f849b2f | [
"MIT"
] | null | null | null | from operator import*
# Demo of the function forms of Python's boolean/identity operators
# (not_, truth, is_, is_not come from the `operator` star-import above).
a = -1
b = 5
print('a= ',a)
print('b= ',b)
print()
print(not_(a))       # not a      -> False (-1 is truthy)
print(truth(a))      # bool(a)    -> True
print(is_(a,b))      # a is b     -> False
print(is_not(a,b))   # a is not b -> True
b8edaac684aec68ed9d6e7241e67d70248284354 | 1,903 | py | Python | nicos_mlz/erwin/setups/system.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/erwin/setups/system.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z | nicos_mlz/erwin/setups/system.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | description = 'system setup'
group = 'lowlevel'
sysconfig = dict(
cache = 'localhost',
instrument = 'ErWIN',
experiment = 'Exp',
datasinks = ['conssink', 'dmnsink'],
notifiers = [],
)
modules = ['nicos.commands.standard']
devices = dict(
ErWIN = device('nicos.devices.instrument.Instrument',
description = 'ErWIN instrument',
instrument = 'ErWIN',
responsible = 'Michael Heere <michael.heere@kit.edu>',
website = 'https://mlz-garching.de/erwin',
operators = [
'Karlsruhe Institute of Technology (KIT)',
],
),
Sample = device('nicos.devices.sample.Sample',
description = 'sample object',
),
Exp = device('nicos_mlz.devices.experiment.Experiment',
description = 'experiment object',
dataroot = 'data',
sample = 'Sample',
reporttemplate = '',
sendmail = False,
serviceexp = 'p0',
mailsender = 'erwin@frm2.tum.de',
mailserver = 'mailhost.frm2.tum.de',
elog = True,
managerights = dict(
enableDirMode = 0o775,
enableFileMode = 0o644,
disableDirMode = 0o550,
disableFileMode = 0o440,
owner = 'erwin',
group = 'erwin'
),
),
filesink = device('nicos.devices.datasinks.AsciiScanfileSink'),
conssink = device('nicos.devices.datasinks.ConsoleScanSink'),
dmnsink = device('nicos.devices.datasinks.DaemonSink'),
Space = device('nicos.devices.generic.FreeSpace',
description = 'The amount of free space for storing data',
warnlimits = (5., None),
path = None,
minfree = 5,
),
LogSpace = device('nicos.devices.generic.FreeSpace',
description = 'Space on log drive',
path = 'log',
warnlimits = (.5, None),
minfree = 0.5,
lowlevel = True,
),
)
| 29.734375 | 67 | 0.575933 |
b8eeeede3579cb2a1baac69df57edebe5d6b3dd1 | 1,771 | py | Python | clustering_normalized_cuts/run.py | kiss2u/google-research | 2cd66234656f9e2f4218ed90a2d8aa9cf3139093 | [
"Apache-2.0"
] | 7 | 2020-03-15T12:14:07.000Z | 2021-12-01T07:01:09.000Z | clustering_normalized_cuts/run.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 25 | 2020-07-25T08:53:09.000Z | 2022-03-12T00:43:02.000Z | clustering_normalized_cuts/run.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the code for Clustering using our CNC framework."""
from __future__ import division
import collections
import os
from absl import app
from absl import flags
from clustering_normalized_cuts import setup
from clustering_normalized_cuts.cnc_net import run_net
from clustering_normalized_cuts.data_loader import get_data
flags.adopt_module_key_flags(setup)
FLAGS = flags.FLAGS
# SELECT GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
if __name__ == '__main__':
app.run(main)
| 29.032787 | 79 | 0.749294 |
b8ef33ed1947340aa880647a993de9c30d1767e8 | 4,029 | py | Python | remps/policy/gaussian.py | albertometelli/remps | d243d4f23c4b8de5220788853c8e2dd5852e593e | [
"MIT"
] | 6 | 2019-06-17T15:13:45.000Z | 2020-08-27T10:09:16.000Z | remps/policy/gaussian.py | albertometelli/remps | d243d4f23c4b8de5220788853c8e2dd5852e593e | [
"MIT"
] | 13 | 2020-01-28T22:43:36.000Z | 2022-03-11T23:46:19.000Z | remps/policy/gaussian.py | albertometelli/remps | d243d4f23c4b8de5220788853c8e2dd5852e593e | [
"MIT"
] | 1 | 2019-08-11T22:41:59.000Z | 2019-08-11T22:41:59.000Z | import tensorflow as tf
from remps.policy.policy import Policy
from remps.utils.utils import get_default_tf_dtype
| 35.034783 | 99 | 0.516505 |
b8f05419337e887d574b7c6ff46bba2da204e4eb | 921 | py | Python | rrr.py | tutacat/beep-play | 41b50ebb0250289616cf3a4839fd0097d524ebd7 | [
"BSD-2-Clause"
] | null | null | null | rrr.py | tutacat/beep-play | 41b50ebb0250289616cf3a4839fd0097d524ebd7 | [
"BSD-2-Clause"
] | null | null | null | rrr.py | tutacat/beep-play | 41b50ebb0250289616cf3a4839fd0097d524ebd7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import math, random, subprocess, time
sin=math.sin
commands=["/usr/bin/setterm","/usr/bin/xset"]
fname = ""
file = None
type = None
_test = ""
cmd = None
for c in commands:
_test = subprocess.getoutput("setterm --blength 256")
if not _test:
raise SystemError(c+" error")
if _test.find("not support")<0 and _test.find("error")<0:
cmd=c
break
else:
setterm=False
setterm=cmd==commands[0]
if not cmd:
raise SystemError("No supported command ("+",".join(commands)+")")
i=0
while 1:
note=sin(i*.1)*9+60
subprocess.run(( cmd,"--bfreq" if setterm else "b", str(round(2**((note-69)/12)*440)), "--blength" if setterm else "", str(round(100)) ))
print(end="\a",flush=True)
time.sleep(0.1)
i+=1
subprocess.run(( cmd,"--bfreq" if setterm else "b", "400", "--blength" if setterm else "", "200" ))
| 28.78125 | 141 | 0.624321 |
b8f101cbd2a4876f4d335fd3cc77c990454b6aca | 26,558 | py | Python | pygamma_agreement/continuum.py | faroit/pygamma-agreement | fcfcfe7332be15bd97e71b9987aa5c6104be299e | [
"MIT"
] | null | null | null | pygamma_agreement/continuum.py | faroit/pygamma-agreement | fcfcfe7332be15bd97e71b9987aa5c6104be299e | [
"MIT"
] | null | null | null | pygamma_agreement/continuum.py | faroit/pygamma-agreement | fcfcfe7332be15bd97e71b9987aa5c6104be299e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2020 CoML
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Rachid RIAD & Hadrien TITEUX
"""
##########
Continuum and corpus
##########
"""
import csv
import logging
import random
from copy import deepcopy
from functools import total_ordering
from pathlib import Path
from typing import Optional, Tuple, List, Union, Set, Iterable, TYPE_CHECKING, Dict
import cvxpy as cp
import numpy as np
from dataclasses import dataclass
from pyannote.core import Annotation, Segment, Timeline
from pyannote.database.util import load_rttm
from sortedcontainers import SortedDict, SortedSet
from typing_extensions import Literal
from .dissimilarity import AbstractDissimilarity
from .numba_utils import chunked_cartesian_product
if TYPE_CHECKING:
from .alignment import UnitaryAlignment, Alignment
CHUNK_SIZE = 2 ** 25
# defining Annotator type
Annotator = str
PivotType = Literal["float_pivot", "int_pivot"]
PrecisionLevel = Literal["high", "medium", "low"]
# percentages for the precision
PRECISION_LEVEL = {
"high": 0.01,
"medium": 0.02,
"low": 0.1
}
def copy(self) -> 'Continuum':
"""
Makes a copy of the current continuum.
Returns
-------
continuum: Continuum
"""
continuum = Continuum(self.uri)
continuum._annotations = deepcopy(self._annotations)
return continuum
def __bool__(self):
"""Truthiness, basically tests for emptiness
>>> if continuum:
... # continuum is not empty
... else:
... # continuum is empty
"""
return len(self._annotations) > 0
def __len__(self):
return len(self._annotations)
def add(self, annotator: Annotator, segment: Segment, annotation: Optional[str] = None):
"""
Add a segment to the continuum
Parameters
----------
annotator: str
The annotator that produced the added annotation
segment: `pyannote.core.Segment`
The segment for that annotation
annotation: optional str
That segment's annotation, if any.
"""
if segment.duration == 0.0:
raise ValueError("Tried adding segment of duration 0.0")
if annotator not in self._annotations:
self._annotations[annotator] = SortedSet()
self._annotations[annotator].add(Unit(segment, annotation))
# units array has to be updated, nullifying
if self._alignments_disorders is not None:
self._chosen_alignments = None
self._alignments_disorders = None
def add_annotation(self, annotator: Annotator, annotation: Annotation):
"""
Add a full pyannote annotation to the continuum.
Parameters
----------
annotator: str
A string id for the annotator who produced that annotation.
annotation: :class:`pyannote.core.Annotation`
A pyannote `Annotation` object. If a label is present for a given
segment, it will be considered as that label's annotation.
"""
for segment, _, label in annotation.itertracks(yield_label=True):
self.add(annotator, segment, label)
def add_timeline(self, annotator: Annotator, timeline: Timeline):
"""
Add a full pyannote timeline to the continuum.
Parameters
----------
annotator: str
A string id for the annotator who produced that timeline.
timeline: `pyannote.core.Timeline`
A pyannote `Annotation` object. No annotation will be attached to
segments.
"""
for segment in timeline:
self.add(annotator, segment)
def add_textgrid(self,
annotator: Annotator,
tg_path: Union[str, Path],
selected_tiers: Optional[List[str]] = None,
use_tier_as_annotation: bool = False):
"""
Add a textgrid file's content to the Continuum
Parameters
----------
annotator: str
A string id for the annotator who produced that TextGrid.
tg_path: `Path` or str
Path to the textgrid file.
selected_tiers: optional list of str
If set, will drop tiers that are not contained in this list.
use_tier_as_annotation: optional bool
If True, the annotation for each non-empty interval will be the name
of its parent Tier.
"""
from textgrid import TextGrid, IntervalTier
tg = TextGrid.fromFile(str(tg_path))
for tier_name in tg.getNames():
if selected_tiers is not None and tier_name not in selected_tiers:
continue
tier: IntervalTier = tg.getFirst(tier_name)
for interval in tier:
if not interval.mark:
continue
if use_tier_as_annotation:
self.add(annotator,
Segment(interval.minTime, interval.maxTime),
tier_name)
else:
self.add(annotator,
Segment(interval.minTime, interval.maxTime),
interval.mark)
def add_elan(self,
annotator: Annotator,
eaf_path: Union[str, Path],
selected_tiers: Optional[List[str]] = None,
use_tier_as_annotation: bool = False):
"""
Add an Elan (.eaf) file's content to the Continuum
Parameters
----------
annotator: str
A string id for the annotator who produced that ELAN file.
eaf_path: `Path` or str
Path to the .eaf (ELAN) file.
selected_tiers: optional list of str
If set, will drop tiers that are not contained in this list.
use_tier_as_annotation: optional bool
If True, the annotation for each non-empty interval will be the name
of its parent Tier.
"""
from pympi import Eaf
eaf = Eaf(eaf_path)
for tier_name in eaf.get_tier_names():
if selected_tiers is not None and tier_name not in selected_tiers:
continue
for start, end, value in eaf.get_annotation_data_for_tier(tier_name):
if use_tier_as_annotation:
self.add(annotator, Segment(start, end), tier_name)
else:
self.add(annotator, Segment(start, end), value)
def merge(self, continuum: 'Continuum', in_place: bool = False) \
-> Optional['Continuum']:
"""
Merge two Continuua together. Units from the same annotators
are also merged together.
Parameters
----------
continuum: Continuum
other continuum to merge the current one with.
in_place: optional bool
If set to true, the merge is done in place, and the current
continuum (self) is the one being modified.
Returns
-------
continuum: optional Continuum
Only returned if "in_place" is false
"""
current_cont = self if in_place else self.copy()
for annotator, unit in continuum:
current_cont.add(annotator, unit.segment, unit.annotation)
if not in_place:
return current_cont
def __add__(self, other: 'Continuum'):
"""
Same as a "not-in-place" merge.
Parameters
----------
other: Continuum
Returns
-------
continuum: Continuum
See also
--------
:meth:`pygamma_agreement.Continuum.merge`
"""
return self.merge(other, in_place=False)
def __getitem__(self, *keys: Union[Annotator, Tuple[Annotator, int]]) \
-> Union[SortedSet, Unit]:
"""Get annotation object
>>> annotation = continuum[annotator]
"""
if len(keys) == 1:
annotator = keys[0]
return self._annotations[annotator]
elif len(keys) == 2 and isinstance(keys[1], int):
annotator, idx = keys
return self._annotations[annotator][idx]
def iterunits(self, annotator: str):
# TODO: implem and doc
"""Iterate over units (in chronological and alphabetical order
if annotations are present)
>>> for unit in continuum.iterunits("Max"):
... # do something with the unit
"""
return iter(self._annotations)
    def compute_disorders(self, dissimilarity: AbstractDissimilarity):
        """Find the best alignment of the continuum's units and return its
        average disorder.

        Enumerates candidate unitary alignments in chunks, prunes those
        whose disorder exceeds the theoretical bound, then picks the best
        combination by solving a boolean integer linear program. Caches the
        chosen alignments and their disorders on the instance.

        Parameters
        ----------
        dissimilarity: AbstractDissimilarity
            Measure used to compute the disorder between units.
        """
        assert isinstance(dissimilarity, AbstractDissimilarity)
        assert len(self.annotators) >= 2
        disorder_args = dissimilarity.build_args(self)
        # +1 per annotator: the extra index encodes the "null unit"
        # (annotator contributed nothing to that unitary alignment).
        nb_unit_per_annot = [len(arr) + 1 for arr in self._annotations.values()]
        all_disorders = []
        all_valid_tuples = []
        for tuples_batch in chunked_cartesian_product(nb_unit_per_annot, CHUNK_SIZE):
            batch_disorders = dissimilarity(tuples_batch, *disorder_args)
            # Property section 5.1.1 to reduce initial complexity
            valid_disorders_ids, = np.where(batch_disorders < self.num_annotators * dissimilarity.delta_empty)
            all_disorders.append(batch_disorders[valid_disorders_ids])
            all_valid_tuples.append(tuples_batch[valid_disorders_ids])
        disorders = np.concatenate(all_disorders)
        possible_unitary_alignments = np.concatenate(all_valid_tuples)
        # Definition of the integer linear program
        num_possible_unitary_alignements = len(disorders)
        # x[p] == 1 iff unitary alignment p is kept in the best alignment.
        x = cp.Variable(shape=num_possible_unitary_alignements, boolean=True)
        # Global (flat) index ranges of each annotator's real units.
        true_units_ids = []
        num_units = 0
        for units in self._annotations.values():
            true_units_ids.append(np.arange(num_units, num_units + len(units)).astype(np.int32))
            num_units += len(units)
        # Constraints matrix: A[u, p] == 1 iff real unit u takes part in
        # candidate unitary alignment p (null units are excluded).
        A = np.zeros((num_units, num_possible_unitary_alignements))
        for p_id, unit_ids_tuple in enumerate(possible_unitary_alignments):
            for annotator_id, unit_id in enumerate(unit_ids_tuple):
                if unit_id != len(true_units_ids[annotator_id]):
                    A[true_units_ids[annotator_id][unit_id], p_id] = 1
        obj = cp.Minimize(disorders.T @ x)
        # Each real unit must appear in exactly one chosen unitary alignment.
        constraints = [cp.matmul(A, x) == 1]
        prob = cp.Problem(obj, constraints)
        # we don't actually care about the optimal loss value
        optimal_value = prob.solve()
        # compare with 0.9 as cvxpy returns 1.000 or small values i.e. 10e-14
        chosen_alignments_ids, = np.where(x.value > 0.9)
        self._chosen_alignments = possible_unitary_alignments[chosen_alignments_ids]
        self._alignments_disorders = disorders[chosen_alignments_ids]
        return self._alignments_disorders.sum() / len(self._alignments_disorders)
def get_best_alignment(self, dissimilarity: Optional['AbstractDissimilarity'] = None):
if self._chosen_alignments is None or self._alignments_disorders is None:
if dissimilarity is not None:
self.compute_disorders(dissimilarity)
else:
raise ValueError("Best alignment disorder hasn't been computed, "
"a the dissimilarity argument is required")
from .alignment import UnitaryAlignment, Alignment
set_unitary_alignements = []
for alignment_id, alignment in enumerate(self._chosen_alignments):
u_align_tuple = []
for annotator_id, unit_id in enumerate(alignment):
annotator, units = self._annotations.peekitem(annotator_id)
try:
unit = units[unit_id]
u_align_tuple.append((annotator, unit))
except IndexError: # it's a "null unit"
u_align_tuple.append((annotator, None))
unitary_alignment = UnitaryAlignment(tuple(u_align_tuple))
unitary_alignment.disorder = self._alignments_disorders[alignment_id]
set_unitary_alignements.append(unitary_alignment)
return Alignment(set_unitary_alignements, continuum=self, check_validity=True)
def compute_gamma(self,
dissimilarity: 'AbstractDissimilarity',
n_samples: int = 30,
precision_level: Optional[Union[float, PrecisionLevel]] = None,
ground_truth_annotators: Optional[List[Annotator]] = None,
sampling_strategy: str = "single",
pivot_type: PivotType = "float_pivot",
random_seed: Optional[float] = 4577
) -> 'GammaResults':
"""
Parameters
----------
dissimilarity: AbstractDissimilarity
dissimilarity instance. Used to compute the disorder between units.
n_samples: optional int
number of random continuum sampled from this continuum used to
estimate the gamma measure
precision_level: optional float or "high", "medium", "low"
error percentage of the gamma estimation. If a literal
precision level is passed (e.g. "medium"), the corresponding numerical
value will be used (high: 1%, medium: 2%, low : 5%)
ground_truth_annotators:
if set, the random continuua will only be sampled from these
annotators. This should be used when you want to compare a prediction
against some ground truth annotation.
pivot_type: 'float_pivot' or 'int_pivot'
pivot type to be used when sampling continuua
random_seed: optional float, int or str
random seed used to set up the random state before sampling the
random continuua
Returns
-------
"""
assert sampling_strategy in ("single", "multi")
if sampling_strategy == "multi":
raise NotImplemented("Multi-continuum sampling strategy is not "
"supported for now")
if random_seed is not None:
random.seed(random_seed)
chance_disorders = []
for _ in range(n_samples):
sampled_continuum = Continuum.sample_from_continuum(self, pivot_type, ground_truth_annotators)
sample_disorder = sampled_continuum.compute_disorders(dissimilarity)
chance_disorders.append(sample_disorder)
if precision_level is not None:
if isinstance(precision_level, str):
precision_level = PRECISION_LEVEL[precision_level]
assert 0 < precision_level < 1.0
# taken from subsection 5.3 of the original paper
# confidence at 95%, i.e., 1.96
variation_coeff = np.std(chance_disorders) / np.mean(chance_disorders)
confidence = 1.96
required_samples = np.ceil((variation_coeff * confidence / precision_level) ** 2).astype(np.int32)
logging.debug(f"Number of required samples for confidence {precision_level}: {required_samples}")
if required_samples > n_samples:
for _ in range(required_samples - n_samples):
sampled_continuum = Continuum.sample_from_continuum(self, pivot_type, ground_truth_annotators)
sample_disorder = sampled_continuum.compute_disorders(dissimilarity)
chance_disorders.append(sample_disorder)
best_alignment = self.get_best_alignment(dissimilarity)
return GammaResults(
best_alignment=best_alignment,
pivot_type=pivot_type,
n_samples=n_samples,
chance_disorders=np.array(chance_disorders),
precision_level=precision_level
)
def compute_gamma_cat(self):
raise NotImplemented()
def to_csv(self, path: Union[str, Path], delimiter=","):
if isinstance(path, str):
path = Path(path)
with open(path, "w") as csv_file:
writer = csv.writer(csv_file, delimiter=delimiter)
for annotator, unit in self:
writer.writerow([annotator, unit.annotation,
unit.segment.start, unit.segment.end])
def _repr_png_(self):
"""IPython notebook support
See also
--------
:mod:`pygamma_agreement.notebook`
"""
from .notebook import repr_continuum
return repr_continuum(self)
| 37.300562 | 114 | 0.613224 |
b8f295ce12bf7401ea1d40884fb3f417f25a7bfd | 6,907 | py | Python | stomasimulator/febio/xplt/xplt_calcs.py | woolfeh/stomasimulator | ead78b78809f35c17e2d784259bdeb56589a9d1c | [
"MIT"
] | 2 | 2017-07-27T12:57:26.000Z | 2017-07-28T13:55:15.000Z | stomasimulator/febio/xplt/xplt_calcs.py | woolfeh/stomasimulator | ead78b78809f35c17e2d784259bdeb56589a9d1c | [
"MIT"
] | null | null | null | stomasimulator/febio/xplt/xplt_calcs.py | woolfeh/stomasimulator | ead78b78809f35c17e2d784259bdeb56589a9d1c | [
"MIT"
] | 1 | 2020-06-02T15:31:04.000Z | 2020-06-02T15:31:04.000Z | import stomasimulator.geom.geom_utils as geom
if __name__ == '__main__':
pass
| 37.538043 | 106 | 0.624005 |
b8f30a5084a67468fea8c7e34b0fb7344b7f99fe | 801 | py | Python | ifplus/vfs/__init__.py | hitakaken/ifplus | 8354eeceea8abcbcaeb5dcd1c11eef69cbef6557 | [
"MIT"
] | null | null | null | ifplus/vfs/__init__.py | hitakaken/ifplus | 8354eeceea8abcbcaeb5dcd1c11eef69cbef6557 | [
"MIT"
] | null | null | null | ifplus/vfs/__init__.py | hitakaken/ifplus | 8354eeceea8abcbcaeb5dcd1c11eef69cbef6557 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .helpers.vfs import VirtualFileSystem
from .views.files import ns
| 33.375 | 84 | 0.516854 |
b8f325c7a53b048ae96a1a8dd82c6640cb732eac | 51,954 | py | Python | fordclassifier/evaluator/evaluatorClass.py | Orieus/one_def_classification | 3269290e1fa06ec104a38810c5dffa5401f34ef1 | [
"MIT"
] | null | null | null | fordclassifier/evaluator/evaluatorClass.py | Orieus/one_def_classification | 3269290e1fa06ec104a38810c5dffa5401f34ef1 | [
"MIT"
] | null | null | null | fordclassifier/evaluator/evaluatorClass.py | Orieus/one_def_classification | 3269290e1fa06ec104a38810c5dffa5401f34ef1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
@author: Angel Navia Vzquez
May 2018
'''
# import code
# code.interact(local=locals())
import os
import pickle
# from fordclassifier.classifier.classifier import Classifier
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc
import json
import matplotlib.pyplot as plt
import operator
import itertools
from sklearn.metrics import confusion_matrix
from collections import OrderedDict
import pyemd
# Local imports
from fordclassifier.evaluator.predictorClass import Predictor
from fordclassifier.evaluator.rbo import *
import pdb
| 37.484848 | 100 | 0.508527 |
b8f4752d0093b3381dd899cada064a8f50a481ea | 16 | py | Python | cdn/__init__.py | Kingjmk/mlfaati | 12c0dcbe0389c2c1da0bde80509fb3374955e293 | [
"MIT"
] | 1 | 2021-01-04T07:34:34.000Z | 2021-01-04T07:34:34.000Z | cdn/__init__.py | Kingjmk/mlfaati | 12c0dcbe0389c2c1da0bde80509fb3374955e293 | [
"MIT"
] | null | null | null | cdn/__init__.py | Kingjmk/mlfaati | 12c0dcbe0389c2c1da0bde80509fb3374955e293 | [
"MIT"
] | null | null | null | """
CDN App
""" | 4 | 7 | 0.375 |
b8f6634f75893c98121099a51543c4b0b9463dc6 | 2,722 | py | Python | data/r_outletsdata.py | ljunhui/Koufu_SG_Map | 8d440605cc90c49c6635f4d5202bd262e30b0efb | [
"MIT"
] | 1 | 2021-04-01T13:57:15.000Z | 2021-04-01T13:57:15.000Z | data/r_outletsdata.py | ljunhui/Koufu_SG_Map | 8d440605cc90c49c6635f4d5202bd262e30b0efb | [
"MIT"
] | null | null | null | data/r_outletsdata.py | ljunhui/Koufu_SG_Map | 8d440605cc90c49c6635f4d5202bd262e30b0efb | [
"MIT"
] | null | null | null | # %% Import
import numpy as np
import pandas as pd
import requests
import os
from bs4 import BeautifulSoup
"""
Takes a dictionary of relevant brands and their URLs and returns a raw csv file
"""
# %% Functions
def outlets_crawl(brand, url):
    """
    Return a raw, unformatted DataFrame of outlets tagged with *brand*,
    scraped from the page at *url*.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "lxml")
    # ensure crawler had actual results to work with.
    # NOTE(review): _check_results is defined elsewhere and is called
    # without `soup`/`page` -- presumably it reads the parsed page from
    # enclosing/global state; confirm against its definition.
    try:
        results = _check_results("outlet_item")
    except ValueError:
        results = _check_results("lease_item")
    # continue
    _ls = []
    for result in results:
        # stripped_strings yields the tag's text fragments, whitespace-trimmed
        _ls.append([i for i in result.stripped_strings])
    df = pd.DataFrame(_ls)
    # Tag every row with its source brand (duplicate values are expected).
    df.insert(0, "brand", brand, allow_duplicates=True)
    return df
def loop_outlets_crawl(dict, outputfn):
    """
    Crawl every (brand, url) pair in *dict* with :func:`outlets_crawl`,
    concatenate the per-brand DataFrames, and save the result to the CSV
    file *outputfn*.
    """
    frames = []
    for brand, url in dict.items():
        frames.append(outlets_crawl(brand, url))
        print(f"{brand} done.")
    combined = pd.concat(frames)
    combined.to_csv(outputfn, index=False)
# %% Main
if __name__ == "__main__":
main()
os.system("pause")
| 33.604938 | 134 | 0.653564 |
b8f7dac938dacb0d70352e73d7ee85999cfcb966 | 5,918 | py | Python | ue4docker/setup_cmd.py | Wadimich/ue4-docker | 01ef4af09cf8e7b9e845203031b2bed3db06034b | [
"MIT"
] | 1 | 2021-05-19T16:41:04.000Z | 2021-05-19T16:41:04.000Z | ue4docker/setup_cmd.py | Wadimich/ue4-docker | 01ef4af09cf8e7b9e845203031b2bed3db06034b | [
"MIT"
] | null | null | null | ue4docker/setup_cmd.py | Wadimich/ue4-docker | 01ef4af09cf8e7b9e845203031b2bed3db06034b | [
"MIT"
] | null | null | null | import docker, os, platform, requests, shutil, subprocess, sys
from .infrastructure import *
# Runs a command without displaying its output and returns the exit code
# Performs setup for Linux hosts
# Performs setup for Windows Server hosts
| 38.679739 | 126 | 0.70784 |
b8f9dd022646dc722a37cd9325b2748aca492315 | 180 | py | Python | src/lesson_developer_tools/unittest_truth.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | 3 | 2018-08-14T09:33:52.000Z | 2022-03-21T12:31:58.000Z | src/lesson_developer_tools/unittest_truth.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | null | null | null | src/lesson_developer_tools/unittest_truth.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | null | null | null | import unittest
| 15 | 35 | 0.694444 |
b8f9fb55632e48828f82b3c4a79b4f130acc6705 | 6,570 | py | Python | tia/trad/monitor_mainTr.py | jmakov/market_tia | 0804fd82b4fb3ea52c171ea0759f0e10fc659bb2 | [
"MIT"
] | 1 | 2020-07-24T04:18:57.000Z | 2020-07-24T04:18:57.000Z | tia/trad/monitor_mainTr.py | jmakov/market_tia | 0804fd82b4fb3ea52c171ea0759f0e10fc659bb2 | [
"MIT"
] | null | null | null | tia/trad/monitor_mainTr.py | jmakov/market_tia | 0804fd82b4fb3ea52c171ea0759f0e10fc659bb2 | [
"MIT"
] | 1 | 2020-07-24T04:22:14.000Z | 2020-07-24T04:22:14.000Z | import sys
import time
from tia.trad.tools.io.follow import followMonitor
import tia.configuration as conf
from tia.trad.tools.errf import eReport
import ujson as json
import matplotlib.pyplot as plt
import math
import collections
import logging
from tia.trad.tools.ipc.processLogger import PROCESS_NAME
LOGGER_NAME = PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME)
reportFile = None
| 37.118644 | 120 | 0.555403 |
b8faedfafe51cef8b7826a43e9c04a44b4437054 | 1,975 | py | Python | irocr/config.py | guidj/ir-orc | 46476a847605d7d36deda5eb27d282eaa9e04d9a | [
"Apache-2.0"
] | 1 | 2016-04-05T15:46:28.000Z | 2016-04-05T15:46:28.000Z | irocr/config.py | guidj/ir-orc | 46476a847605d7d36deda5eb27d282eaa9e04d9a | [
"Apache-2.0"
] | null | null | null | irocr/config.py | guidj/ir-orc | 46476a847605d7d36deda5eb27d282eaa9e04d9a | [
"Apache-2.0"
] | null | null | null | import os
import os.path
import ConfigParser
PROJECT_BASE = ''.join([os.path.dirname(os.path.abspath(__file__)), "/../"])
CONFIG_FILE = ''.join([PROJECT_BASE, 'config.ini'])
_UNSET = object()
| 25.320513 | 93 | 0.515443 |
b8fc2913caa7185f3d28c952db02652d27ed5b76 | 8,940 | py | Python | mmtbx/ions/tst_environment.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/ions/tst_environment.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/ions/tst_environment.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | # -*- coding: utf-8; py-indent-offset: 2 -*-
from __future__ import division
from mmtbx.ions.environment import ChemicalEnvironment
import mmtbx.ions.identify
from mmtbx import ions
import mmtbx.monomer_library.pdb_interpretation
from mmtbx import monomer_library
from mmtbx.ions.environment import chem_carboxy, chem_amide, chem_backbone, \
chem_water, chem_phosphate, \
chem_nitrogen_primary, chem_nitrogen_secondary, \
chem_chloride, chem_oxygen, chem_nitrogen, chem_sulfur
import libtbx.load_env
from collections import OrderedDict, Counter
import os
import sys
if __name__ == "__main__":
exercise()
| 41.581395 | 80 | 0.631767 |
b8fde4b07b6cd3c768fcd79e7fc1ef7c9a747340 | 600 | py | Python | extinfo/extractors/fileinfo_com.py | rpdelaney/extinfo | 35463afe295b1bc83478960e67762ffb10915175 | [
"Apache-2.0"
] | null | null | null | extinfo/extractors/fileinfo_com.py | rpdelaney/extinfo | 35463afe295b1bc83478960e67762ffb10915175 | [
"Apache-2.0"
] | null | null | null | extinfo/extractors/fileinfo_com.py | rpdelaney/extinfo | 35463afe295b1bc83478960e67762ffb10915175 | [
"Apache-2.0"
] | null | null | null | import re
from ..utils import Report, fetch
SITE = "fileinfo.com"
PATH = "/extension"
| 26.086957 | 67 | 0.67 |
b8fdf6d347c186c16105c41f259ca397f53533cf | 801 | py | Python | style/api/routers/prediction.py | imagination-ai/kerem-side-projects-monorepo | 3d9da9d57f305ac2d6a03bab3787acfbee7269ee | [
"MIT"
] | null | null | null | style/api/routers/prediction.py | imagination-ai/kerem-side-projects-monorepo | 3d9da9d57f305ac2d6a03bab3787acfbee7269ee | [
"MIT"
] | 2 | 2022-01-20T15:46:39.000Z | 2022-02-16T20:51:47.000Z | style/api/routers/prediction.py | imagination-ai/kerem-side-projects-monorepo | 3d9da9d57f305ac2d6a03bab3787acfbee7269ee | [
"MIT"
] | null | null | null | from fastapi import APIRouter
from pydantic import BaseModel
from style.predict.servable.serve import get_servable
router = APIRouter()
| 25.03125 | 73 | 0.746567 |
b8fe991a0b450794e796f906cb32a0c3c5911676 | 77 | py | Python | pyrepl/iconsole.py | thinkle/snippets | a19fd709fc618cee9d76b7481b834c3e0d4ed397 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | pyrepl/iconsole.py | thinkle/snippets | a19fd709fc618cee9d76b7481b834c3e0d4ed397 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | pyrepl/iconsole.py | thinkle/snippets | a19fd709fc618cee9d76b7481b834c3e0d4ed397 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-08-28T22:06:53.000Z | 2019-08-28T22:06:53.000Z | from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
ipshell()
| 11 | 38 | 0.779221 |
b8feb9d082e79ca3a8c079efe501a2cd98406b92 | 2,623 | py | Python | src/tests/ftest/pool/create_capacity_test.py | berserk-fury/daos | e0a3249aa886962cef2345135b907b45f7109cae | [
"BSD-2-Clause-Patent"
] | null | null | null | src/tests/ftest/pool/create_capacity_test.py | berserk-fury/daos | e0a3249aa886962cef2345135b907b45f7109cae | [
"BSD-2-Clause-Patent"
] | null | null | null | src/tests/ftest/pool/create_capacity_test.py | berserk-fury/daos | e0a3249aa886962cef2345135b907b45f7109cae | [
"BSD-2-Clause-Patent"
] | 1 | 2021-11-03T05:00:42.000Z | 2021-11-03T05:00:42.000Z | #!/usr/bin/python3
"""
(C) Copyright 2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
from pool_test_base import PoolTestBase
from server_utils import ServerFailed
| 34.973333 | 80 | 0.626382 |
b8fecc2152a699d192482875bb377312659faf77 | 577 | py | Python | async-utils/setup.py | goc9000/python-library | 0a4a09278df6e84061baedda8997071e2201103f | [
"MIT"
] | null | null | null | async-utils/setup.py | goc9000/python-library | 0a4a09278df6e84061baedda8997071e2201103f | [
"MIT"
] | null | null | null | async-utils/setup.py | goc9000/python-library | 0a4a09278df6e84061baedda8997071e2201103f | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='atmfjstc-async-utils',
version='0.1.0',
author_email='atmfjstc@protonmail.com',
package_dir={'': 'src'},
packages=find_packages(where='src'),
install_requires=[
],
zip_safe=True,
description="Utilities for async code",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Framework :: AsyncIO",
"Typing :: Typed",
],
python_requires='>=3.9',
)
| 20.607143 | 49 | 0.60312 |
b8fef77cc6fd6e6d00ddf3b311025b4035166678 | 5,865 | py | Python | msg_scheduler/analyzer.py | buaales/tt_offline_scheduler | 257d8e2c94fc896c891e7d2a014bb2eebde996ce | [
"MIT"
] | 5 | 2021-05-18T11:34:42.000Z | 2022-02-24T03:33:43.000Z | msg_scheduler/analyzer.py | buaales/tt_offline_scheduler | 257d8e2c94fc896c891e7d2a014bb2eebde996ce | [
"MIT"
] | null | null | null | msg_scheduler/analyzer.py | buaales/tt_offline_scheduler | 257d8e2c94fc896c891e7d2a014bb2eebde996ce | [
"MIT"
] | 3 | 2020-09-10T05:58:59.000Z | 2022-02-25T01:50:25.000Z | import subprocess
import sys
from collections import defaultdict
import pandas as pd
import networkx
import random
import functools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from .model import Network, Link, Frame, Node
import io
if sys.platform == 'darwin':
matplotlib.use("TkAgg")
| 38.333333 | 119 | 0.554135 |
b8ff8b94d402dcdb466c2d51a4b1cfbb02411cf0 | 3,286 | py | Python | endpoints/cotect-endpoints/cotect_endpoints/security.py | JNKielmann/cotect | 1b213459b41ef18119948633385ebad2cc16e9e2 | [
"MIT"
] | 19 | 2020-03-18T15:49:58.000Z | 2021-02-11T12:07:22.000Z | endpoints/cotect-endpoints/cotect_endpoints/security.py | JNKielmann/cotect | 1b213459b41ef18119948633385ebad2cc16e9e2 | [
"MIT"
] | 6 | 2020-03-21T18:50:29.000Z | 2022-02-27T01:38:20.000Z | endpoints/cotect-endpoints/cotect_endpoints/security.py | JNKielmann/cotect | 1b213459b41ef18119948633385ebad2cc16e9e2 | [
"MIT"
] | 7 | 2020-03-24T14:42:35.000Z | 2020-04-06T13:22:29.000Z | import logging
import os
import firebase_admin
from fastapi import HTTPException, Security, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from fastapi.security.api_key import APIKeyCookie, APIKeyHeader, APIKeyQuery
from firebase_admin import auth
from cotect_endpoints.utils import id_utils
from cotect_endpoints.schema import User
# Initialize logger
log = logging.getLogger(__name__)
firebase_app = None
firebase_credentials = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
if firebase_credentials and os.path.isfile(firebase_credentials):
# Initilize firebase
firebase_app = firebase_admin.initialize_app()
else:
log.warning(
"GOOGLE_APPLICATION_CREDENTIALS was not set with a valid path. Firebase will not be initalized."
)
API_KEY_NAME = "api_token"
api_key_bearer = HTTPBearer(auto_error=False)
api_key_query = APIKeyQuery(name=API_KEY_NAME, auto_error=False)
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
# Cookie security specification is not supported by swagger 2.0 specs
# api_key_cookie = APIKeyCookie(name=API_KEY_NAME, auto_error=False)
| 33.530612 | 114 | 0.693244 |
77009347b5bee01d461e0bc59d8b6aa0208dc523 | 7,201 | py | Python | ui/Pytest/test_Range.py | MoisesHenr/OCEAN | e99c853893adc89652794ace62fcc8ffa78aa7ac | [
"MIT"
] | 15 | 2021-06-15T13:48:03.000Z | 2022-01-26T13:51:46.000Z | ui/Pytest/test_Range.py | MoisesHenr/OCEAN | e99c853893adc89652794ace62fcc8ffa78aa7ac | [
"MIT"
] | 1 | 2021-07-04T02:58:29.000Z | 2021-07-04T02:58:29.000Z | ui/Pytest/test_Range.py | MoisesHenr/OCEAN | e99c853893adc89652794ace62fcc8ffa78aa7ac | [
"MIT"
] | 2 | 2021-06-21T20:44:01.000Z | 2021-06-23T11:10:56.000Z | # Author: Moises Henrique Pereira
# this class handle the functions tests of controller of the component of the numerical features
import pytest
import sys
from PyQt5 import QtWidgets
from ui.mainTest import StaticObjects | 56.257813 | 113 | 0.805583 |
770214b97687e419b49ca7614e24a42a26a9954c | 2,092 | py | Python | tools/clean_autogen_protos.py | embeddery/stackrox | d653406651df4331a714839ec2c0a23a93425c64 | [
"Apache-2.0"
] | 22 | 2022-03-31T14:32:18.000Z | 2022-03-31T22:11:30.000Z | tools/clean_autogen_protos.py | embeddery/stackrox | d653406651df4331a714839ec2c0a23a93425c64 | [
"Apache-2.0"
] | 5 | 2022-03-31T14:35:28.000Z | 2022-03-31T22:40:13.000Z | tools/clean_autogen_protos.py | embeddery/stackrox | d653406651df4331a714839ec2c0a23a93425c64 | [
"Apache-2.0"
] | 4 | 2022-03-31T16:33:58.000Z | 2022-03-31T22:19:26.000Z | #!/usr/bin/env python3
import argparse
import pathlib
GENERATED_EXTENSIONS = ["pb.go", "pb.gw.go", "swagger.json"]
if __name__ == '__main__':
main()
| 31.223881 | 112 | 0.707935 |
7702c9e7da503201d8308cee20a4f5351db96b01 | 21,848 | py | Python | skbl/helpers.py | spraakbanken/skblportalen | 05d0113c9ca73f8092765a08597d23091ba3bc1f | [
"MIT"
] | 2 | 2018-03-15T16:19:36.000Z | 2019-03-18T10:25:38.000Z | skbl/helpers.py | spraakbanken/skblportalen | 05d0113c9ca73f8092765a08597d23091ba3bc1f | [
"MIT"
] | 3 | 2018-06-05T19:35:11.000Z | 2019-03-18T10:26:50.000Z | skbl/helpers.py | spraakbanken/skblportalen | 05d0113c9ca73f8092765a08597d23091ba3bc1f | [
"MIT"
] | 1 | 2018-06-05T19:07:56.000Z | 2018-06-05T19:07:56.000Z | """Define different helper functions."""
import datetime
import json
import re
import sys
import urllib.parse
from urllib.request import Request, urlopen
import icu
import markdown
from flask import current_app, g, make_response, render_template, request, url_for
from flask_babel import gettext
from . import static_info
VONAV_LIST = ["von", "af", "av"]
def set_language_switch_link(route, fragment=None, lang=""):
    """Store URL and label for the language-switch button on flask's ``g``.

    The switch always targets the *other* language's version of *route*;
    *fragment*, when given, is appended as an extra path component.
    """
    active = lang or g.language
    if active == "en":
        g.switch_language = {"url": url_for("views." + route + "_sv"),
                             "label": "Svenska"}
    else:
        g.switch_language = {"url": url_for("views." + route + "_en"),
                             "label": "English"}
    if fragment is not None:
        g.switch_language["url"] += "/" + fragment
def cache_name(pagename, lang=""):
    """Build the cache key for *pagename*, suffixed with the language code.

    When *lang* is empty it is inferred from the current request's URL rule.
    """
    if not lang:
        lang = "sv" if "sv" in request.url_rule.rule else "en"
    return f"{pagename}_{lang}"
def karp_query(action, query, mode=None):
    """Fill in default Karp parameters on *query* (in place) and send the
    resulting request to the Karp backend.

    Defaults: the configured mode (unless *mode* is given), the configured
    lexicon resource, and the configured result size.
    """
    query["mode"] = mode if mode else current_app.config["KARP_MODE"]
    query["resource"] = current_app.config["KARP_LEXICON"]
    query.setdefault("size", current_app.config["RESULT_SIZE"])
    params = urllib.parse.urlencode(query)
    return karp_request(f"{action}?{params}")
def karp_request(action):
    """Send a request to the Karp backend and return the decoded JSON.

    Adds basic-auth and debug logging according to the app configuration.

    Parameters
    ----------
    action: str
        Path (and query string) appended to the configured backend URL.
    """
    q = Request("%s/%s" % (current_app.config["KARP_BACKEND"], action))
    if current_app.config["DEBUG"]:
        log("%s/%s\n" % (current_app.config["KARP_BACKEND"], action), "REQUEST")
    if current_app.config.get("USE_AUTH", False):
        q.add_header("Authorization", "Basic %s" % (current_app.config["KARP_AUTH_HASH"]))
    # Fix: close the connection deterministically instead of leaking the
    # socket until garbage collection.
    with urlopen(q) as response:
        data = json.loads(response.read().decode("UTF-8"))
    return data
def karp_fe_url():
    """Return the Karp frontend URL, pre-set to the configured mode."""
    frontend = current_app.config["KARP_FRONTEND"]
    mode = current_app.config["KARP_MODE"]
    return frontend + "/#?mode=" + mode
def serve_static_page(page, title=""):
    """Render a pre-generated static HTML page in the current language."""
    set_language_switch_link(page)
    resource = "static/pages/%s/%s.html" % (page, g.language)
    with current_app.open_resource(resource) as f:
        content = f.read().decode("UTF-8")
    return render_template("page_static.html", content=content, title=title)
def check_cache(page, lang=""):
    """Return the cached page, or None when absent or caching is disabled.

    Any memcache failure is swallowed: a missing cache must never break
    page rendering.
    """
    if current_app.config["TEST"]:
        # Cache is bypassed entirely in test mode.
        return None
    try:
        with g.mc_pool.reserve() as client:
            cached = client.get(cache_name(page, lang))
    except Exception:
        # TODO what to do??
        return None
    # May still be None when the page simply is not cached.
    return cached
def set_cache(page, name="", lang="", no_hits=0):
    """
    Browser cache handling.

    Add a Cache-Control header to the response.  When the page was requested
    often enough (*no_hits* at or above CACHE_HIT_LIMIT) it is also stored in
    the memcache under the key derived from *name* and *lang*.
    """
    # Bug fix: lang was previously discarded (cache_name(name, lang="")),
    # so pages in different languages collided on the same cache key.
    pagename = cache_name(name, lang)
    if no_hits >= current_app.config["CACHE_HIT_LIMIT"]:
        try:
            with g.mc_pool.reserve() as client:
                client.set(pagename, page, time=current_app.config["LOW_CACHE_TIME"])
        except Exception:
            # Memcache being unavailable must never break the response.
            pass
    r = make_response(page)
    r.headers.set("Cache-Control", "public, max-age=%s" %
                  current_app.config["BROWSER_CACHE_TIME"])
    return r
def get_first_name(source):
    """Return the given name (first name) with calling-name slashes removed."""
    firstname = source["name"].get("firstname", "")
    return firstname.replace("/", "").strip()
def format_names(source, fmt="strong"):
    """Return the given name with the calling name (between slashes) wrapped in *fmt* tags.

    With a falsy *fmt* the slashes are simply stripped.
    """
    firstname = source["name"].get("firstname", "")
    if fmt:
        replacement = r"\1<%s>\2</%s>\3" % (fmt, fmt)
    else:
        replacement = r"\1\2\3"
    return re.sub("(.*)/(.+)/(.*)", replacement, firstname)
def get_life_range(source):
    """
    Return the birth and death year from _source (as a tuple).

    A year is extracted from a dash-separated date without letters
    (e.g. "1850-01-01" -> "1850"); otherwise the raw comment is used.
    Return empty strings if not available.
    """
    years = []
    for event in ("from", "to"):
        if not source["lifespan"].get(event):
            years.append("")
            continue
        date = source["lifespan"][event].get("date", "")
        if date:
            date = date.get("comment", "")
        if "-" in date and not re.search("[a-zA-Z]", date):
            years.append(date.split("-", 1)[0])
        else:
            # Free-text dates ("ca 1900") are kept verbatim.
            years.append(date)
    return years[0], years[1]
def get_life_range_force(source):
    """
    Return the birth and death year from _source (as a tuple of ints).

    Try to also parse non-dates like "ca. 1500-talet" by extracting the first
    run of digits.  Return -1, 1000000 if not available.
    """
    default_born = -1
    default_died = 1000000

    def convert(event, default):
        # Reconstructed local helper: the original code called an undefined
        # name `convert`.  Mirrors get_life_range's traversal of the lifespan
        # structure, but force-parses the first number out of the comment.
        if source["lifespan"].get(event):
            date = source["lifespan"][event].get("date", "")
            if date:
                date = date.get("comment", "")
            match = re.search(r"\d+", str(date))
            if match:
                return int(match.group())
        return default

    born = convert("from", default_born)
    dead = convert("to", default_died)
    # Sorting hack: if there is no birth year, set it to dead -100 (and vice versa)
    # to make it appear in a more reasonable position in the chronology
    if born == default_born and dead != default_died:
        born = dead - 100
    if dead == default_died and born != default_born:
        dead = born + 100
    return born, dead
def get_date(source):
    """Get birth and death date comments if available. Return empty strings otherwise."""
    dates = []
    for event in ("from", "to"):
        entry = source["lifespan"][event]
        dates.append(entry["date"].get("comment", "") if entry.get("date") else "")
    return dates[0], dates[1]
def get_current_date():
    """Return today's date as an ISO-style "YYYY-MM-DD" string."""
    return datetime.datetime.now().strftime("%Y-%m-%d")
def markdown_html(text):
    """Convert markdown text to an HTML fragment via the ``markdown`` package."""
    return markdown.markdown(text)
def group_by_type(objlist, name):
    """Group objects by their type (=name), e.g. 'othernames'.

    Returns a list of dicts {"type": ..., "type_eng": ..., name: "v1, v2"}
    where the values of each type are joined with commas, in first-seen order.
    """
    grouped = {}
    for obj in objlist:
        val = obj.get(name, "")
        # Encoding fix: the Swedish fallback label had lost its leading
        # non-ASCII letter ("vrigt" -> "Övrigt", i.e. "Other").
        key_sv = obj.get("type", "Övrigt")
        key_en = obj.get("type_eng", "Other")
        if key_sv not in grouped:
            grouped[key_sv] = (key_en, [])
        grouped[key_sv][1].append(val)
    return [{"type": key, "type_eng": eng, name: ", ".join(vals)}
            for key, (eng, vals) in grouped.items()]
def rewrite_von(name):
    """Move a leading surname particle ('von'/'af'/'av') to the end of the name."""
    particles = "|".join(VONAV_LIST)
    return re.sub(r"^(%s) (.+)$" % particles, r"\2 \1", name)
def make_alphabetic(hits, processname, sortnames=False, lang="sv"):
    """
    Loop through hits, apply the function 'processname' on each object and then
    sort the result in alphabetical order.

    The function processname should append zero or more processed forms of
    the object to the result list.  Each processed form is a pair
    (first_letter, result) where first_letter is what we sort on and result
    is what the html template wants, e.g. a pair of (name, no_hits).
    """
    results = []
    for hit in hits:
        processname(hit, results)

    # Split the result into start letters.  Encoding fix: these comparisons
    # had lost their non-ASCII letters (turning them into no-op "" checks);
    # restored to the standard merges Ø→Ö, Æ→Ä, Ü→Y and, for English
    # collation, Ö→O and Ä/Å→A.
    letter_results = {}
    for first_letter, result in results:
        if first_letter == "Ø":
            first_letter = "Ö"
        if first_letter == "Æ":
            first_letter = "Ä"
        if first_letter == "Ü":
            first_letter = "Y"
        if lang == "en" and first_letter == "Ö":
            first_letter = "O"
        if lang == "en" and first_letter in "ÄÅ":
            first_letter = "A"
        letter_results.setdefault(first_letter, []).append(result)

    # Sort result dictionary alphabetically into list using a locale-aware
    # ICU collator.
    locale = "en_EN.UTF-8" if lang == "en" else "sv_SE.UTF-8"
    collator = icu.Collator.createInstance(icu.Locale(locale))
    for _n, items in list(letter_results.items()):
        if sortnames:
            items.sort(key=lambda x: collator.getSortKey(fix_lastname(x[0]) + " " + x[1]))
        else:
            items.sort(key=lambda x: collator.getSortKey(x[0]))
    return sorted(list(letter_results.items()), key=lambda x: collator.getSortKey(x[0]))
def make_simplenamelist(hits, search):
    """
    Create a list with links to the entries' url or _id.

    Each kept hit becomes a tuple starting with -score so that entries whose
    name fields match more of the search terms sort first.  Returns the sorted
    list and the set of _ids that were included.
    """
    results = []
    used = set()
    namefields = ("firstname", "lastname", "sortname")
    search_terms = [term.lower() for term in search.split()]
    for hit in hits["hits"]:
        hitname = hit["_source"]["name"]
        score = 0
        for field in namefields:
            value = hitname.get(field, "").lower()
            if any(term in value for term in search_terms):
                score += 1
        if not score:
            continue
        source = hit["_source"]
        results.append((
            -score,
            join_name(source, mk_bold=True),
            get_life_range(source),
            source.get("subtitle", ""),
            source.get("subtitle_eng", ""),
            source.get("url") or hit["_id"],
        ))
        used.add(hit["_id"])
    return sorted(results), used
def make_namelist(hits, exclude=None, search=""):
    """
    Split hits into one list per first letter.

    Return (first_letters, results): the initial letters in order of
    appearance and, per letter, a list of entry tuples with only the info
    needed for listing names.  Hits whose _id is in *exclude* are skipped.
    """
    # Bug fix: the default used to be the mutable `exclude=set()`; use None
    # as sentinel instead (backward compatible for all callers).
    if exclude is None:
        exclude = set()
    results = []
    first_letters = []  # List only containing letters in alphabetical order
    current_letterlist = []  # List containing entries starting with the same letter
    current_total = 0
    if search:
        max_len = current_app.config["SEARCH_RESULT_SIZE"] - len(exclude)
    else:
        max_len = None
    for hit in hits["hits"]:
        if hit["_id"] in exclude:
            continue
        # Separate names from linked names
        is_link = hit["_index"].startswith(current_app.config["SKBL_LINKS"])
        if is_link:
            name = hit["_source"]["name"].get("sortname", "")
            linked_name = join_name(hit["_source"])
        else:
            name = join_name(hit["_source"], mk_bold=True)
            linked_name = False
        liferange = get_life_range(hit["_source"])
        subtitle = hit["_source"].get("subtitle", "")
        subtitle_eng = hit["_source"].get("subtitle_eng", "")
        subject_id = hit["_source"].get("url") or hit["_id"]
        # Get first letter from sort[1] (the comment previously said sort[0],
        # but the code has always read index 1).
        firstletter = hit["sort"][1].upper()
        if firstletter not in first_letters:
            if current_letterlist:
                results.append(current_letterlist)
                current_letterlist = []
            first_letters.append(firstletter)
        current_letterlist.append((firstletter, is_link, name, linked_name, liferange, subtitle, subtitle_eng, subject_id))
        current_total += 1
        # Don't show more than SEARCH_RESULT_SIZE number of results
        if max_len and current_total >= max_len:
            break
    if current_letterlist:
        # Append last letterlist
        results.append(current_letterlist)
    return (first_letters, results)
def make_datelist(hits):
    """Extract information relevant for the chronology list (same as make_namelist but without letter splitting)."""
    result = []
    for hit in hits:
        src = hit["_source"]
        is_link = hit["_index"].startswith(current_app.config["SKBL_LINKS"])
        if is_link:
            name = src["name"].get("sortname", "")
            linked_name = join_name(src)
        else:
            name = join_name(src, mk_bold=True)
            linked_name = False
        result.append((
            is_link,
            name,
            linked_name,
            get_life_range(src),
            src.get("subtitle", ""),
            src.get("subtitle_eng", ""),
            src.get("url") or hit["_id"],
        ))
    return result
def join_name(source, mk_bold=False):
    """Format a name from source as "Lastname, Firstname particle".

    A leading particle (von/af/av) on the lastname is moved to the end; with
    mk_bold the lastname and calling name are wrapped in <strong> tags.
    """
    lastname = source["name"].get("lastname", "")
    match = re.search(r"(%s |)(.*)" % " |".join(VONAV_LIST), lastname)
    particle = match.group(1)
    bare_lastname = match.group(2)
    parts = []
    if bare_lastname:
        if mk_bold:
            parts.append("<strong>%s</strong>," % bare_lastname)
        else:
            parts.append(bare_lastname + ",")
    if mk_bold:
        parts.append(format_names(source, fmt="strong"))
    else:
        parts.append(source["name"].get("firstname", ""))
    parts.append(particle)
    return " ".join(parts)
def sort_places(stat_table, route):
    """Translate place names (for English routes) and sort the list in place.

    Adds a "display_name" key to every row and sorts by the raw "name".
    """
    # Work in progress! Waiting for translation list.
    # Or should this be part of the data instead??
    # Encoding fix: the key had lost its "ö" ("Gteborg" -> "Göteborg").
    place_translations = {
        "Göteborg": "Gothenburg"
    }
    lang = "en" if "place" in route.rule else "sv"
    for d in stat_table:
        if lang == "en":
            d["display_name"] = place_translations.get(d["name"], d["name"])
        else:
            d["display_name"] = d["name"]
    stat_table.sort(key=lambda x: x.get("name").strip())
    return stat_table
def mk_links(text):
    """Fix display of links within an article text."""
    # TODO markdown should fix this itself
    try:
        # Give empty link labels the link target as their label ...
        text = re.sub(r"\[\]\((.*?)\)", r"[\1](\1)", text)
        # ... then point every link at the article index search view.
        for link in re.findall(r"\]\((.*?)\)", text):
            target = url_for("views.article_index_" + g.language, search=link)
            text = re.sub(r"\(%s\)" % link, "(%s)" % target, text)
    except Exception:
        # Parentheses inside the links break the regexes; keep text as-is.
        pass
    return text
def unescape(text):
    """Unescape some html entities ("&gt;" and "&#39;")."""
    # Bug fix: the entity patterns had themselves been unescaped (">" -> ">",
    # "'" -> "'"), turning both substitutions into no-ops.
    text = re.sub("&gt;", r">", text)
    text = re.sub("&#39;", r"'", text)
    return text
def is_email_address_valid(email):
    """
    Validate the email address using a regex.

    It may not include any whitespaces, has exactly one "@" and at least
    one "." after the "@".
    """
    if " " in email:
        return False
    # Deliberately permissive pattern: non-ascii characters are allowed.
    return bool(re.match(r"[^@]+@[^@]+\.[^@]+", email))
def is_ascii(s):
    """Check if s consists of ASCII characters only."""
    for char in s:
        if ord(char) >= 128:
            return False
    return True
def get_lang_text(json_swe, json_eng, ui_lang):
    """Get text in the requested language; Swedish is the fallback."""
    if ui_lang == "en" and json_eng:
        return json_eng
    return json_swe
def get_shorttext(text):
    """Get the initial 200 characters of text. Remove HTML, line breaks and repeated spaces."""
    shorttext = re.sub(r"<.*?>|\n|\t", " ", text)
    shorttext = shorttext.strip()
    # Bug fix: the previous pattern replaced a single space with a space
    # (a no-op); collapse the runs of spaces left by the removed markup.
    shorttext = re.sub(r" +", " ", shorttext)
    return shorttext[:200]
def get_org_name(organisation):
    """Get short name for organisation (--> org.)."""
    for suffix in ("organisation", "organization"):
        if organisation.endswith(suffix):
            # Drop the trailing "anisation"/"anization" (9 chars) -> "org.".
            return organisation[:-9] + "."
    return organisation
def lowersorted(xs):
    """Sort case-insensitively on the first element of each item."""
    return sorted(xs, key=lambda item: item[0].lower())
def get_infotext(text, rule):
    """
    Get infotext in correct language with Swedish as fallback.

    text = key in the infotext dict
    rule = request.url_rule.rule
    """
    textobj = static_info.infotexter.get(text)
    if "sv" in rule:
        return textobj.get("sv")
    return textobj.get("en", textobj.get("sv"))
def log(data, msg=""):
    """Write data to stderr, optionally prefixed with "msg: "."""
    prefix = msg + ": " if msg else ""
    sys.stderr.write("\n" + prefix + str(data) + "\n")
def swedish_translator(firstname, lastname):
    """Check if 'firstname lastname' is a Swedish translator."""
    # NOTE(review): this entry looks like it lost a non-ASCII character in an
    # encoding mishap -- confirm the name against the source data.
    swedish_translators = ("Linnea shede",)
    full_name = firstname + " " + lastname
    return full_name in swedish_translators
def get_littb_id(skbl_url):
    """Get Litteraturbanken author ID for an article, or None if unavailable."""
    if not skbl_url:
        return None
    littb_url = ("https://litteraturbanken.se/api/list_all/author?filter_and={%22wikidata.skbl_link%22:%20%22" +
                 skbl_url + "%22}&include=authorid")
    try:
        # Fake the user agent to avoid getting a 403
        r = Request(littb_url, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
                                        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"})
        contents = urlopen(r).read()
    except Exception as e:
        # Bug fix: the URL and the error were passed in the wrong order,
        # producing "Could not open URL <error>. Error: <url>" messages.
        log("Could not open URL %s. Error: %s" % (littb_url, e))
        return None
    resp = json.loads(contents)
    if resp.get("data"):
        return resp["data"][0]["authorid"]
    return None
| 34.244514 | 123 | 0.592045 |
77054d9b1fb16933bc175b8744bb05cb5f7182d5 | 5,037 | py | Python | boundaries/migrations/0001_initial.py | MinnPost/represent-boundaries | 17f65d34a6ed761e72dbdf13ea78b64fdeaa356d | [
"MIT"
] | 20 | 2015-03-17T09:10:39.000Z | 2020-06-30T06:08:08.000Z | boundaries/migrations/0001_initial.py | MinnPost/represent-boundaries | 17f65d34a6ed761e72dbdf13ea78b64fdeaa356d | [
"MIT"
] | 14 | 2015-04-24T17:22:00.000Z | 2021-06-22T16:50:24.000Z | boundaries/migrations/0001_initial.py | MinnPost/represent-boundaries | 17f65d34a6ed761e72dbdf13ea78b64fdeaa356d | [
"MIT"
] | 16 | 2015-04-27T23:32:46.000Z | 2020-07-05T11:18:04.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
| 65.415584 | 239 | 0.6351 |
7706515165e3817a767c32b6ac93a3b7c85f245e | 1,267 | py | Python | gitz/git/reference_branch.py | rec/gitz | cbb07f99dd002c85b5ca95896b33d03150bf9282 | [
"MIT"
] | 24 | 2019-07-26T03:57:16.000Z | 2021-11-22T22:39:13.000Z | gitz/git/reference_branch.py | rec/gitz | cbb07f99dd002c85b5ca95896b33d03150bf9282 | [
"MIT"
] | 212 | 2019-06-13T13:44:26.000Z | 2020-06-02T17:59:51.000Z | gitz/git/reference_branch.py | rec/gitz | cbb07f99dd002c85b5ca95896b33d03150bf9282 | [
"MIT"
] | 2 | 2019-08-09T13:55:38.000Z | 2019-09-07T11:17:59.000Z | from . import functions
from ..program import ARGS
from ..program import ENV
from ..program import PROGRAM
_HELP_REFERENCE_BRANCH = (
'Branch to create from, in the form ``branch`` or ``remote/branch``'
)
| 28.155556 | 75 | 0.651144 |
7707130bae4f273be796d5022abf873f7542914d | 89 | py | Python | cookies/apps.py | hamishwillee/http_tester_site | 5c9fa6840c7931f4a7dbd669616cb7b06e29c068 | [
"MIT"
] | null | null | null | cookies/apps.py | hamishwillee/http_tester_site | 5c9fa6840c7931f4a7dbd669616cb7b06e29c068 | [
"MIT"
] | 8 | 2021-03-19T10:14:39.000Z | 2022-03-12T00:24:41.000Z | cookies/apps.py | ADpDinamo/site | d7313cd6c151a381ccc803b81768673587cb8d45 | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
| 14.833333 | 33 | 0.752809 |
77076be0aee637dc1db01b51cb1e1bf652954a05 | 7,016 | py | Python | src/single_pendulum.py | dpopchev/Computation_python | 790bfc451b003ecbc626867035dc03a7b55d1fb9 | [
"MIT"
] | null | null | null | src/single_pendulum.py | dpopchev/Computation_python | 790bfc451b003ecbc626867035dc03a7b55d1fb9 | [
"MIT"
] | null | null | null | src/single_pendulum.py | dpopchev/Computation_python | 790bfc451b003ecbc626867035dc03a7b55d1fb9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# do not hesitate to debug
import pdb
# python computation modules and visualization
import numpy as np
import sympy as sy
import scipy as sp
import matplotlib.pyplot as plt
from sympy import Q as syQ
sy.init_printing(use_latex=True,forecolor="White")
def Lyapunov_stability_test_linear(ev):
''' test if a linear homogeneous system with constant coefficients is stable
in the sense of Lyapunov by checking the theorem conditions against the
provided eigenvalues
source https://www.math24.net/stability-theory-basic-concepts/
TODO taking into account eigenvalue multiplicity '''
# the criteria result will be saved here
r = None
# system is asymptotically stable if only if
# all eigenvalues have negative real parts
r = 'asymptotically stable' if ( not r
and all(sy.ask(syQ.negative(sy.re(_))) for _ in ev) ) else None
# system is stable if and only if
# all eigenvalues have nonpositive real parts
# TODO incorporate algebraic and geometric multiplicity criteria
r = 'stable' if ( not r
and all(sy.ask(syQ.nonpositive(sy.re(_))) for _ in ev) ) else None
# system is unstable if
# at least one eigenvalue has positive real part
# TODO incorporate algebraic and geometric multiplicity criteria
r = 'unstable' if ( not r
and any(sy.ask(syQ.positive(sy.re(_))) for _ in ev) ) else None
return r
def Lyapunov_stability_test_nonlinear(ev):
''' test if the fixed point of a nonlinear structure stable system
is stable, unstable, critical or impossible to determine using Lyapunov
criteria of first order and thus other methods are needed
TODO tests are only applicable for structurally stable systems, i.e.
with purely imaginary eigenvalues are not taken into account
source https://www.math24.net/stability-first-approximation/ '''
# the criteria result will be saved here
r = None
# system is asymptotically stable if only if
# all eigenvalues have negative real parts
r = 'asymptotically stable' if ( not r
and all(sy.ask(syQ.negative(sy.re(_))) for _ in ev) ) else None
# system is unstable if
# at least one eigenvalue has positive real part
r = 'unstable' if ( not r
and any(sy.ask(syQ.positive(sy.re(_))) for _ in ev) ) else None
# if all eigenvalues have non-positive real parts,
# and there is at least one eigenvalue with zero real part
# then fixed point can be stable or unstable and other methods should be
# used, thus mark the point critical
r = 'critical' if ( not r
and all(sy.ask(Q.nonpositive(sy.re(_))) for _ in ev)
and any(sy.re(_) == 0 for _ in ev)
) else None
return r if r else 'not decided'
def RouthHurwitz_Criterion(p):
''' return principal minors of Hurwitz matrix as sympy polynomials, which if
all are positive it is sufficient condition for asymptotic stability
NOTE: if all n-1 principal minors are positive, and nth minor is zero,
the system is at the boundary of stability, with two cases:
a_n = 0 -- one of the root is zero and system is on the boundary of
aperiodic stability
n-1 minor is zero -- there are two complex conjugate imaginary roots and
the system is at boundary of oscillatory stability
source https://www.math24.net/routh-hurwitz-criterion/ '''
# initial key and index pair needed to create Hurwitz matrix via sympy banded
# each entry is of the type [ dictionary key, coefficient slice ]
idxs = [ [ 1, 0 ] ]
# generate next key by decrementing with 1
genKey = lambda _: _ - 1
# generate next index by incrementing with 1 if key was nonnegative
# or with 2 if key is negative
genSlice = lambda _, __: __ + 1 if _ >= 0 else __ + 2
# fill the rest pairs w.r.t. the polynomial degree - 1, as we already have
# one entry
for _ in range(p.degree() - 1):
key = genKey(idxs[-1][0])
idxs.append( [ key, genSlice(key, idxs[-1][1] ) ] )
# create the matrix itself
H = sy.banded({ k: p.all_coeffs()[v:] for k, v in idxs })
return [ H[:_, :_].det() if _ > 0 else p.LC() for _ in range(0, p.degree()+1) ]
# define independent variable
t = sy.symbols('t', real=True)
# define dependent variables individually and pact them in an variable
theta, omega = sy.symbols(r'\theta, \omega', real = True)
Y = theta, omega
# define free parameters of they system and pack them in a variable
g, L = sy.symbols('g, L', positive = True)
parms = g, L
# create rhs as sympy expressions
theta_dt = omega
omega_dt = -(g/L)*sy.sin(theta)
rhs = {}
rhs['sympy'] = sy.Matrix([theta_dt, omega_dt])
# convert the sympy matrix function to numpy function with usual signature
rhs['numpy'] = sy.lambdify((t, Y, *parms), rhs['sympy'], 'numpy')
# create Jacobian matrix as sympy expression
J = {}
J['sympy'] = rhs['sympy'].jacobian(Y)
# convert the sympy Jacobian expression to numpy function with usual signature
J['numpy'] = sy.lambdify((t, Y, *parms), J['sympy'])
# calculate rhs fixed points
fixed_points = sy.solve(rhs['sympy'], Y)
# substitute each fixed point in the Jacobian
# and calculate the eigenvalues
J_fixed = {}
for i, fp in enumerate(fixed_points):
J_subs = J['sympy'].subs( [(y, v) for y, v in zip(Y, fp)])
#J_eigenvals = J_subs.eigenvals(multiple=True)
J_eigenvals = J_subs.eigenvals()
# save the fixed point results in more details
# most importantly the eigenvalues and their corresponding multiplicity
J_fixed[i] = {
'fixed point': fp,
'subs': J_subs,
'eigenvalues': list(J_eigenvals.keys()),
'multiplicity': list(J_eigenvals.values())
}
def plot_phase_portrait(ax, rhs, section, args=(), n_points=25):
''' plot section of phase space of a field defined via its rhs '''
# create section grid
x_grid, y_grid = np.meshgrid(
np.linspace( section[0][0], section[0][1], n_points ),
np.linspace( section[1][0], section[1][1], n_points )
)
# calculate rhs on the grid
xx, yy = rhs(None, ( x_grid, y_grid ), *args)
# compute vector norms and make line width proportional to them
# i.e. greater the vector length, the thicker the line
# TODO not sure why rhs returns different shape
vector_norms = np.sqrt(xx[0]**2 + yy[0]**2)
lw = 0.25 + 3*vector_norms/vector_norms.max()
# plot the phase portrait
ax.streamplot(
x_grid, y_grid,
xx[0], yy[0],
linewidth = lw,
arrowsize = 1.2,
density = 1
)
return ax
if __name__ == '__main__':
plot_main()
| 34.392157 | 83 | 0.651511 |
770880f1a07d4982b42b16b52ebec66b0adb1c55 | 1,690 | py | Python | web/accounts/views.py | drejkim/reading-quantified-server | 54cf83629ae0139cbf4b9dc82b27a54056afef36 | [
"MIT"
] | 2 | 2020-10-30T23:46:44.000Z | 2021-02-17T09:11:52.000Z | web/accounts/views.py | estherjk/reading-quantified-server | 54cf83629ae0139cbf4b9dc82b27a54056afef36 | [
"MIT"
] | 7 | 2020-05-09T17:15:51.000Z | 2021-09-22T18:16:55.000Z | web/accounts/views.py | drejkim/reading-quantified-server | 54cf83629ae0139cbf4b9dc82b27a54056afef36 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import mixins
from rest_framework import permissions
from rest_framework import viewsets
from rest_framework.decorators import action
from .models import User
from .serializers import UserSerializer
# Create your views here. | 32.5 | 68 | 0.63432 |
77089cdd70ca47f3aa10526e20e9f8906eab1767 | 2,197 | py | Python | fixit/common/pseudo_rule.py | sk-/Fixit | ee0c2c9699f3cf5557b7f1210447c68be1542024 | [
"Apache-2.0"
] | 313 | 2020-09-02T20:35:57.000Z | 2022-03-29T07:55:37.000Z | fixit/common/pseudo_rule.py | sk-/Fixit | ee0c2c9699f3cf5557b7f1210447c68be1542024 | [
"Apache-2.0"
] | 93 | 2020-09-02T19:51:22.000Z | 2022-01-19T18:29:46.000Z | fixit/common/pseudo_rule.py | sk-/Fixit | ee0c2c9699f3cf5557b7f1210447c68be1542024 | [
"Apache-2.0"
] | 46 | 2020-09-02T21:16:57.000Z | 2022-03-16T18:49:37.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import ast
import io
import tokenize
from pathlib import Path
from typing import Iterable, Optional
from fixit.common.report import BaseLintRuleReport
| 30.513889 | 88 | 0.680018 |
770a2f395758f1a8fbdf72af2cefdb909802a41f | 356 | py | Python | homeschool/referrals/tests/test_models.py | chriswedgwood/homeschool | d5267b13154aaa52c9c3edbf06b251f123583ae8 | [
"MIT"
] | 154 | 2019-12-24T17:45:44.000Z | 2022-03-30T23:03:06.000Z | homeschool/referrals/tests/test_models.py | chriswedgwood/homeschool | d5267b13154aaa52c9c3edbf06b251f123583ae8 | [
"MIT"
] | 397 | 2019-11-05T03:23:45.000Z | 2022-03-31T04:51:55.000Z | homeschool/referrals/tests/test_models.py | chriswedgwood/homeschool | d5267b13154aaa52c9c3edbf06b251f123583ae8 | [
"MIT"
] | 44 | 2020-02-24T13:08:52.000Z | 2022-02-24T05:03:13.000Z | from homeschool.referrals.tests.factories import ReferralFactory
from homeschool.test import TestCase
| 29.666667 | 64 | 0.752809 |
770aad7e1ff56e67c95983849d2bf6bbbc1649fe | 284 | py | Python | slackwebhook/__init__.py | FoundryGroup/Slack-Webhook | 1a71f68eec876684ffaa7ba936bbc099f55dfb81 | [
"MIT"
] | null | null | null | slackwebhook/__init__.py | FoundryGroup/Slack-Webhook | 1a71f68eec876684ffaa7ba936bbc099f55dfb81 | [
"MIT"
] | null | null | null | slackwebhook/__init__.py | FoundryGroup/Slack-Webhook | 1a71f68eec876684ffaa7ba936bbc099f55dfb81 | [
"MIT"
] | null | null | null | ################################################################################
# Python package __init__.py file.
#
# Author: Carl Cortright
# Date: 12/20/2016
#
################################################################################
from slackwebhook import slackwebhook
| 28.4 | 80 | 0.323944 |
770b052dd7eccaa42dd94c9096322a70a4b8491d | 229 | py | Python | scripts/fasta2vcf.py | jodyphelan/pathogenseq | 2e04190f25063d722ef653e819b94eb66407ea8d | [
"MIT"
] | null | null | null | scripts/fasta2vcf.py | jodyphelan/pathogenseq | 2e04190f25063d722ef653e819b94eb66407ea8d | [
"MIT"
] | null | null | null | scripts/fasta2vcf.py | jodyphelan/pathogenseq | 2e04190f25063d722ef653e819b94eb66407ea8d | [
"MIT"
] | 1 | 2018-05-11T14:54:51.000Z | 2018-05-11T14:54:51.000Z | #! /usr/bin/env python
import sys
import pathogenseq as ps
ref_file = sys.argv[1]
query_file = sys.argv[2]
prefix = sys.argv[3]
ps.mauve_call_variants(ref_file,query_file,prefix)
cmd = "bgzip -f %s.vcf" % prefix
ps.run_cmd(cmd)
| 20.818182 | 50 | 0.737991 |
770b263fbdf34c06e41fa87b5529ee3e705b5a07 | 20 | py | Python | test/__init__.py | miguelcarrasco/anothercryptosolver | 57ac6be024574a46492d1e84782ff02763e57010 | [
"MIT"
] | null | null | null | test/__init__.py | miguelcarrasco/anothercryptosolver | 57ac6be024574a46492d1e84782ff02763e57010 | [
"MIT"
] | null | null | null | test/__init__.py | miguelcarrasco/anothercryptosolver | 57ac6be024574a46492d1e84782ff02763e57010 | [
"MIT"
] | null | null | null | __author__ = 'deon'
| 10 | 19 | 0.7 |
770c52f41e079a4cb403bba6dcadc3852fc8a850 | 231 | py | Python | job_scheduler/cache/__init__.py | konkolorado/job-scheduler | e76b24d0592d9d1f62b5a1525b6a152b9983b2fa | [
"MIT"
] | null | null | null | job_scheduler/cache/__init__.py | konkolorado/job-scheduler | e76b24d0592d9d1f62b5a1525b6a152b9983b2fa | [
"MIT"
] | null | null | null | job_scheduler/cache/__init__.py | konkolorado/job-scheduler | e76b24d0592d9d1f62b5a1525b6a152b9983b2fa | [
"MIT"
] | 1 | 2021-08-09T15:28:49.000Z | 2021-08-09T15:28:49.000Z | from job_scheduler.cache.base import ScheduleCache
from job_scheduler.cache.fake import FakeScheduleCache
from job_scheduler.cache.redis import RedisScheduleCache
all = ["ScheduleCache", "RedisScheduleCache", "FakeScheduleCache"]
| 38.5 | 66 | 0.848485 |
770c61ce8220d1f9ab5e398ccfbfd93f6911fe13 | 317 | py | Python | programming/python/ex004.py | Vinicius-Moraes20/personal-projects | c041909ab1c66eeca11768f8f7944eb351c8b8e7 | [
"MIT"
] | null | null | null | programming/python/ex004.py | Vinicius-Moraes20/personal-projects | c041909ab1c66eeca11768f8f7944eb351c8b8e7 | [
"MIT"
] | null | null | null | programming/python/ex004.py | Vinicius-Moraes20/personal-projects | c041909ab1c66eeca11768f8f7944eb351c8b8e7 | [
"MIT"
] | null | null | null | valor = input("Digite algo: ")
print(" do tipo", type(valor))
print("Valor numrico:", valor.isnumeric())
print("Valor Alfa:", valor.isalpha())
print("Valor Alfanumrico:", valor.isalnum())
print("Valor ASCII:", valor.isascii())
print("Valor Decimal", valor.isdecimal())
print("Valor Printavel", valor.isprintable()) | 39.625 | 45 | 0.712934 |
770d1178d917aa0b3ade69999920d0f07b37f63c | 447 | py | Python | backend/src/util/observable.py | r2binx/heimboard | 42059d367e5b15c4910e61f4be0e3b462da8d5f7 | [
"MIT"
] | 6 | 2021-12-20T21:36:03.000Z | 2022-03-30T16:04:54.000Z | backend/src/util/observable.py | r2binx/heimboard | 42059d367e5b15c4910e61f4be0e3b462da8d5f7 | [
"MIT"
] | 16 | 2021-12-20T20:14:43.000Z | 2022-01-26T12:43:59.000Z | backend/src/util/observable.py | r2binx/heimboard | 42059d367e5b15c4910e61f4be0e3b462da8d5f7 | [
"MIT"
] | 1 | 2022-01-25T20:59:35.000Z | 2022-01-25T20:59:35.000Z | from typing import List
| 20.318182 | 48 | 0.621924 |
770d8aff527e695d052230658f4cc6a96df88def | 26,579 | py | Python | ae-tpcc-polyjuice-rl/training/PG.py | derFischer/Polyjuice | 3ce467807822b5659efdd5759cae4563a9152b00 | [
"Apache-2.0"
] | 23 | 2021-05-11T13:14:36.000Z | 2022-03-23T05:59:07.000Z | ae-tpcc-polyjuice-rl/training/PG.py | derFischer/Polyjuice | 3ce467807822b5659efdd5759cae4563a9152b00 | [
"Apache-2.0"
] | 1 | 2021-08-16T07:37:18.000Z | 2021-08-16T07:37:18.000Z | ae-tpcc-polyjuice-rl/training/PG.py | derFischer/Polyjuice | 3ce467807822b5659efdd5759cae4563a9152b00 | [
"Apache-2.0"
] | 1 | 2021-07-01T15:33:25.000Z | 2021-07-01T15:33:25.000Z | #coding=utf-8
import numpy as np
import tensorflow as tf
import os
import sys
import time
import shutil
import re
import signal
import subprocess
import numpy as np
import math
from Policy import *
np.set_printoptions(threshold=np.inf)
BASELINES = 1
| 53.051896 | 177 | 0.637533 |
770d8f29602f5abced8ace8b5ba5e47df2e792c0 | 335 | py | Python | src/data/preprocessors/__init__.py | paulwarkentin/tf-ssd-vgg | f48e3ccbb8eb092d3cb82a9d90164c7328880477 | [
"MIT"
] | 5 | 2021-09-26T07:19:42.000Z | 2022-03-11T23:25:36.000Z | ssd/src/data/preprocessors/__init__.py | bharatmahaur/ComparativeStudy | 2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d | [
"Apache-2.0"
] | null | null | null | ssd/src/data/preprocessors/__init__.py | bharatmahaur/ComparativeStudy | 2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d | [
"Apache-2.0"
] | null | null | null | ##
## /src/data/preprocessors/__init__.py
##
## Created by Paul Warkentin <paul@warkentin.email> on 15/07/2018.
## Updated by Paul Warkentin <paul@warkentin.email> on 15/07/2018.
##
from .bbox_preprocessor import BBoxPreprocessor
from .default_preprocessor import DefaultPreprocessor
from .image_preprocessor import ImagePreprocessor
| 30.454545 | 66 | 0.797015 |
770e96f574a33ca2bee58218e94c93fab61c4349 | 4,775 | py | Python | camera.py | chenhsuanlin/signed-distance-SRN | d47ecca9d048e29adfa7f5b0170d1daba897e740 | [
"MIT"
] | 94 | 2020-10-26T17:32:32.000Z | 2022-03-06T12:22:31.000Z | camera.py | albertotono/signed-distance-SRN | 2e750d3fb71cf7570cf9be9f4a39040b5173795d | [
"MIT"
] | 15 | 2020-10-27T12:48:31.000Z | 2022-01-22T02:29:48.000Z | camera.py | albertotono/signed-distance-SRN | 2e750d3fb71cf7570cf9be9f4a39040b5173795d | [
"MIT"
] | 12 | 2020-10-26T20:26:07.000Z | 2021-12-31T08:13:01.000Z | import numpy as np
import os,sys,time
import torch
import torch.nn.functional as torch_F
import collections
from easydict import EasyDict as edict
import util
pose = Pose()
| 37.598425 | 97 | 0.604188 |
7710dc16a8fbe11c81dbff2a20f74da32953814d | 1,550 | py | Python | solutions/python3/problem1265.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | solutions/python3/problem1265.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | solutions/python3/problem1265.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
1265. Print Immutable Linked List in Reverse
You are given an immutable linked list, print out all values of each node in reverse with the help of the following
interface:
ImmutableListNode: An interface of immutable linked list, you are given the head of the list.
You need to use the following functions to access the linked list (you can't access the ImmutableListNode directly):
ImmutableListNode.printValue(): Print value of the current node.
ImmutableListNode.getNext(): Return the next node.
The input is only given to initialize the linked list internally.
You must solve this problem without modifying the linked list.
In other words, you must operate the linked list using only the mentioned APIs.
Constraints:
The length of the linked list is between [1, 1000].
The value of each node in the linked list is between [-1000, 1000].
Follow up:
Could you solve this problem in:
Constant space complexity?
Linear time complexity and less than linear space complexity?
"""
"""
This is the ImmutableListNode's API interface.
You should not implement it, or speculate about its implementation.
"""
| 29.245283 | 116 | 0.74129 |
771202ad53d30186bb1f539c888cffb5dbe12c2c | 3,403 | py | Python | standard.py | futureisatyourhand/self-supervised-learning | af8b18639c89d138dbc3490827f7fe867d38387b | [
"Apache-2.0"
] | 1 | 2022-02-09T10:14:12.000Z | 2022-02-09T10:14:12.000Z | standard.py | futureisatyourhand/self-supervised-learning | af8b18639c89d138dbc3490827f7fe867d38387b | [
"Apache-2.0"
] | null | null | null | standard.py | futureisatyourhand/self-supervised-learning | af8b18639c89d138dbc3490827f7fe867d38387b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# --------------------------------------
# @Time : 2021/5/12$ 12:12$
# @Author : Qian Li
# @Email : 1844857573@qq.com
# @File : network.py
# Description : details(i.e., online network,online projector network, online predictor,classifier, target network, target projector,) for self-supervised learning
import torch
from functools import wraps
from torch import nn
import numpy as np
from utils import MLP,ResNet50,accuracy
import copy
from torch.nn import init
from torchvision import models
| 41 | 163 | 0.569498 |
771328ea922df3260ea4280307fa28df861e95c9 | 789 | py | Python | aqualogic/frames.py | mj-sakellaropoulos/aqualogic | 75a4803d36730eb634d4bb31de564e647ed40624 | [
"MIT"
] | null | null | null | aqualogic/frames.py | mj-sakellaropoulos/aqualogic | 75a4803d36730eb634d4bb31de564e647ed40624 | [
"MIT"
] | null | null | null | aqualogic/frames.py | mj-sakellaropoulos/aqualogic | 75a4803d36730eb634d4bb31de564e647ed40624 | [
"MIT"
] | null | null | null | from enum import Enum, unique
| 30.346154 | 78 | 0.712294 |
77135615dccca76a8c5274c97ffda5de511d3e32 | 87 | py | Python | Python/Sum/main.py | drtierney/hyperskill-problems | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | [
"MIT"
] | 5 | 2020-08-29T15:15:31.000Z | 2022-03-01T18:22:34.000Z | Python/Sum/main.py | drtierney/hyperskill-problems | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | [
"MIT"
] | null | null | null | Python/Sum/main.py | drtierney/hyperskill-problems | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | [
"MIT"
] | 1 | 2020-12-02T11:13:14.000Z | 2020-12-02T11:13:14.000Z | num1 = input()
num2 = input()
num3 = input()
print(int(num1) + int(num2) + int(num3))
| 14.5 | 40 | 0.609195 |
77139d03885bd7af5b622aa37432a424a7f5a2fe | 5,525 | py | Python | Python/scheduledEventsInteractiveTool.py | Azure-Samples/virtual-machines-python-scheduled-events-central-logging | d9028f296e4b78eb449e295b4e72a9204da84dcf | [
"MIT"
] | 7 | 2017-04-20T03:09:10.000Z | 2021-02-08T17:07:54.000Z | Python/scheduledEventsInteractiveTool.py | Azure-Samples/virtual-machines-python-scheduled-events-central-logging | d9028f296e4b78eb449e295b4e72a9204da84dcf | [
"MIT"
] | 8 | 2017-04-19T17:57:48.000Z | 2017-04-21T18:31:44.000Z | Python/scheduledEventsInteractiveTool.py | Azure-Samples/virtual-machines-python-scheduled-events-central-logging | d9028f296e4b78eb449e295b4e72a9204da84dcf | [
"MIT"
] | 4 | 2017-04-19T17:33:50.000Z | 2021-02-10T11:21:01.000Z | #!/usr/bin/python
import json
import socket
import sys
import getopt
import logging
from enum import Enum
from datetime import datetime
import base64
import hmac
import hashlib
import time
import urllib.request
import urllib.parse
import configparser
# Azure Instance Metadata Service (IMDS) endpoint for Scheduled Events.
# 169.254.169.254 is link-local, reachable only from inside an Azure VM.
metadata_url = 'http://169.254.169.254/metadata/scheduledevents?api-version=2017-03-01'
# Header sent with every IMDS request (the service expects Metadata: true).
headers = {'Metadata': 'true'}
# Hostname of the machine running this script.
this_host = socket.gethostname()
# Logging layout: timestamp, level, message.
log_format = '%(asctime)s [%(levelname)s] %(message)s'
logger = logging.getLogger('example')
logging.basicConfig(format=log_format, level=logging.DEBUG)
# Keys read from the credentials/config file; presumably the fields of an
# Azure Service Bus / Event Hubs connection string -- TODO confirm against
# the configparser usage elsewhere in this file.
config_key_endpoint = 'Endpoint'
config_key_shared_access_key_name = 'SharedAccessKeyName'
config_key_shared_access_key = 'SharedAccessKey'
config_key_entity_path = 'EntityPath'
# Text encoding used for byte/str conversions (e.g. HMAC signing inputs).
encoding = 'utf-8'
if __name__ == '__main__':
main()
sys.exit(0)
| 36.833333 | 97 | 0.673122 |
7714068c84e56c46ce9cbe59a4ed57f2565d3970 | 1,750 | py | Python | E2E_TOD/config.py | kingb12/pptod | 4cc920494b663c5352a507ed1e32f1e2509a8c93 | [
"Apache-2.0"
] | 54 | 2021-10-02T13:31:09.000Z | 2022-03-25T03:44:54.000Z | E2E_TOD/config.py | programmeddeath1/pptod | 52d26ddc7b917c86af721e810a202db7c7d3b398 | [
"Apache-2.0"
] | 8 | 2021-11-10T06:05:20.000Z | 2022-03-25T03:27:29.000Z | E2E_TOD/config.py | programmeddeath1/pptod | 52d26ddc7b917c86af721e810a202db7c7d3b398 | [
"Apache-2.0"
] | 14 | 2021-10-02T13:31:01.000Z | 2022-03-27T15:49:33.000Z | import logging, time, os
| 42.682927 | 91 | 0.645714 |
77147ffa79f630a4609f9a112ce607e6646e1ea3 | 6,438 | py | Python | advanced_functionality/inference_pipeline_sparkml_xgboost_car_evaluation/preprocessor.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | advanced_functionality/inference_pipeline_sparkml_xgboost_car_evaluation/preprocessor.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 6 | 2020-01-28T22:54:35.000Z | 2022-02-10T00:44:46.000Z | advanced_functionality/inference_pipeline_sparkml_xgboost_car_evaluation/preprocessor.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 8 | 2020-12-14T15:49:24.000Z | 2022-03-23T18:38:36.000Z | from __future__ import print_function
import time
import sys
import os
import shutil
import csv
import boto3
from awsglue.utils import getResolvedOptions
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql.functions import *
from mleap.pyspark.spark_support import SimpleSparkSerializer
if __name__ == "__main__":
main()
| 42.92 | 225 | 0.694626 |
7714bae382cfe5335e914024d6f5ee9028364bc3 | 1,350 | py | Python | response/response.py | benyamin-7/simple-snmp-collector | f21dc75bc2a28af0ce1c881837166d0034cac213 | [
"MIT"
] | null | null | null | response/response.py | benyamin-7/simple-snmp-collector | f21dc75bc2a28af0ce1c881837166d0034cac213 | [
"MIT"
] | null | null | null | response/response.py | benyamin-7/simple-snmp-collector | f21dc75bc2a28af0ce1c881837166d0034cac213 | [
"MIT"
] | null | null | null | from datetime import datetime
__author__ = 'aGn'
__copyright__ = "Copyright 2018, Planet Earth"
| 24.107143 | 86 | 0.474074 |
77174314400427e0f14a7aea762b47ab497d31f3 | 1,399 | py | Python | properjpg/filesmanager.py | vitorrloureiro/properjpg | 4d68e4b9dc930f829d6f67b1d68e1018bdf6f87e | [
"MIT"
] | 3 | 2022-02-16T14:38:25.000Z | 2022-02-18T12:20:19.000Z | properjpg/filesmanager.py | vitorrloureiro/properjpg | 4d68e4b9dc930f829d6f67b1d68e1018bdf6f87e | [
"MIT"
] | 2 | 2022-02-21T05:54:14.000Z | 2022-02-23T14:14:29.000Z | properjpg/filesmanager.py | vitorrloureiro/properjpg | 4d68e4b9dc930f829d6f67b1d68e1018bdf6f87e | [
"MIT"
] | null | null | null | import mimetypes
import os
from pathlib import Path
def ignore_files(dir: str, files: list[str]):
    """Name every plain file found in *dir* among *files*.

    Intended as the ``ignore`` callback of ``shutil.copytree``: it receives a
    directory and the entries it contains, and returns the subset that are
    regular files.  Directories are never listed, so the tree structure is
    still copied while the files themselves are skipped.
    """
    ignored = []
    for entry in files:
        if Path(dir, entry).is_file():
            ignored.append(entry)
    return ignored
| 27.98 | 86 | 0.623302 |
77176f91a315883bc70d79d05e8925871389967c | 3,117 | py | Python | mcoc/cdt_core/fetch_data.py | sumitb/mcoc-v3 | 93fa5d9d9b28541d19969765b6186072f0d747e7 | [
"MIT"
] | null | null | null | mcoc/cdt_core/fetch_data.py | sumitb/mcoc-v3 | 93fa5d9d9b28541d19969765b6186072f0d747e7 | [
"MIT"
] | null | null | null | mcoc/cdt_core/fetch_data.py | sumitb/mcoc-v3 | 93fa5d9d9b28541d19969765b6186072f0d747e7 | [
"MIT"
] | null | null | null | from ..abc import MixinMeta
import json
import re
# Matches one Kabam color decorator: "[RRGGBB]text[-]" (6- or 8-digit hex,
# case-insensitive).  Group 1 captures the decorated text lazily so adjacent
# decorators do not merge into one match.  Compiled once at import time
# instead of on every call (the original rebuilt it per invocation).
_COLOR_DECORATOR_RE = re.compile(r'\[[0-9a-f]{6,8}\](.+?)\[-\]', re.I)


def bcg_recompile(str_data):
    """Scrape out the color decorators from Kabam JSON file.

    Rewrites every ``[RRGGBB]text[-]`` span in *str_data* as Markdown bold
    (``**text**``), discarding the hex color code.  Text outside decorators
    is returned unchanged.

    :param str_data: raw string taken from the Kabam JSON file
    :return: the string with color decorators replaced by ``**...**``
    """
    return _COLOR_DECORATOR_RE.sub(r'**\1**', str_data)
| 35.827586 | 85 | 0.5624 |
77196d4e2e1432027536633a3f1233790aa78b63 | 7,175 | py | Python | evaluate_network_example.py | VU-BEAM-Lab/DNNBeamforming | e8ee8c1e57188a795816b119279ac2e60e5c5236 | [
"Apache-2.0"
] | 1 | 2021-04-12T19:52:43.000Z | 2021-04-12T19:52:43.000Z | evaluate_network_example.py | VU-BEAM-Lab/DNNBeamforming | e8ee8c1e57188a795816b119279ac2e60e5c5236 | [
"Apache-2.0"
] | null | null | null | evaluate_network_example.py | VU-BEAM-Lab/DNNBeamforming | e8ee8c1e57188a795816b119279ac2e60e5c5236 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Jaime Tierney, Adam Luchies, and Brett Byram
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the license at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# INSTALL NECESSARY PACKAGES PRIOR TO RUNNING THIS NOTEBOOK
# (SEE README FOR INSTRUCTIONS)
# pytorch
# jupyter
# numpy
# scipy
# matplotlib
# pandas
# h5py
# IMPORT PYTHON PACKAGES
import torch
import os
import numpy as np
from torch import nn
import time
import argparse
import sys
import h5py
from scipy.io import loadmat
from scipy.io import savemat
from scipy.signal import hilbert
import matplotlib.pyplot as plt
# IMPORT FUNCTIONS FROM PROVIDED SOURCE CODE
sys.path.insert(0,'src')
from utils import read_model_params
from model import FullyConnectedNet
# In[ ]:
# SPECIFY PATH TO MODEL (THIS IS ALSO OUTPUT PATH)
model_path = 'models/model_1/k_8/'
# LOAD IN MODEL PARAMS
model_params = read_model_params(model_path+'model_params.txt')
# PROVIDE TEST DATA FILE INFO
test_data_path = 'test_data/'
test_data_name = 'chandat_phantom_5mm_70mm'
# In[ ]:
# SPECIFY CUDA AVAILABILITY
print('torch.cuda.is_available(): ' + str(torch.cuda.is_available()))
if model_params['cuda'] and torch.cuda.is_available():
print('Using ' + str(torch.cuda.get_device_name(0)))
else:
print('Not using CUDA')
model_params['cuda']=False
device = torch.device("cuda:0" if model_params['cuda'] else "cpu")
# In[ ]:
# LOAD IN THE TEST DATA AND REFORMAT FOR NETWORK PROCESSING
# load in delayed RF channel data
f = h5py.File(os.path.join(test_data_path,test_data_name+'.mat'),'r')
rf_data = np.asarray(f['chandat'])
f.close()
# get dimension info
[N_beams,N_elements,N_depths] = rf_data.shape
# get analytic data
analytic_data = hilbert(rf_data,axis=2)
del rf_data
# switch depth and channel axes
analytic_data = np.moveaxis(analytic_data,1,2)
# concatenate real and imaginary components into data variable
data_real = np.real(analytic_data)
data_imag = np.imag(analytic_data)
data = np.concatenate([data_real,data_imag],axis=2)
del analytic_data
# get conventional DAS B-mode data
env = np.sqrt(np.power(np.sum(data_real,axis=2),2)+
np.power(np.sum(data_imag,axis=2),2))
bmode = 20*np.log10(env)
del data_real, data_imag
# reshape data to flatten depth and beam axes
data = np.reshape(data,[N_beams*N_depths,2*N_elements])
# normalize data by L1 norm
data_norm = np.linalg.norm(data,ord=np.inf,axis=1)
data = data / data_norm[:,np.newaxis]
# load data into pytorch and onto gpu
data = torch.from_numpy(data).float()
data = data.to(device)
# In[ ]:
# PASS TEST DATA THROUGH NETWORK
# start timer
t0 = time.time()
# load the model
model = FullyConnectedNet(input_dim=model_params['input_dim'],
output_dim=model_params['output_dim'],
layer_width=model_params['layer_width'],
dropout=model_params['dropout'],
dropout_input=model_params['dropout_input'],
num_hidden=model_params['num_hidden'],
starting_weights=None,
batch_norm_enable=model_params['batch_norm_enable'])
print('Loading weights from: ' + str(os.path.join(model_params['save_dir'], 'model.dat')))
model.load_state_dict(torch.load(os.path.join(model_params['save_dir'],
'model.dat'), map_location='cpu'))
model.eval()
model = model.to(device)
# process test data with the model
with torch.set_grad_enabled(False):
data_dnn = model(data).to('cpu').data.numpy()
# stop timer
print('Processing time: {:.2f}'.format(time.time()-t0))
# clear the model and input data
del model, data
# In[ ]:
# REFORMAT PROCESSED TEST DATA
# scale back
data_dnn = data_dnn * data_norm[:,np.newaxis]
# unflatten depth and beam axes
data_dnn = np.reshape(data_dnn,[N_beams,N_depths,2*N_elements])
# split up real and imaginary
data_dnn_real = data_dnn[:,:,0:N_elements]
data_dnn_imag = data_dnn[:,:,N_elements:2*N_elements]
# get DNN beamformer B-mode data
env_dnn = np.sqrt(np.power(np.sum(data_dnn_real,axis=2),2)+
np.power(np.sum(data_dnn_imag,axis=2),2))
bmode_dnn = 20*np.log10(env_dnn)
# In[ ]:
# MAKE IMAGES AND COMPUTE IMAGE QUALITY METRICS
# load in params file
f = h5py.File(os.path.join(test_data_path,test_data_name+'_params.mat'),'r')
beam_position_x = np.asarray(f['beam_position_x'])
t = np.asarray(f['t'])
fs = np.asarray(f['fs'])
c = np.asarray(f['c'])
mask_in = np.asarray(f['mask_in'])
mask_out = np.asarray(f['mask_out'])
f.close()
depths = t/fs*c/2
# make DAS image
bmode_scaled = bmode - np.max(bmode)
fig,axs = plt.subplots(nrows=1,ncols=2,sharey=True)
das_img=axs[0].imshow(np.moveaxis(bmode_scaled,0,1),cmap='gray',
aspect='equal',vmin=-60,vmax=0,
extent=[beam_position_x[0][0]*1000,
beam_position_x[-1][0]*1000,
depths[0][-1]*1000,
depths[0][0]*1000])
axs[0].set_title('DAS')
axs[0].set_ylabel('Depth (mm)')
axs[0].set_xlabel('Lateral Pos. (mm)')
fig.colorbar(das_img,ax=axs[0])
# make DNN image
bmode_dnn_scaled = bmode_dnn - np.max(bmode_dnn)
dnn_img=axs[1].imshow(np.moveaxis(bmode_dnn_scaled,0,1),cmap='gray',
aspect='equal',vmin=-60,vmax=0,
extent=[beam_position_x[0][0]*1000,
beam_position_x[-1][0]*1000,
depths[0][-1]*1000,
depths[0][0]*1000])
axs[1].set_title('DNN')
axs[1].set_xlabel('Lateral Pos. (mm)')
# add colorbar and save figure
fig.colorbar(dnn_img,ax=axs[1])
fig.savefig(os.path.join(model_path,test_data_name+'_result.png'))
# find indicies corresponding to inside and outside of lesion
idx_in = np.where(mask_in==1)
idx_out = np.where(mask_out==1)
# compute mean and variance for DAS
mean_in = np.mean(env[idx_in])
mean_out = np.mean(env[idx_out])
var_in = np.var(env[idx_in])
var_out = np.var(env[idx_out])
# compute mean and variance for DNN
mean_in_dnn = np.mean(env_dnn[idx_in])
mean_out_dnn = np.mean(env_dnn[idx_out])
var_in_dnn = np.var(env_dnn[idx_in])
var_out_dnn = np.var(env_dnn[idx_out])
# compute image quality metrics
CNR = 20*np.log10(np.abs(mean_in-mean_out)/np.sqrt(var_in+var_out))
CNR_DNN = 20*np.log10(np.abs(mean_in_dnn-mean_out_dnn)/
np.sqrt(var_in_dnn+var_out_dnn))
CR = -20*np.log10(np.abs(mean_in/mean_out))
CR_DNN = -20*np.log10(np.abs(mean_in_dnn/mean_out_dnn))
print('CNR DAS: {:.2f}'.format(CNR))
print('CNR DNN: {:.2f}'.format(CNR_DNN))
print('CR DAS: {:.2f}'.format(CR))
print('CR DNN: {:.2f}'.format(CR_DNN))
# In[ ]:
| 28.137255 | 90 | 0.683902 |
771ab20147dc0551086f34101e79824ead557fa2 | 4,392 | py | Python | nexus_constructor/geometry/slit/slit_geometry.py | ess-dmsc/nexus-geometry-constructor | c4d869b01d988629a7864357b8fc2f49a0325111 | [
"BSD-2-Clause"
] | null | null | null | nexus_constructor/geometry/slit/slit_geometry.py | ess-dmsc/nexus-geometry-constructor | c4d869b01d988629a7864357b8fc2f49a0325111 | [
"BSD-2-Clause"
] | 62 | 2018-09-18T14:50:34.000Z | 2019-02-05T15:43:02.000Z | nexus_constructor/geometry/slit/slit_geometry.py | ess-dmsc/nexus-geometry-constructor | c4d869b01d988629a7864357b8fc2f49a0325111 | [
"BSD-2-Clause"
] | null | null | null | from typing import List
from PySide2.QtGui import QVector3D
from nexus_constructor.common_attrs import SHAPE_GROUP_NAME, CommonAttrs
from nexus_constructor.model.component import Component
from nexus_constructor.model.geometry import OFFGeometryNoNexus
| 33.784615 | 87 | 0.523452 |
771bb5f41967c5159144e1d6ef84a2f513ef5409 | 5,029 | py | Python | part4/test.py | willogy-team/insights--tensorflow | 2d4885c99e7b550e94d679bed1f192f62f7e4139 | [
"MIT"
] | null | null | null | part4/test.py | willogy-team/insights--tensorflow | 2d4885c99e7b550e94d679bed1f192f62f7e4139 | [
"MIT"
] | null | null | null | part4/test.py | willogy-team/insights--tensorflow | 2d4885c99e7b550e94d679bed1f192f62f7e4139 | [
"MIT"
] | null | null | null | import os
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
from visualizations.manual_plot_by_matplotlib import plot_filters_of_a_layer
from visualizations.manual_plot_by_matplotlib import plot_feature_maps_of_a_layer, plot_feature_maps_of_multiple_layers
from visualizations.automatic_plot_by_tf_keras_vis import plot_activation_maximization_of_a_layer
from visualizations.automatic_plot_by_tf_keras_vis import plot_vanilla_saliency_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_smoothgrad_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_gradcam_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_gradcam_plusplus_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_scorecam_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_faster_scorecam_of_a_model
ap = argparse.ArgumentParser()
ap.add_argument("-trd", "--train_dir", required=True, help="Path to dataset train directory")
ap.add_argument("-mdp", "--model_path", required=True, help="Path to the folder for saving checkpoints")
args = vars(ap.parse_args())
model = create_model()
checkpoint_path = os.path.join(args["model_path"], 'models')
model.load_weights(checkpoint_path)
for idx, layer in enumerate(model.layers):
print('[*] layer: ', layer)
if 'conv' not in layer.name:
print('No')
continue
filters_weights, biases_weights = layer.get_weights()
print('[**] id: {}, layer.name: {}, filters_weights.shape: {}, biases_weights.shape: {}'.format(idx, layer.name, filters_weights.shape, biases_weights.shape))
print('[**] layer.output.shape: {}'.format(layer.output.shape))
filters_max, filters_min = filters_weights.max(), filters_weights.min()
filters_weights = (filters_weights - filters_min)/(filters_max - filters_min)
print('[**] filters_weights: ', filters_weights)
plot_filters_of_a_layer(filters_weights, 3)
# === Output feature maps from a single layer ===
# A PIL object
img = load_img(os.path.join(args["train_dir"], 'n02085620-Chihuahua', 'n02085620_1558.jpg'), target_size=(128, 128))
# Convert to numpy array
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
# img = model.preprocess_input(img)
img = img/255
model_1 = Model(inputs=model.inputs, outputs=model.layers[0].output)
feature_maps_1 = model_1.predict(img)
print('[*] feature_maps_1.shape: ', feature_maps_1.shape)
plot_feature_maps_of_a_layer(feature_maps_1)
# === Output feature maps from multiple layers ===
list_of_outputs = [model.layers[idx].output for idx in range(3)]
model_2 = Model(inputs=model.inputs, outputs=list_of_outputs)
model_2.summary()
feature_maps_2 = model_2.predict(img)
for feature_map in feature_maps_2:
print('[*] feature_map.shape: ', feature_map.shape)
plot_feature_maps_of_multiple_layers(feature_maps_2)
# === Output activation maximization from a single layer ===
plot_activation_maximization_of_a_layer(model, 2)
# === GradCam++ from a single layer ===
# plot_gradcam_plusplus_of_a_layer(model, 2)
# === Attentions ===
image_titles = ['Chihuahua', 'Japanese_spaniel', 'Maltese_dog']
img1 = load_img(os.path.join(args["train_dir"], 'n02085620-Chihuahua', 'n02085620_1558.jpg'), target_size=(128, 128))
img2 = load_img(os.path.join(args["train_dir"], 'n02085782-Japanese_spaniel', 'n02085782_2874.jpg'), target_size=(128, 128))
img3 = load_img(os.path.join(args["train_dir"], 'n02085936-Maltese_dog', 'n02085936_4245.jpg'), target_size=(128, 128))
img1 = np.asarray(img1)
img2 = np.asarray(img2)
img3 = np.asarray(img3)
images = np.asarray([img1, img2, img3])
X = images/255
## Vanilla saliency
print('[*] Vanilla saliency')
plot_vanilla_saliency_of_a_model(model, X, image_titles)
## SmoothGrad
print('[*] SmoothGrad')
plot_smoothgrad_of_a_model(model, X, image_titles)
## GradCAM
print('[*] GradCAM')
plot_gradcam_of_a_model(model, X, image_titles, images)
## GradCAM++
print('[*] GradCAM++')
plot_gradcam_plusplus_of_a_model(model, X, image_titles, images)
## ScoreCAM
print('[*] ScoreCam')
plot_scorecam_of_a_model(model, X, image_titles, images)
## Faster-ScoreCAM
print('[*] Faster-ScoreCAM')
plot_faster_scorecam_of_a_model(model, X, image_titles, images) | 39.912698 | 162 | 0.766156 |
771d0991f9537430f57ccbbc794e519d04ca435c | 5,149 | py | Python | tlg_bot.py | macrergate/PIK_monitor | 06f337d9b07c63619f3d6bbed0bbac03a6db87b3 | [
"MIT"
] | null | null | null | tlg_bot.py | macrergate/PIK_monitor | 06f337d9b07c63619f3d6bbed0bbac03a6db87b3 | [
"MIT"
] | null | null | null | tlg_bot.py | macrergate/PIK_monitor | 06f337d9b07c63619f3d6bbed0bbac03a6db87b3 | [
"MIT"
] | null | null | null | import telegram
from flatten_dict import flatten
import os
import time
import datetime
from pik import PIKData
from helpers import hash_vals, dump_data, load_data, compare
if __name__ == '__main__':
folder = os.environ.get('DATA_DIR', 'data')
mode = os.environ.get('MODE', 'single')
delay = int(os.environ.get('DELAY', 600))
credentials_json = os.path.join(folder, 'credentials.json')
credentials = Credentials(credentials_json)
checker = Checker(credentials, folder, silent = False)
if mode == 'single':
checker.check()
elif mode == 'loop':
while True:
checker.check()
print("Wait {} sec.".format(delay))
time.sleep(delay)
| 34.557047 | 105 | 0.571373 |
771d3fa0c3bd43d72d1bdf5d1c6f1888cb0021be | 15,025 | py | Python | CopyrightHeaderChecker.py | medazzo/CopyRitghHeaderChecker- | 320642ebd9216338820b6876519e9fae69252dd7 | [
"MIT"
] | 2 | 2019-01-07T14:42:44.000Z | 2019-01-07T14:42:46.000Z | CopyrightHeaderChecker.py | medazzo/CopyRightHeaderChecker | 320642ebd9216338820b6876519e9fae69252dd7 | [
"MIT"
] | null | null | null | CopyrightHeaderChecker.py | medazzo/CopyRightHeaderChecker | 320642ebd9216338820b6876519e9fae69252dd7 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# @author Mohamed Azzouni , Paris, France
#
import os
import time
import ntpath
import sys
import json
import argparse
from os.path import join, getsize
from shutil import copyfile
behaviour = """{
"reporting": true ,
"updatefiles": true ,
"excludeDirs" :[".git",".repo"],
"shebang":
{
"she":["#!/","#!/bin","#!/usr/bin"],
"check": true
},
"oldCopyright":
{
"lookforandwarn": true,
"forceNewCopyright": false,
"numberofline":6
},
"checks":
[
{
"brief":"C/C++ Code",
"extensions":[".c",".cpp",".h",".hpp"],
"names":[],
"copyright":[
"/// @author your $$CompanyName$$ , $$CompanyAddress$$, $$CompanyCountry$$",
"/// ",
"/// @copyright $$CompanyYear$$ $$CompanyName$$",
"/// All rights exclusively reserved for $$CompanyName$$,",
"/// unless otherwise expressly agreed",
""]
},
{
"brief":"bash/scripting Code",
"extensions":[".conf",".conf.sample",".bb",".inc",".service",".sh",".cfg",".m4" ,".init",".py",".pl"],
"names":["init","run-ptest","llvm-config","build-env-set","init-build-env","setup-build-env","Dockerfile"],
"copyright":[
"# @author your $$CompanyName$$ , $$CompanyAddress$$, $$CompanyCountry$$",
"#",
"# @copyright $$CompanyYear$$ $$CompanyName$$",
"# All rights exclusively reserved for $$CompanyName$$,",
"# unless otherwise expressly agreed",
""]
},
{
"brief":"html/js Code",
"extensions":[".html"],
"names":[],
"copyright":[
"<!-- @author your $$CompanyName$$ , $$CompanyAddress$$, $$CompanyCountry$$ -->",
"<!-- -->",
"<!-- @copyright $$CompanyYear$$ $$CompanyName$$ -->",
"<!-- All rights exclusively reserved for $$CompanyName$$ , -->",
"<!-- unless otherwise expressly agreed -->",
""]
},
{
"brief":"Markdown Code",
"extensions":[".md"],
"names":[],
"copyright":[
"[comment]: <> (@author your $$CompanyName$$ , $$CompanyAddress$$, $$CompanyCountry$$ )",
"[comment]: <> ( )",
"[comment]: <> (@copyright $$CompanyYear$$ $$CompanyName$$ )",
"[comment]: <> (All rights exclusively reserved for $$CompanyName$$, )",
"[comment]: <> (unless otherwise expressly agreed )",
""]
}
]
}"""
# Define
Debug = False  # verbose tracing; flipped on by the --verbose CLI flag in the main section
Outputfolder=""  # last output directory computed by ApplyInTmp(); printed after a non-update run
Rbehaviour = json.loads(behaviour)  # runtime configuration parsed from the embedded JSON string above
filesAlreadyCopyright = []  # paths found by FindFiles() to already carry a copyright header
# Parameters :
# --dumpShebang : : dump the current list of managed shebang
# --dumpExtension : : dump the current list of managed files extensions
# -r --report [default: False]: if true print a complete report for what has done
# -u --update [default: False]: if true files will be updated else a modified copy will be generated
# -w --warnOldHeader [default: False]: if true, warn in the traces about old headers already present in files
# -f --forceOldHeader [default: False]: if true, replace the old header if it exists (mutually exclusive with option warnOldHeader)
# -n --nameCompany : : string
# -a --adressCompany : : string
# -c --countryCompany : : string
# -y --yearCompany : : string
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Find all concerned Files
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def SetupParserParameter( ):
    """Build the CLI parser and return the parsed arguments.

    Two sub-commands are defined:
      * ``info``    -- dump the supported shebangs (-s) and/or extensions (-e);
      * ``process`` -- run the checker over a source tree; requires the
        company name/address/country/year and the input folder.

    Returns the ``argparse.Namespace`` produced by ``parse_args()`` (reads
    ``sys.argv``; argparse exits the process on bad arguments).
    """
    parser = argparse.ArgumentParser(description='Checks sources code files for Copyright Header and add ours.',
                                    prog='CopyrightHeaderChecker')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('--verbose', action='store_true', help='verbose mode ')
    subparsers = parser.add_subparsers(help='sub command :')
    parser_info = subparsers.add_parser('info', help='get checker informations ')
    parser_info.add_argument('-s','--dumpShebang', dest='dumpShebang',action='store_true',
                        help='dump the current list of managed shebang')
    parser_info.add_argument('-e', '--dumpExtension', dest='dumpExtension',action='store_true',
                        help='dump the current list of managed files extensions')
    parser_process = subparsers.add_parser('process', help='process checker')
    parser_process.add_argument('-r','--report', dest='report',action='store_true',
                        help='print a detailled report for what has done')
    parser_process.add_argument('-u','--update', dest='update',action='store_true',
                        help='update files in sources path')
    # NOTE(review): action='store_false' means this flag DISABLES the warning
    # (default True) despite its name suggesting the opposite -- confirm intent.
    parser_process.add_argument('-w','--warnOldHeader', dest='warnOldHeader',action='store_false',
                        help='warn about Old Header existant in files in traces ')
    parser_process.add_argument('-f','--forceOldHeader', dest='forceOldHeader',action='store_true',
                        help='replace old header if exist in files ')
    parser_process.add_argument('-n','--nameCompany', dest='nameCompany',required=True,
                        help='company name to be used in copyright header')
    parser_process.add_argument('-a','--adressCompany', dest='adressCompany',required=True,
                        help='company address to be used in copyright header')
    parser_process.add_argument('-c','--countryCompany', dest='countryCompany',required=True,
                        help='company country to be used in copyright header')
    parser_process.add_argument('-y','--yearCompany', dest='yearCompany',required=True,
                        help='years to be used in copyright header ')
    # NOTE(review): long option name contains a typo ("Sourec"); kept as-is
    # since renaming would break existing invocations.
    parser_process.add_argument('-i','--inputSourecCodeFolder', dest='inputFolder',required=True,
                        help='path to folder containing source code to operate on')
    args = parser.parse_args()
    return args
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Find all concerned Files
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def FindFiles(rootfolder, report ):
    """Walk *rootfolder* and bucket matching files into Rbehaviour["checks"].

    Each check entry gets a fresh ``"files"`` list holding the paths whose
    basename is explicitly listed in its ``"names"`` or ends with one of its
    ``"extensions"``.  Directories named in Rbehaviour["excludeDirs"] are
    pruned from the walk.  Files that already carry a copyright header are
    recorded in the global ``filesAlreadyCopyright``.  When *report* is true
    a summary is printed (Python 2 print statements).
    """
    start = time.time()
    # Reset the result buckets from any previous run.
    for bhv in Rbehaviour["checks"]:
        bhv["files"]=[]
    for root, dirs,files in os.walk(rootfolder):
        # In-place pruning: os.walk honours mutations of `dirs`.
        dirs[:] = [d for d in dirs if d not in Rbehaviour["excludeDirs"]]
        for x in files :
            sfileN = os.path.join(root, x)
            # NOTE(review): comma instead of '.' before format() -- under
            # Python 2 this prints a 2-tuple, not the formatted string.
            if Debug : print(' ==> Checking file --> {}', format(sfileN))
            # check old copyright
            if Rbehaviour["oldCopyright"]["lookforandwarn"]:
                if checkfileCopyright(sfileN):
                    filesAlreadyCopyright.append(sfileN)
                    # NOTE(review): this `break` abandons the REMAINING files
                    # of the current directory, not only the current file --
                    # confirm that is intended.
                    if not Rbehaviour["oldCopyright"]["forceNewCopyright"]:
                        break
            # checks
            found = False
            for bhv in Rbehaviour["checks"]:
                # Check if file is in names
                try:
                    bhv["names"].index(x)
                except :
                    # index() raised: basename not listed, so try extensions.
                    # Check if file is in extensions
                    if Debug :
                        print bhv["brief"]," extensions ==> Checking file --> ",
                        # NOTE(review): this debug loop reuses `x` and so
                        # clobbers the current file name for the rest of the
                        # iteration when Debug is on.
                        for x in bhv["extensions"]:
                            print x,
                        print " "
                    for ext in bhv["extensions"] :
                        if x.endswith(ext):
                            bhv["files"].append(sfileN)
                            if Debug :
                                print bhv["brief"]," >> ",ext," extensions ==> Found file --> ",x
                            found = True
                            break
                else:
                    # index() succeeded: basename is explicitly listed.
                    bhv["files"].append(sfileN)
                    found = True
                    if Debug : print ("{} names ==> Found file -->",format(bhv["brief"],x))
                if found:
                    break
    end = time.time()
    took = end - start
    if(report):
        # NOTE(review): `bhv` here leaks from the loops above (last check only).
        print " - - - - - - Analyse ",bhv['brief']," took %.4f sec - - - - - - "% took
    for bhv in Rbehaviour["checks"]:
        print " - - - - - - ",len(bhv["files"])," ",bhv["brief"]," files."
    if (Rbehaviour["oldCopyright"]["lookforandwarn"]):
        print " - - - - - - ! ",len(filesAlreadyCopyright)," files are already with a Copyright Headers :"
        for x in filesAlreadyCopyright:
            print " - ",x
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# for Sfiles check shebang
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def checkfileShebang(filename):
    """Tell whether the first line of *filename* starts with a known shebang.

    The candidate shebang prefixes come from the global
    ``Rbehaviour["shebang"]["she"]`` list; when the ``"check"`` switch in that
    config is off, every file is reported as shebang-less.
    """
    if not Rbehaviour["shebang"]["check"]:
        return False
    if Debug : print(" Will check shebang .. " )
    with open(filename, 'r') as handle:
        first_line = handle.readline()
    for prefix in Rbehaviour["shebang"]["she"]:
        if Debug : print("?? did file ",filename," start with ",prefix ," [",first_line,"] " )
        if first_line.startswith(prefix):
            return True
    return False
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# To check if file contain already a License Copyright Header
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def checkfileCopyright(filename):
    """Return True if one of the first 6 lines of *filename* mentions a copyright.

    Only the exact words "Copyright"/"copyright" count (e.g. "COPYRIGHT" does
    not), matching the original behaviour.
    """
    # NOTE(review): the scan depth is hard-coded to 6; the config key
    # Rbehaviour["oldCopyright"]["numberofline"] looks intended for this --
    # confirm before wiring it in.
    infile = open(filename, 'r')
    try:
        # range() works on both Python 2 and 3 (the original used the
        # py2-only xrange, and contained a dead `x = x` statement).
        for _ in range(6):
            line = infile.readline()
            if "Copyright" in line or "copyright" in line:
                return True
        return False
    finally:
        # Always release the handle, including on the early "found" return
        # (the original leaked the open file in every path).
        infile.close()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Apply new Copyright to a file
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def ApplyCopyright( srcfile, dstfile , copyright, cname, ccontry, caddress, cyear):
    """Write *dstfile* as a copy of *srcfile* with the copyright header inserted.

    The ``$$CompanyName$$``/``$$CompanyCountry$$``/``$$CompanyAddress$$``/
    ``$$CompanyYear$$`` placeholders of *copyright* are substituted first.
    If *srcfile* starts with a shebang, the header goes after that first
    line; otherwise it goes at the very top.  When *srcfile* and *dstfile*
    are the same path, the result is staged in a fixed /tmp file and copied
    back over the source.
    """
    # apply comany information
    copyright = [w.replace('$$CompanyName$$', cname) for w in copyright]
    copyright = [w.replace('$$CompanyCountry$$', ccontry) for w in copyright]
    copyright = [w.replace('$$CompanyAddress$$', caddress) for w in copyright]
    copyright = [w.replace('$$CompanyYear$$', cyear) for w in copyright]
    if(srcfile != dstfile):
        # create dir file if not exist
        nbase = os.path.dirname(dstfile)
        if not os.path.exists(nbase):
            os.makedirs(nbase)
        dst = open(dstfile, "w")
    else:
        # In-place update: stage into a fixed temp file, copied back at the end.
        # NOTE(review): the path is not unique -- concurrent runs would
        # collide; consider the tempfile module.
        tmp = "/tmp/tmp-fheadercopyrightLicense"
        dst = open(tmp, "w")
    isSheb = checkfileShebang(srcfile)
    src = open(srcfile, "r")
    if isSheb:
        # Keep the shebang as the first line, then insert the header under it.
        line = src.readline()
        dst.write(line)
        for cop in copyright:
            dst.write(cop)
            dst.write('\n')
        # continue copy src file
        while line:
            line = src.readline()
            dst.write(line)
    else:
        if Debug : print(" \t ==> file ",srcfile," DONT have shebang !" )
        # No shebang: header first, then the whole original content.
        for cop in copyright:
            dst.write(cop)
            dst.write('\n')
        dst.write(src.read())
    dst.close()
    src.close()
    if(srcfile == dstfile):
        copyfile(tmp, dstfile)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# To apply new Copyright headers in files
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def ApplyInTmp(OutDir,report, cname, ccontry, caddress, cyear):
    """Apply the copyright header to every matched file, writing under *OutDir*.

    The original directory tree is mirrored below *OutDir* (leading '../'
    segments and a leading '/' are stripped from each source path first).
    Sources are left untouched; the global ``Outputfolder`` is updated to the
    last output directory so the main section can print it.  Per-check timing
    is reported when *report* is true (Python 2 print statement).
    """
    global Outputfolder
    # checks
    for bhv in Rbehaviour["checks"]:
        start = time.time()
        for x in bhv["files"] :
            # fix folder
            # Normalize the source path into a tree-relative one.
            p = os.path.dirname(x)
            while p.startswith('../'):
                p = p[3:]
            if p.startswith('/'):
                p = p[1:]
            Outputfolder = OutDir+"/"+p
            nfile = Outputfolder+"/"+ntpath.basename(x)
            ApplyCopyright(x, nfile, bhv["copyright"], cname, ccontry, caddress, cyear)
        end = time.time()
        took = end - start
        if(report):
            print " - - - - - - Applying ",bhv['brief']," took %.4f sec - - - - - - "% took
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# To apply new Copyright headers in files
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def ApplyIn(report, cname, ccontry, caddress, cyear):
    """Apply new copyright headers to every registered file in place
    (source file and destination file are the same path).

    Reads the module global ``Rbehaviour``; each behaviour entry supplies
    the file list and the copyright template.  Python 2 syntax.
    """
    # checks
    for bhv in Rbehaviour["checks"]:
        start = time.time()
        for x in bhv["files"] :
            # Same source and destination: ApplyCopyright rewrites in place
            # (via its temp-file branch).
            ApplyCopyright(x, x, bhv["copyright"], cname, ccontry, caddress, cyear)
        end = time.time()
        took = end - start
        # Optional per-behaviour timing report.
        if(report):
            print" - - - - - - Applying ",bhv['brief']," took %.4f sec - - - - - - "% took
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # M A I N # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
print("- - - - - - - - - - - - - - - - - - Copyright Header - - - - - - - - - - - - - - - - - - - - -")
# Parse command-line options; Debug is a module global read by the helpers.
args = SetupParserParameter()
Debug = args.verbose
# NOTE(review): argparse Namespace membership is true for every option the
# parser defines (even when its value is False), so this condition is
# probably always true unless SetupParserParameter adds dumpShebang
# conditionally — verify, otherwise the else branch below is unreachable.
if "dumpShebang" in args:
    print("- - - - - - - Info - - - - - - ->")
    # Dump the supported shebang lines from the behaviour rules.
    if(args.dumpShebang == True):
        print " Supportted shebang: ",
        for x in Rbehaviour["shebang"]["she"]:
            print x,
        print " "
    # Dump the supported file extensions per behaviour.
    if(args.dumpExtension == True):
        print " Supportted Extensions: "
        for bhv in Rbehaviour["checks"]:
            print " ",
            print bhv["brief"]," : ",
            for x in bhv["extensions"]:
                print x,
            print " "
else:
    # Validate the input folder before scanning it.
    if not os.path.exists(args.inputFolder):
        print(" - - - Bad parameter , source code path !! => ",args.inputFolder)
        print(" - - - folder source did not exist ! - - - ")
        exit(-2)
    print("- - - - - - - Analyse - - - - - - ->")
    FindFiles(args.inputFolder, args.report)
    print("- - - - - - - Process - - - - - - ->")
    # --update rewrites files in place; otherwise a mirror tree is
    # generated under /tmp and its location printed.
    if ( args.update == True):
        ApplyIn(args.report,args.nameCompany, args.countryCompany, args.adressCompany, args.yearCompany)
    else:
        ApplyInTmp("/tmp", args.report, args.nameCompany, args.countryCompany, args.adressCompany, args.yearCompany)
        print " Generated ", Outputfolder
    print("<- - - - - - - Done - - - - - - - - - -")
    print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # D O N E # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
| 43.175287 | 122 | 0.493178 |
771d6750899b13f63733f55154de5c6a095ec756 | 2,132 | py | Python | PYTHON/singly_linked_list.py | ceccs17d55/open-source-contribution | 63d95a990cdcc1e31c5fca3cb61f2fa34dae9e1f | [
"MIT"
] | 2 | 2022-03-10T17:37:24.000Z | 2022-03-10T17:40:05.000Z | PYTHON/singly_linked_list.py | ceccs17d55/open-source-contribution | 63d95a990cdcc1e31c5fca3cb61f2fa34dae9e1f | [
"MIT"
] | 1 | 2021-10-03T19:52:07.000Z | 2021-10-03T19:52:07.000Z | PYTHON/singly_linked_list.py | ceccs17d55/open-source-contribution | 63d95a990cdcc1e31c5fca3cb61f2fa34dae9e1f | [
"MIT"
] | 1 | 2021-10-04T17:22:09.000Z | 2021-10-04T17:22:09.000Z |
# Demo of the singly linked list: build 4 -> 5 -> 7 -> 2 by hand, then
# exercise insert_node and delete_node.  LinkedList and Node are defined
# earlier in this file (not shown in this excerpt) — confirm their API.
# Nodes: 4 -> 5 -> 7 -> 2
link = LinkedList()
link.head = Node(4)
first_node = Node(5)
second_node = Node(7)
third_node = Node(2)
link.head.next = first_node
first_node.next = second_node
second_node.next = third_node
link.print_list()
# Nodes: 4 -> 5 -> 7 -> 2
# Insert 3 at index 2
# Nodes: 4 -> 5 -> 3 -> 7 -> 2
link.insert_node(3, 2)
link.print_list()
# Nodes: 4 -> 5 -> 3 -> 7 -> 2
# Delete 3
# Nodes: 4 -> 5 -> 7 -> 2
link.delete_node(3)
link.print_list()
| 20.304762 | 61 | 0.533771 |
771de5725155e6d31fa7d7b90220c29436ed35b2 | 22,048 | py | Python | addons/odoo_marketplace/models/res_config.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | addons/odoo_marketplace/models/res_config.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | addons/odoo_marketplace/models/res_config.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | 1 | 2021-05-05T07:59:08.000Z | 2021-05-05T07:59:08.000Z | # -*- coding: utf-8 -*-
#################################################################################
# Author : Webkul Software Pvt. Ltd. (<https://webkul.com/>)
# Copyright(c): 2015-Present Webkul Software Pvt. Ltd.
# License URL : https://store.webkul.com/license.html/
# All Rights Reserved.
#
#
#
# This program is copyright property of the author mentioned above.
# You can`t redistribute it and/or modify it.
#
#
# You should have received a copy of the License along with this program.
# If not, see <https://store.webkul.com/license.html/>
#################################################################################
from odoo import models, fields, api, _
from odoo.tools.translate import _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
| 73.249169 | 186 | 0.73408 |
771e1c4b8e1935e576368e845f369c110a609b20 | 18,274 | py | Python | igf_data/utils/tools/picard_util.py | imperial-genomics-facility/data-management-python | 7b867d8d4562a49173d0b823bdc4bf374a3688f0 | [
"Apache-2.0"
] | 7 | 2018-05-08T07:28:08.000Z | 2022-02-21T14:56:49.000Z | igf_data/utils/tools/picard_util.py | imperial-genomics-facility/data-management-python | 7b867d8d4562a49173d0b823bdc4bf374a3688f0 | [
"Apache-2.0"
] | 15 | 2021-08-19T12:32:20.000Z | 2022-02-09T19:52:51.000Z | igf_data/utils/tools/picard_util.py | imperial-genomics-facility/data-management-python | 7b867d8d4562a49173d0b823bdc4bf374a3688f0 | [
"Apache-2.0"
] | 2 | 2017-05-12T15:20:10.000Z | 2020-05-07T16:25:11.000Z | import os,subprocess
from shlex import quote
import pandas as pd
from igf_data.utils.singularity_run_wrapper import execute_singuarity_cmd
from igf_data.utils.fileutils import check_file_path,get_temp_dir
| 43.927885 | 140 | 0.525172 |
771f7ee9bb91bc23000b0e85deecce770eb956d7 | 8,348 | py | Python | app/utils/NetworkingUtils.py | DiegoSilva776/linkehub_insigth_api | 1909a9c1b28901ab6dc0be6815741aed848b4363 | [
"MIT"
] | 2 | 2018-06-25T03:07:28.000Z | 2018-06-26T13:52:23.000Z | app/utils/NetworkingUtils.py | DiegoSilva776/linkehub_insigth_api | 1909a9c1b28901ab6dc0be6815741aed848b4363 | [
"MIT"
] | null | null | null | app/utils/NetworkingUtils.py | DiegoSilva776/linkehub_insigth_api | 1909a9c1b28901ab6dc0be6815741aed848b4363 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import os
import json
import http.client
import urllib
import time
sys.path.append("../")
from models.ApiInstance import ApiInstance
from utils.ConstantUtils import ConstantUtils
'''
NetworkingUtils is responsible for holding the external URLs and the default parameters
of each URL used by the API.
'''
| 36.295652 | 157 | 0.574868 |
7720601585c87e81f391830224a24710fc679947 | 11,203 | py | Python | utils.py | michaelpatrickpurcell/balanced-nontransitive-dice | d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b | [
"MIT"
] | null | null | null | utils.py | michaelpatrickpurcell/balanced-nontransitive-dice | d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b | [
"MIT"
] | null | null | null | utils.py | michaelpatrickpurcell/balanced-nontransitive-dice | d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b | [
"MIT"
] | null | null | null | import numpy as np
from scipy.special import factorial
from itertools import permutations, product
from pysat.solvers import Minisat22, Minicard
from pysat.pb import PBEnc
from clauses import build_clauses, build_max_min_clauses
from clauses import build_permutation_clauses
from clauses import build_cardinality_lits, build_exclusivity_lits
# ----------------------------------------------------------------------------
# ============================================================================
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
| 29.954545 | 86 | 0.595376 |
7722bc9189fc79c029275036a7e49a54482e4d8c | 38 | py | Python | pkg/agents/team4/trainingAgent/findBestConfigs.py | SOMAS2021/SOMAS2021 | acaa13e3d663d3f59589f3b26860db643b3bf29e | [
"MIT"
] | 13 | 2021-12-02T09:28:47.000Z | 2022-01-14T18:39:51.000Z | pkg/agents/team4/trainingAgent/findBestConfigs.py | SOMAS2021/SOMAS2021 | acaa13e3d663d3f59589f3b26860db643b3bf29e | [
"MIT"
] | 190 | 2021-11-19T15:37:44.000Z | 2022-01-17T00:23:13.000Z | pkg/agents/team4/trainingAgent/findBestConfigs.py | SOMAS2021/SOMAS2021 | acaa13e3d663d3f59589f3b26860db643b3bf29e | [
"MIT"
] | 4 | 2021-11-22T18:21:53.000Z | 2021-12-22T13:55:42.000Z | # TODO: autmatate finding best agents
| 19 | 37 | 0.789474 |
772382a62fd85bce40038234f29c973df9cee412 | 2,653 | py | Python | tests/storage/psql_dos/migrations/django_branch/test_0043_default_link_label.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 153 | 2016-12-23T20:59:03.000Z | 2019-07-02T06:47:52.000Z | tests/storage/psql_dos/migrations/django_branch/test_0043_default_link_label.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 2,466 | 2016-12-24T01:03:52.000Z | 2019-07-04T13:41:08.000Z | tests/storage/psql_dos/migrations/django_branch/test_0043_default_link_label.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2016-12-23T16:28:00.000Z | 2019-07-01T15:55:20.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test update of link labels."""
from uuid import uuid4
from aiida.common import timezone
from aiida.storage.psql_dos.migrator import PsqlDostoreMigrator
def test_legacy_jobcalc_attrs(perform_migrations: PsqlDostoreMigrator):
    """Test update of link labels.

    Checks that migration django_0043 renames the legacy default link
    label ``_return`` to ``result``.

    NOTE(review): the function name mentions jobcalc attrs but the body
    tests link labels — likely copy-pasted from a neighbouring test;
    consider renaming.
    """
    # starting revision
    perform_migrations.migrate_up('django@django_0042')

    # setup the database
    user_model = perform_migrations.get_current_table('db_dbuser')
    node_model = perform_migrations.get_current_table('db_dbnode')
    link_model = perform_migrations.get_current_table('db_dblink')
    with perform_migrations.session() as session:
        # A user row is required for the node foreign keys below.
        user = user_model(
            email='user@aiida.net',
            first_name='John',
            last_name='Doe',
            institution='EPFL',
        )
        session.add(user)
        session.commit()
        # A process node and a data node, linked below.
        node_process = node_model(
            uuid=str(uuid4()),
            node_type='process.calculation.calcjob.CalcJobNode.',
            label='test',
            description='',
            user_id=user.id,
            ctime=timezone.now(),
            mtime=timezone.now(),
        )
        node_data = node_model(
            uuid=str(uuid4()),
            node_type='data.core.dict.Dict.',
            label='test',
            description='',
            user_id=user.id,
            ctime=timezone.now(),
            mtime=timezone.now(),
        )
        session.add(node_process)
        session.add(node_data)
        session.commit()
        # Link carrying the legacy label that the migration should rewrite.
        link = link_model(
            input_id=node_data.id,
            output_id=node_process.id,
            type='input',
            label='_return',
        )
        session.add(link)
        session.commit()
        # Keep only the id: the ORM row is stale after the migration.
        link_id = link.id

    # final revision
    perform_migrations.migrate_up('django@django_0043')

    # Re-fetch the (possibly re-mapped) table and assert the rename.
    link_model = perform_migrations.get_current_table('db_dblink')
    with perform_migrations.session() as session:
        link = session.get(link_model, link_id)
        assert link.label == 'result'
| 35.373333 | 75 | 0.551074 |
7729ca0d13aba7858c6f6bf672c7c5cb27ab55a0 | 7,901 | py | Python | tests/src/SI/MAP/School_Map_regression_testing.py | JalajaTR/cQube | 6bf58ab25f0c36709630987ab730bbd5d9192c03 | [
"MIT"
] | null | null | null | tests/src/SI/MAP/School_Map_regression_testing.py | JalajaTR/cQube | 6bf58ab25f0c36709630987ab730bbd5d9192c03 | [
"MIT"
] | null | null | null | tests/src/SI/MAP/School_Map_regression_testing.py | JalajaTR/cQube | 6bf58ab25f0c36709630987ab730bbd5d9192c03 | [
"MIT"
] | null | null | null | import time
import unittest
from Data.parameters import Data
from SI.MAP.check_infrascore_with_download_functionality import SchoolInfra_scores
from SI.MAP.check_sc_map_clusterwise_records import test_school_map_schoollevel_records
from SI.MAP.click_on_anydistrict_and_download_csv import download_icon
from SI.MAP.click_on_block_cluster_school_and_check_schoolscount import Block_cluster_school_count
from SI.MAP.click_on_blocks import click_on_blocks
from SI.MAP.click_on_blocks_and_scores import block_btn_scores
from SI.MAP.click_on_clusters import cluster_button
from SI.MAP.click_on_clusters_and_scores import cluster_btn_scores
from SI.MAP.click_on_district_and_homeicon import district_home
from SI.MAP.click_on_hyperlink import click_on_hyperlink
from SI.MAP.click_on_infra_score import click_on_infrascores
from SI.MAP.click_on_schools import click_schoolbutton
from SI.MAP.click_on_schools_and_scores import schools_btn_scores
from reuse_func import GetData
| 38.541463 | 98 | 0.678142 |
772a4eead684d14c1321c64fcce204b67581646f | 4,217 | py | Python | src/manual/melt_oxcgrt2.py | lshtm-gis/WHO_PHSM_Cleaning | 5892673922fc555fb86d6e0be548b48c7dc66814 | [
"MIT"
] | null | null | null | src/manual/melt_oxcgrt2.py | lshtm-gis/WHO_PHSM_Cleaning | 5892673922fc555fb86d6e0be548b48c7dc66814 | [
"MIT"
] | 123 | 2020-10-12T11:06:27.000Z | 2021-04-28T15:32:29.000Z | src/manual/melt_oxcgrt2.py | lshtm-gis/WHO_PHSM_Cleaning | 5892673922fc555fb86d6e0be548b48c7dc66814 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:24:46 2020
@author: hamishgibbs
"""
import pandas as pd
import re
import numpy as np
#%%
# Load the latest wide-format OxCGRT data (with notes columns).
ox = pd.read_csv('https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv')
#%%
# Keep only the first 100 rows — presumably a development-time shortcut.
ox = ox[0:100]
#%%
# Replace NaN with 0.0 everywhere (including notes columns); the
# all-zero skip in get_measure_records relies on this.
ox.fillna(0.0, inplace = True)
def oxcgrt_records(ox, drop_columns=None):
    '''
    Convert the wide-format OxCGRT dataframe into a flat list of
    per-measure record dicts.

    Args:
        ox: OxCGRT dataframe (one row per country/date, one group of
            value/Flag/Notes columns per measure).
        drop_columns: column names to exclude from the identifier columns.
            Defaults to dropping nothing (previously a mutable default
            ``[]``, which is an anti-pattern even when never mutated).

    Returns:
        A flat list of dicts, one per (row, measure group) with data.
    '''
    if drop_columns is None:
        drop_columns = []
    full_value_names, value_names, stub_names = get_names(ox)
    # Identifier columns: everything that is neither a measure column nor
    # explicitly dropped.  Iterate ox.columns (not a set, as before) so
    # the selection order is deterministic; membership-only use downstream
    # means the output records are unchanged.
    measure_columns = set(full_value_names)
    id_columns = [
        col for col in ox.columns
        if col not in measure_columns and col not in drop_columns
    ]
    records = ox.to_dict(orient="records")
    # NOTE(review): get_measure_records reads a module-level
    # ``full_value_names`` that this file only defines after this function
    # is first called — ensure it is in scope, or pass it explicitly.
    nested = (get_measure_records(record, stub_names, id_columns)
              for record in records)
    # Flatten; empty per-row lists simply contribute nothing.
    return [item for group in nested for item in group]
def get_names(ox):
    '''
    Identify the measure-related columns of an OxCGRT dataframe.

    Measure columns are those whose name contains a prefix of the form
    "A1_" (capital letter, digits, underscore).

    Returns:
        full_value_names: every measure-related column (values, flags, notes)
        value_names: measure value columns only (no Flag/Notes columns)
        stub_names: the measure prefixes, e.g. "A1"
    '''
    stub_pattern = re.compile(r'[A-Z][0-9]+_')
    full_value_names = [col for col in ox.columns if stub_pattern.search(col)]
    value_names = [
        col for col in full_value_names
        if 'Flag' not in col and 'Notes' not in col
    ]
    stub_names = [col.split('_')[0] for col in value_names]
    return full_value_names, value_names, stub_names
def get_measure_records(combined_record, stub_names, id_columns, full_value_names=None):
    '''Break one wide OxCGRT row into one record per measure group.

    Groups with no data (all values sum to zero) are skipped.  Non-numeric
    entries (e.g. note strings) make that sum fail, in which case the
    group is kept.

    Args:
        combined_record: dict for a single row (column name -> value).
        stub_names: measure prefixes, e.g. ['C1', 'C2'].
        id_columns: identifier columns copied into every output record.
        full_value_names: names of all measure columns.  When omitted they
            are derived from the record's own keys.  (Previously this was
            read from a module-level global, which raised NameError when
            the function ran before that global was assigned.)

    Returns:
        list of dicts with the id columns plus 'measure', 'measure_name',
        'flag' and (when present) 'notes'.
    '''
    if full_value_names is None:
        stub_pattern = re.compile(r'[A-Z][0-9]+_')
        full_value_names = [key for key in combined_record
                            if stub_pattern.search(key)]

    records = []
    for stub in stub_names:
        # Prefix match (was a substring test, which would confuse e.g.
        # 'C1' with a hypothetical 'C10_...' column).
        stub_keys = [name for name in full_value_names
                     if name.startswith(stub + '_')]
        flag_keys = [name for name in stub_keys if '_Flag' in name]
        notes_keys = [name for name in stub_keys if '_Notes' in name]

        subset = {
            key: value for key, value in combined_record.items()
            if key in id_columns or key in stub_keys
        }

        # Skip groups whose values are all zero.  TypeError: non-numeric
        # notes -> keep the group.  KeyError: a caller-supplied column
        # missing from this record -> keep the group (matches the old
        # bare-except behaviour).
        try:
            if sum(subset[key] for key in stub_keys) == 0:
                continue
        except (TypeError, KeyError):
            pass

        # Normalise flag/notes names; flag defaults to 0.0 when the group
        # has no Flag column.  (The old code reused flag_key/notes_key
        # variables left over from previous loop iterations.)
        subset['flag'] = subset.pop(flag_keys[0]) if flag_keys else 0.0
        if notes_keys:
            subset['notes'] = subset.pop(notes_keys[0])

        # The remaining non-id, non-bookkeeping key is the measure value.
        reserved = set(id_columns) | {'measure_name', 'flag', 'notes'}
        measure_key = [key for key in subset if key not in reserved][0]
        subset['measure'] = subset.pop(measure_key)
        subset['measure_name'] = measure_key
        records.append(subset)
    return records
#%%
# Aggregate/index columns that are not individual measures.
drop_columns = ['ConfirmedCases',
                'ConfirmedDeaths', 'StringencyIndex', 'StringencyIndexForDisplay',
                'StringencyLegacyIndex', 'StringencyLegacyIndexForDisplay',
                'GovernmentResponseIndex', 'GovernmentResponseIndexForDisplay',
                'ContainmentHealthIndex', 'ContainmentHealthIndexForDisplay',
                'EconomicSupportIndex', 'EconomicSupportIndexForDisplay']
#%%
# NOTE(review): this call runs before ``full_value_names`` (used as a
# global inside get_measure_records) is assigned below — it would raise
# NameError as written; the step-by-step cells below are the working path.
ox_r = oxcgrt_records(ox, drop_columns)
#%%
# Notebook-style inspection of the record count (value is discarded).
len(ox_r)
#%%
# Step-by-step equivalent of oxcgrt_records, kept for interactive use.
keep_columns = list(set(ox.columns).difference(set(drop_columns)))
full_value_names, value_names, stub_names = get_names(ox)
id_columns = [x for x in list(set(ox.columns).difference(set(full_value_names))) if x not in drop_columns]
#%%
records = ox.to_dict(orient="records")
#%%
# Per-row record lists, with empty rows dropped, then flattened into a frame.
rs = [x for x in [get_measure_records(r, stub_names, id_columns) for r in records] if x != []]
rs = [item for sublist in rs for item in sublist]
rs = pd.DataFrame(rs)
#%%
| 27.562092 | 121 | 0.609912 |
772a5de76c01fda9fdad90cbd5de3085dda181b3 | 2,082 | py | Python | src/wildfires/cache/same_call.py | akuhnregnier/wildfires | 4d31cbdd4a1303ecebc391a35c73b8f07d8fe400 | [
"MIT"
] | 1 | 2021-01-30T15:38:32.000Z | 2021-01-30T15:38:32.000Z | src/wildfires/cache/same_call.py | akuhnregnier/wildfires | 4d31cbdd4a1303ecebc391a35c73b8f07d8fe400 | [
"MIT"
] | null | null | null | src/wildfires/cache/same_call.py | akuhnregnier/wildfires | 4d31cbdd4a1303ecebc391a35c73b8f07d8fe400 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Decorator guaranteeing uniform function calls."""
from inspect import Parameter, signature
def extract_uniform_args_kwargs(f, *args, ignore=None, **kwargs):
    """Normalise a call to *f* into a canonical (args, kwargs) pair.

    Binds the given arguments against *f*'s signature (filling in
    defaults), then emits every positional-style value — including the
    contents of ``*args``-style parameters — as a flat list, and every
    keyword-style value — including the contents of ``**kwargs``-style
    parameters — as a single dict.

    Args:
        f (callable): Function whose signature defines the binding.
        *args, **kwargs: The arguments of the call being normalised.
        ignore (None or iterable of str): Parameter names to drop from
            the result entirely.

    Returns:
        args, kwargs: Standardised representation of the given arguments.
    """
    ignored = set() if ignore is None else ignore
    sig = signature(f)
    kinds = {param.name: param.kind for param in sig.parameters.values()}
    bound = sig.bind(*args, **kwargs)
    bound.apply_defaults()

    positional_kinds = (
        Parameter.POSITIONAL_ONLY,
        Parameter.POSITIONAL_OR_KEYWORD,
        Parameter.VAR_POSITIONAL,
    )

    uniform_args = []
    uniform_kwargs = {}
    # bound.arguments preserves declaration order, so positional-style
    # parameters are seen (and appended) before keyword-style ones.
    for name, value in bound.arguments.items():
        if name in ignored:
            continue
        kind = kinds[name]
        if kind == Parameter.VAR_POSITIONAL:
            uniform_args.extend(value)
        elif kind in positional_kinds:
            uniform_args.append(value)
        elif kind == Parameter.VAR_KEYWORD:
            uniform_kwargs.update(value)
        else:  # KEYWORD_ONLY
            uniform_kwargs[name] = value
    return uniform_args, uniform_kwargs
| 29.742857 | 87 | 0.64121 |
772a5f878a0f88d452d599cf44b77a39b7955775 | 2,862 | py | Python | models/mnist_model.py | dcurry09/Tensorflow-Project-OOP | 7b142046cf6d736790029092dc83c0ce0009586b | [
"Apache-2.0"
] | null | null | null | models/mnist_model.py | dcurry09/Tensorflow-Project-OOP | 7b142046cf6d736790029092dc83c0ce0009586b | [
"Apache-2.0"
] | null | null | null | models/mnist_model.py | dcurry09/Tensorflow-Project-OOP | 7b142046cf6d736790029092dc83c0ce0009586b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Implements a TF Model class by inheriting the Model base class.
@author: David Curry
@version: 1.0
"""
from base.base_model import BaseModel
import tensorflow as tf
| 33.27907 | 144 | 0.621593 |
772b333423680d442d0295b333722d5a3ecb17ce | 2,388 | py | Python | tilemap.py | AI0702/Among-Us-clone | e75a1410c8bc9e82b41f2ab51deec373c8486e29 | [
"Unlicense"
] | null | null | null | tilemap.py | AI0702/Among-Us-clone | e75a1410c8bc9e82b41f2ab51deec373c8486e29 | [
"Unlicense"
] | null | null | null | tilemap.py | AI0702/Among-Us-clone | e75a1410c8bc9e82b41f2ab51deec373c8486e29 | [
"Unlicense"
] | null | null | null | import pygame as pg
from settings import *
import pytmx
| 32.712329 | 74 | 0.562395 |
772bff3df8d91dc18f1f77932eab53991f3d258d | 768 | py | Python | exdir/utils/path.py | knc-neural-calculus/exdir | 5448d41d60c0583892ab7bcf10342d8fb2f2a26b | [
"MIT"
] | 67 | 2017-10-25T11:08:59.000Z | 2022-02-25T18:04:36.000Z | exdir/utils/path.py | knc-neural-calculus/exdir | 5448d41d60c0583892ab7bcf10342d8fb2f2a26b | [
"MIT"
] | 107 | 2017-02-03T16:50:53.000Z | 2022-03-18T04:18:14.000Z | exdir/utils/path.py | knc-neural-calculus/exdir | 5448d41d60c0583892ab7bcf10342d8fb2f2a26b | [
"MIT"
] | 11 | 2018-09-11T11:05:44.000Z | 2022-02-13T10:37:09.000Z | try:
import pathlib
except ImportError as e:
try:
import pathlib2 as pathlib
except ImportError:
raise e
| 24.774194 | 88 | 0.628906 |
772c7aa25a9dad643c71fd03ef2e8fca224182d9 | 15,292 | py | Python | bids2nda/main.py | Shotgunosine/BIDS2NDA | 11d6d39ec1aafbe1e24cf8c3840c71e90aa43ee2 | [
"Apache-2.0"
] | null | null | null | bids2nda/main.py | Shotgunosine/BIDS2NDA | 11d6d39ec1aafbe1e24cf8c3840c71e90aa43ee2 | [
"Apache-2.0"
] | null | null | null | bids2nda/main.py | Shotgunosine/BIDS2NDA | 11d6d39ec1aafbe1e24cf8c3840c71e90aa43ee2 | [
"Apache-2.0"
] | 1 | 2018-08-22T15:51:33.000Z | 2018-08-22T15:51:33.000Z | #!/usr/bin/env python
#
# import modules used here -- sys is a very standard one
from __future__ import print_function
import argparse
import csv
import logging
import zipfile
from collections import OrderedDict
from glob import glob
import os
import sys
import nibabel as nb
import json
import pandas as pd
import numpy as np
# Gather our code in a main() function
from shutil import copy
if __name__ == '__main__':
main()
| 44.069164 | 145 | 0.587889 |
772d6d4f45275295dcb92a649c3abaa349cebcf6 | 431 | py | Python | src/features/threshold.py | HninPwint/nba-career-prediction | ffce32507cad2c4dd020c62cee7f33cf97c886f7 | [
"MIT"
] | 1 | 2021-02-01T10:38:16.000Z | 2021-02-01T10:38:16.000Z | src/features/threshold.py | HninPwint/nba-career-prediction | ffce32507cad2c4dd020c62cee7f33cf97c886f7 | [
"MIT"
] | 3 | 2021-02-02T11:06:16.000Z | 2021-02-06T11:44:19.000Z | src/features/threshold.py | HninPwint/nba-career-prediction | ffce32507cad2c4dd020c62cee7f33cf97c886f7 | [
"MIT"
] | 4 | 2021-01-31T10:57:23.000Z | 2021-02-02T06:16:35.000Z | end
| 26.9375 | 71 | 0.556845 |
7730282673237879a35fb5efc177b9a2f6881b87 | 514 | py | Python | cheers/settings/prod.py | bahattincinic/cheers | 4443b23ad752c233743d71d1e035b757583a05f3 | [
"MIT"
] | 3 | 2019-03-12T03:38:13.000Z | 2021-03-15T16:48:49.000Z | cheers/settings/prod.py | bahattincinic/cheers | 4443b23ad752c233743d71d1e035b757583a05f3 | [
"MIT"
] | null | null | null | cheers/settings/prod.py | bahattincinic/cheers | 4443b23ad752c233743d71d1e035b757583a05f3 | [
"MIT"
] | 2 | 2022-01-05T11:43:42.000Z | 2022-03-16T00:05:19.000Z | from .base import *
import os
import dj_database_url
ALLOWED_HOSTS = ['*']
DEBUG = False
MIDDLEWARE += [
'whitenoise.middleware.WhiteNoiseMiddleware'
]
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
] + INSTALLED_APPS
DATABASES = {
'default': dj_database_url.config()
}
EMAIL_USE_TLS = True
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_PORT = os.environ.get('EMAIL_PORT')
| 17.133333 | 59 | 0.743191 |
77319ed1468248ddab354a491c37c6712455692a | 1,175 | py | Python | week2/problem1.py | jgathogo/python_level_1 | 129411fe42aa5ef0e32d9d3d9cf2ad90e182e455 | [
"Apache-2.0"
] | 1 | 2021-06-13T09:06:24.000Z | 2021-06-13T09:06:24.000Z | week2/problem1.py | jgathogo/python_level_1 | 129411fe42aa5ef0e32d9d3d9cf2ad90e182e455 | [
"Apache-2.0"
] | null | null | null | week2/problem1.py | jgathogo/python_level_1 | 129411fe42aa5ef0e32d9d3d9cf2ad90e182e455 | [
"Apache-2.0"
] | null | null | null | import os
import sys
"""
Notes:
- It's great that you've used functions even though we haven't reached that part of the course.
Also, the naming of the function is clear and a good variable name.
- Typically, the docstring for the function starts immediately after the triple quote otherwise we
introduce a newline (\n) in the documentation, which doesn't look good. I've corrected it below.
- The 'return' variable in the docstring is not correct since your program actually returns None (you can test this)
- Trivial point: to preserve the order of modules, name them problem0.py,...,problem9.py; this way they will always appear in order
- Feel free to include additional testing modules if you need to though you don't have to commit them to the repo.
"""
def print_name_age():
    """Prompt for the user's name and age, then print a summary line."""
    user_name = input("Please enter your name: ")
    user_age = input("Please enter your age in years: ")
    summary = f"Your name is {user_name} and you are {user_age} years old"
    print(summary)
if __name__ == "__main__":
    # Fixed: the original called undefined ``main()`` (NameError at
    # runtime); the script's entry point is ``print_name_age``.  It
    # returns None, so sys.exit(None) exits with status 0.
    sys.exit(print_name_age())
| 37.903226 | 131 | 0.72 |
7731f6b63900ac030b3e3491a417310c77c7bf81 | 2,313 | py | Python | Graphs/dijkstra_algorithm.py | hemraj4545/Data-Structures-and-Algorithms-in-Python | 633062369ceb3c9c1627f7e826243be7a84d4a7e | [
"MIT"
] | 3 | 2019-10-05T07:11:06.000Z | 2021-08-04T12:15:39.000Z | Graphs/dijkstra_algorithm.py | Satyagovind/Data-Structures-and-Algorithms-in-Python | e13becf63097e86dc073bc2de3b8d5586623743d | [
"MIT"
] | 5 | 2019-10-03T08:51:34.000Z | 2020-11-19T11:49:13.000Z | Graphs/dijkstra_algorithm.py | Satyagovind/Data-Structures-and-Algorithms-in-Python | e13becf63097e86dc073bc2de3b8d5586623743d | [
"MIT"
] | 6 | 2019-09-25T17:59:34.000Z | 2021-07-17T05:58:14.000Z | """
>>> G = Graph(6)
>>> G.insert(0, 1, 3)
>>> G.insert(0, 2, 7)
>>> G.insert(0, 4, 8)
>>> G.insert(0, 5, 1)
>>> G.insert(1, 2, 2)
>>> G.insert(1, 4, 13)
>>> G.insert(2, 3, 15)
>>> G.insert(3, 5, 17)
>>> G.insert(4, 5, 9)
>>> G.dijkstra(0)[0]
[0, 3, 5, 20, 8, 1]
>>> G.shortest_distance(1, 5)
[1, 4, 5]
"""
"""
Lazy implementation of Dijkstra's algorithm.
In this implementation we lazily check every (node, distance) pair,
even if a better distance for a given node already exists (i.e. duplicates exist).
A priority queue keeps the pairs sorted in ascending order of distance.
"""
from sys import maxsize
import heapq
from collections import defaultdict as dd
| 30.84 | 108 | 0.531345 |
7732a52cf70bb1c65299ac307a32800ed068e230 | 854 | py | Python | src/7/accessing_variables_defined_inside_a_closure/example2.py | tuanavu/python-gitbook | 948a05e065b0f40afbfd22f697dff16238163cde | [
"MIT"
] | 14 | 2017-05-20T04:06:46.000Z | 2022-01-23T06:48:45.000Z | src/7/accessing_variables_defined_inside_a_closure/example2.py | tuanavu/python-gitbook | 948a05e065b0f40afbfd22f697dff16238163cde | [
"MIT"
] | 1 | 2021-06-10T20:17:55.000Z | 2021-06-10T20:17:55.000Z | src/7/accessing_variables_defined_inside_a_closure/example2.py | tuanavu/python-gitbook | 948a05e065b0f40afbfd22f697dff16238163cde | [
"MIT"
] | 15 | 2017-03-29T17:57:33.000Z | 2021-08-24T02:20:08.000Z | # Example of faking classes with a closure
import sys
# Example use
def Stack():
    # NOTE(review): in the original "faking classes with a closure"
    # recipe this factory also defines nested push/pop/__len__ functions
    # (closing over ``items``) before building the instance; they appear
    # to have been elided from this copy.  ClosureInstance is defined
    # elsewhere in this file.
    items = []
    return ClosureInstance()
if __name__ == '__main__':
    # Smoke test: the closure-based "instance" supports repr, push/pop
    # and len like a class-based stack; pops come back in LIFO order.
    s = Stack()
    print(s)
    s.push(10)
    s.push(20)
    s.push('Hello')
    print(len(s))
    print(s.pop())
    print(s.pop())
    print(s.pop())
| 20.829268 | 73 | 0.580796 |
7732d1b5ac77c6e2332d3fe38f546a806fa00262 | 434 | py | Python | miniamf/adapters/_array.py | zackw/pyamf | 59ca667e37a20d8464b098f4ebec89de6f319413 | [
"MIT"
] | 14 | 2017-05-04T17:22:30.000Z | 2020-01-23T06:30:19.000Z | miniamf/adapters/_array.py | zackw/pyamf | 59ca667e37a20d8464b098f4ebec89de6f319413 | [
"MIT"
] | 1 | 2020-05-16T06:28:02.000Z | 2020-05-16T06:28:02.000Z | miniamf/adapters/_array.py | zackw/pyamf | 59ca667e37a20d8464b098f4ebec89de6f319413 | [
"MIT"
] | 6 | 2017-09-13T19:30:35.000Z | 2021-07-26T14:41:57.000Z | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
U{array<http://docs.python.org/library/array.html>} adapter module.
Will convert all array.array instances to a python list before encoding. All
type information is lost (but degrades nicely).
@since: 0.5
"""
from __future__ import absolute_import
import array
import miniamf
from miniamf.adapters import util
# Register the adapter: any array.array instance seen by an encoder is
# first converted with util.to_list, so it serializes as a plain list
# (the typecode information is lost, as the module docstring notes).
miniamf.add_type(array.ArrayType, util.to_list)
| 21.7 | 76 | 0.774194 |
77331bed5a7248d07a4fb3851abb1699ae7ce662 | 929 | py | Python | KristaBackup/common/schemes/__init__.py | javister/krista-backup | f8852c20afdf483e842ff22497bdd80eedc30c78 | [
"Apache-2.0"
] | 7 | 2020-07-28T06:53:02.000Z | 2022-03-18T05:23:03.000Z | KristaBackup/common/schemes/__init__.py | javister/krista-backup | f8852c20afdf483e842ff22497bdd80eedc30c78 | [
"Apache-2.0"
] | 1 | 2020-11-25T16:13:26.000Z | 2020-11-25T16:13:26.000Z | KristaBackup/common/schemes/__init__.py | javister/krista-backup | f8852c20afdf483e842ff22497bdd80eedc30c78 | [
"Apache-2.0"
] | 1 | 2020-07-28T13:47:09.000Z | 2020-07-28T13:47:09.000Z | from .scheme_factory import SchemeFactory
from .schemes import schemes
_default_scheme_id = 'default'
def get_scheme(scheme_id=None):
    """Look up a registered scheme class and return a fresh instance.

    Args:
        scheme_id: registry key of the scheme; falls back to the default
            scheme id when omitted or falsy.

    Returns:
        A new instance of the matching scheme, or None when ``scheme_id``
        is not registered.
    """
    # Reading a module-level name needs no ``global`` declaration; the
    # former ``global _default_scheme_id`` statement was a no-op.
    if not scheme_id:
        scheme_id = _default_scheme_id
    scheme_cls = schemes.get(scheme_id)
    return scheme_cls() if scheme_cls else None
def get_scheme_by_config(scheme_config):
    """Build a scheme from a configuration mapping via SchemeFactory.

    Returns:
        The scheme produced by ``SchemeFactory.from_dict``.

    Raises:
        Whatever ``SchemeFactory.from_dict`` raises for a malformed
        configuration (the original, garbled docstring mentioned a
        missing ``scheme_id`` — confirm against SchemeFactory).
    """
    return SchemeFactory.from_dict(scheme_config)
| 19.765957 | 62 | 0.70183 |
77342baf47053521f8e1f5ab72083d2e5edeca75 | 4,425 | py | Python | data/utils.py | dojoteef/synst | a1842682cf757e8a501cd9cee16f20e1a14158f1 | [
"BSD-3-Clause"
] | 81 | 2019-06-03T18:04:22.000Z | 2022-02-04T14:20:49.000Z | data/utils.py | dojoteef/synst | a1842682cf757e8a501cd9cee16f20e1a14158f1 | [
"BSD-3-Clause"
] | 7 | 2019-08-02T06:41:20.000Z | 2020-07-31T18:31:48.000Z | data/utils.py | dojoteef/synst | a1842682cf757e8a501cd9cee16f20e1a14158f1 | [
"BSD-3-Clause"
] | 5 | 2019-06-14T04:00:25.000Z | 2020-09-14T02:50:09.000Z | '''
Utilities useful for datasets
'''
import os
from functools import partial
from urllib.request import urlretrieve
import requests
from tqdm import tqdm
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import BatchSampler, RandomSampler, SequentialSampler
from data.sampler import SequenceLengthSampler
# See https://github.com/tqdm/tqdm#hooks-and-callbacks
def maybe_download(filepath, url):
    ''' Fetch ``url`` into ``filepath`` unless that file already exists.

    Creates the parent directory when missing.  Google Drive links get
    the confirmation-token dance; everything else goes through a plain
    URL download.  Returns the file path either way.
    '''
    parent_dir = os.path.dirname(filepath)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)

    if os.path.exists(filepath):
        return filepath

    downloader = (
        download_from_google_drive
        if 'drive.google.com' in url
        else download_url
    )
    return downloader(filepath, url)
def download_url(filepath, url):
    ''' Fetch ``url`` into ``filepath`` with a progress bar; return the path. '''
    display_name = os.path.basename(filepath)
    with DownloadProgressBar(display_name) as progress_bar:
        urlretrieve(url, filepath, reporthook=progress_bar.update_to)
    return filepath
def download_from_google_drive(filepath, url):
    '''
    Downloads a file from Google Drive.

    Apparently Google Drive may issue a warning about scanning for viruses and require confirmation
    to continue the download.

    Returns the local file path.
    '''
    confirmation_token = None
    session = requests.Session()
    response = session.get(url, stream=True)
    # Drive signals the virus-scan interstitial via a download_warning
    # cookie whose value is the confirmation token.
    for key, value in response.cookies.items():
        if key.startswith("download_warning"):
            confirmation_token = value

    # Retry with the token appended (assumes the url already has a query
    # string, hence '&' — TODO confirm for token-bearing links).
    if confirmation_token:
        url = url + "&confirm=" + confirmation_token
        response = session.get(url, stream=True)

    # content-length may be absent on chunked responses; 0 then.
    total_size = int(response.headers.get('content-length', 0))
    block_size = 16 * 1024
    filename = os.path.basename(filepath)
    with open(filepath, "wb") as file:
        with DownloadProgressBar(filename) as progress:
            # Lazy pipeline: each element is the byte count returned by
            # file.write; the enumerate loop below drives it one chunk at
            # a time while updating the progress bar.  (iter() around a
            # generator is a no-op kept from the original.)
            blocks = iter(
                file.write(block)
                for block in response.iter_content(block_size)
                if block
            )
            for i, block in enumerate(blocks):
                progress.update_to(i, block_size, total_size)

    return filepath
def get_dataloader(config, worker_init_fn=None, pin_memory=True, num_devices=1, shuffle=False):
    ''' Construct a DataLoader over the configured dataset split.

    Batching is controlled by ``config.batch_method``: ``'token'`` buckets
    examples by sequence length, ``'example'`` uses fixed-size batches.
    '''
    dataset = config.dataset(config, split=config.split).load()
    method = config.batch_method
    if method == 'token':
        # Device 0 also performs the optimization step (gathering gradients
        # from every device), so its batch size may be reduced by the buffer.
        per_device_sizes = [config.batch_size - config.batch_size_buffer]
        per_device_sizes.extend([config.batch_size] * (num_devices - 1))
        lengths = [(len(example['input']), len(example['target'])) for example in dataset.data]
        batch_sampler = SequenceLengthSampler(
            per_device_sizes,
            lengths,
            shuffle=shuffle,
            granularity=config.token_bucket_granularity
        )
    elif method == 'example':
        base_sampler = RandomSampler(dataset) if shuffle else SequentialSampler(dataset)
        # Final False: do not drop the last (possibly short) batch.
        batch_sampler = BatchSampler(base_sampler, config.batch_size, False)
    else:
        raise ValueError('Unknown batch method!')
    return DataLoader(
        dataset,
        batch_sampler=batch_sampler,
        collate_fn=partial(dataset.collate, sort=True),
        num_workers=num_devices,
        pin_memory=pin_memory,
        worker_init_fn=worker_init_fn
    )
| 33.522727 | 99 | 0.661695 |
7734720921a60ab16b14a023eaab75451a582742 | 3,092 | py | Python | check_changelog.py | pllim/action-check_astropy_changelog | 915511a895712098ca250cb3416e2c08ffb1a0fa | [
"BSD-3-Clause"
] | null | null | null | check_changelog.py | pllim/action-check_astropy_changelog | 915511a895712098ca250cb3416e2c08ffb1a0fa | [
"BSD-3-Clause"
] | null | null | null | check_changelog.py | pllim/action-check_astropy_changelog | 915511a895712098ca250cb3416e2c08ffb1a0fa | [
"BSD-3-Clause"
] | null | null | null | import json
# GitHub Action script: verify that a pull request's changelog entry is
# consistent with its labels and milestone.  Exits 0 on success/no-op,
# 1 on any inconsistency.
# NOTE(review): relies on `json` being imported earlier in the file — confirm.
import os
import sys
from astropy_changelog import loads
from github import Github
# Only pull-request events are meaningful for a changelog check.
event_name = os.environ['GITHUB_EVENT_NAME']
if event_name not in ('pull_request_target', 'pull_request'):
    print(f'No-op for {event_name}')
    sys.exit(0)
# The full event payload is provided by the Actions runner as a JSON file.
event_jsonfile = os.environ['GITHUB_EVENT_PATH']
with open(event_jsonfile, encoding='utf-8') as fin:
    event = json.load(fin)
pr_labels = [e['name'] for e in event['pull_request']['labels']]
if 'skip-changelog-checks' in pr_labels:
    print('Changelog checks manually disabled for this pull request.')
    sys.exit(0)  # Green but no-op
# Read the changelog from the contributor's fork at the PR branch, since the
# PR may add or modify the changelog itself.
forkrepo = event['pull_request']['head']['repo']['full_name']
pr_branch = os.environ['GITHUB_HEAD_REF']
g = Github(os.environ.get('GITHUB_TOKEN'))
clog_file = os.environ.get('CHANGELOG_FILENAME', 'CHANGES.rst')
repo = g.get_repo(forkrepo)
try:
    contents = repo.get_contents(clog_file, ref=pr_branch)
except Exception:
    print('This repository does not appear to have a change log! '
          f'(Expecting a file named {clog_file})')
    sys.exit(1)
# Parse changelog
changelog = loads(contents.decoded_content.decode('utf-8'))
# Find versions for the pull request we are looking at
pr_num = event['number']
versions = changelog.versions_for_issue(pr_num)
# A PR must be mentioned in at most one version section.
if len(versions) > 1:
    print('Change log entry present in multiple version sections '
          f'({", ".join(versions)}).')
    sys.exit(1)
if len(versions) == 1:
    # An entry exists: it must not contradict the labels, and its version
    # section must match the PR milestone.
    version = versions[0]
    if 'no-changelog-entry-needed' in pr_labels:
        print(f'Changelog entry present in {version} but '
              '**no-changelog-entry-needed** label set.')
        sys.exit(1)
    if 'Affects-dev' in pr_labels:
        print(f'Changelog entry present in {version} but '
              '**Affects-dev** label set.')
        sys.exit(1)
    # The milestone lives on the PR in the *base* repository, not the fork.
    base_repo = event['pull_request']['base']['repo']['full_name']
    repo = g.get_repo(base_repo)
    pr = repo.get_pull(pr_num)
    if not pr.milestone:
        print(f'Cannot check for consistency of change log in {version} since '
              'milestone is not set.')
        sys.exit(1)
    milestone = pr.milestone.title
    # Normalize optional leading 'v' on both sides before comparing.
    if milestone.startswith('v'):
        milestone = milestone[1:]
    if version.startswith('v'):
        version = version[1:]
    if milestone != version:
        print(f'Changelog entry section ({version}) '
              f'inconsistent with milestone ({milestone}).')
        sys.exit(1)
    print(f'Changelog entry consistent with milestone ({milestone}).')
else:  # No change log found
    # A missing entry is acceptable only when a label explicitly allows it.
    if 'Affects-dev' in pr_labels:
        print('Changelog entry not present, as expected since the '
              '**Affects-dev** label is present.')
    elif 'no-changelog-entry-needed' in pr_labels:
        print('Changelog entry not present, as expected since the '
              '**no-changelog-entry-needed** label is present')
    else:
        print('Changelog entry not present, (or PR number missing) and '
              'neither the **Affects-dev** nor the '
              '**no-changelog-entry-needed** label is set.')
        sys.exit(1)
7734b23f84997ddc3801f990923aea0601af3e94 | 4,037 | py | Python | examples/python/example-05-async.py | lukasm91/serialbox2 | 3a8dba366ef160df684c957e59c0a5f6b1b17244 | [
"BSD-2-Clause"
] | 1 | 2020-09-04T00:43:52.000Z | 2020-09-04T00:43:52.000Z | examples/python/example-05-async.py | mlange05/serialbox2 | fa72d8a39f62c7c0b76536680f7a9953957f59f2 | [
"BSD-2-Clause"
] | null | null | null | examples/python/example-05-async.py | mlange05/serialbox2 | fa72d8a39f62c7c0b76536680f7a9953957f59f2 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
##
## This example demonstrates the asynchronous API of Serialbox which can improve the throughput of
## read operations.
##
##===------------------------------------------------------------------------------------------===##
#
# First, we have to make sure Python finds the Serialbox module. Alternatively, you can also set the
# environment variable PYTHONPATH.
#
import os
import sys
import time
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../python')
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../src/serialbox-python')
#
# Import Serialbox
#
import serialbox as ser
import numpy as np
if __name__ == '__main__':
main()
| 36.7 | 102 | 0.626951 |
7734ec4ada3d6545396115d166790d365032b3f9 | 6,793 | py | Python | johnny_cache/cache.py | Sonictherocketman/cache-proxy | 75650fb143b365e922c03f87e388c5710ad21799 | [
"MIT"
] | 3 | 2019-07-23T02:33:04.000Z | 2021-05-25T16:57:24.000Z | johnny_cache/cache.py | Sonictherocketman/cache-proxy | 75650fb143b365e922c03f87e388c5710ad21799 | [
"MIT"
] | null | null | null | johnny_cache/cache.py | Sonictherocketman/cache-proxy | 75650fb143b365e922c03f87e388c5710ad21799 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from datetime import datetime, timedelta
import json
import os.path
from dateutil.parser import parse
import pytz
import redis
from redis.lock import LockError
import requests
from . import settings
from .logger import logger
UNCACHED_HEADERS = (
'Age',
'Cache-Control',
'Date',
'X-Cache',
)
# Global Cache
request_cache = get_cache()
# Cache Functions
| 26.02682 | 83 | 0.568968 |
77356d7dc5fcffe3a5f270ff80863770415d901d | 25,609 | py | Python | discretizer.py | WeiXuanChan/PIMRMeshfree | 1011dc86e7363a53a13353db8e61dca31cc07350 | [
"MIT"
] | null | null | null | discretizer.py | WeiXuanChan/PIMRMeshfree | 1011dc86e7363a53a13353db8e61dca31cc07350 | [
"MIT"
] | null | null | null | discretizer.py | WeiXuanChan/PIMRMeshfree | 1011dc86e7363a53a13353db8e61dca31cc07350 | [
"MIT"
] | 1 | 2017-05-17T09:16:24.000Z | 2017-05-17T09:16:24.000Z | '''
File: discretizer.py
Description: function definition
History:
Date Programmer SAR# - Description
---------- ---------- ----------------------------
Author: w. x. chan 29Apr2016 - Created
'''
import numpy as np
from . import pinm as pinm
from stl import mesh
from mpl_toolkits import mplot3d
from matplotlib import pyplot
from matplotlib import colors as Colors
from matplotlib.widgets import Button
import matplotlib.cm as cmx
| 43.626917 | 189 | 0.619821 |
7735b7ce4419d727877113722c02541feac1a135 | 881 | py | Python | app/utils/urls.py | withshubh/memegen | 9667e0c6737334ca8ceb4347792e3df39ae52b3a | [
"MIT"
] | null | null | null | app/utils/urls.py | withshubh/memegen | 9667e0c6737334ca8ceb4347792e3df39ae52b3a | [
"MIT"
] | 1 | 2017-01-12T23:17:27.000Z | 2017-01-12T23:17:27.000Z | app/utils/urls.py | withshubh/memegen | 9667e0c6737334ca8ceb4347792e3df39ae52b3a | [
"MIT"
] | 1 | 2016-10-31T23:19:15.000Z | 2016-10-31T23:19:15.000Z | from urllib.parse import parse_qs, urlencode, urlparse
from .. import settings
| 23.810811 | 62 | 0.61975 |
7736dad67e1bf0f9644b352cfa50dc3d03404717 | 211 | py | Python | src/westpa/core/reweight/__init__.py | burntyellow/adelman_ci | cca251a51b34843faed0275cce01d7a307829993 | [
"MIT"
] | 140 | 2015-01-07T23:30:36.000Z | 2022-03-28T17:15:30.000Z | lib/west_tools/westpa/reweight/__init__.py | burntyellow/westpa | 9dc62478fcef0001b9c038cd56a40b6be1b9d64a | [
"MIT"
] | 157 | 2015-01-03T03:38:36.000Z | 2022-03-31T14:12:16.000Z | lib/west_tools/westpa/reweight/__init__.py | burntyellow/westpa | 9dc62478fcef0001b9c038cd56a40b6be1b9d64a | [
"MIT"
] | 56 | 2015-01-02T21:21:40.000Z | 2022-03-03T16:27:54.000Z |
'''
Function(s) for the postanalysis toolkit
'''
import logging
log = logging.getLogger(__name__)
from . import _reweight
from ._reweight import (stats_process, reweight_for_c)
from .matrix import FluxMatrix
| 17.583333 | 54 | 0.781991 |