hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f725e0913b22178375a220d288839fa6706545f3 | 520 | py | Python | backend/utils/management/commands/generate_dummy_skills.py | NumanIbnMazid/numanibnmazid.com | 905e3afab285316d88bafa30dc080dfbb0611731 | [
"MIT"
] | 1 | 2022-01-28T18:20:19.000Z | 2022-01-28T18:20:19.000Z | backend/utils/management/commands/generate_dummy_skills.py | NumanIbnMazid/numanibnmazid.com | 905e3afab285316d88bafa30dc080dfbb0611731 | [
"MIT"
] | null | null | null | backend/utils/management/commands/generate_dummy_skills.py | NumanIbnMazid/numanibnmazid.com | 905e3afab285316d88bafa30dc080dfbb0611731 | [
"MIT"
] | null | null | null | from portfolios.factories.skill_factory import create_skills_with_factory
from django.db import transaction
from django.core.management.base import BaseCommand
| 26 | 73 | 0.698077 |
f726670921d44f21aa09f17d795a742ee0c1fa0c | 8,397 | py | Python | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
| 37.995475 | 400 | 0.609265 |
f728ea2eb644fbcc81d1cbb2f7e623c7f87f0380 | 834 | py | Python | src/bokeh_app/graph_view.py | avbatchelor/insight-articles-project | 852b338b786cb5b9c281fcec2e378aed8d3dc617 | [
"MIT"
] | null | null | null | src/bokeh_app/graph_view.py | avbatchelor/insight-articles-project | 852b338b786cb5b9c281fcec2e378aed8d3dc617 | [
"MIT"
] | null | null | null | src/bokeh_app/graph_view.py | avbatchelor/insight-articles-project | 852b338b786cb5b9c281fcec2e378aed8d3dc617 | [
"MIT"
] | null | null | null | import networkx as nx
import pickle
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh.models.graphs import from_networkx
processed_data_folder = 'C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\data\\processed\\'
filename = processed_data_folder + 'graph_and_labels'
with open (filename, 'rb') as fp:
graph_mat, topic_labels = pickle.load(fp)
G = nx.from_numpy_matrix(graph_mat)
pos=nx.spring_layout(G)
nx.relabel_nodes(G,topic_labels)
nx.draw(G,pos)
nx.draw_networkx_labels(G,pos,topic_labels,font_size=16)
plot = figure(title="Blog Curator Demo", x_range=(-2.1,2.1), y_range=(-2.1,2.1),
tools="", toolbar_location=None)
graph = from_networkx(G, nx.spring_layout, scale=2, center=(0,0))
plot.renderers.append(graph)
output_file("networkx_graph.html")
show(plot) | 29.785714 | 105 | 0.758993 |
f72b30581d8ef30df8d3b88fde755c65a6390087 | 15,737 | py | Python | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding=utf-8
from inspect import getblock
import json
import os
from os import read
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from util import read_file
from config import Config
#
conf = Config()
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def get_data(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, pos sample, 4 neg sample]], shape = [n, 6]
"""
data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': []}
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr, cur_len = [], []
query_pred = json.loads(query_pred)
# only 4 negative sample
for each in query_pred:
if each == title:
continue
cur_arr.append(convert_word2id(each, conf.vocab_map))
each_len = len(each) if len(each) < conf.max_seq_len else conf.max_seq_len
cur_len.append(each_len)
if len(cur_arr) >= 4:
data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
data_map['query_len'].append(len(prefix) if len(prefix) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
data_map['doc_pos_len'].append(len(title) if len(title) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_neg'].extend(cur_arr[:4])
data_map['doc_neg_len'].extend(cur_len[:4])
pass
return data_map
def get_data_siamese_rnn(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, pos sample, 4 neg sample]], shape = [n, 6]
"""
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, _, title, tag, label = spline
prefix_seq = convert_word2id(prefix, conf.vocab_map)
title_seq = convert_word2id(title, conf.vocab_map)
data_arr.append([prefix_seq, title_seq, int(label)])
return data_arr
def get_data_bow(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, prefix, label]], shape = [n, 3]
"""
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, _, title, tag, label = spline
prefix_ids = convert_seq2bow(prefix, conf.vocab_map)
title_ids = convert_seq2bow(title, conf.vocab_map)
data_arr.append([prefix_ids, title_ids, int(label)])
return data_arr
def trans_lcqmc(dataset):
"""
"""
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
t1_ids = convert_word2id(t1, conf.vocab_map)
t1_len = conf.max_seq_len if len(t1) > conf.max_seq_len else len(t1)
t2_ids = convert_word2id(t2, conf.vocab_map)
t2_len = conf.max_seq_len if len(t2) > conf.max_seq_len else len(t2)
# t2_len = len(t2)
out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label])
# out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label, t1, t2])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
return out_arr
def get_lcqmc():
"""
LCQMCword_id
"""
dataset = hub.dataset.LCQMC()
train_set = trans_lcqmc(dataset.train_examples)
dev_set = trans_lcqmc(dataset.dev_examples)
test_set = trans_lcqmc(dataset.test_examples)
return train_set, dev_set, test_set
# return test_set, test_set, test_set
def trans_lcqmc_bert(dataset:list, vocab:Vocabulary, is_merge=0):
"""
"""
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
if is_merge:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, label])
text_len.extend([len(t1) + len(t2)])
else:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2, label])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
return out_arr
def get_lcqmc_bert(vocab:Vocabulary, is_merge=0):
"""
LCQMCqueryword_id
"""
dataset = hub.dataset.LCQMC()
train_set = trans_lcqmc_bert(dataset.train_examples, vocab, is_merge)
dev_set = trans_lcqmc_bert(dataset.dev_examples, vocab, is_merge)
test_set = trans_lcqmc_bert(dataset.test_examples, vocab, is_merge)
return train_set, dev_set, test_set
# test_set = test_set[:100]
# return test_set, test_set, test_set
if __name__ == '__main__':
# prefix, query_prediction, title, tag, label
# query_prediction json
file_train = './data/oppo_round1_train_20180929.txt'
file_vali = './data/oppo_round1_vali_20180929.txt'
# data_train = get_data(file_train)
# data_train = get_data(file_vali)
# print(len(data_train['query']), len(data_train['doc_pos']), len(data_train['doc_neg']))
dataset = get_lcqmc()
print(dataset[1][:3])
for each in get_batch(dataset[1][:3], batch_size=2):
t1_ids, t1_len, t2_ids, t2_len, label = each
print(each)
pass
| 37.20331 | 129 | 0.599797 |
f72d2d7694c02f9baefa28ab714fa7d648759fe9 | 8,778 | py | Python | groupbunk.py | shine-jayakumar/groupbunk-fb | ddf3d66cd902343e419dd2cf0c86f42850315f08 | [
"MIT"
] | 1 | 2022-02-11T05:31:48.000Z | 2022-02-11T05:31:48.000Z | groupbunk.py | shine-jayakumar/groupbunk-fb | ddf3d66cd902343e419dd2cf0c86f42850315f08 | [
"MIT"
] | null | null | null | groupbunk.py | shine-jayakumar/groupbunk-fb | ddf3d66cd902343e419dd2cf0c86f42850315f08 | [
"MIT"
] | null | null | null | """
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
LICENSE: MIT
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import StaleElementReferenceException
from webdriver_manager.chrome import ChromeDriverManager
import argparse
import logging
import sys
from datetime import datetime
import time
from groupfuncs import *
import os
# suppress webdriver manager logs
os.environ['WDM_LOG_LEVEL'] = '0'
IGNORE_DIV = ['your feed', 'discover', 'your notifications']
FB_GROUP_URL = 'https://www.facebook.com/groups/feed/'
def display_intro():
'''
Displays intro of the script
'''
intro = """
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
"""
print(intro)
def time_taken(start_time, logger):
'''
Calculates the time difference from now and start time
'''
end_time = time.time()
logger.info(f"Total time taken: {round(end_time - start_time, 4)} seconds")
def cleanup_and_quit(driver):
'''
Quits driver and exits the script
'''
if driver:
driver.quit()
sys.exit()
start_time = time.time()
# ====================================================
# Argument parsing
# ====================================================
description = "Leave your Facebook groups quietly"
usage = "groupbunk.py username password [-h] [-eg FILE] [-et TIMEOUT] [-sw WAIT] [-gr RETRYCOUNT] [-dg FILE]"
examples="""
Examples:
groupbunk.py bob101@email.com bobspassword101
groupbunk.py bob101@email.com bobspassword101 -eg keepgroups.txt
groupbunk.py bob101@email.com bobspassword101 -et 60 --scrollwait 10 -gr 7
groupbunk.py bob101@email.com bobspassword101 --dumpgroups mygroup.txt --groupretry 5
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description,
usage=usage,
epilog=examples,
prog='groupbunk')
# required arguments
parser.add_argument('username', type=str, help='Facebook username')
parser.add_argument('password', type=str, help='Facebook password')
# optional arguments
parser.add_argument('-eg', '--exgroups', type=str, metavar='', help='file with group names to exclude (one group per line)')
parser.add_argument('-et', '--eltimeout', type=int, metavar='', help='max timeout for elements to be loaded', default=30)
parser.add_argument('-sw', '--scrollwait', type=int, metavar='', help='time to wait after each scroll', default=4)
parser.add_argument('-gr', '--groupretry', type=int, metavar='', help='retry count while recapturing group names', default=5)
parser.add_argument('-dg', '--dumpgroups', type=str, metavar='', help='do not leave groups; only dump group names to a file')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v.1.2')
args = parser.parse_args()
# ====================================================
# Setting up logger
# =====================================================
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s:%(name)s:%(lineno)d:%(levelname)s:%(message)s")
file_handler = logging.FileHandler(f'groupbunk_{datetime.now().strftime("%d_%m_%Y__%H_%M_%S")}.log', 'w', 'utf-8')
file_handler.setFormatter(formatter)
stdout_formatter = logging.Formatter("[*] => %(message)s")
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(stdout_formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
#=======================================================
try:
display_intro()
logger.info("script started")
# loading group names to be excluded
if args.exgroups:
logger.info("Loading group names to be excluded")
excluded_group_names = get_excluded_group_names(args.exgroups)
IGNORE_DIV.extend(excluded_group_names)
options = Options()
# supresses notifications
options.add_argument("--disable-notifications")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--log-level=3")
logger.info("Downloading latest chrome webdriver")
# UNCOMMENT TO SPECIFY DRIVER LOCATION
# driver = webdriver.Chrome("D:/chromedriver/98/chromedriver.exe", options=options)
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
if not driver:
raise Exception('Unable to download chrome webdriver for your version of Chrome browser')
logger.info("Successfully downloaded chrome webdriver")
wait = WebDriverWait(driver, args.eltimeout)
logger.info(f"Opening FB GROUPS URL: {FB_GROUP_URL}")
driver.get(FB_GROUP_URL)
logger.info("Sending username")
wait.until(EC.visibility_of_element_located((By.ID, 'email'))).send_keys(args.username)
logger.info("Sending password")
driver.find_element(By.ID, 'pass').send_keys(args.password)
logger.info("Clicking on Log In")
wait.until(EC.presence_of_element_located((By.ID, 'loginbutton'))).click()
# get all the links inside divs representing group names
group_links = get_group_link_elements(driver, wait)
if not group_links:
raise Exception("Unable to find links")
no_of_currently_loaded_links = 0
logger.info(f"Initial link count: {len(group_links)-3}")
logger.info("Scrolling down to capture all the links")
# scroll until no new group links are loaded
while len(group_links) > no_of_currently_loaded_links:
no_of_currently_loaded_links = len(group_links)
logger.info(f"Updated link count: {no_of_currently_loaded_links-3}")
scroll_into_view(driver, group_links[no_of_currently_loaded_links-1])
time.sleep(args.scrollwait)
# re-capturing
group_links = get_group_link_elements(driver, wait)
logger.info(f"Total number of links found: {len(group_links)-3}")
# only show the group names and exit
if args.dumpgroups:
logger.info('Only dumping group names to file. Not leaving groups')
logger.info(f"Dumping group names to: {args.dumpgroups}")
dump_groups(group_links, args.dumpgroups)
time_taken(start_time, logger)
cleanup_and_quit(driver)
# first 3 links are for Your feed, 'Discover, Your notifications
i = 0
save_state = 0
no_of_retries = 0
failed_groups = []
total_groups = len(group_links)
while i < total_groups:
try:
# need only the group name and not Last Active
group_name = group_links[i].text.split('\n')[0]
# if group name not in ignore list
if group_name.lower() not in IGNORE_DIV:
logger.info(f"Leaving group: {group_name}")
link = group_links[i].get_attribute('href')
logger.info(f"Opening group link: {link}")
switch_tab(driver, open_new_tab(driver))
driver.get(link)
if not leave_group(wait):
logger.info('Unable to leave the group. You might not be a member of this group.')
driver.close()
switch_tab(driver, driver.window_handles[0])
else:
if group_name.lower() not in ['your feed', 'discover', 'your notifications']:
logger.info(f"Skipping group : {group_name}")
i += 1
except StaleElementReferenceException:
logger.error('Captured group elements gone stale. Recapturing...')
if no_of_retries > args.groupretry:
logger.error('Reached max number of retry attempts')
break
save_state = i
group_links = get_group_link_elements(driver, wait)
no_of_retries += 1
except Exception as ex:
logger.error(f"Unable to leave group {group_name}. Error: {ex}")
failed_groups.append(group_name)
i += 1
total_no_of_groups = len(group_links)-3
total_no_failed_groups = len(failed_groups)
logger.info(f"Total groups: {total_no_of_groups}")
logger.info(f"No. of groups failed to leave: {total_no_failed_groups}")
logger.info(f"Success percentage: {((total_no_of_groups - total_no_failed_groups)/total_no_of_groups) * 100} %")
if failed_groups:
failed_group_names = ", ".join(failed_groups)
logger.info(f"Failed groups: \n{failed_group_names}")
except Exception as ex:
logger.error(f"Script ended with exception: {ex}")
finally:
time_taken(start_time, logger)
cleanup_and_quit(driver) | 35.97541 | 127 | 0.670084 |
f72d8677c20fa3e3a54169d4eb48cb7ca7458055 | 11,575 | py | Python | OneSpanAnalysis_Mdl.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | null | null | null | OneSpanAnalysis_Mdl.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | 44 | 2021-02-10T23:58:28.000Z | 2021-12-14T02:38:21.000Z | OneSpanAnalysis_Mdl.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | null | null | null | import numpy as np
import numpy.linalg as la
from MdlUtilities import Field, FieldList
import MdlUtilities as mdl
| 35.506135 | 149 | 0.723629 |
f72d949d658d47131c4a502292aadd093d90b245 | 212 | py | Python | test-examples/million_points.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | test-examples/million_points.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | test-examples/million_points.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | """Test converting an image to a pyramid.
"""
import numpy as np
import napari
points = np.random.randint(100, size=(50_000, 2))
with napari.gui_qt():
viewer = napari.view_points(points, face_color='red')
| 19.272727 | 57 | 0.712264 |
f72ddd7241194452b55a3968e1f8f4807cdc48eb | 1,166 | py | Python | pact/test/test_constants.py | dwang7/pact-python | da03551e812508652e062fc4ba6071f1119e5bf2 | [
"MIT"
] | null | null | null | pact/test/test_constants.py | dwang7/pact-python | da03551e812508652e062fc4ba6071f1119e5bf2 | [
"MIT"
] | null | null | null | pact/test/test_constants.py | dwang7/pact-python | da03551e812508652e062fc4ba6071f1119e5bf2 | [
"MIT"
] | null | null | null | from unittest import TestCase
from mock import patch
from .. import constants
| 30.684211 | 79 | 0.679245 |
f72eb585890bafe8941f0c78a9d950477be13230 | 2,555 | py | Python | backtrader/backtrader/indicators/__init__.py | harshabakku/live-back-testing-trader | 1fd69c7598dc15bea740f160eed886f396bcba2c | [
"MIT"
] | 1 | 2021-07-14T22:04:08.000Z | 2021-07-14T22:04:08.000Z | backtrader/backtrader/indicators/__init__.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | null | null | null | backtrader/backtrader/indicators/__init__.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | 3 | 2021-03-07T16:29:40.000Z | 2022-03-17T21:42:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from backtrader import Indicator
from backtrader.functions import *
# The modules below should/must define __all__ with the Indicator objects
# of prepend an "_" (underscore) to private classes/variables
from .basicops import *
# base for moving averages
from .mabase import *
# moving averages (so envelope and oscillators can be auto-generated)
from .sma import *
from .ema import *
from .smma import *
from .wma import *
from .dema import *
from .kama import *
from .zlema import *
from .hma import *
from .zlind import *
from .dma import *
# depends on moving averages
from .deviation import *
# depend on basicops, moving averages and deviations
from .atr import *
from .aroon import *
from .bollinger import *
from .cci import *
from .crossover import *
from .dpo import *
from .directionalmove import *
from .envelope import *
from .heikinashi import *
from .lrsi import *
from .macd import *
from .momentum import *
from .oscillator import *
from .percentchange import *
from .percentrank import *
from .pivotpoint import *
from .prettygoodoscillator import *
from .priceoscillator import *
from .psar import *
from .rsi import *
from .stochastic import *
from .trix import *
from .tsi import *
from .ultimateoscillator import *
from .williams import *
from .rmi import *
from .awesomeoscillator import *
from .accdecoscillator import *
from .dv2 import * # depends on percentrank
# Depends on Momentum
from .kst import *
from .ichimoku import *
from .hurst import *
from .ols import *
from .hadelta import *
| 28.076923 | 79 | 0.699413 |
f72f3f991d29cfcde8c404665347a2b2067bd01a | 3,145 | py | Python | tests/test_game_map.py | brittleshinpass/mossbread | 6a225e5d11fdf1957d1bfe74c5a76d105561e12e | [
"MIT"
] | 1 | 2020-05-30T19:45:58.000Z | 2020-05-30T19:45:58.000Z | tests/test_game_map.py | brittleshinpass/mossbread | 6a225e5d11fdf1957d1bfe74c5a76d105561e12e | [
"MIT"
] | null | null | null | tests/test_game_map.py | brittleshinpass/mossbread | 6a225e5d11fdf1957d1bfe74c5a76d105561e12e | [
"MIT"
] | null | null | null | import pytest
from array import array
from game_map import GameMap
from tests.conftest import get_relative_path
sample_map_data = tuple(
reversed(
(
array("I", (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)),
array("I", (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)),
array("I", (1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1)),
array("I", (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1)),
array("I", (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)),
array("I", (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)),
array("I", (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)),
)
)
)
| 46.25 | 88 | 0.475994 |
f7300289bf48754135726dad8a8c684a9ab7d495 | 14,855 | py | Python | queryable_properties/managers.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 36 | 2019-10-22T11:44:37.000Z | 2022-03-15T21:27:03.000Z | queryable_properties/managers.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 6 | 2020-10-03T15:13:26.000Z | 2021-09-25T14:05:50.000Z | queryable_properties/managers.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 3 | 2021-04-26T08:30:46.000Z | 2021-08-18T09:04:49.000Z | # encoding: utf-8
from __future__ import unicode_literals
import six
from django.db.models import Manager
from django.db.models.query import QuerySet
from .compat import (ANNOTATION_SELECT_CACHE_NAME, ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP, chain_query, chain_queryset,
ModelIterable, ValuesQuerySet)
from .exceptions import QueryablePropertyDoesNotExist, QueryablePropertyError
from .query import QueryablePropertiesQueryMixin
from .utils import get_queryable_property
from .utils.internal import InjectableMixin, QueryPath, QueryablePropertyReference
def _resolve_update_kwargs(self, **kwargs):
"""
Look for the names of queryable properties in the given keyword
arguments for an update query and correctly resolve them into their
actual keyword arguments.
:param kwargs: Keyword arguments of an update query.
:return: A dictionary containing the resolved arguments.
:rtype: dict
"""
original_names = set(kwargs)
for original_name in original_names:
try:
prop = get_queryable_property(self.model, original_name)
except QueryablePropertyDoesNotExist:
continue
if not prop.get_update_kwargs:
raise QueryablePropertyError('Queryable property "{}" does not implement queryset updating.'
.format(prop))
# Call the method recursively since queryable properties can build
# upon each other.
additional_kwargs = self._resolve_update_kwargs(
**prop.get_update_kwargs(self.model, kwargs.pop(original_name)))
# Make sure that there are no conflicting values after resolving
# the update keyword arguments of the queryable properties.
for additional_name, value in six.iteritems(additional_kwargs):
if additional_name in kwargs and kwargs[additional_name] != value:
raise QueryablePropertyError(
'Updating queryable property "{prop}" would change field "{field}", but a conflicting value '
'was set for this field by another queryable property or explicitly in the update arguments.'
.format(prop=prop, field=additional_name)
)
kwargs[additional_name] = value
return kwargs
def select_properties(self, *names):
"""
Add the annotations of the queryable properties with the specified
names to this query. The annotation values will be cached in the
properties of resulting model instances, regardless of the regular
caching behavior of the queried properties.
:param names: Names of queryable properties.
:return: A copy of this queryset with the added annotations.
:rtype: QuerySet
"""
queryset = chain_queryset(self)
for name in names:
property_ref = QueryablePropertyReference(get_queryable_property(self.model, name), self.model, QueryPath())
# A full GROUP BY is required if the query is not limited to
# certain fields. Since only certain types of queries had the
# _fields attribute in old Django versions, fall back to checking
# for existing selection, on which the GROUP BY would be based.
full_group_by = not getattr(self, '_fields', self.query.select)
with queryset.query._add_queryable_property_annotation(property_ref, full_group_by, select=True):
pass
return queryset
def iterator(self, *args, **kwargs):
# Recent Django versions use the associated iterable class for the
# iterator() implementation, where the QueryablePropertiesModelIterable
# will be already mixed in. In older Django versions, use a standalone
# QueryablePropertiesModelIterable instead to perform the queryable
# properties processing.
iterable = super(QueryablePropertiesQuerySetMixin, self).iterator(*args, **kwargs)
if '_iterable_class' not in self.__dict__: # pragma: no cover
return iter(QueryablePropertiesIterable(self, iterable=iterable))
return iterable
def update(self, **kwargs):
# Resolve any queryable properties into their actual update kwargs
# before calling the base update method.
kwargs = self._resolve_update_kwargs(**kwargs)
return super(QueryablePropertiesQuerySetMixin, self).update(**kwargs)
class QueryablePropertiesQuerySet(QueryablePropertiesQuerySetMixin, QuerySet):
"""
A special queryset class that allows to use queryable properties in its
filter conditions, annotations and update queries.
"""
pass
if hasattr(Manager, 'from_queryset'):
QueryablePropertiesManager = Manager.from_queryset(QueryablePropertiesQuerySet)
else: # pragma: no cover
| 51.401384 | 120 | 0.671289 |
f7309823f58463b82e823f3fd4ecc77467f835fd | 11,759 | py | Python | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 183 | 2015-01-11T13:01:01.000Z | 2022-02-08T04:45:33.000Z | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 13 | 2015-05-12T17:39:42.000Z | 2018-07-29T18:01:38.000Z | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 166 | 2015-01-28T18:05:55.000Z | 2022-02-08T04:45:34.000Z | from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
| 46.478261 | 133 | 0.541628 |
f730db018b5a100d3b9690cd2c3518425836dcfb | 2,353 | py | Python | setup.py | wdv4758h/rsglob | 342f950c240b5d84c629ecf4fec348401975d2ba | [
"BSD-2-Clause"
] | null | null | null | setup.py | wdv4758h/rsglob | 342f950c240b5d84c629ecf4fec348401975d2ba | [
"BSD-2-Clause"
] | null | null | null | setup.py | wdv4758h/rsglob | 342f950c240b5d84c629ecf4fec348401975d2ba | [
"BSD-2-Clause"
] | null | null | null | import os
import sys
from setuptools import find_packages, setup, Extension
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
try:
from setuptools_rust import RustExtension
except ImportError:
import subprocess
errno = subprocess.call(
[sys.executable, '-m', 'pip', 'install', 'setuptools-rust'])
if errno:
print("Please install setuptools-rust package")
raise SystemExit(errno)
else:
from setuptools_rust import RustExtension
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
version = __import__('rsglob').VERSION
setup_requires = ['setuptools-rust>=0.6.0']
install_requires = get_requirements('requirements.txt')
test_requires = get_requirements('requirements-test.txt')
rust_extensions = [RustExtension('rsglob._rsglob', 'Cargo.toml')]
setup(
name='rsglob',
version=version,
url='https://github.com/wdv4758h/rsglob',
author='Chiu-Hsiang Hsu',
author_email='wdv4758h@gmail.com',
description=('Python glob in Rust'),
long_description=open("README.rst").read(),
download_url="https://github.com/wdv4758h/rsglob/archive/v{}.zip".format(
version
),
license='BSD',
tests_require=test_requires,
install_requires=install_requires,
packages=find_packages(),
rust_extensions=rust_extensions,
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 30.960526 | 83 | 0.677008 |
f73176b9df2d9d3e6551836091e9a8f8bdc64a68 | 9,041 | py | Python | src/pyrobot/habitat/base.py | cihuang123/pyrobot | fe620097e31d11453b5ea7ac15e40f5f5721b29a | [
"MIT"
] | 2,150 | 2019-06-12T20:55:41.000Z | 2022-03-21T07:14:51.000Z | src/pyrobot/habitat/base.py | cihuang123/pyrobot | fe620097e31d11453b5ea7ac15e40f5f5721b29a | [
"MIT"
] | 124 | 2019-06-22T17:12:27.000Z | 2022-02-26T11:43:13.000Z | src/pyrobot/habitat/base.py | cihuang123/pyrobot | fe620097e31d11453b5ea7ac15e40f5f5721b29a | [
"MIT"
] | 329 | 2019-06-13T03:03:54.000Z | 2022-03-30T07:04:55.000Z | import numpy as np
import math
import pyrobot.utils.util as prutil
import rospy
import habitat_sim.agent as habAgent
import habitat_sim.utils as habUtils
from habitat_sim.agent.controls import ActuationSpec
import habitat_sim.errors
import quaternion
from tf.transformations import euler_from_quaternion, euler_from_matrix
| 36.603239 | 93 | 0.623825 |
f731ffc418c409ea5c8ec121e5505721921146e2 | 164 | py | Python | natwork/chats/admin.py | Potisin/Natwork | a42b89f18fdd8f8ac69e56cb7184696d6883a9f7 | [
"BSD-3-Clause"
] | null | null | null | natwork/chats/admin.py | Potisin/Natwork | a42b89f18fdd8f8ac69e56cb7184696d6883a9f7 | [
"BSD-3-Clause"
] | null | null | null | natwork/chats/admin.py | Potisin/Natwork | a42b89f18fdd8f8ac69e56cb7184696d6883a9f7 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from .models import Chat
admin.site.register(Chat, ChatAdmin)
| 12.615385 | 36 | 0.737805 |
f732fdf1128b31b7b49d386c93aa86199f8cc84f | 109 | py | Python | examples/etcc.py | t-pimpisa/pythainlp17 | cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f | [
"Apache-2.0"
] | null | null | null | examples/etcc.py | t-pimpisa/pythainlp17 | cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f | [
"Apache-2.0"
] | null | null | null | examples/etcc.py | t-pimpisa/pythainlp17 | cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from pythainlp.tokenize import etcc
print(etcc.etcc("")) # //
| 18.166667 | 46 | 0.642202 |
f733fdfd6251a8d71a648d7c85c3dac02509dfc4 | 75 | py | Python | cumulogenesis.py | stelligent/cumulogenesis | f5a3587aebd2592642c98cb4ad93d52a927dceeb | [
"MIT"
] | 1 | 2021-03-22T21:50:10.000Z | 2021-03-22T21:50:10.000Z | cumulogenesis.py | stelligent/cumulogenesis | f5a3587aebd2592642c98cb4ad93d52a927dceeb | [
"MIT"
] | 1 | 2021-03-25T22:23:04.000Z | 2021-03-25T22:23:04.000Z | cumulogenesis.py | stelligent/cumulogenesis | f5a3587aebd2592642c98cb4ad93d52a927dceeb | [
"MIT"
] | 1 | 2019-04-03T19:09:34.000Z | 2019-04-03T19:09:34.000Z | #!/usr/bin/env python
from cumulogenesis.interfaces import cli
cli.run()
| 12.5 | 40 | 0.76 |
f7356fdd90f419efa0300e27fdfd55d90e10cc07 | 2,897 | py | Python | nanpy/bmp180.py | AFTC-1/Arduino-rpi | c46079f937d7e07cc0a930cc7ae278036f50a47d | [
"MIT"
] | 178 | 2015-01-03T11:56:49.000Z | 2021-12-23T14:47:55.000Z | nanpy/bmp180.py | AFTC-1/Arduino-rpi | c46079f937d7e07cc0a930cc7ae278036f50a47d | [
"MIT"
] | 88 | 2015-01-23T09:06:43.000Z | 2021-12-26T19:58:51.000Z | nanpy/bmp180.py | AFTC-1/Arduino-rpi | c46079f937d7e07cc0a930cc7ae278036f50a47d | [
"MIT"
] | 77 | 2015-02-18T17:26:11.000Z | 2021-09-28T02:47:25.000Z | from __future__ import division
import logging
from nanpy.i2c import I2C_Master
from nanpy.memo import memoized
import time
log = logging.getLogger(__name__)
| 24.550847 | 67 | 0.491543 |
f735f8fc14c7fe9404c2a5d90d59491063b15f84 | 1,539 | py | Python | pygna/cli.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
] | 32 | 2019-07-11T22:58:14.000Z | 2022-03-04T19:34:55.000Z | pygna/cli.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
] | 3 | 2021-05-24T14:03:13.000Z | 2022-01-07T03:47:32.000Z | pygna/cli.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
] | 5 | 2019-07-24T09:38:07.000Z | 2021-12-30T09:20:20.000Z | import logging
import argh
import pygna.command as cmd
import pygna.painter as paint
import pygna.utils as utils
import pygna.block_model as bm
import pygna.degree_model as dm
"""
autodoc
"""
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
"""
MAIN
"""
main()
| 23.676923 | 43 | 0.654321 |
f7367b85ef33529c5c360e68d214cb8e6a80a38f | 4,752 | py | Python | dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py | njalloul90/Genomics_Oncology_Platform | 9bf6d0edca5df783f4e371fa1bc46b7b1576fe70 | [
"MIT"
] | 6 | 2021-07-26T14:21:25.000Z | 2021-07-26T14:32:01.000Z | dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py | njalloul90/Genomics_Oncology_Platform | 9bf6d0edca5df783f4e371fa1bc46b7b1576fe70 | [
"MIT"
] | 9 | 2021-03-18T23:10:27.000Z | 2022-03-11T23:43:55.000Z | dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py | njalloul90/Genomics_Oncology_Platform | 9bf6d0edca5df783f4e371fa1bc46b7b1576fe70 | [
"MIT"
] | 2 | 2019-03-11T05:06:49.000Z | 2019-03-22T21:48:49.000Z | """
PyColourChooser
Copyright (C) 2002 Michael Gilfix <mgilfix@eecs.tufts.edu>
This file is part of PyColourChooser.
This version of PyColourChooser is open source; you can redistribute it
and/or modify it under the licensed terms.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
# 12/14/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatibility update.
#
# 12/21/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxPyColorChooser -> PyColorChooser
# o wxPyColourChooser -> PyColourChooser
#
# Tags: phoenix-port
import wx
| 32.547945 | 71 | 0.643729 |
f738260086ccd3653bc2367e7b8083819a301d9b | 1,807 | py | Python | preprocessing/metadata.py | skincare-deep-learning/Skincare-backend | 80ed6b7a735291848be9248035231fbd55c93990 | [
"Apache-2.0"
] | 1 | 2019-11-27T20:56:27.000Z | 2019-11-27T20:56:27.000Z | preprocessing/metadata.py | skincare-deep-learning/Skincare-backend | 80ed6b7a735291848be9248035231fbd55c93990 | [
"Apache-2.0"
] | 10 | 2021-04-02T19:47:15.000Z | 2022-01-13T01:52:53.000Z | preprocessing/metadata.py | skincare-deep-learning/Skincare-backend | 80ed6b7a735291848be9248035231fbd55c93990 | [
"Apache-2.0"
] | null | null | null | import json
import csv
import pandas as pd
from isic_api import ISICApi
from pandas.io.json import json_normalize
# Initialize the API; no login is necessary for public data
api = ISICApi(username="SkinCare", password="unbdeeplearning")
outputFileName = 'imagedata'
imageList = api.getJson('image?limit=25000&offset=0&sort=name')
print('Fetching metadata for %s images' % len(imageList))
imageDetails = []
i = 0
for image in imageList:
print(' ', image['name'])
# Pull image details
imageDetail = api.getJson('image/%s' % image['_id'])
imageDetails.append(imageDetail)
"""
# Testing Parameters
print("****************************")
print(imageDetails[0]['meta']['clinical']['anatom_site_general'])
print("****************************")
data = json_normalize(imageDetails[0])
print(data.loc[0])
data = json_normalize(imageDetails[0])
print(data.loc[0])
print("========================================================")
print(data.loc[0]['dataset.name'])
"""
# Determine the union of all image metadata fields
metadataFields = set(
field
for imageDetail in imageDetails
for field in imageDetail['meta']['clinical'].keys()
)
metadataFields = ['isic_id'] + sorted(metadataFields)
# print(metadataFields)
outputFilePath = './metadata.csv'
# Write Metadata to a CSV
print('Writing metadata to CSV: %s' % 'metadata.csv')
with open(outputFilePath, 'w') as outputStream:
csvWriter = csv.DictWriter(outputStream, fieldnames=metadataFields)
csvWriter.writeheader() # Columns Names
for imageDetail in imageDetails:
rowDict = imageDetail['meta']['clinical'].copy()
rowDict['isic_id'] = imageDetail['name']
# rowDict['anatom_site_general'] = imageDetail['meta']['clinical']['anatom_site_general'] # Subjective
csvWriter.writerow(rowDict) | 30.627119 | 110 | 0.672939 |
f7387b7a0fda396aca3fe13d2312bd4427223bec | 1,317 | py | Python | nasbench/scripts/generate-all-graphs.py | bkj/nasbench | a238cf26d843aaffbe037569528ef96d3e37eb04 | [
"Apache-2.0"
] | null | null | null | nasbench/scripts/generate-all-graphs.py | bkj/nasbench | a238cf26d843aaffbe037569528ef96d3e37eb04 | [
"Apache-2.0"
] | null | null | null | nasbench/scripts/generate-all-graphs.py | bkj/nasbench | a238cf26d843aaffbe037569528ef96d3e37eb04 | [
"Apache-2.0"
] | 1 | 2021-07-25T16:36:34.000Z | 2021-07-25T16:36:34.000Z | #!/usr/bin/env python
"""
generate-all-graphs.py
python generate-all-graphs.py | gzip -c > all-graphs.gz
"""
import sys
import json
import itertools
import numpy as np
from tqdm import tqdm
from nasbench.lib import graph_util
from joblib import delayed, Parallel
max_vertices = 7
num_ops = 3
max_edges = 9
adjs = []
for vertices in range(2, max_vertices+1):
for bits in range(2 ** (vertices * (vertices-1) // 2)):
adjs.append((vertices, bits))
adjs = [adjs[i] for i in np.random.permutation(len(adjs))]
jobs = [delayed(make_graphs)(*adj) for adj in adjs]
res = Parallel(n_jobs=40, backend='multiprocessing', verbose=10)(jobs)
for r in res:
for rr in r:
print(json.dumps(rr)) | 24.388889 | 98 | 0.642369 |
f738e084271100fae4934591514291316a9bafdd | 1,500 | py | Python | ui/mext.py | szymonkaliski/nott | fa85e64b570f71733ea199dddbd0bc0f013a613b | [
"MIT"
] | 25 | 2019-07-01T14:58:48.000Z | 2021-11-13T17:00:44.000Z | ui/mext.py | szymonkaliski/nott | fa85e64b570f71733ea199dddbd0bc0f013a613b | [
"MIT"
] | 6 | 2019-12-30T02:50:19.000Z | 2021-05-10T16:41:47.000Z | ui/mext.py | szymonkaliski/nott | fa85e64b570f71733ea199dddbd0bc0f013a613b | [
"MIT"
] | 2 | 2020-01-05T13:02:07.000Z | 2020-05-21T15:54:57.000Z | # FIXME: fix all "happy paths coding" issues
import liblo
from threading import Thread
| 26.315789 | 85 | 0.544 |
f73a4d22041854c5326afaffc36927b22884b07a | 5,370 | py | Python | workers/test/test_exportactionlogsworker.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | workers/test/test_exportactionlogsworker.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | workers/test/test_exportactionlogsworker.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | import json
import os
import pytest
from datetime import datetime, timedelta
import boto3
from httmock import urlmatch, HTTMock
from moto import mock_s3
from app import storage as test_storage
from data import model, database
from data.logs_model import logs_model
from storage import S3Storage, StorageContext, DistributedStorage
from workers.exportactionlogsworker import ExportActionLogsWorker, POLL_PERIOD_SECONDS
from test.fixtures import *
_TEST_CONTENT = os.urandom(1024)
_TEST_BUCKET = "somebucket"
_TEST_USER = "someuser"
_TEST_PASSWORD = "somepassword"
_TEST_PATH = "some/cool/path"
_TEST_CONTEXT = StorageContext("nyc", None, None, None)
| 30.338983 | 97 | 0.58324 |
f73c025048313646ffa657c41d4c35ef79bc7325 | 6,699 | py | Python | pageplot/plotmodel.py | JBorrow/pageplot | 8abad574fda476d26a59fc8b7d36da2838f2c11e | [
"MIT"
] | null | null | null | pageplot/plotmodel.py | JBorrow/pageplot | 8abad574fda476d26a59fc8b7d36da2838f2c11e | [
"MIT"
] | null | null | null | pageplot/plotmodel.py | JBorrow/pageplot | 8abad574fda476d26a59fc8b7d36da2838f2c11e | [
"MIT"
] | null | null | null | """
The base top-level plot model class.
From this all data and plotting flow.
"""
from pageplot.exceptions import PagePlotParserError
from pathlib import Path
from typing import Any, Optional, Dict, List, Union
from pageplot.extensionmodel import PlotExtension
from pageplot.extensions import built_in_extensions
from pageplot.io.spec import IOSpecification
from pageplot.config import GlobalConfig
from pageplot.mask import get_mask
import matplotlib.pyplot as plt
import numpy as np
import unyt
import attr
| 29.641593 | 86 | 0.618749 |
f73d133f1804d0833d771530b775e1da1e558e30 | 853 | py | Python | src.py | duldiev/Assignment-2-Scrapping | a9dbb4bb14b7fe0a1c5ec6eba73491008ff8da52 | [
"MIT"
] | null | null | null | src.py | duldiev/Assignment-2-Scrapping | a9dbb4bb14b7fe0a1c5ec6eba73491008ff8da52 | [
"MIT"
] | null | null | null | src.py | duldiev/Assignment-2-Scrapping | a9dbb4bb14b7fe0a1c5ec6eba73491008ff8da52 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup as soup
from selenium import webdriver | 38.772727 | 110 | 0.588511 |
f73dddf470763349a7f01540ff083d75743566dd | 19,409 | py | Python | qsubm.py | mark-caprio/mcscript | 7a5a69667857f27b8f2d2f9387b90301bc321df2 | [
"MIT"
] | 1 | 2017-05-30T20:45:24.000Z | 2017-05-30T20:45:24.000Z | qsubm.py | mark-caprio/mcscript | 7a5a69667857f27b8f2d2f9387b90301bc321df2 | [
"MIT"
] | 3 | 2020-06-15T16:10:23.000Z | 2020-10-15T02:47:21.000Z | qsubm.py | mark-caprio/mcscript | 7a5a69667857f27b8f2d2f9387b90301bc321df2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""qsubm -- generic queue submission for task-oriented batch scripts
Environment variables:
MCSCRIPT_DIR should specify the directory in which the mcscript package is
installed, i.e., the directory where the file qsubm.py is found. (Note that
qsubm uses this information to locate certain auxiliary script files used as
part of the job submission process.)
MCSCRIPT_RUN_HOME must specify the directory in which job files are found.
MCSCRIPT_WORK_HOME should specify the parent directory in which run scratch
directories should be made.
MCSCRIPT_INSTALL_HOME must specify the directory in which executables are found.
MCSCRIPT_LAUNCH_HOME (optional) should specify the parent directory in which
run subdirectories for qsub invocation and output logging should be made.
Otherwise, this will default to MCSCRIPT_WORK_HOME.
MCSCRIPT_PYTHON should give the full qualified filename (i.e., including
path) to the Python 3 executable for running run script files. A typical
value will simply be "python3", assuming the Python 3 executable is in the
shell's command search PATH. However, see note on "Availability of Python"
in INSTALL.md.
MCSCRIPT_RUN_PREFIX should specify the prefix for run names, e.g., set to
"run" if your scripts are to be named run<XXXX>.py.
Requires local definitions file config.py to translate options into
arguments for local batch server. See directions in readme.txt. Your local
definitions might not make use of or support all the parallel environment
options.
Language: Python 3
M. A. Caprio
University of Notre Dame
+ 3/6/13 (mac): Based on earlier qsubm csh script.
+ 7/4/13 (mac): Support for multiple cluster flavors via qsubm_local.
+ 1/22/14 (mac): Python 3 update.
+ 10/27/14 (mac): Updates to --archive handling.
+ 5/14/15 (mac):
- Insert "future" statements for Python 2 legacy support.
- Add --noredirect switch.
- Mandatory environment variable QSUBM_PYTHON.
+ 8/4/15 (mac): Make user environment variable definitions into option.
+ 6/13/16 (mac): Rename environment variables to MCSCRIPT_*.
+ 6/22/16 (mac): Update to use config.py for local configuration.
+ 12/14/16 (mac): Add --here option.
+ 12/29/16 (mac):
- Add --spread option.
- Remove --pernode option.
- Make --opt option repeatable.
+ 1/16/17 (mac): Add --serialthreads option.
+ 2/23/17 (mac): Switch from os.mkdir to mcscript.utils.mkdir.
+ 3/16/17 (mac):
- Add --setup option.
- Change environment interface to pass MCSCRIPT_TASK_MODE.
+ 3/18/17 (mac):
- Revise to support updated hybrid run parameters.
- Rename option --setup to --prerun.
+ 5/22/17 (mac): Fix processing of boolean option --redirect.
+ 10/11/17 (pjf): Add --switchwaittime option.
+ 01/05/18 (pjf): Sort arguments into groups.
+ 02/11/18 (pjf):
- Pass through MCSCRIPT_INSTALL_HOME.
- Use job_environ for submission.
+ 07/06/18 (pjf):
- Pass queue via MCSCRIPT_RUN_QUEUE.
- Remove MCSCRIPT_HYBRID_NODESIZE.
+ 06/04/19 (pjf):
- Add hook for individual configurations to add command-line arguments.
- Move --switchwaittime option into config-slurm-nersc.py.
+ 09/11/19 (pjf): Add expert mode argument.
"""
import argparse
import os
import shutil
import subprocess
import sys
import mcscript.config # local configuration (usually symlink)
import mcscript.utils
################################################################
# argument parsing
################################################################
parser = argparse.ArgumentParser(
description="Queue submission for numbered run.",
usage=
"%(prog)s [option] run queue|RUN wall [var1=val1, ...]\n",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog=
"""Simply omit the queue name and leave off the wall time for a
local interactive run.
Environment variables for qsubm are described in INSTALL.md.
Note that qsubm relies upon code in the local `config.py`
configuration file for the system or cluster you are running on, in
order to interpret the following arguments and translate them into
arguments for your local batch system. Your local configuration
file might not make use of or support all the parallel environment
options listed below.
"""
)
# general arguments
parser.add_argument("run", help="Run number (e.g., 0000 for run0000)")
# latter arguments are made optional to simplify bare-bones syntax for --toc, etc., calls
parser.add_argument("queue", nargs='?', help="Submission queue, or RUN for direct interactive run", default="RUN")
parser.add_argument("wall", type=int, nargs='?', help="Wall time (minutes)", default=60)
##parser.add_argument("vars", nargs="?", help="Environment variables to pass to script, with optional values, comma delimited (e.g., METHOD2, PARAM=1.0)")
parser.add_argument("--here", action="store_true", help="Force run in current working directory")
parser.add_argument("--vars", help="Environment variables to pass to script, with optional values, comma delimited (e.g., --vars=METHOD2, PARAM=1.0)")
## parser.add_argument("--stat", action="store_true", help="Display queue status information")
parser.add_argument("--num", type=int, default=1, help="Number of repetitions")
parser.add_argument("--opt", action="append", help="Additional option arguments to be passed to job submission command (e.g., --opt=\"-m ae\" or --opt=\"--mail-type=END,FAIL\"), may be repeated (e.g., --opt=\"-A acct\" --opt=\"-a 1200\"); beware the spaces may be important to the job submission command")
parser.add_argument("--expert", action="store_true", help="Run mcscript in expert mode")
# serial run parallelization parameters
serial_group = parser.add_argument_group("serial run options (single-node, non-MPI)")
serial_group.add_argument("--serialthreads", type=int, default=1, help="OMP threads")
# hybrid run parallelization parameters
#
# Not all local configuration files need necessarily require or
# respect all of the following parameters.
hybrid_group = parser.add_argument_group("hybrid run options")
hybrid_group.add_argument("--nodes", type=int, default=1, help="number of nodes")
hybrid_group.add_argument("--ranks", type=int, default=1, help="number of MPI ranks")
hybrid_group.add_argument("--threads", type=int, default=1, help="OMP threads per rank)")
hybrid_group.add_argument("--nodesize", type=int, default=0, help="logical threads available per node"
" (might instead be interpreted physical CPUs depending on local config file)")
##hybrid_group.add_argument("--undersubscription", type=int, default=1, help="undersubscription factor (e.g., spread=2 requests twice the cores needed)")
# multi-task interface: invocation modes
task_mode_group = parser.add_mutually_exclusive_group()
task_mode_group.add_argument("--toc", action="store_true", help="Invoke run script to generate task table of contents")
task_mode_group.add_argument("--unlock", action="store_true", help="Delete any .lock or .fail flags for tasks")
task_mode_group.add_argument("--archive", action="store_true", help="Invoke archive-generation run")
task_mode_group.add_argument("--prerun", action="store_true", help="Invoke prerun mode, for argument validation and file staging only")
task_mode_group.add_argument("--offline", action="store_true", help="Invoke offline mode, to create batch scripts for later submission instead of running compute codes")
# multi-task interface: task selection
task_selection_group = parser.add_argument_group("multi-task run options")
task_selection_group.add_argument("--pool", help="Set task pool (or ALL) for task selection")
task_selection_group.add_argument("--phase", type=int, default=0, help="Set task phase for task selection")
task_selection_group.add_argument("--start", type=int, help="Set starting task number for task selection")
task_selection_group.add_argument("--limit", type=int, help="Set task count limit for task selection")
task_selection_group.add_argument("--redirect", default="True", choices=["True", "False"], help="Allow redirection of standard"
" output/error to file (may want to disable for interactive debugging)")
# some special options (deprecated?)
##parser.add_argument("--epar", type=int, default=None, help="Width for embarassingly parallel job")
##parser.add_argument("--nopar", action="store_true", help="Disable parallel resource requests (for use on special serial queues)")
# site-local options
try:
mcscript.config.qsubm_arguments(parser)
except AttributeError:
# local config doesn't provide arguments, ignore gracefully
pass
##parser.print_help()
##print
args = parser.parse_args()
##printargs
################################################################
# special mode: status display
################################################################
# TODO
# will have to modify argument processing to allow no arguments, local
# customization for qstat
# @ i = 0
# while (($i == 0) || ($loop))
# @ i++
# clear
# echo "****************************************************************"
# qstat -u $user
# if ($loop) sleep 5
# end
## if (args.stat):
## pass
################################################################
# environment processing
################################################################
if (args.here):
run_home = os.environ["PWD"]
elif ("MCSCRIPT_RUN_HOME" in os.environ):
run_home = os.environ["MCSCRIPT_RUN_HOME"]
else:
print("MCSCRIPT_RUN_HOME not found in environment")
exit(1)
if (args.here):
work_home = os.environ["PWD"]
elif ("MCSCRIPT_WORK_HOME" in os.environ):
work_home = os.environ["MCSCRIPT_WORK_HOME"]
else:
print("MCSCRIPT_WORK_HOME not found in environment")
exit(1)
if (args.here):
launch_home = os.environ["PWD"]
elif ("MCSCRIPT_LAUNCH_HOME" in os.environ):
launch_home = os.environ["MCSCRIPT_LAUNCH_HOME"]
else:
launch_home = work_home
if ("MCSCRIPT_RUN_PREFIX" in os.environ):
run_prefix = os.environ["MCSCRIPT_RUN_PREFIX"]
else:
print("MCSCRIPT_RUN_PREFIX not found in environment")
exit(1)
if ("MCSCRIPT_PYTHON" in os.environ):
python_executable = os.environ["MCSCRIPT_PYTHON"]
else:
print("MCSCRIPT_PYTHON not found in environment")
exit(1)
if ("MCSCRIPT_DIR" in os.environ):
qsubm_path = os.environ["MCSCRIPT_DIR"]
else:
print("MCSCRIPT_DIR not found in environment")
exit(1)
################################################################
# argument processing
################################################################
# set run name
run = run_prefix + args.run
print("Run:", run)
# ...and process run file
script_extensions = [".py", ".csh"]
job_file = None
for extension in script_extensions:
filename = os.path.join(run_home, run+extension)
if (filename):
job_file = filename
job_extension = extension
break
print(" Run home:", run_home) # useful to report now, in case job file missing
if (job_file is None):
print("No job file %s.* found with an extension in the set %s." % (run, script_extensions))
exit(1)
print(" Job file:", job_file)
# set queue and flag batch or local mode
# force local run for task.py toc mode
if ((args.queue == "RUN") or args.toc or args.unlock):
run_mode = "local"
run_queue = "local"
print(" Mode:", run_mode)
else:
run_mode = "batch"
run_queue = args.queue
print(" Mode:", run_mode, "(%s)" % args.queue)
# set wall time
wall_time_min = args.wall
print(" Wall time (min): {:d}".format(wall_time_min))
wall_time_sec = wall_time_min*60
# environment definitions: general run parameters
environment_definitions = [
"MCSCRIPT_RUN={:s}".format(run),
"MCSCRIPT_JOB_FILE={:s}".format(job_file),
"MCSCRIPT_RUN_MODE={:s}".format(run_mode),
"MCSCRIPT_RUN_QUEUE={:s}".format(run_queue),
"MCSCRIPT_WALL_SEC={:d}".format(wall_time_sec)
]
# environment definitions: serial run parameters
environment_definitions += [
"MCSCRIPT_SERIAL_THREADS={:d}".format(args.serialthreads)
]
# environment definitions: hybrid run parameters
environment_definitions += [
"MCSCRIPT_HYBRID_NODES={:d}".format(args.nodes),
"MCSCRIPT_HYBRID_RANKS={:d}".format(args.ranks),
"MCSCRIPT_HYBRID_THREADS={:d}".format(args.threads),
]
# set multi-task run parameters
if (args.toc):
task_mode = mcscript.task.TaskMode.kTOC
elif (args.unlock):
task_mode = mcscript.task.TaskMode.kUnlock
elif (args.archive):
task_mode = mcscript.task.TaskMode.kArchive
elif (args.prerun):
task_mode = mcscript.task.TaskMode.kPrerun
elif (args.offline):
task_mode = mcscript.task.TaskMode.kOffline
else:
task_mode = mcscript.task.TaskMode.kRun
# TODO (mac): neaten up so that these arguments are always provided
# (and simplify this code to a simple list += as above)
environment_definitions.append("MCSCRIPT_TASK_MODE={:d}".format(task_mode.value))
if (args.pool is not None):
environment_definitions.append("MCSCRIPT_TASK_POOL={:s}".format(args.pool))
if (args.phase is not None):
environment_definitions.append("MCSCRIPT_TASK_PHASE={:d}".format(args.phase))
if (args.start is not None):
environment_definitions.append("MCSCRIPT_TASK_START_INDEX={:d}".format(args.start))
if (args.limit is not None):
environment_definitions.append("MCSCRIPT_TASK_COUNT_LIMIT={:d}".format(args.limit))
environment_definitions.append("MCSCRIPT_TASK_REDIRECT={:s}".format(args.redirect))
# pass through install directory
if os.environ.get("MCSCRIPT_INSTALL_HOME"):
environment_definitions += [
"MCSCRIPT_INSTALL_HOME={:s}".format(os.environ["MCSCRIPT_INSTALL_HOME"])
]
elif os.environ.get("MCSCRIPT_INSTALL_DIR"):
# TODO remove deprecated environment variable
print("****************************************************************")
print("MCSCRIPT_INSTALL_DIR is now MCSCRIPT_INSTALL_HOME.")
print("Please update your environment variables.")
print("****************************************************************")
environment_definitions += [
"MCSCRIPT_INSTALL_HOME={:s}".format(os.environ["MCSCRIPT_INSTALL_DIR"])
]
else:
print("MCSCRIPT_INSTALL_HOME not found in environment")
exit(1)
# include additional environment setup if defined
if os.environ.get("MCSCRIPT_SOURCE"):
environment_definitions += [
"MCSCRIPT_SOURCE={:s}".format(os.environ["MCSCRIPT_SOURCE"])
]
# set user-specified variable definitions
# Note conditional is required since "".split(", ") is [""] rather than [].
if (args.vars is None):
user_environment_definitions = []
else:
user_environment_definitions = args.vars.split(",")
print(" User environment definitions:", user_environment_definitions)
environment_definitions += user_environment_definitions
################################################################
# directory setup
################################################################
# set up scratch directory (for batch job work)
# name is defined here, but creation is left up to job script,
# in case scratch is local to the compute note
work_dir = os.path.join(work_home, run)
## if ( not os.path.exists(work_dir)):
## mcscript.utils.mkdir(work_dir)
environment_definitions.append("MCSCRIPT_WORK_DIR=%s" % work_dir)
# set up run launch directory (for batch job output logging)
launch_dir_parent = os.path.join(launch_home, run)
if ( not os.path.exists(launch_home)):
mcscript.utils.mkdir(launch_home)
if ( not os.path.exists(launch_dir_parent)):
mcscript.utils.mkdir(launch_dir_parent)
if (args.archive):
# archive mode
# launch in archive directory rather than usual batch job output directory
# (important since if batch job server directs output to the
# regular output directory while tar is archiving that directory,
# tar will return with an error code, torpedoing the archive task)
launch_dir = os.path.join(launch_home, run, "archive")
else:
# standard run mode
launch_dir = os.path.join(launch_home, run, "batch")
if ( not os.path.exists(launch_dir)):
mcscript.utils.mkdir(launch_dir)
environment_definitions.append("MCSCRIPT_LAUNCH_DIR=%s" % launch_dir)
################################################################
# job environment setup
################################################################
# construct job name
job_name = "%s" % run
##job_name += "-w%d" % args.width
if (args.pool is not None):
job_name += "-%s" % args.pool
job_name += "-%s" % args.phase
print(" Job name:", job_name)
# process environment definitions
# regularize environment definitions
# Convert all plain variable name definitions "VAR" into definition
# as null string "VAR=". Note that "VAR" would be an environment
# variable pass-through request to qsub, but it causes trouble with
# defining an environment for local execution. So doing this
# regularization simplifies further processing and ensures
# uniformity of the environment between batch and local runs.
for i in range(len(environment_definitions)):
if (not "=" in environment_definitions[i]):
environment_definitions[i] += "="
print()
print("Vars:", ",".join(environment_definitions))
# for local run
job_environ=os.environ
environment_keyvalues = [
entry.split("=")
for entry in environment_definitions
]
job_environ.update(dict(environment_keyvalues))
################################################################
# run invocation
################################################################
# flush script output before invoking job
print()
sys.stdout.flush()
# handle batch run
if (run_mode == "batch"):
# set local qsub arguments
(submission_args, submission_input_string, repetitions) = mcscript.config.submission(job_name, job_file, qsubm_path, environment_definitions, args)
# notes: options must come before command on some platforms (e.g., Univa)
print(" ".join(submission_args))
print(submission_input_string)
print()
print("-"*64)
for i in range(repetitions):
process = subprocess.Popen(
submission_args,
stdin=subprocess.PIPE, # to take input from communicate
stdout=subprocess.PIPE, # to send output to communicate -- default merged stderr
env=job_environ,
cwd=launch_dir
)
stdout_bytes = process.communicate(input=submission_input_string)[0]
stdout_string = stdout_bytes.decode("utf-8")
print(stdout_string)
# handle interactive run
# Note: We call interpreter rather than trying to directly execute
# job file since this saves us from bothering with execute permissions.
# But, beware the interpreter enforced by the script's shebang line might
# be different from the version of the interpreter found in the below invocation,
# especially in a "module" environment.
elif (run_mode == "local"):
if (extension == ".py"):
popen_args = [python_executable, job_file]
elif (extension == ".csh"):
popen_args = ["csh", job_file]
print()
print("-"*64)
process = subprocess.Popen(popen_args, cwd=launch_dir, env=job_environ)
process.wait()
| 40.77521 | 305 | 0.679273 |
f73ea882b3c478b64d849ace9aad77a4fd64c642 | 504 | py | Python | trees.py | dmancevo/trees | a76a8d9c8e11c67042e3d947d58a84fee83ad6b5 | [
"Apache-2.0"
] | null | null | null | trees.py | dmancevo/trees | a76a8d9c8e11c67042e3d947d58a84fee83ad6b5 | [
"Apache-2.0"
] | null | null | null | trees.py | dmancevo/trees | a76a8d9c8e11c67042e3d947d58a84fee83ad6b5 | [
"Apache-2.0"
] | null | null | null | from ctypes import *
Node._fields_ = [
("leaf", c_int),
("g", c_float),
("min_samples", c_int),
("split_ind", c_int),
("split", c_float),
("left", POINTER(Node)),
("right", POINTER(Node))]
trees = CDLL("./trees.so")
trees.get_root.argtypes = (c_int, )
trees.get_root.restype = POINTER(Node)
if __name__ == '__main__':
tree = Tree()
| 21 | 47 | 0.621032 |
f73eadae4fdc856f5258f55231ef39bc6666f5e3 | 2,657 | py | Python | qf_lib/backtesting/order/order.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 198 | 2019-08-16T15:09:23.000Z | 2022-03-30T12:44:00.000Z | qf_lib/backtesting/order/order.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 13 | 2021-01-07T10:15:19.000Z | 2022-03-29T13:01:47.000Z | qf_lib/backtesting/order/order.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 29 | 2019-08-16T15:21:28.000Z | 2022-02-23T09:53:49.000Z | # Copyright 2016-present CERN European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.time_in_force import TimeInForce
| 39.073529 | 113 | 0.640572 |
f73f526fb320491a4e8c361c6ccf86f4cd4462be | 8,080 | py | Python | purity_fb/purity_fb_1dot12/models/multi_protocol_rule.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot12/models/multi_protocol_rule.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot12/models/multi_protocol_rule.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.12 Python SDK
Pure Storage FlashBlade REST 1.12 Python SDK. Compatible with REST API versions 1.0 - 1.12. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.12
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MultiProtocolRule):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 53.509934 | 1,242 | 0.688738 |
f73f780a7a7ee00c38db35dff1b7df923b5843be | 3,789 | py | Python | webdriver/tests/actions/mouse_dblclick.py | shs96c/web-platform-tests | 61acad6dd9bb99d32340eb41f5146de64f542359 | [
"BSD-3-Clause"
] | null | null | null | webdriver/tests/actions/mouse_dblclick.py | shs96c/web-platform-tests | 61acad6dd9bb99d32340eb41f5146de64f542359 | [
"BSD-3-Clause"
] | null | null | null | webdriver/tests/actions/mouse_dblclick.py | shs96c/web-platform-tests | 61acad6dd9bb99d32340eb41f5146de64f542359 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from tests.actions.support.mouse import assert_move_to_coordinates, get_center
from tests.actions.support.refine import get_events, filter_dict
_DBLCLICK_INTERVAL = 640
# Using local fixtures because we want to start a new session between
# each test, otherwise the clicks in each test interfere with each other.
def test_dblclick_with_pause_after_second_pointerdown(dblclick_session, mouse_chain):
outer = dblclick_session.find.css("#outer", all=False)
center = get_center(outer.rect)
mouse_chain \
.pointer_move(int(center["x"]), int(center["y"])) \
.click() \
.pointer_down() \
.pause(_DBLCLICK_INTERVAL + 10) \
.pointer_up() \
.perform()
events = get_events(dblclick_session)
expected = [
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
{"type": "dblclick", "button": 0},
]
assert len(events) == 8
filtered_events = [filter_dict(e, expected[0]) for e in events]
assert expected == filtered_events[1:]
def test_no_dblclick(dblclick_session, mouse_chain):
outer = dblclick_session.find.css("#outer", all=False)
center = get_center(outer.rect)
mouse_chain \
.pointer_move(int(center["x"]), int(center["y"])) \
.click() \
.pause(_DBLCLICK_INTERVAL + 10) \
.click() \
.perform()
events = get_events(dblclick_session)
expected = [
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
]
assert len(events) == 7
filtered_events = [filter_dict(e, expected[0]) for e in events]
assert expected == filtered_events[1:]
| 34.761468 | 92 | 0.594088 |
f741f9a134a29cde4417d89a622f8515e0c9db99 | 2,842 | py | Python | actions.py | ratnasankeerthanreddy/Chatbot-for-Personal-assisatance | 6c584601af4c98a3bebedf7073e0ccf2ad8ecf76 | [
"MIT"
] | 1 | 2020-10-28T15:57:44.000Z | 2020-10-28T15:57:44.000Z | actions.py | ratnasankeerthanreddy/Chatbot-for-Personal-assisatance | 6c584601af4c98a3bebedf7073e0ccf2ad8ecf76 | [
"MIT"
] | null | null | null | actions.py | ratnasankeerthanreddy/Chatbot-for-Personal-assisatance | 6c584601af4c98a3bebedf7073e0ccf2ad8ecf76 | [
"MIT"
] | null | null | null | from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from utils import convert_timestamp
from rasa_sdk.events import AllSlotsReset
import datetime
from datetime import timedelta, date
import dateutil.parser
import boto3
from boto3.dynamodb.conditions import Key
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# dispatcher.utter_message(text="Hello World!")
# return []
# class ActionSearchRestaurant(Action):
# def name(self) -> Text:
# return "action_search_restaurant"
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# entities = tracker.latest_message['entities']
# print(entities)
# for e in entities:
# if e['entity'] == 'type':
# name = e['value']
# if name == 'indian':
# message = "Items: Indian1, Indian2, Indian3, Indian4"
# dispatcher.utter_message(text=message)
# return []
param_arr = ["salinity", "solarRad", "airTemp", "aeration", "potassium", "moisture", "soilTemp", "respiration", "pressure", "phosphorus", "pH", "humidity", "nitrogen", "evapotranspiration(ET)"]
| 35.974684 | 363 | 0.678044 |
f7421c61dc4a9f2905083616a76cf5c9a110855b | 4,688 | py | Python | main.py | Davidswinkels/DownloadWalkingRoutes | 9ceaee0b96507149086aef7081790a09ab6b3653 | [
"Apache-2.0"
] | null | null | null | main.py | Davidswinkels/DownloadWalkingRoutes | 9ceaee0b96507149086aef7081790a09ab6b3653 | [
"Apache-2.0"
] | null | null | null | main.py | Davidswinkels/DownloadWalkingRoutes | 9ceaee0b96507149086aef7081790a09ab6b3653 | [
"Apache-2.0"
] | null | null | null | from scripts.downloader import *
import fiona
from shapely.geometry import shape
import geopandas as gpd
import matplotlib.pyplot as plt
from pprint import pprint
import requests
import json
import time
import os
# Constant variables
input_min_lat = 50.751797561
input_min_lon = 5.726110232
input_max_lat = 50.938216069
input_max_lon = 6.121604582
route_search_url = "https://api.routeyou.com/2.0/json/Route/k-9aec2fc1705896b901c3ea17d6223f0a/mapSearch"
route_search_headers = {"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "nl,en-US;q=0.7,en;q=0.3",
"Connection": "keep-alive",
"Content-Length": "331",
"Content-Type": "text/plain;charset=UTF-8",
"DNT": "1",
"Host": "api.routeyou.com",
"Origin": "https://www.routeyou.com",
"Referer": "https://www.routeyou.com/route/search/2/walking-route-search",
"TE": "Trailers",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0"}
default_headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "nl,en-US;q=0.7,en;q=0.3",
"Connection": "test",
"Cookie": "rtysid=5gf59rik6gf8o7b5an7nalcsh0; "
"_ga=GA1.2.1811204879.1553438381; _"
"gid=GA1.2.1815573989.1553438381; __"
"gads=ID=fab95f7aaf65227e:T=1553438384:S=ALNI_MaIjkdo1dKpYiyQKfWZEymqT7HgUQ",
"Host": "download.routeyou.com",
"Referer": "https://www.routeyou.com/nl-be/route/view/5653357/wandelroute/"
"in-het-spoor-van-napoleon-kasteel-reinhardstein-en-de-stuwdam-van-robertville",
"TE": "Trailers",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0"}
# # Setup script
# bounding_boxes_list = create_bounding_boxes(input_min_lat, input_min_lon, input_max_lat, input_max_lon,
# nr_of_rows=12, nr_of_columns=12)
# for index, bounding_box in enumerate(bounding_boxes_list):
# route_search_data = '{"jsonrpc":"2.0","id":"3","method":"searchAdvanced","params":' \
# '[{"bounds":{"min":{"lat":%s,"lon":%s},"max":{"lat":%s,"lon":%s}},' \
# '"type.id":2,"score.min":0.5,"bounds.comparator":"geometry"},null,100,0,' \
# '{"clusters":false,"addLanguage":"en","media":false,"description":false}]}' \
# % (bounding_box['min_lat'], bounding_box['min_lon'], bounding_box['max_lat'], bounding_box['max_lon'])
# response = requests.post(url=route_search_url, headers=route_search_headers,
# data=route_search_data)
# with open("D:/Wandelroutes/Text/routes_{}.txt".format(index), "wb") as file:
# file.write(response.content)
# data = json.loads(response.content)
# print("Index / routes count / total routes: ", index, "/", len(data['result']['routes']), "/", data['result']['total'])
#
# for route in data['result']['routes']:
# time.sleep(0.5)
# route_url = "https://download.routeyou.com/k-9aec2fc1705896b901c3ea17d6223f0a/route/{}.gpx?language=nl".format(route['id'])
# filepath = "D:/Wandelroutes/GPX/{}.gpx".format(route['id'])
# download_to_file(route_url, default_headers, filepath)
dir_filepath = "D:/Wandelroutes/GPX"
filenames = os.listdir(dir_filepath)
rows_list = []
for filename in filenames:
layer = fiona.open(os.path.join(dir_filepath, filename), layer='tracks')
geom = layer[0]
route_name = geom['properties']['name']
route_geodata = {'type': 'MultiLineString',
'coordinates': geom['geometry']['coordinates']}
route_geometry = shape(route_geodata)
route_id = os.path.splitext(os.path.basename(filename))[0]
route_dict = {'id': str(route_id),
'name': route_name,
'url': "https://www.routeyou.com/nl-nl/route/view/" + str(route_id),
'geometry': route_geometry}
rows_list.append(route_dict)
routes_gdf = gpd.GeoDataFrame(rows_list)
routes_gdf.crs = {'init': 'epsg:4326', 'no_defs': True}
routes_gdf.to_file("D:/Wandelroutes/walking_routes.shp")
| 53.272727 | 133 | 0.588097 |
f7441602c73a268dd40291e6397b91ed0f1027f6 | 3,185 | py | Python | seq2seq-chatbot/chat_web.py | rohitkujur1997/chatbot | 76cd460b09f75532a7259d783114d8cf3dda246f | [
"MIT"
] | 104 | 2018-03-28T20:30:25.000Z | 2022-02-18T19:43:21.000Z | seq2seq-chatbot/chat_web.py | rohitkujur1997/chatbot | 76cd460b09f75532a7259d783114d8cf3dda246f | [
"MIT"
] | 37 | 2018-04-16T15:39:17.000Z | 2021-05-29T11:28:26.000Z | seq2seq-chatbot/chat_web.py | rohitkujur1997/chatbot | 76cd460b09f75532a7259d783114d8cf3dda246f | [
"MIT"
] | 63 | 2018-05-18T09:52:20.000Z | 2021-07-26T08:11:17.000Z | """
Script for serving a trained chatbot model over http
"""
import datetime
import click
from os import path
from flask import Flask, request, send_from_directory
from flask_cors import CORS
from flask_restful import Resource, Api
import general_utils
import chat_command_handler
from chat_settings import ChatSettings
from chatbot_model import ChatbotModel
from vocabulary import Vocabulary
app = Flask(__name__)
CORS(app) | 37.916667 | 141 | 0.661538 |
f744580853ac0dc47dbf987d1497464099a8f500 | 1,907 | py | Python | tests/test-checkbox.py | JonathanRRogers/twill | e1afc10366dcd29b82eeae2d586e49ca7737039a | [
"MIT"
] | null | null | null | tests/test-checkbox.py | JonathanRRogers/twill | e1afc10366dcd29b82eeae2d586e49ca7737039a | [
"MIT"
] | null | null | null | tests/test-checkbox.py | JonathanRRogers/twill | e1afc10366dcd29b82eeae2d586e49ca7737039a | [
"MIT"
] | null | null | null | import twilltestlib
import twill
from twill import namespaces, commands
from twill.errors import TwillAssertionError
from mechanize import BrowserStateError
| 26.859155 | 77 | 0.631358 |
f74471527dea41ff8d2f932ecbb41c7a4779f9c6 | 894 | py | Python | native/release/test.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 6 | 2015-09-19T18:22:33.000Z | 2020-11-29T15:21:17.000Z | native/release/test.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 1 | 2015-08-04T08:03:46.000Z | 2015-08-04T08:03:46.000Z | native/release/test.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 1 | 2019-12-09T08:27:09.000Z | 2019-12-09T08:27:09.000Z | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import pybind
a = pybind.vec3(1.0, 2.0, 3.0)
b = pybind.vec3(2.0, 1.0, 3.0)
t1 = test(pybind.dot)
t2 = test(dummy)
print t1/t2
| 22.35 | 74 | 0.710291 |
f7451e04b5509a3b3a1ecacc27d8ca29e4bae31f | 657 | py | Python | 4.py | Andrey543/Prack_10 | 263cae3204ed624b68d5797bd8d8833dd88e4682 | [
"PSF-2.0"
] | null | null | null | 4.py | Andrey543/Prack_10 | 263cae3204ed624b68d5797bd8d8833dd88e4682 | [
"PSF-2.0"
] | null | null | null | 4.py | Andrey543/Prack_10 | 263cae3204ed624b68d5797bd8d8833dd88e4682 | [
"PSF-2.0"
] | null | null | null | enru=open('en-ru.txt','r')
input=open('input.txt','r')
output=open('output.txt','w')
s=enru.read()
x=''
prov={'q','w','e','r','t','y','u','i','o','p','a','s','d','f','g','h','j','k','l','z','x','c','v','b','n','m'}
slovar={}
s=s.replace('\t-\t',' ')
while len(s)>0:
slovar[s[:s.index(' ')]]=s[s.index(' '):s.index('\n')]
s=s[s.index('\n')+1:]
print(slovar)
s=input.read()
s=s.lower()
while len(s)>0:
a=s[0]
if a not in prov:
if x in slovar:
print(slovar[x],a, file=output, sep='',end='')
else:
print(x,a, file=output, sep='',end='')
x=''
else:
x+=a
s=s[1:]
| 22.655172 | 110 | 0.438356 |
f74545257d4ee21af8ad7aae2149ad290991f512 | 2,485 | py | Python | examples/siamese_mnist.py | DmitryUlyanov/deeppy | c9644d348e22b78d32ea049fb0ac14bf3b750941 | [
"MIT"
] | 1 | 2015-09-16T08:01:21.000Z | 2015-09-16T08:01:21.000Z | examples/siamese_mnist.py | rajat1994/deeppy | 79cc7cb552f30bc70eeea9ee7ff4976b0899ea66 | [
"MIT"
] | null | null | null | examples/siamese_mnist.py | rajat1994/deeppy | 79cc7cb552f30bc70eeea9ee7ff4976b0899ea66 | [
"MIT"
] | 2 | 2020-04-05T21:41:14.000Z | 2021-09-28T18:05:49.000Z | #!/usr/bin/env python
"""
Siamese networks
================
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
import deeppy as dp
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)
# Normalize pixel intensities
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Generate image pairs
n_pairs = 100000
x1 = np.empty((n_pairs, 28*28), dtype=dp.float_)
x2 = np.empty_like(x1, dtype=dp.float_)
y = np.empty(n_pairs, dtype=dp.int_)
n_imgs = x_train.shape[0]
n = 0
while n < n_pairs:
i = random.randint(0, n_imgs-1)
j = random.randint(0, n_imgs-1)
if i == j:
continue
x1[n, ...] = x_train[i]
x2[n, ...] = x_train[j]
if y_train[i] == y_train[j]:
y[n] = 1
else:
y[n] = 0
n += 1
# Prepare network inputs
train_input = dp.SupervisedSiameseInput(x1, x2, y, batch_size=128)
# Setup network
w_gain = 1.5
w_decay = 1e-4
net = dp.SiameseNetwork(
siamese_layers=[
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
),
dp.ReLU(),
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
),
dp.ReLU(),
dp.FullyConnected(
n_out=2,
weights=dp.Parameter(dp.AutoFiller(w_gain)),
),
],
loss=dp.ContrastiveLoss(margin=1.0),
)
# Train network
trainer = dp.StochasticGradientDescent(
max_epochs=15,
learn_rule=dp.RMSProp(learn_rate=0.01),
)
trainer.train(net, train_input)
# Plot 2D embedding
test_input = dp.Input(x_test)
x_test = np.reshape(x_test, (-1,) + dataset.img_shape)
feat = net.features(test_input)
feat -= np.min(feat, 0)
feat /= np.max(feat, 0)
plt.figure()
ax = plt.subplot(111)
shown_images = np.array([[1., 1.]])
for i in range(feat.shape[0]):
dist = np.sum((feat[i] - shown_images)**2, 1)
if np.min(dist) < 6e-4:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [feat[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(x_test[i], zoom=0.6, cmap=plt.cm.gray_r),
xy=feat[i], frameon=False
)
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
plt.title('Embedding from the last layer of the network')
| 24.60396 | 78 | 0.639437 |
f7455caca893431c335911322deda0eadbce921b | 1,580 | py | Python | py/tests/tests_integ_yarn.py | My-Technical-Architect/sparkling-water | b1381891baefd63fd15f8dc2a73b049828a919bc | [
"Apache-2.0"
] | null | null | null | py/tests/tests_integ_yarn.py | My-Technical-Architect/sparkling-water | b1381891baefd63fd15f8dc2a73b049828a919bc | [
"Apache-2.0"
] | null | null | null | py/tests/tests_integ_yarn.py | My-Technical-Architect/sparkling-water | b1381891baefd63fd15f8dc2a73b049828a919bc | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration tests for pySparkling for Spark running in YARN mode
"""
from integ_test_utils import IntegTestSuite
import test_utils
if __name__ == '__main__':
test_utils.run_tests([YarnIntegTestSuite], file_name="py_integ_yarn_tests_report")
| 39.5 | 97 | 0.725316 |
f747016ef477946806c1d818d38cadb8c27e681e | 12,988 | py | Python | exchanges/virtualExchangeService.py | AshWorkshop/Trandash | a96b523fbd171ba2d43a6720ef2e2a496a0cf75a | [
"MIT"
] | 1 | 2018-07-25T03:43:35.000Z | 2018-07-25T03:43:35.000Z | exchanges/virtualExchangeService.py | AshWorkshop/Trandash | a96b523fbd171ba2d43a6720ef2e2a496a0cf75a | [
"MIT"
] | null | null | null | exchanges/virtualExchangeService.py | AshWorkshop/Trandash | a96b523fbd171ba2d43a6720ef2e2a496a0cf75a | [
"MIT"
] | null | null | null | from twisted.internet import defer, task
from twisted.python.failure import Failure
from exchanges.base import ExchangeService
from exchange import calcVirtualOrderBooks
import copy
import time
if __name__ == '__main__':
from exchanges.bitfinex.BitfinexService import bitfinex
VirtualExchange(bitfinex, ('ETH',) ) | 34.542553 | 97 | 0.495303 |
f7479f1a05ace202d6234fbd90b428551eb021a1 | 7,989 | py | Python | testplan/testing/cpp/cppunit.py | Morgan-Stanley/Testplan | 9374d6e0da6ae9aa7a1b5e08b42cd21993485837 | [
"Apache-2.0"
] | null | null | null | testplan/testing/cpp/cppunit.py | Morgan-Stanley/Testplan | 9374d6e0da6ae9aa7a1b5e08b42cd21993485837 | [
"Apache-2.0"
] | null | null | null | testplan/testing/cpp/cppunit.py | Morgan-Stanley/Testplan | 9374d6e0da6ae9aa7a1b5e08b42cd21993485837 | [
"Apache-2.0"
] | null | null | null | import os
from schema import Or
from testplan.common.config import ConfigOption
from ..base import ProcessRunnerTest, ProcessRunnerTestConfig
from ...importers.cppunit import CPPUnitResultImporter, CPPUnitImportedResult
| 35.506667 | 80 | 0.620729 |
f747a38fbb26a5157c21b6d60ed98e858e4c0dcd | 8,191 | py | Python | commands/song.py | Princ3x/ddmbot | 088eb6b46447a1ec184b1bc7fea493b66ee35284 | [
"MIT"
] | 8 | 2016-12-13T17:52:51.000Z | 2019-06-23T22:11:42.000Z | commands/song.py | Princ3x/ddmbot | 088eb6b46447a1ec184b1bc7fea493b66ee35284 | [
"MIT"
] | 13 | 2016-12-13T17:35:09.000Z | 2017-07-08T10:53:51.000Z | commands/song.py | Princ3x/ddmbot | 088eb6b46447a1ec184b1bc7fea493b66ee35284 | [
"MIT"
] | 4 | 2016-12-13T17:52:53.000Z | 2019-01-01T17:43:33.000Z | import discord.ext.commands as dec
import database.song
from commands.common import *
| 56.881944 | 120 | 0.652545 |
f747c5f4148789ffa72b834beacbd044a5cb6421 | 2,468 | py | Python | linear_error_analysis/src/main.py | spacesys-finch/Science | 623c9d77de6a52e87571debf7970cea7af591f2a | [
"MIT"
] | null | null | null | linear_error_analysis/src/main.py | spacesys-finch/Science | 623c9d77de6a52e87571debf7970cea7af591f2a | [
"MIT"
] | null | null | null | linear_error_analysis/src/main.py | spacesys-finch/Science | 623c9d77de6a52e87571debf7970cea7af591f2a | [
"MIT"
] | 1 | 2021-10-09T19:35:26.000Z | 2021-10-09T19:35:26.000Z | """
main.py
Main driver for the Linear Error Analysis program.
Can be run using `lea.sh`.
Can choose which plots to see by toggling on/off `show_fig` param.
Author(s): Adyn Miles, Shiqi Xu, Rosie Liang
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import config
import libs.gta_xch4 as gta_xch4
import libs.photon_noise as pn
from errors import Errors
from forward import Forward
from isrf import ISRF
from optim import Optim
if __name__ == "__main__":
cfg = config.parse_config()
forward = Forward(cfg)
surface, molec, atm, sun_lbl = forward.get_atm_params()
optics = forward.opt_properties()
(
wave_meas,
rad_tot,
rad_ch4,
rad_co2,
rad_h2o,
d_rad_ch4,
d_rad_co2,
d_rad_h2o,
rad_conv_tot,
rad_conv_ch4,
rad_conv_co2,
rad_conv_h2o,
dev_conv_ch4,
dev_conv_co2,
dev_conv_h2o,
) = forward.plot_transmittance(show_fig=False)
state_vector = forward.produce_state_vec()
isrf = ISRF(cfg)
isrf_func = isrf.define_isrf(show_fig=False)
isrf_conv = isrf.convolve_isrf(rad_tot, show_fig=False)
lea = Errors(cfg, wave_meas)
sys_errors = lea.sys_errors()
rand_errors = lea.rand_errors()
# sys_nonlinearity = lea.sys_err_vector(1)
# sys_stray_light = lea.sys_err_vector(2)
# sys_crosstalk = lea.sys_err_vector(3)
# sys_flat_field = lea.sys_err_vector(4)
# sys_bad_px = lea.sys_err_vector(5)
# sys_key_smile = lea.sys_err_vector(6)
# sys_striping = lea.sys_err_vector(7)
# sys_memory = lea.sys_err_vector(8)
ecm = lea.error_covariance()
path_root = os.path.dirname(os.path.dirname(__file__))
np.savetxt(os.path.join(path_root, "outputs", "ecm.csv"), ecm, delimiter=",")
optim = Optim(cfg, wave_meas)
jacobian = optim.jacobian(dev_conv_ch4, dev_conv_co2, dev_conv_h2o, show_fig=False)
gain = optim.gain(ecm)
modified_meas_vector = optim.modify_meas_vector(state_vector, rad_conv_tot, ecm)
spectral_res, snr = optim.state_estimate(ecm, modified_meas_vector, sys_errors)
print("Estimated Solution: " + str(spectral_res))
print("Uncertainty of Solution: " + str(snr))
# plot interpolated photon noise
# plt.plot(lea.wave_meas, lea.photon_noise_interp)
# plt.title("Interpolated Photon Noise")
# plt.xlabel("Wavelength (nm)")
# plt.ylabel("Photon Noise (UNITS?)") # TODO
# plt.show()
| 28.697674 | 87 | 0.687601 |
f74835f3182443e8b2003e77b093fbfc09c67fcf | 7,153 | py | Python | src/anu/constants/amino_acid.py | ankitskvmdam/anu | 699598fb60dcc23f6cccd5abb30a03b294d21598 | [
"MIT"
] | null | null | null | src/anu/constants/amino_acid.py | ankitskvmdam/anu | 699598fb60dcc23f6cccd5abb30a03b294d21598 | [
"MIT"
] | null | null | null | src/anu/constants/amino_acid.py | ankitskvmdam/anu | 699598fb60dcc23f6cccd5abb30a03b294d21598 | [
"MIT"
] | null | null | null | """Enum for amino acid."""
from enum import Enum
from typing import Dict, TypedDict
amino_acid: Dict[str, AminoAcidProperty] = {
"A": {
"code": AminoAcidToInt["A"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": 1.8,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 89.09,
"isoelectric_point": 6.00,
"charge": Charge["U"].value,
},
"C": {
"code": AminoAcidToInt["C"].value,
"hydropathy": Hydropathy["M"].value,
"hydropathy_index": 2.5,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 121.16,
"isoelectric_point": 5.02,
"charge": Charge["U"].value,
},
"D": {
"code": AminoAcidToInt["D"].value,
"hydropathy": Hydropathy["HL"].value,
"hydropathy_index": -3.5,
"acidity_basicity": AcidityBasicity["A"].value,
"mass": 133.10,
"isoelectric_point": 2.77,
"charge": Charge["N"].value,
},
"E": {
"code": AminoAcidToInt["E"].value,
"hydropathy": Hydropathy["HL"].value,
"hydropathy_index": -3.5,
"acidity_basicity": AcidityBasicity["A"].value,
"mass": 147.13,
"isoelectric_point": 3.22,
"charge": Charge["N"].value,
},
"F": {
"code": AminoAcidToInt["F"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": 2.8,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 165.19,
"isoelectric_point": 5.44,
"charge": Charge["U"].value,
},
"G": {
"code": AminoAcidToInt["G"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": -0.4,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 75.07,
"isoelectric_point": 5.97,
"charge": Charge["U"].value,
},
"H": {
"code": AminoAcidToInt["H"].value,
"hydropathy": Hydropathy["M"].value,
"hydropathy_index": -3.2,
"acidity_basicity": AcidityBasicity["B"].value,
"mass": 155.16,
"isoelectric_point": 7.47,
"charge": Charge["P"].value,
},
"I": {
"code": AminoAcidToInt["I"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": 4.5,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 131.8,
"isoelectric_point": 5.94,
"charge": Charge["U"].value,
},
"K": {
"code": AminoAcidToInt["K"].value,
"hydropathy": Hydropathy["HL"].value,
"hydropathy_index": -3.9,
"acidity_basicity": AcidityBasicity["B"].value,
"mass": 146.19,
"isoelectric_point": 9.59,
"charge": Charge["P"].value,
},
"L": {
"code": AminoAcidToInt["L"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": 3.8,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 131.18,
"isoelectric_point": 5.98,
"charge": Charge["U"].value,
},
"M": {
"code": AminoAcidToInt["M"].value,
"hydropathy": Hydropathy["M"].value,
"hydropathy_index": 1.9,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 149.21,
"isoelectric_point": 5.74,
"charge": Charge["U"].value,
},
"N": {
"code": AminoAcidToInt["N"].value,
"hydropathy": Hydropathy["HL"].value,
"hydropathy_index": -3.5,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 132.12,
"isoelectric_point": 5.41,
"charge": Charge["U"].value,
},
"P": {
"code": AminoAcidToInt["P"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": -1.6,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 115.13,
"isoelectric_point": 6.30,
"charge": Charge["U"].value,
},
"Q": {
"code": AminoAcidToInt["Q"].value,
"hydropathy": Hydropathy["HL"].value,
"hydropathy_index": -3.5,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 146.15,
"isoelectric_point": 5.65,
"charge": Charge["N"].value,
},
"R": {
"code": AminoAcidToInt["R"].value,
"hydropathy": Hydropathy["HL"].value,
"hydropathy_index": -4.5,
"acidity_basicity": AcidityBasicity["B"].value,
"mass": 174.20,
"isoelectric_point": 11.15,
"charge": Charge["P"].value,
},
"S": {
"code": AminoAcidToInt["S"].value,
"hydropathy": Hydropathy["HL"].value,
"hydropathy_index": -0.8,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 165.09,
"isoelectric_point": 5.68,
"charge": Charge["U"].value,
},
"T": {
"code": AminoAcidToInt["T"].value,
"hydropathy": Hydropathy["HL"].value,
"hydropathy_index": -0.7,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 119.12,
"isoelectric_point": 5.64,
"charge": Charge["U"].value,
},
"V": {
"code": AminoAcidToInt["V"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": 4.2,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 117.15,
"isoelectric_point": 5.96,
"charge": Charge["U"].value,
},
"W": {
"code": AminoAcidToInt["W"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": -0.9,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 204.23,
"isoelectric_point": 5.89,
"charge": Charge["U"].value,
},
"Y": {
"code": AminoAcidToInt["Y"].value,
"hydropathy": Hydropathy["HB"].value,
"hydropathy_index": -1.3,
"acidity_basicity": AcidityBasicity["U"].value,
"mass": 181.19,
"isoelectric_point": 5.66,
"charge": Charge["U"].value,
},
}
| 28.612 | 57 | 0.533622 |
f74b1a4debef16881b74152eff013915a6f5da94 | 8,550 | py | Python | user_chainload.py | Phidica/sublime-execline | a3c1b76de0c9a420ae73467c28f445b698f7f508 | [
"MIT"
] | 2 | 2020-08-28T16:04:37.000Z | 2020-08-28T20:06:21.000Z | user_chainload.py | Phidica/sublime-execline | a3c1b76de0c9a420ae73467c28f445b698f7f508 | [
"MIT"
] | null | null | null | user_chainload.py | Phidica/sublime-execline | a3c1b76de0c9a420ae73467c28f445b698f7f508 | [
"MIT"
] | null | null | null | import logging
import os
import re
import sublime
# external dependencies (see dependencies.json)
import jsonschema
import yaml # pyyaml
# This plugin generates a hidden syntax file containing rules for additional
# chainloading commands defined by the user. The syntax is stored in the cache
# directory to avoid the possibility of it falling under user version control in
# the usual packages directory
userSyntaxName = 'execline-user-chainload.sublime-syntax'
pkgName = 'execline'
settingsName = 'execline.sublime-settings'
mainSyntaxPath = 'Packages/{}/execline.sublime-syntax'.format(pkgName)
schemaPath = 'Packages/{}/execline.sublime-settings.schema.json'.format(pkgName)
ruleNamespaces = {
'keyword': 'keyword.other',
'function': 'support.function',
}
ruleContexts = {
'argument': {
'generic': 'command-call-common-arg-aside-&pop',
'variable': 'command-call-common-variable-&pop',
'pattern': 'command-call-common-glob-&pop',
},
'block': {
'program': 'block-run-prog',
'arguments': 'block-run-arg',
'trap': 'block-trap',
'multidefine': 'block-multidefine',
},
'options': {
'list': 'command-call-common-opt-list-&pop',
'list-with-args': {
'match': '(?=-[{}])',
'push': 'command-call-common-opt-arg-&pop',
'include': 'command-call-common-opt-list-&pop',
},
},
}
logging.basicConfig()
logger = logging.getLogger(__name__)
# Fully resolve the name of a context in the main syntax file
# Create a match rule describing a command of a certain type, made of a list of
# elements
| 31.090909 | 109 | 0.68 |
f74b23abf6614d599940d2f82ff8df5980edce4e | 378 | py | Python | nmutant_model/retrain_mu_test0.py | asplos2020/DRTest | c3de497142d9b226e518a1a0f95f7350d2f7acd6 | [
"MIT"
] | 1 | 2021-04-01T07:31:17.000Z | 2021-04-01T07:31:17.000Z | nmutant_model/retrain_mu_test0.py | Justobe/DRTest | 85c3c9b2a46cafa7184130f2596c5f9eb3b20bff | [
"MIT"
] | null | null | null | nmutant_model/retrain_mu_test0.py | Justobe/DRTest | 85c3c9b2a46cafa7184130f2596c5f9eb3b20bff | [
"MIT"
] | 1 | 2020-12-24T12:12:54.000Z | 2020-12-24T12:12:54.000Z | import os
import numpy as np
import sys
sys.path.append("../")
for model in ['lenet1', 'lenet4', 'lenet5']:
for attack in ['fgsm', 'cw', 'jsma']:
for mu_var in ['gf', 'nai', 'ns', 'ws']:
os.system('CUDA_VISIBLE_DEVICES=0 python retrain_mu_mnist.py --datasets=mnist --attack=' + attack + ' --model_name=' + model + ' --mu_var=' + mu_var + ' --epochs=50') | 42 | 178 | 0.600529 |
f74bfae14ea8e361bfd9a147cec9f55e5eecb5a2 | 5,197 | py | Python | characterise_inauthentic_tweets.py | weberdc/socmed_repeatability | 85da18cbffa53f18279844117b2aed226104ce11 | [
"Apache-2.0"
] | 2 | 2021-06-30T07:29:10.000Z | 2022-01-20T15:17:26.000Z | characterise_inauthentic_tweets.py | weberdc/socmed_repeatability | 85da18cbffa53f18279844117b2aed226104ce11 | [
"Apache-2.0"
] | null | null | null | characterise_inauthentic_tweets.py | weberdc/socmed_repeatability | 85da18cbffa53f18279844117b2aed226104ce11 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from __future__ import print_function
import gzip
import json
import re
import sys
# import time
from argparse import ArgumentParser
# from datetime import datetime
TWITTER_TS_FORMAT = '%a %b %d %H:%M:%S +0000 %Y' #Tue Apr 26 08:57:55 +0000 2011
# def parse_ts(ts_str, fmt=TWITTER_TS_FORMAT):
# try:
# time_struct = time.strptime(ts_str, fmt)
# except TypeError:
# return int(ts_str) # epoch millis
# return datetime.fromtimestamp(time.mktime(time_struct))
def extract_text(tweet):
"""Gets the full text from a tweet if it's short or long (extended)."""
if 'retweeted_status' in tweet:
rt = tweet['retweeted_status']
return extract_text(rt)
# return 'RT @%s: %s' % (rt['user']['screen_name'], extract_text(rt))
# if 'quoted_status' in tweet:
# qt = tweet['quoted_status']
# return get_available_text(tweet) + " --> " + extract_text(qt)
return get_available_text(tweet)
def fetch_lines(file=None):
"""Gets the lines from the given file or stdin if it's None or '' or '-'."""
if file and file != '-':
with gzip.open(file, 'rt') if file[-1] in 'zZ' else open(file, 'r', encoding='utf-8') as f:
return [l.strip() for l in f.readlines()]
else:
return [l.strip() for l in sys.stdin]
def eprint(*args, **kwargs):
"""Print to stderr"""
print(*args, file=sys.stderr, **kwargs)
DEBUG=False
if __name__=='__main__':
options = Options()
opts = options.parse(sys.argv[1:])
DEBUG=opts.verbose
tweets_file = opts.tweets_file
# pretty = opts.pretty
tweets = [json.loads(l) for l in fetch_lines(tweets_file)]
log(f'read: {len(tweets)} tweets')
hashtags_only = 0
hashtags_plus_url = 0
mentions_plus_hashtags = 0
mentions_hashtags_plus_url = 0
ht_splitter_re = '[a-zA-Z#]+'
me_splitter_re = '[a-zA-Z@]+'
htme_splitter_re = '[a-zA-Z#@]+'
X = 0
for t in tweets:
text = extract_text(t)
# hashtag(s) only
if '#' in text:
tokens = extract_tokens(ht_splitter_re, text)
if len(tokens) == count_tokens_starting_with('#', tokens):
hashtags_only += 1
log(tokens)
# hashtag(s) and URL
if '#' in text and 'http' in text:
tokens = extract_tokens(htme_splitter_re, text[:text.index('http')])
if len(tokens) == count_tokens_starting_with('#', tokens):
hashtags_plus_url += 1
# print(tokens)
log(text)
# mention(s) and hashtag(s)
if '#' in text and '@' in text:
tokens = extract_tokens(htme_splitter_re, text)
if len(tokens) == count_tokens_starting_with('#@', tokens):
mentions_plus_hashtags += 1
log(tokens)
# mention(s), hashtag(s) and URL
if '#' in text and '@' in text and 'http' in text:
tokens = extract_tokens(htme_splitter_re, text[:text.index('http')])
if len(tokens) == count_tokens_starting_with('#@', tokens):
mentions_hashtags_plus_url += 1
# print(tokens)
log(text)
print(f'All: {len(tweets):,}')
print(f'HT: {hashtags_only:>6} ({float(hashtags_only)/len(tweets):.1%})')
print(f'HT+URL: {hashtags_plus_url:>6} ({float(hashtags_plus_url)/len(tweets):.1%})')
print(f'@m+HT: {mentions_plus_hashtags:>6} ({float(mentions_plus_hashtags)/len(tweets):.1%})')
print(f'@m+HT+URL: {mentions_hashtags_plus_url:>6} ({float(mentions_hashtags_plus_url)/len(tweets):.1%})')
| 31.307229 | 110 | 0.583414 |
f74ca0e4b20b83d509bc3a77fa6331062c311e10 | 3,577 | py | Python | localshop/apps/packages/utils.py | rcoup/localshop | b7d0803afd9335862accfc79dee047a6b0e67ad6 | [
"BSD-3-Clause"
] | null | null | null | localshop/apps/packages/utils.py | rcoup/localshop | b7d0803afd9335862accfc79dee047a6b0e67ad6 | [
"BSD-3-Clause"
] | null | null | null | localshop/apps/packages/utils.py | rcoup/localshop | b7d0803afd9335862accfc79dee047a6b0e67ad6 | [
"BSD-3-Clause"
] | null | null | null | import inspect
import hashlib
import logging
import os
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.db.models import FieldDoesNotExist
from django.db.models.fields.files import FileField
from django.http import QueryDict
from django.utils.datastructures import MultiValueDict
logger = logging.getLogger(__name__)
def parse_distutils_request(request):
"""Parse the `request.body` and update the request POST and FILES
attributes .
"""
sep = request.body.splitlines()[1]
request.POST = QueryDict('', mutable=True)
try:
request._files = MultiValueDict()
except Exception:
pass
for part in filter(lambda e: e.strip(), request.body.split(sep)):
try:
header, content = part.lstrip().split('\n', 1)
except Exception:
continue
if content.startswith('\n'):
content = content[1:]
if content.endswith('\n'):
content = content[:-1]
headers = parse_header(header)
if "name" not in headers:
continue
if "filename" in headers and headers['name'] == 'content':
dist = TemporaryUploadedFile(name=headers["filename"],
size=len(content),
content_type="application/gzip",
charset='utf-8')
dist.write(content)
dist.seek(0)
request.FILES.appendlist('distribution', dist)
else:
# Distutils sends UNKNOWN for empty fields (e.g platform)
# [russell.sim@gmail.com]
if content == 'UNKNOWN':
content = None
request.POST.appendlist(headers["name"], content)
def delete_files(sender, **kwargs):
"""Signal callback for deleting old files when database item is deleted"""
for fieldname in sender._meta.get_all_field_names():
try:
field = sender._meta.get_field(fieldname)
except FieldDoesNotExist:
continue
if isinstance(field, FileField):
instance = kwargs['instance']
fieldfile = getattr(instance, fieldname)
if not hasattr(fieldfile, 'path'):
return
if not os.path.exists(fieldfile.path):
return
# Check if there are other instances which reference this fle
is_referenced = (
instance.__class__._default_manager
.filter(**{'%s__exact' % fieldname: fieldfile})
.exclude(pk=instance._get_pk_val())
.exists())
if is_referenced:
return
try:
field.storage.delete(fieldfile.path)
except Exception:
logger.exception(
'Error when trying to delete file %s of package %s:' % (
instance.pk, fieldfile.path))
def md5_hash_file(fh):
"""Return the md5 hash of the given file-object"""
md5 = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
md5.update(data)
return md5.hexdigest()
| 29.808333 | 78 | 0.559966 |
f74d7b65bf1d537a02c073dba7f2c762c4daaaf9 | 4,228 | py | Python | bika/lims/browser/worksheet/views/analyses_transposed.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | bika/lims/browser/worksheet/views/analyses_transposed.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | bika/lims/browser/worksheet/views/analyses_transposed.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | # coding=utf-8
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims.browser.bika_listing import BikaListingTable
from bika.lims.browser.worksheet.views.analyses import AnalysesView
| 41.45098 | 82 | 0.585856 |
f74df4ac592a375e715e992b4854e0bf766ac654 | 865 | py | Python | lab1_rest/project/apps/core/views.py | mratkovic/RZNU-Lab | 2930b249994619c2f17493544db2c0d471ca6cbc | [
"MIT"
] | null | null | null | lab1_rest/project/apps/core/views.py | mratkovic/RZNU-Lab | 2930b249994619c2f17493544db2c0d471ca6cbc | [
"MIT"
] | null | null | null | lab1_rest/project/apps/core/views.py | mratkovic/RZNU-Lab | 2930b249994619c2f17493544db2c0d471ca6cbc | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from .models import User, Photo
from .serializers import UserSerializer, PhotoSerializer
from .mixins import RequestLogViewMixin
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
| 41.190476 | 84 | 0.834682 |
f74f0070abe0b831d8cd12d2943b6e264b00e54d | 215 | py | Python | arc/arc009/arc009b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | arc/arc009/arc009b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | arc/arc009/arc009b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null |
b = input().split()
N = int(input())
a = [input() for _ in range(N)]
t = {b[i]: str(i) for i in range(10)}
a.sort(key = lambda x: conv(x))
print(*a, sep='\n')
| 19.545455 | 40 | 0.548837 |
f75197db0d5043fe351ad2be154d400c859209b0 | 965 | py | Python | setup.py | refnode/spartakiade-2021-session-effective-python | 6b1a25c4ec79261de4ed6385a81b6a31a06d6b58 | [
"Apache-2.0"
] | 1 | 2021-06-04T14:05:31.000Z | 2021-06-04T14:05:31.000Z | setup.py | refnode/spartakiade-2021-session-effective-python | 6b1a25c4ec79261de4ed6385a81b6a31a06d6b58 | [
"Apache-2.0"
] | null | null | null | setup.py | refnode/spartakiade-2021-session-effective-python | 6b1a25c4ec79261de4ed6385a81b6a31a06d6b58 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open("README.adoc") as fh_readme:
readme = fh_readme.read()
install_reqs = []
setup(
author="Sven Wilhelm",
author_email='refnode@gmail.com',
python_requires='>=3.8',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
description="Spartakiade 2021 Session Effective Python",
install_requires=install_reqs,
long_description=readme,
include_package_data=True,
keywords='spartakiade-2021-session-effective-python',
name='spartakiade-2021-session-effective-python',
packages=find_packages(where="src"),
url='https://github.com/refnode/spartakiade-2021-session-effective-python',
version='0.1.0',
zip_safe=False,
)
| 28.382353 | 79 | 0.675648 |
f75630cbc7b1eef703d5e902537d65487d1b7612 | 4,126 | py | Python | sim/pid.py | jmagine/rf-selection | ba9dcb5ca550916873ce68baa71da983f2dd4be5 | [
"MIT"
] | 1 | 2020-05-06T01:28:06.000Z | 2020-05-06T01:28:06.000Z | sim/pid.py | jmagine/multiuav-rf | ba9dcb5ca550916873ce68baa71da983f2dd4be5 | [
"MIT"
] | null | null | null | sim/pid.py | jmagine/multiuav-rf | ba9dcb5ca550916873ce68baa71da983f2dd4be5 | [
"MIT"
] | null | null | null | '''*-----------------------------------------------------------------------*---
Author: Jason Ma
Date : Oct 18 2018
TODO
File Name : pid.py
Description: TODO
---*-----------------------------------------------------------------------*'''
import time
import matplotlib.animation as anim
import matplotlib.pyplot as plt
import threading
import math
import numpy as np
'''[Global Vars]------------------------------------------------------------'''
ORIGIN_X = 0.0
ORIGIN_Y = 0.0
C_R = 10
#plt.autoscale(enable=True, axis="both")
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
scat = ax.scatter([], [])
ax.set_xlim([-1 * C_R - 1, C_R + 1])
ax.set_ylim([-1 * C_R - 1, C_R + 1])
scat.set_facecolors(['g', 'r'])
scat.set_sizes([31, 31])
prev_time = time.time()
vel = np.array([0.0, 0.0])
errors = [0, 1]
error_plot, = ax2.plot([i for i in range(len(errors))], errors, color="g")
d = drone([ORIGIN_X + C_R, ORIGIN_Y], [0.0, 0.0])
#def pid_angle(x, y, ref_x, ref_y, d):
# return math.atan(-1 * (C_R - dist(x, y, ORIGIN_X, ORIGIN_Y)) / d) + math.atan((y - ORIGIN_Y) / (x - ORIGIN_X)) + math.pi / 2
if __name__ == '__main__':
#main()
a = anim.FuncAnimation(fig, update, range(1000), interval=1, blit=False, repeat=False)
plt.show()
| 25.469136 | 127 | 0.537082 |
f756ddf67db85cf74cd46b5e90ed19ef6fd17367 | 32,091 | py | Python | gazoo_device/fire_manager.py | isabella232/gazoo-device | 0e1e276d72333e713b47152815708b9c74c45409 | [
"Apache-2.0"
] | null | null | null | gazoo_device/fire_manager.py | isabella232/gazoo-device | 0e1e276d72333e713b47152815708b9c74c45409 | [
"Apache-2.0"
] | 1 | 2021-06-24T19:20:50.000Z | 2021-06-24T19:20:50.000Z | gazoo_device/fire_manager.py | isabella232/gazoo-device | 0e1e276d72333e713b47152815708b9c74c45409 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fire Manager module.
Used for CLI-specific commands and flags.
Built to work with Python Fire: https://github.com/google/python-fire.
"""
import codecs
import enum
import inspect
import json
import logging
import os.path
import pydoc
import re
import sys
import textwrap
import time
from typing import Any, Collection, Optional, Type
from gazoo_device import config
from gazoo_device import decorators
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device import manager
from gazoo_device import package_registrar
from gazoo_device import testbed
from gazoo_device.utility import parallel_utils
from gazoo_device.utility import usb_utils
logger = gdm_logger.get_logger()
HEALTHY_DEVICE_HEALTH = {
"is_healthy": True,
"unhealthy_reason": "",
"err_type": "",
"checks_passed": [],
"properties": {}
}
MAX_TIME_TO_WAIT_FOR_INITATION = 5
_DOC_INDENT_SIZE = 4
# Capability attributes visible on device summary man page
# (e.g. "man cambrionix").
_VISIBLE_CAPABILITY_ATTRIBUTES = [
AttributeClassification.PROPERTY, AttributeClassification.PUBLIC_METHOD
]
def _log_man_warning_for_multiple_flavors(
capability_classes: Collection[Type[Any]],
capability_name: str,
device_type: str,
capability_class: Type[Any]) -> None:
"""Logs 'gdm man' warning when multiple capability flavors are available.
Args:
capability_classes: All available capability flavors.
capability_name: Name of the capability.
device_type: Type of the device with this capability.
capability_class: Capability flavor selected to print documentation for.
Capabilities can have multiple flavors in one device class, although this is
somewhat rare. The flavor used is determined based on device firmware at
runtime. Since we don't know which flavor will be used without an attached
device, log a warning and print documentation for any single flavor.
"""
flavors = [a_cls.__name__ for a_cls in capability_classes]
logger.warning(
f"{len(flavors)} flavors ({flavors}) of capability {capability_name!r} "
f"are available for {device_type}.\n"
f"Showing documentation for flavor {capability_class}.\n")
| 38.249106 | 109 | 0.675361 |
f7583f3c89da3d4e9ea5d5c4fffa4b29559b7e57 | 4,814 | py | Python | py/desispec/scripts/humidity_corrected_fiberflat.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | null | null | null | py/desispec/scripts/humidity_corrected_fiberflat.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | null | null | null | py/desispec/scripts/humidity_corrected_fiberflat.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division
import os
import fitsio
import argparse
import numpy as np
from desiutil.log import get_logger
from desispec.io import read_fiberflat,write_fiberflat,findfile,read_frame
from desispec.io.fiberflat_vs_humidity import get_humidity,read_fiberflat_vs_humidity
from desispec.calibfinder import CalibFinder
from desispec.fiberflat_vs_humidity import compute_humidity_corrected_fiberflat
| 42.60177 | 162 | 0.706689 |
f759437ada91a66b6fb489d96b3bf88ad2d186f2 | 1,856 | py | Python | config.py | jfernan2/PRInspector | c09aad3b49263d3c679fd6cfd307de404425f924 | [
"MIT"
] | null | null | null | config.py | jfernan2/PRInspector | c09aad3b49263d3c679fd6cfd307de404425f924 | [
"MIT"
] | null | null | null | config.py | jfernan2/PRInspector | c09aad3b49263d3c679fd6cfd307de404425f924 | [
"MIT"
] | null | null | null | IS_TEST = True
REPOSITORY = 'cms-sw/cmssw'
CERN_SSO_CERT_FILE = 'private/cert.pem'
CERN_SSO_KEY_FILE = 'private/cert.key'
CERN_SSO_COOKIES_LOCATION = 'private/'
TWIKI_CONTACTS_URL = 'https://ppdcontacts.web.cern.ch/PPDContacts/ppd_contacts'
TWIKI_TAG_COLLECTOR_URL = 'https://twiki.cern.ch/twiki/bin/edit/CMS/DQMP5TagCollector?nowysiwyg=1'
TWIKI_TAG_COLLECTOR_CANCEL_EDIT_URL = 'https://twiki.cern.ch/twiki/bin/save/CMS/DQMP5TagCollector'
CATEGORIES_MAP_URL = 'https://raw.githubusercontent.com/cms-sw/cms-bot/master/categories_map.py'
TWIKI_TIMEOUT_SECONDS = 10
__github_client_id = None
__github_client_secret = None
| 27.294118 | 98 | 0.581358 |
f75aab6d4d19a6e9c9dca13b0cb77061bc6e8325 | 5,748 | py | Python | trainer/ml/utils.py | Telcrome/ai-trainer | 54bca3252e194c054bdd3af2b94d6dde940a2a86 | [
"MIT"
] | 1 | 2021-05-05T12:57:42.000Z | 2021-05-05T12:57:42.000Z | trainer/ml/utils.py | Telcrome/ai-trainer | 54bca3252e194c054bdd3af2b94d6dde940a2a86 | [
"MIT"
] | null | null | null | trainer/ml/utils.py | Telcrome/ai-trainer | 54bca3252e194c054bdd3af2b94d6dde940a2a86 | [
"MIT"
] | null | null | null | from enum import Enum
from typing import Generator, Tuple, Iterable, Dict, List
import cv2
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.ndimage import label, generate_binary_structure
from scipy.ndimage.morphology import distance_transform_edt as dist_trans
import trainer.lib as lib
def split_into_regions(arr: np.ndarray, mode=0) -> List[np.ndarray]:
"""
Splits an array into its coherent regions.
:param mode: 0 for orthogonal connection, 1 for full connection
:param arr: Numpy array with shape [W, H]
:return: A list with length #NumberOfRegions of arrays with shape [W, H]
"""
res = []
if mode == 0:
rs, num_regions = label(arr)
elif mode == 1:
rs, num_regions = label(arr, structure=generate_binary_structure(2, 2))
else:
raise Exception("Please specify a valid Neighborhood mode for split_into_regions")
for i in range(1, num_regions + 1):
res.append(rs == i)
return res
def normalize_im(im: np.ndarray, norm_type=ImageNormalizations.UnitRange) -> np.ndarray:
"""
Currently just normalizes an image with pixel intensities in range [0, 255] to [-1, 1]
:return: The normalized image
"""
if norm_type == ImageNormalizations.UnitRange:
return (im.astype(np.float32) / 127.5) - 1
else:
raise Exception("Unknown Normalization type")
def one_hot_to_cont(x: np.ndarray) -> np.ndarray:
"""
Convert a one hot encoded image into the same image with integer representations.
:param x: np.ndarray with (C, W, H)
:return: np.ndarray with (W, H)
"""
return np.argmax(x, axis=len(x.shape) - 3)
def reduce_by_attention(arr: np.ndarray, att: np.ndarray):
"""
Reduce an array by a field of attention, such that the result is a rectangle with the empty borders cropped.
:param arr: Target array. The last two dimensions need to be of the same shape as the attention field
:param att: field of attention
:return: cropped array
"""
assert arr.shape[-2] == att.shape[0] and arr.shape[-1] == att.shape[1]
ones = np.argwhere(att)
lmost, rmost = np.min(ones[:, 0]), np.max(ones[:, 0]) + 1
bmost, tmost = np.min(ones[:, 1]), np.max(ones[:, 1]) + 1
grid_slice = [slice(None) for _ in range(len(arr.shape) - 2)]
grid_slice.extend([slice(lmost, rmost), slice(bmost, tmost)])
return arr[tuple(grid_slice)], att[lmost:rmost, bmost:tmost], (lmost, rmost, bmost, tmost)
if __name__ == '__main__':
fit = insert_np_at(np.ones((10, 10)), np.ones((3, 3)) * 2, (2, 3))
too_big1 = insert_np_at(np.ones((10, 10)), np.ones((3, 10)) * 2, (2, 3))
too_big = insert_np_at(np.ones((10, 10)), np.ones((10, 10)) * 2, (2, 3))
# def put_array(big_arr: np.ndarray, small_arr: np.ndarray, offset=(0, 0)) -> np.ndarray:
# """
# Puts the small array into the big array. Ignores problems and does its best to fulfill the task
# """
# b, t =
# big_arr[]
# big_arr = np.putmask(big_arr, )
# if __name__ == '__main__':
# # a = np.zeros((10, 10))
# # b = np.random.random((4, 4))
# # c = put_array(a, b)
# # lib.logger.debug_var(c)
| 35.04878 | 117 | 0.636047 |
f75b61928d5ab139ffa7aac0fa8d2448bbd25e2c | 7,613 | py | Python | widgets/ImageDetailArea.py | JaySon-Huang/SecertPhotos | e741cc26c19a5b249d45cc70959ac6817196cb8a | [
"MIT"
] | null | null | null | widgets/ImageDetailArea.py | JaySon-Huang/SecertPhotos | e741cc26c19a5b249d45cc70959ac6817196cb8a | [
"MIT"
] | 3 | 2015-05-19T08:43:46.000Z | 2015-06-10T17:55:28.000Z | widgets/ImageDetailArea.py | JaySon-Huang/SecertPhotos | e741cc26c19a5b249d45cc70959ac6817196cb8a | [
"MIT"
] | null | null | null | from PyQt5.QtCore import Qt, pyqtSignal, QSize
from PyQt5.QtWidgets import (
QLabel, QWidget, QTreeWidgetItem, QHeaderView,
QVBoxLayout, QHBoxLayout,
)
from .ImageLabel import ImageLabel
from .AdaptiveTreeWidget import AdaptiveTreeWidget
| 38.64467 | 73 | 0.620255 |
f75bd46e6e679f347d07fe04e940962382046dd8 | 3,005 | py | Python | predict.py | zhyq/cws_lstm | 326980e0971482fc712602d3a79069e69a11c8fc | [
"Apache-2.0"
] | 7 | 2018-04-28T02:32:51.000Z | 2020-02-11T07:14:51.000Z | predict.py | zhyq/cws_lstm | 326980e0971482fc712602d3a79069e69a11c8fc | [
"Apache-2.0"
] | null | null | null | predict.py | zhyq/cws_lstm | 326980e0971482fc712602d3a79069e69a11c8fc | [
"Apache-2.0"
] | null | null | null | import argparse
import data_helper
from sklearn.model_selection import train_test_split
import re
import lstm
from lstm import *
import time
from viterbi import Viterbi
xrange = range
def simple_cut(text,dh,lm,viterbi):
"""text"""
if text:
#print("text: %s" %text)
text_len = len(text)
X_batch = dh.text2ids(text) # batch
fetches = [lm.y_pred]
feed_dict = {lm.X_inputs:X_batch, lm.lr:1.0, lm.batch_size:1, lm.keep_prob:1.0}
_y_pred = sess.run(fetches, feed_dict)[0][:text_len] # padding
nodes = [dict(zip(['s','b','m','e'], each[1:])) for each in _y_pred]
#print(type(dh.labels))
#print(dh.labels)
tags = viterbi.viterbi(nodes)
words = []
for i in range(len(text)):
if tags[i] in ['s', 'b']:
words.append(text[i])
else:
words[-1] += text[i]
return words
else:
return []
def cut_word(sentence,dh,lm,viterbi):
"""sentence/text"""
not_cuts = re.compile(u'([0-9\da-zA-Z ]+)|[.\.\?,!]')
result = []
start = 0
for seg_sign in not_cuts.finditer(sentence):
result.extend(simple_cut(sentence[start:seg_sign.start()],dh,lm,viterbi))
result.append(sentence[seg_sign.start():seg_sign.end()])
start = seg_sign.end()
result.extend(simple_cut(sentence[start:],dh,lm,viterbi))
return result
if __name__ == "__main__":
main()
| 33.388889 | 125 | 0.628619 |
f75c066d3ec31ec2f99d70612e1572ff45c4ae07 | 930 | py | Python | Regression/multiple_linear_regression.py | Rupii/Machine-Learning | 2b00698815efb04346d5cb980b68af76f27a5ca6 | [
"MIT"
] | null | null | null | Regression/multiple_linear_regression.py | Rupii/Machine-Learning | 2b00698815efb04346d5cb980b68af76f27a5ca6 | [
"MIT"
] | null | null | null | Regression/multiple_linear_regression.py | Rupii/Machine-Learning | 2b00698815efb04346d5cb980b68af76f27a5ca6 | [
"MIT"
] | 1 | 2019-09-04T05:43:31.000Z | 2019-09-04T05:43:31.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 24 23:18:54 2018
@author: Rupesh
"""
# Multiple Linear Regression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("ggplot")
# loading dependies
df = pd.read_csv("50_Startups.csv")
df.head()
X = df.iloc[:, :-1].values
y = df.iloc[:, 4].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
X_cat = LabelEncoder()
X[:, 3] = X_cat.fit_transform(X[:, 3])
onehot = OneHotEncoder(categorical_features = [3])
X = onehot.fit_transform(X).toarray()
# avoiding the dummy variable trap
X = X[:, 1:]
# train test split
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
# model
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X_train, y_train)
# predict
y_pred = reg.predict(X_test)
import skl | 19.787234 | 74 | 0.731183 |
f75c6aa4c0bc9e6a0583632570a241f0d5700804 | 2,945 | py | Python | 2021/day09/part01/smoke_basin.py | fpmosley/advent-of-code | 507bd89795ff6a0824284c3c8d2123cf19a932a3 | [
"MIT"
] | null | null | null | 2021/day09/part01/smoke_basin.py | fpmosley/advent-of-code | 507bd89795ff6a0824284c3c8d2123cf19a932a3 | [
"MIT"
] | null | null | null | 2021/day09/part01/smoke_basin.py | fpmosley/advent-of-code | 507bd89795ff6a0824284c3c8d2123cf19a932a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
Advent of Code 2021 - Day 9: Smoke Basin (Part 1)
https://adventofcode.com/2021/day/9
'''
import numpy as np
if __name__ == "__main__":
main()
| 27.523364 | 94 | 0.55382 |
f75d3544ffa19cc489ce532ee8d14ab4f09c6953 | 3,830 | py | Python | datar/base/trig_hb.py | stjordanis/datar | 4e2b5db026ad35918954576badef9951928c0cb1 | [
"MIT"
] | 110 | 2021-03-09T04:10:40.000Z | 2022-03-13T10:28:20.000Z | datar/base/trig_hb.py | sthagen/datar | 1218a549e2f0547c7b5a824ca6d9adf1bf96ba46 | [
"MIT"
] | 54 | 2021-06-20T18:53:44.000Z | 2022-03-29T22:13:07.000Z | datar/base/trig_hb.py | sthagen/datar | 1218a549e2f0547c7b5a824ca6d9adf1bf96ba46 | [
"MIT"
] | 11 | 2021-06-18T03:03:14.000Z | 2022-02-25T11:48:26.000Z | """Trigonometric and Hyperbolic Functions"""
from typing import Callable
import numpy
from pipda import register_func
from ..core.contexts import Context
from ..core.types import FloatOrIter
from .constants import pi
def _register_trig_hb_func(name: str, np_name: str, doc: str) -> Callable:
"""Register trigonometric and hyperbolic function"""
np_fun = getattr(numpy, np_name)
if name.endswith("pi"):
func = lambda x: np_fun(x * pi)
else:
# ufunc cannot set context
func = lambda x: np_fun(x)
func = register_func(None, context=Context.EVAL, func=func)
func.__name__ = name
func.__doc__ = doc
return func
sin = _register_trig_hb_func(
"sin",
"sin",
doc="""The sine function
Args:
x: a numeric value or iterable
Returns:
The sine value of `x`
""",
)
cos = _register_trig_hb_func(
"cos",
"cos",
doc="""The cosine function
Args:
x: a numeric value or iterable
Returns:
The cosine value of `x`
""",
)
tan = _register_trig_hb_func(
"tan",
"tan",
doc="""The tangent function
Args:
x: a numeric value or iterable
Returns:
The tangent value of `x`
""",
)
acos = _register_trig_hb_func(
"acos",
"arccos",
doc="""The arc-cosine function
Args:
x: a numeric value or iterable
Returns:
The arc-cosine value of `x`
""",
)
asin = _register_trig_hb_func(
"acos",
"arcsin",
doc="""The arc-sine function
Args:
x: a numeric value or iterable
Returns:
The arc-sine value of `x`
""",
)
atan = _register_trig_hb_func(
"acos",
"arctan",
doc="""The arc-sine function
Args:
x: a numeric value or iterable
Returns:
The arc-sine value of `x`
""",
)
sinpi = _register_trig_hb_func(
"sinpi",
"sin",
doc="""The sine function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The sine value of `x`
""",
)
cospi = _register_trig_hb_func(
"cospi",
"cos",
doc="""The cosine function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The cosine value of `x`
""",
)
tanpi = _register_trig_hb_func(
"tanpi",
"tan",
doc="""The tangent function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The tangent value of `x`
""",
)
cosh = _register_trig_hb_func(
"cosh",
"cosh",
doc="""Hyperbolic cosine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic cosine value of `x`
""",
)
sinh = _register_trig_hb_func(
"sinh",
"sinh",
doc="""Hyperbolic sine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic sine value of `x`
""",
)
tanh = _register_trig_hb_func(
"tanh",
"tanh",
doc="""Hyperbolic tangent
Args:
x: a numeric value or iterable
Returns:
The hyperbolic tangent value of `x`
""",
)
acosh = _register_trig_hb_func(
"acosh",
"arccosh",
doc="""Hyperbolic arc-cosine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-cosine value of `x`
""",
)
asinh = _register_trig_hb_func(
"asinh",
"arcsinh",
doc="""Hyperbolic arc-sine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-sine value of `x`
""",
)
atanh = _register_trig_hb_func(
"atanh",
"arctanh",
doc="""Hyperbolic arc-tangent
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-tangent value of `x`
""",
)
| 16.228814 | 76 | 0.636031 |
f75e1e987d9f182ed96fa5d1a87db15f1d90fda6 | 1,463 | py | Python | randomised_selection.py | neerajp99/algorithms | 1d6885d2a895821ac511fa8a46913d34db2511ca | [
"MIT"
] | 1 | 2021-06-17T07:59:42.000Z | 2021-06-17T07:59:42.000Z | randomised_selection.py | neerajp99/algorithms | 1d6885d2a895821ac511fa8a46913d34db2511ca | [
"MIT"
] | null | null | null | randomised_selection.py | neerajp99/algorithms | 1d6885d2a895821ac511fa8a46913d34db2511ca | [
"MIT"
] | 1 | 2022-01-13T08:42:31.000Z | 2022-01-13T08:42:31.000Z | # Implementation of Randomised Selection
"""
Naive Approach
---------
Parameters
---------
An arry with n distinct numbers
---------
Returns
---------
i(th) order statistic, i.e: i(th) smallest element of the input array
---------
Time Complexity
---------
O(n.logn)
---------
Test Cases
---------
[1, 20, 6, 4, 5]
=> [1, 4, 5, 6, 20]
"""
import random
if __name__ == "__main__":
# user_input = input("Enter the list of numbers: \n").strip()
# unsorted_array = [int(item) for item in user_input.split(",")]
print(randomised_selection([1, 23, 3, 43, 5], 5, 3))
| 26.6 | 141 | 0.601504 |
f75e9b9f00f9f84646e14e8f9e1b2be7266630e1 | 365 | py | Python | fate/labeling/txt.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | fate/labeling/txt.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | fate/labeling/txt.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | import re
from .common import regex_labels, re_number, re_string
keywords = ['TODO']
re_keyword = re.compile(r'\b({})\b'.format('|'.join(keywords)))
| 22.8125 | 88 | 0.70137 |
f7603cc1049048de6e8b2c24b0acac1e2d8f0746 | 490 | py | Python | Conteudo das Aulas/146/cgi-bin/cgi4_css_2.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/146/cgi-bin/cgi4_css_2.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/146/cgi-bin/cgi4_css_2.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import cgitb; cgitb.enable()
print('Content-type: text/html\n')
print(
"""<html>
<head>
<title>CGI 4 - CSS</title>
<link rel="stylesheet" type="text/css" href="../css/estilo1.css">
</head>
<body>
<h1>Colocando CSS em um script a parte</h1>
<hr>
<p>Ola imagens CGI!</p>
<div class="wraptocenter">
<img id="imagem" src="../imagens/evil.jpg" border=1 alt="Piadinha idiota" width=350 height=500>
</div>
<hr>
</body>
</html>
"""
)
| 18.846154 | 101 | 0.604082 |
f7610420f3cae75b3b9d0169cdf9f686ba220b80 | 4,769 | py | Python | neural-navigation-with-lstm/MARCO/plastk/examples/gngsom.py | ronaldahmed/SLAM-for-ugv | 52e3241b8b737a0cfe5682c0aa87ec8c27d6a33d | [
"MIT"
] | 14 | 2016-04-03T19:25:13.000Z | 2022-01-05T07:03:07.000Z | neural-navigation-with-lstm/MARCO/plastk/examples/gngsom.py | ronaldahmed/SLAM-for-ugv | 52e3241b8b737a0cfe5682c0aa87ec8c27d6a33d | [
"MIT"
] | null | null | null | neural-navigation-with-lstm/MARCO/plastk/examples/gngsom.py | ronaldahmed/SLAM-for-ugv | 52e3241b8b737a0cfe5682c0aa87ec8c27d6a33d | [
"MIT"
] | 5 | 2018-06-21T12:58:58.000Z | 2020-02-15T05:33:39.000Z | """
GNG vs SOM comparison example for PLASTK.
This script shows how to:
- Train PLASTK vector quantizers (SOM and GNG)
- Set default parameters
- Create a simple agent and environment.
- Run an interaction between the agent and the environment
with a GUI.
$Id: gngsom.py,v 1.3 2006/02/17 19:40:09 jp Exp $
"""
# Import what we need from PLASTK
# All the top-level modules
from plastk import *
# Kohonen SOMs
from plastk.vq.som import SOM,SOM2DDisplay
# Growing Neural Gas
from plastk.vq.gng import EquilibriumGNG, GNG2DDisplay
# the python debugger
import pdb
###################################################################
# Set the PLASTK parameter defaults
# [ help('plastk.params') for more info on parameters ]
#
# SOM Defaults: 10x10 SOM, with 2-D inputs
#
SOM.xdim = 10
SOM.ydim = 10
SOM.dim = 2
#
# GNG defaults: 2-D inputs, maintain average discounted error below
# 0.002, grow at most every 200 steps, max connection age 100.
#
EquilibriumGNG.dim = 2
EquilibriumGNG.rmin = 0
EquilibriumGNG.rmax = 100
EquilibriumGNG.error_threshold = 0.002
EquilibriumGNG.lambda_ = 200
EquilibriumGNG.max_age = 50
EquilibriumGNG.e_b = 0.05
EquilibriumGNG.e_n = 0.001
EquilibriumGNG.print_level = base.VERBOSE
# Overwrite old data files, instead of renaming it.
LoggingRLI.rename_old_data = False
################################################################
# Create the agent and environment
#
################################################
# Run the an interaction between the agent and environment.
#
# Instantiate an agent and an environment
agent = SOMTestAgent()
env = SOMTestEnvironment()
# Instantiate a Reinforcement Learning Interface. An RLI controls the
# interaction between agent and environment, passing sensation and
# reward from the environment to the agent, and actions from the agent
# to the environment. In this experiment, the actions and reward are
# meaningless, and only the sensations, 2D vectors, are important.
#
# The LoggingRLI class includes variable logging and GUI capabilities.
rli = LoggingRLI(name = 'GNGvSOM_experiment')
# Init the RLI
rli.init(agent,env)
# Run the RLI gui with two components, a SOM display and a GNG
# display. The RLI gui takes a list of functions that take two
# parameters, a the rli's GUI frame (root) and the rli object (rli), and return
# instances of Tkinter.Frame that can be packed into the RLI's GUI frame.
#
rli.gui(lambda root,rli:SOM2DDisplay(root,rli.agent.som),
lambda root,rli:GNG2DDisplay(root,gng=rli.agent.gng))
| 29.257669 | 79 | 0.662403 |
f761849b5a4f4a9a3e0e3b79a8c5c9b1f726ae8e | 3,444 | py | Python | projects/ide/sublime/src/Bolt/api/inspect/highlighting.py | boltjs/bolt | c2666c876b34b1a61486a432eef3141ca8d1e411 | [
"BSD-3-Clause"
] | 11 | 2015-09-29T19:19:34.000Z | 2020-11-20T09:14:46.000Z | projects/ide/sublime/src/Bolt/api/inspect/highlighting.py | boltjs/bolt | c2666c876b34b1a61486a432eef3141ca8d1e411 | [
"BSD-3-Clause"
] | null | null | null | projects/ide/sublime/src/Bolt/api/inspect/highlighting.py | boltjs/bolt | c2666c876b34b1a61486a432eef3141ca8d1e411 | [
"BSD-3-Clause"
] | null | null | null | import sublime
from ui.read import settings as read_settings
from ui.write import write, highlight as write_highlight
from lookup import file_type as lookup_file_type
from ui.read import x as ui_read
from ui.read import spots as read_spots
from ui.read import regions as ui_regions
from core.read import read as core_read
from structs.general_thread import *
from structs.thread_handler import *
from structs.highlight_list import *
from structs.flag_region import *
from core.analyse import analyse
| 29.689655 | 116 | 0.654472 |
f762be92cbd4d0af01d0dd42ecc1fb37b29c7ade | 3,745 | py | Python | gratipay/models/exchange_route.py | stefb965/gratipay.com | 5f3b5922d6b3a7ff64f51574a1087bab2378cbd8 | [
"CC0-1.0"
] | null | null | null | gratipay/models/exchange_route.py | stefb965/gratipay.com | 5f3b5922d6b3a7ff64f51574a1087bab2378cbd8 | [
"CC0-1.0"
] | null | null | null | gratipay/models/exchange_route.py | stefb965/gratipay.com | 5f3b5922d6b3a7ff64f51574a1087bab2378cbd8 | [
"CC0-1.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import braintree
from postgres.orm import Model
| 34.357798 | 94 | 0.594393 |
f763746331e345f22b7c5a33a4edda7eac385dea | 805 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/forms.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/forms.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/forms.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Defines a form to provide validations for course-specific configuration.
"""
from django import forms
from openedx.core.djangoapps.video_config.forms import CourseSpecificFlagAdminBaseForm
from openedx.core.djangoapps.video_pipeline.models import (
CourseVideoUploadsEnabledByDefault,
VEMPipelineIntegration,
)
| 26.833333 | 86 | 0.756522 |
f76393b04c4eca590f51e2e26126536b11d54d6f | 1,779 | py | Python | tests/integration/test_create_from_full_info.py | superannotateai/superannotate-python-sdk | e2ce848b61efed608265fa64f3781fd5a17c929b | [
"MIT"
] | 26 | 2020-09-25T06:25:06.000Z | 2022-01-30T16:44:07.000Z | tests/integration/test_create_from_full_info.py | superannotateai/superannotate-python-sdk | e2ce848b61efed608265fa64f3781fd5a17c929b | [
"MIT"
] | 12 | 2020-12-21T19:59:48.000Z | 2022-01-21T10:32:07.000Z | tests/integration/test_create_from_full_info.py | superannotateai/superannotate-python-sdk | e2ce848b61efed608265fa64f3781fd5a17c929b | [
"MIT"
] | 11 | 2020-09-17T13:39:19.000Z | 2022-03-02T18:12:29.000Z | import os
from os.path import dirname
from unittest import TestCase
import src.superannotate as sa
| 37.0625 | 109 | 0.720067 |
f7639fdfd1c81876235b0d816ccef91c2a2888bb | 903 | py | Python | spellingcorrector/utils/count.py | NazcaLines/spelling-corrector | ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b | [
"MIT"
] | null | null | null | spellingcorrector/utils/count.py | NazcaLines/spelling-corrector | ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b | [
"MIT"
] | null | null | null | spellingcorrector/utils/count.py | NazcaLines/spelling-corrector | ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b | [
"MIT"
] | null | null | null | import os
import functools
CORPUS_DIR = str(os.getcwd())[:str(os.getcwd()).index('spellingcorrector/')] \
+ 'data/corpus.txt'
NWORD = {}
def getTrain():
"""
simple singleton implement
"""
global NWORD
if len(NWORD) == 0:
train()
return NWORD
if __name__ == "__main__":
getTrain()
print CORPUS_DIR
print os.path.isfile(CORPUS_DIR)
print len(NWORD)
| 19.630435 | 78 | 0.572536 |
f764d5863df085c67cf462549442d82ef895d117 | 653 | py | Python | rhasspy_weather/parser/nlu_intent.py | arniebarni/rhasspy_weather | 6a9df72adad3e5dafa7962c2be37c824dc04137b | [
"MIT"
] | 5 | 2020-03-29T01:00:30.000Z | 2022-02-06T20:00:00.000Z | rhasspy_weather/parser/nlu_intent.py | arniebarni/rhasspy_weather | 6a9df72adad3e5dafa7962c2be37c824dc04137b | [
"MIT"
] | 12 | 2020-04-02T15:09:05.000Z | 2021-10-11T00:44:21.000Z | rhasspy_weather/parser/nlu_intent.py | arniebarni/rhasspy_weather | 6a9df72adad3e5dafa7962c2be37c824dc04137b | [
"MIT"
] | 5 | 2020-03-25T08:33:02.000Z | 2021-05-18T08:47:41.000Z | import logging
from rhasspy_weather.data_types.request import WeatherRequest
from rhasspy_weather.parser import rhasspy_intent
from rhasspyhermes.nlu import NluIntent
log = logging.getLogger(__name__)
def parse_intent_message(intent_message: NluIntent) -> WeatherRequest:
"""
Parses any of the rhasspy weather intents.
Args:
intent_message: a Hermes NluIntent
Returns: WeatherRequest object
"""
return rhasspy_intent.parse_intent_message(intent_message.to_rhasspy_dict())
| 26.12 | 80 | 0.793262 |
f767fc179ce62571eb82287782f1d69c78d494fd | 1,028 | py | Python | 415-add-strings/add_strings.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | 415-add-strings/add_strings.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | 415-add-strings/add_strings.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | """
59.40%
"""
| 23.906977 | 73 | 0.430934 |
f769f929849f6994908fa8a9ca653f7ebe8e0e87 | 2,770 | py | Python | plugins/qdb.py | x89/raziel-irc-bot | 122a5de858a84e018549e0a7fd0be11bb33f2eb3 | [
"MIT"
] | null | null | null | plugins/qdb.py | x89/raziel-irc-bot | 122a5de858a84e018549e0a7fd0be11bb33f2eb3 | [
"MIT"
] | null | null | null | plugins/qdb.py | x89/raziel-irc-bot | 122a5de858a84e018549e0a7fd0be11bb33f2eb3 | [
"MIT"
] | null | null | null | import logging
log = logging.getLogger(__name__)
import json
import requests
import requests.exceptions
import botologist.plugin
BASE_URL = 'https://qdb.lutro.me'
| 23.277311 | 83 | 0.636462 |
f76d62143b8e1fa514207d6381b4adbf58120f1a | 2,889 | py | Python | skos/method.py | edmondchuc/voc-view | 57bd965facacc77f40f218685c88e8b858d4925c | [
"MIT"
] | 3 | 2021-07-31T16:23:26.000Z | 2022-01-24T01:28:17.000Z | skos/method.py | edmondchuc/voc-view | 57bd965facacc77f40f218685c88e8b858d4925c | [
"MIT"
] | null | null | null | skos/method.py | edmondchuc/voc-view | 57bd965facacc77f40f218685c88e8b858d4925c | [
"MIT"
] | 1 | 2019-08-07T06:02:52.000Z | 2019-08-07T06:02:52.000Z | from pyldapi.renderer import Renderer
from pyldapi.view import View
from flask import render_template, Response
from rdflib import Graph, URIRef, BNode
import skos
from skos.common_properties import CommonPropertiesMixin
from config import Config
| 40.125 | 128 | 0.602631 |
f76e35161b8285ae39943d6522c5085c519cc9cf | 22,791 | py | Python | wwt_api_client/communities.py | WorldWideTelescope/wwt_api_client | cfc42728eb2428f17e711f7527fd97150e629296 | [
"BSD-3-Clause"
] | null | null | null | wwt_api_client/communities.py | WorldWideTelescope/wwt_api_client | cfc42728eb2428f17e711f7527fd97150e629296 | [
"BSD-3-Clause"
] | 8 | 2019-04-28T17:27:44.000Z | 2020-11-05T20:24:21.000Z | wwt_api_client/communities.py | WorldWideTelescope/wwt_api_client | cfc42728eb2428f17e711f7527fd97150e629296 | [
"BSD-3-Clause"
] | 1 | 2019-04-28T17:25:06.000Z | 2019-04-28T17:25:06.000Z | # Copyright 2019-2020 the .NET Foundation
# Distributed under the terms of the revised (3-clause) BSD license.
"""Interacting with the WWT Communities APIs."""
import json
import os.path
import requests
import sys
from urllib.parse import parse_qs, urlparse
from . import APIRequest, Client, enums
__all__ = '''
CommunitiesAPIRequest
CommunitiesClient
CreateCommunityRequest
DeleteCommunityRequest
GetCommunityInfoRequest
GetLatestCommunityRequest
GetMyProfileRequest
GetProfileEntitiesRequest
IsUserRegisteredRequest
interactive_communities_login
'''.split()
LIVE_OAUTH_AUTH_SERVICE = "https://login.live.com/oauth20_authorize.srf"
LIVE_OAUTH_TOKEN_SERVICE = "https://login.live.com/oauth20_token.srf"
LIVE_OAUTH_DESKTOP_ENDPOINT = "https://login.live.com/oauth20_desktop.srf"
LIVE_AUTH_SCOPES = ['wl.emails', 'wl.signin']
WWT_CLIENT_ID = '000000004015657B'
OAUTH_STATE_BASENAME = 'communities-oauth.json'
CLIENT_SECRET_BASENAME = 'communities-client-secret.txt'
# TODO: we're not implementing the "isEdit" mode where you can update
# community info.
# Command-line utility for initializing the OAuth state.
def interactive_communities_login(args):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--secret-file',
metavar = 'PATH',
help = 'Path to a file from which to read the WWT client secret',
)
parser.add_argument(
'--secret-env',
metavar = 'ENV-VAR-NAME',
help = 'Name of an environment variable containing the WWT client secret',
)
settings = parser.parse_args(args)
# Make sure we actually have a secret to work with.
if settings.secret_file is not None:
with open(settings.secret_file) as f:
client_secret = f.readline().strip()
elif settings.secret_env is not None:
client_secret = os.environ.get(settings.secret_env)
else:
print('error: the WWT \"client secret\" must be provided; '
'use --secret-file or --secret-env', file=sys.stderr)
sys.exit(1)
if not client_secret:
print('error: the WWT \"client secret\" is empty or unset', file=sys.stderr)
sys.exit(1)
# Ready to go ...
CommunitiesClient(
Client(),
oauth_client_secret = client_secret,
interactive_login_if_needed = True,
)
print('OAuth flow successfully completed.')
if __name__ == '__main__':
interactive_communities_login(sys.argv[1:])
| 30.966033 | 125 | 0.5833 |
f76f8e181e6635c86576107fa1d30d62af17c114 | 158 | py | Python | FictionTools/amitools/test/suite/vprintf.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 38 | 2021-06-18T12:56:15.000Z | 2022-03-12T20:38:40.000Z | FictionTools/amitools/test/suite/vprintf.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 2 | 2021-06-20T16:28:12.000Z | 2021-11-17T21:33:56.000Z | FictionTools/amitools/test/suite/vprintf.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 6 | 2021-06-18T18:18:36.000Z | 2021-12-22T08:01:32.000Z | import pytest
| 19.75 | 44 | 0.689873 |
f7704b3dce5cd94981cb7391a19b755c0df22b68 | 304 | py | Python | test/socket_client.py | suxb201/Socks5_DNS_Test | a1cb8b5d8d998c6a029dc0b329418ecbb9a2fc72 | [
"MIT"
] | 1 | 2020-11-09T02:08:04.000Z | 2020-11-09T02:08:04.000Z | test/socket_client.py | suxb201/Socks5_DNS_Test | a1cb8b5d8d998c6a029dc0b329418ecbb9a2fc72 | [
"MIT"
] | null | null | null | test/socket_client.py | suxb201/Socks5_DNS_Test | a1cb8b5d8d998c6a029dc0b329418ecbb9a2fc72 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import socket
HOST = '127.0.0.1' # IP
PORT = 10009 #
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
print(s)
s.connect((HOST, PORT))
s.sendall(b'Hello, world')
print(s)
data = s.recv(1024)
print('Received', repr(data))
| 19 | 60 | 0.651316 |
f770883d6109ebd548cc44852ed0b4db7874c963 | 752 | py | Python | util/statuschanger.py | MarkThe/Mark-Tools | c755d2b2e095b9f83fcbaba3ac74ec927bcddf26 | [
"MIT"
] | 1 | 2022-01-04T18:09:50.000Z | 2022-01-04T18:09:50.000Z | util/statuschanger.py | MarkThe/Mark-Tools | c755d2b2e095b9f83fcbaba3ac74ec927bcddf26 | [
"MIT"
] | null | null | null | util/statuschanger.py | MarkThe/Mark-Tools | c755d2b2e095b9f83fcbaba3ac74ec927bcddf26 | [
"MIT"
] | null | null | null | import requests
import Mark
from colorama import Fore
from util.plugins.common import print_slow, getheaders, proxy | 44.235294 | 152 | 0.676862 |
f77168d5a15a1187d94edfc593ed43416e3d8946 | 1,979 | py | Python | recipes/recipe_modules/cloudbuildhelper/test_api.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | recipes/recipe_modules/cloudbuildhelper/test_api.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | recipes/recipe_modules/cloudbuildhelper/test_api.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from hashlib import sha256
from recipe_engine import recipe_test_api
| 30.921875 | 80 | 0.632137 |
f7716d154c4129f506d04590e1524fcb8b2888bb | 7,011 | py | Python | tabbi/gmm.py | Yu-AnChen/tabbi | bf4655905d0f3fc5b7dd49a1cd12c69cb83e5bb5 | [
"MIT"
] | null | null | null | tabbi/gmm.py | Yu-AnChen/tabbi | bf4655905d0f3fc5b7dd49a1cd12c69cb83e5bb5 | [
"MIT"
] | null | null | null | tabbi/gmm.py | Yu-AnChen/tabbi | bf4655905d0f3fc5b7dd49a1cd12c69cb83e5bb5 | [
"MIT"
] | null | null | null | import sklearn.mixture
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
import matplotlib.patheffects as mpatheffects
| 31.581081 | 97 | 0.601341 |
f7745a348fc7e9affea625ab9bda06298308eebf | 4,627 | py | Python | openGaussBase/testcase/SQL/DML/upsert/Opengauss_Function_DML_Upsert_Case0131.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DML/upsert/Opengauss_Function_DML_Upsert_Case0131.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DML/upsert/Opengauss_Function_DML_Upsert_Case0131.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : upsert
Case Name : upsertfor update
Description :
1
2session1for update20s
3session15ssession2session1update
4session1session2
5session1
6session2session1update
Expect :
1
2session1for update20ssession1
3session15ssession2session1updatesession2
4session1session2session210s
5session1
6selectsession1updatesession2
History :
"""
import time
import unittest
from testcase.utils.ComThread import ComThread
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
| 39.211864 | 84 | 0.653555 |
f7746001d5f89f7418b92bab28a281a421e0564a | 6,892 | py | Python | multi_traductor.py | Jalagarto/translator | d35cde0934c4ab94204d6dfdf4e7d6c0bcd6291b | [
"Apache-2.0"
] | null | null | null | multi_traductor.py | Jalagarto/translator | d35cde0934c4ab94204d6dfdf4e7d6c0bcd6291b | [
"Apache-2.0"
] | null | null | null | multi_traductor.py | Jalagarto/translator | d35cde0934c4ab94204d6dfdf4e7d6c0bcd6291b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import tkinter as tk
from tkinter import messagebox as msg
from tkinter.ttk import Notebook
from tkinter import ttk
import tkinter.font as font
import requests
if __name__ == "__main__":
translatebook = TranslateBook()
english_tab = LanguageTab(translatebook, "Ingls", "en")
translatebook.add_new_tab(english_tab)
# german_tab = LanguageTab(translatebook, "Alemn", "de")
# translatebook.add_new_tab(german_tab)
translatebook.mainloop()
# cdigos de lenguages --> https://www.labnol.org/code/19899-google-translate-languages | 43.075 | 130 | 0.642339 |
f775c9f689bfc087dae8bdb25d1cd48072e78520 | 1,138 | py | Python | root/settings/__init__.py | daniel-waruo/e-commerse-api | 6b080039398fb4099a34335317d649dd67783f63 | [
"Apache-2.0"
] | 6 | 2019-11-21T10:09:49.000Z | 2021-06-19T09:52:59.000Z | root/settings/__init__.py | daniel-waruo/e-commerse-api | 6b080039398fb4099a34335317d649dd67783f63 | [
"Apache-2.0"
] | null | null | null | root/settings/__init__.py | daniel-waruo/e-commerse-api | 6b080039398fb4099a34335317d649dd67783f63 | [
"Apache-2.0"
] | null | null | null | """
This is a django-split-settings main file.
For more information read this:
https://github.com/sobolevn/django-split-settings
Default environment is `development`.
To change settings file:
`DJANGO_ENV=production python manage.py runserver`
"""
import django_heroku
from split_settings.tools import include
base_settings = [
'components/middleware.py', # middleware configuration
'components/apps.py', # installed applications
'components/database.py', # database settings
'components/pyuploadcare.py', # pyuploadcare settings
'components/rest_framework.py', # rest framework settings
'components/allauth.py', # allauth rest_auth settings
'components/currency.py', # currency settings
'components/email.py', # email settings
'components/rest_framework.py', # rest framework settings
'components/common.py', # standard django settings
'components/cors_configuration.py',
# configuration for Access Control Allow Origin
'components/graphene.py',
# sendy config
'components/sendy.py'
]
# Include settings:
include(*base_settings)
django_heroku.settings(locals())
| 30.756757 | 62 | 0.745167 |
f776be33a00e9a7a7de35e919a22d945de72c2c3 | 688 | py | Python | testapp/urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | [
"BSD-3-Clause"
] | 11 | 2015-01-02T15:44:31.000Z | 2021-07-27T06:54:35.000Z | testapp/urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | [
"BSD-3-Clause"
] | 8 | 2016-02-03T07:07:04.000Z | 2022-01-13T00:42:32.000Z | testapp/urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | [
"BSD-3-Clause"
] | 8 | 2015-02-20T13:51:51.000Z | 2021-06-24T19:11:30.000Z | from django.conf.urls import patterns, url, include
from django.contrib import admin
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from .views import template_test
urlpatterns = patterns(
'',
url(r'^test/', template_test, name='template_test'),
url(r'^test2/', include('testapp.another_urls', namespace='foo', app_name='faa'))
)
admin.autodiscover()
urlpatterns += patterns(
'',
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
import debug_toolbar
urlpatterns += patterns(
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| 23.724138 | 85 | 0.713663 |
f7796a989bdd258bec7902dd5290b418ab45666b | 1,645 | py | Python | continual_learning/scenarios/utils.py | jaryP/ContinualAI | 7d9b7614066d219ebd72049692da23ad6ec132b0 | [
"MIT"
] | null | null | null | continual_learning/scenarios/utils.py | jaryP/ContinualAI | 7d9b7614066d219ebd72049692da23ad6ec132b0 | [
"MIT"
] | null | null | null | continual_learning/scenarios/utils.py | jaryP/ContinualAI | 7d9b7614066d219ebd72049692da23ad6ec132b0 | [
"MIT"
] | null | null | null | from typing import Sequence, Union
import numpy as np
from scipy.ndimage.interpolation import rotate as np_rotate
from PIL.Image import Image
from torch import Tensor, tensor
from torchvision.transforms.functional import rotate
| 33.571429 | 73 | 0.612766 |
f77bd48d7ad8370a1142d05db86188aea9cfe2af | 14,355 | py | Python | d_graph.py | MohamedAl-Hussein/pyGraphs | 43346b1f25332dd7ab80cdd9656b3ed7af21d4d2 | [
"MIT"
] | null | null | null | d_graph.py | MohamedAl-Hussein/pyGraphs | 43346b1f25332dd7ab80cdd9656b3ed7af21d4d2 | [
"MIT"
] | null | null | null | d_graph.py | MohamedAl-Hussein/pyGraphs | 43346b1f25332dd7ab80cdd9656b3ed7af21d4d2 | [
"MIT"
] | null | null | null | # Course: CS261 - Data Structures
# Author: Mohamed Al-Hussein
# Assignment: 06
# Description: Directed graph implementation.
from collections import deque
import heapq
| 33.618267 | 117 | 0.585023 |
f77befa83cf2914313d51ff9e7931425c66499dd | 6,847 | py | Python | src/code_submission/2_pasanju/preprocessing/prepredict.py | NehzUx/AutoGraph-KDDCup2020 | d2fc228f4ccc5785db3129cca0445a80b6fef11d | [
"MIT"
] | 1 | 2021-12-06T14:59:55.000Z | 2021-12-06T14:59:55.000Z | src/code_submission/2_pasanju/preprocessing/prepredict.py | NehzUx/AutoGraph-Benchmark | d2fc228f4ccc5785db3129cca0445a80b6fef11d | [
"MIT"
] | null | null | null | src/code_submission/2_pasanju/preprocessing/prepredict.py | NehzUx/AutoGraph-Benchmark | d2fc228f4ccc5785db3129cca0445a80b6fef11d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2020/5/14 20:41
# @Author: Mecthew
import time
import numpy as np
import pandas as pd
import scipy
from sklearn.svm import LinearSVC
from sklearn.linear_model import logistic
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
import scipy.sparse as sp
from utils.logger import get_logger
logger = get_logger("INFO")
def prepredict(graph_df, train_indices, use_valid, use_ohe=False):
t1 = time.time()
fea_table = graph_df['fea_table'].set_index(keys="node_index")
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']]
x_train, y_train = fea_table.loc[train_indices].to_numpy(), train_label.to_numpy()
x_test = fea_table.loc[test_indices].to_numpy()
lr = LR()
lr.fit(x_train, y_train)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(y_train.reshape(-1, 1))
x_train_feat, x_test_feat = ohe.transform(np.argmax(lr.predict(x_train), axis=1).reshape(-1, 1)).toarray(), \
ohe.transform(np.argmax(lr.predict(x_test), axis=1).reshape(-1, 1)).toarray()
else:
x_train_feat, x_test_feat = lr.predict(x_train), \
lr.predict(x_test)
pre_feat = np.concatenate([x_train_feat, x_test_feat], axis=0)
total_indices = np.concatenate([train_indices, test_indices], axis=0)
train_predict = np.argmax(x_train_feat, axis=1)
train_acc = accuracy_score(y_true=y_train, y_pred=train_predict)
t2 = time.time()
logger.info("Time cost for training {}: {}s, train acc {}".format(lr.name, t2-t1, train_acc))
return pd.DataFrame(data=pre_feat, index=total_indices)
def lpa_predict(graph_df, n_class, train_indices, use_valid, max_iter=100, tol=1e-3, use_ohe=False):
t1 = time.time()
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']].to_numpy()
print("Train label shape {}".format(train_label.shape))
train_label = train_label.reshape(-1)
edges = graph_df['edge_file'][['src_idx', 'dst_idx', 'edge_weight']].to_numpy()
edge_index = edges[:, :2].astype(np.int).transpose() # transpose to (2, num_edges)
edge_weight = edges[:, 2].astype(np.float)
num_nodes = len(train_indices) + len(test_indices)
t2 = time.time()
total_indices = np.concatenate([train_indices, test_indices], axis=0)
adj = sp.coo_matrix((edge_weight, edge_index), shape=(num_nodes, num_nodes)).tocsr()
adj = adj[total_indices] # reorder
adj = adj[:, total_indices]
t3 = time.time()
logger.debug("Time cost for transform adj {}s".format(t3 - t2))
row_sum = np.array(adj.sum(axis=1), dtype=np.float)
d_inv = np.power(row_sum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
normal_adj = sp.diags(d_inv).dot(adj).tocsr().transpose()
Pll = normal_adj[:len(train_indices), :len(train_indices)].copy()
Plu = normal_adj[:len(train_indices), len(train_indices):].copy()
Pul = normal_adj[len(train_indices):, :len(train_indices)].copy()
Puu = normal_adj[len(train_indices):, len(train_indices):].copy()
label_mat = np.eye(n_class)[train_label]
label_mat_prob = label_mat.copy()
print("Pul shape {}, label_mat shape {}".format(Pul.shape, label_mat_prob.shape))
Pul_dot_lable_mat = Pul.dot(label_mat)
unlabel_mat = np.zeros(shape=(len(test_indices), n_class))
iter, changed = 0, np.inf
t4 = time.time()
logger.debug("Time cost for prepare matrix {}s".format(t4-t3))
while iter < max_iter and changed > tol:
if iter % 10 == 0:
logger.debug("---> Iteration %d/%d, changed: %f" % (iter, max_iter, changed))
iter += 1
pre_unlabel_mat = unlabel_mat
unlabel_mat = Puu.dot(unlabel_mat) + Pul_dot_lable_mat
label_mat_prob = Pll.dot(label_mat_prob) + Plu.dot(pre_unlabel_mat)
changed = np.abs(pre_unlabel_mat - unlabel_mat).sum()
logger.debug("Time cost for training lpa {}".format(time.time() - t4))
# preds = np.argmax(np.array(unlabel_mat), axis=1)
# unlabel_mat = np.eye(n_class)[preds]
train_acc = accuracy_score(y_true=train_label, y_pred=np.argmax(label_mat_prob, axis=1))
logger.info("LPA training acc {}".format(train_acc))
logger.info("Time cost for LPA {}s".format(time.time() - t1))
total_indices = np.concatenate([train_indices, test_indices], axis=0)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(train_label.reshape(-1, 1))
label_mat_ohe = ohe.transform(np.argmax(label_mat_prob, axis=1).reshape(-1, 1)).toarray()
unlabel_mat_ohe = ohe.transform(np.argmax(unlabel_mat, axis=1).reshape(-1, 1)).toarray()
lu_mat_ohe = np.concatenate([label_mat_ohe, unlabel_mat_ohe], axis=0)
return pd.DataFrame(data=lu_mat_ohe, index=total_indices), train_acc
else:
unlabel_mat_prob = unlabel_mat
lu_mat_prob = np.concatenate([label_mat_prob, unlabel_mat_prob], axis=0)
return pd.DataFrame(data=lu_mat_prob, index=total_indices), train_acc
def is_nonnegative_integer(x_feats):
is_nonnegative = (x_feats >= 0).all()
is_integer = True
for feat in x_feats:
feat_int_sum = np.array(feat, dtype=np.int).sum()
feat_sum = np.array(feat, dtype=np.float).sum()
is_integer = (feat_int_sum == feat_sum)
if is_integer is False:
break
return is_nonnegative and is_integer
| 43.062893 | 117 | 0.676355 |
f77cba016c9db38a8357e9b79839c267bbbde362 | 4,754 | py | Python | disaster_data/sources/noaa_coast/spider.py | cognition-gis/cognition-disaster-data | 5441bd282d36b2d998d1d366d714d38fc5b92c8f | [
"Apache-2.0"
] | null | null | null | disaster_data/sources/noaa_coast/spider.py | cognition-gis/cognition-disaster-data | 5441bd282d36b2d998d1d366d714d38fc5b92c8f | [
"Apache-2.0"
] | 1 | 2022-03-02T14:58:21.000Z | 2022-03-02T14:58:21.000Z | disaster_data/sources/noaa_coast/spider.py | cognition-gis/cognition-disaster-data | 5441bd282d36b2d998d1d366d714d38fc5b92c8f | [
"Apache-2.0"
] | null | null | null | import os
import scrapy
from scrapy.crawler import CrawlerProcess
import requests
from disaster_data.sources.noaa_coast.utils import get_geoinfo, get_fgdcinfo
| 34.955882 | 121 | 0.446782 |
f77cc067eb5667c5dadfdaf7622c60b024ae8bc5 | 2,004 | py | Python | rlo/test/rlo/test_factory.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | [
"MIT"
] | 31 | 2021-09-09T16:09:55.000Z | 2022-02-20T02:15:19.000Z | rlo/test/rlo/test_factory.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | [
"MIT"
] | 40 | 2021-08-06T14:30:08.000Z | 2022-01-19T08:49:52.000Z | rlo/test/rlo/test_factory.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | [
"MIT"
] | 5 | 2021-08-06T11:20:31.000Z | 2022-01-07T19:39:40.000Z | import pytest
from rlo import factory
| 31.809524 | 68 | 0.657685 |
f77db444ca4d359ed2a89460019181e2cac7a2bd | 1,285 | py | Python | src/setup_mac.py | dittert/pyprobe | 1b0d0e403645ed204332c70c8a89e094f860023a | [
"Apache-2.0"
] | null | null | null | src/setup_mac.py | dittert/pyprobe | 1b0d0e403645ed204332c70c8a89e094f860023a | [
"Apache-2.0"
] | null | null | null | src/setup_mac.py | dittert/pyprobe | 1b0d0e403645ed204332c70c8a89e094f860023a | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Dirk Dittert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cx_Freeze import setup, Executable

# cx_Freeze detects most dependencies automatically; anything it misses is
# forced through the option dictionaries below.
base = 'Console'  # defined but not passed to Executable in the original config

# Single console entry point of the application.
probe_executable = Executable('probe.py', copyDependentFiles=True)

# Extra data files to bundle (none at the moment).
extra_files = []
# Packages that must be bundled even when not auto-detected.
bundled_packages = ['pyprobe', 'psutil']
# NOTE: this list is never passed to setup(); the 'includes' option below
# uses a literal ['requests'] instead.
includes = []

# Options for the frozen build.
build_exe_options = {
    'include_files': extra_files,
    'packages': bundled_packages,
    'excludes': [],
    'includes': ['requests'],
}
# macOS .app bundle options.
bdist_mac_options = {
    'bundle_name': 'pyprobe',
}

setup(
    name='pyprobe',
    version='1.0',
    description='x',
    options={
        'build_exe': build_exe_options,
        'bdist_mac': bdist_mac_options,
    },
    executables=[probe_executable],
    requires=['requests', 'psutil'],
)
f77db8b6066d045b501403f79c57e6ba8e7db030 | 6,432 | py | Python | src/gui/view_menu/layer_list.py | jeremiahws/DLAE | 5005d1c275279cc283c59f226732f073cf340a52 | [
"Apache-2.0"
] | 2 | 2021-05-25T12:23:23.000Z | 2021-06-20T11:40:40.000Z | src/gui/view_menu/layer_list.py | jeremiahws/DLAE | 5005d1c275279cc283c59f226732f073cf340a52 | [
"Apache-2.0"
] | null | null | null | src/gui/view_menu/layer_list.py | jeremiahws/DLAE | 5005d1c275279cc283c59f226732f073cf340a52 | [
"Apache-2.0"
] | 4 | 2019-10-16T07:52:41.000Z | 2021-11-20T17:28:25.000Z | # Copyright 2019 Jeremiah Sanders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dlae/gui/view_menu/layer_list.py"""
import tkinter as tk
| 50.25 | 211 | 0.704291 |
f77e315e6c8b0a904e3ca8fb92860fcdc824f09d | 977 | py | Python | preprocessing.py | Prakhar-Bhartiya/SentimentAnalysis | 8fa2664a57b01e7303ef26d1226a81c0e25be4b7 | [
"MIT"
] | null | null | null | preprocessing.py | Prakhar-Bhartiya/SentimentAnalysis | 8fa2664a57b01e7303ef26d1226a81c0e25be4b7 | [
"MIT"
] | null | null | null | preprocessing.py | Prakhar-Bhartiya/SentimentAnalysis | 8fa2664a57b01e7303ef26d1226a81c0e25be4b7 | [
"MIT"
] | null | null | null | """
DATA DESCRIPTION
sentiment140 dataset. It contains 1,600,000 tweets extracted using the Twitter API. The tweets have been annotated (0 = negative, 4 = positive) and they can be used to detect sentiment.
It contains the following 6 fields:
target: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)
ids: The id of the tweet ( 2087)
date: the date of the tweet (Sat May 16 23:58:44 UTC 2009)
flag: The query (lyx). If there is no query, then this value is NO_QUERY.
user: the user that tweeted (robotickilldozr)
text: the text of the tweet (Lyx is cool)
"""
# Load a small sample of the sentiment140 CSV (no header row in the file;
# only the first 25 rows are read here).
import pandas as pd

tweets = pd.read_csv(
    'training.1600000.processed.noemoticon.csv',
    encoding='latin',
    header=None,
    nrows=25,
)
# Name the six positional columns, then discard those that carry no
# sentiment signal for this analysis.
tweets = tweets.rename(
    columns={0: 'target', 1: 'id', 2: 'TimeStamp', 3: 'query', 4: 'username', 5: 'content'}
)
tweets = tweets.drop(columns=['id', 'TimeStamp', 'query'])
print(tweets.to_string())
| 32.566667 | 187 | 0.721597 |
f77f2fcfbb893554c9dac95eda0dc9991fd25b40 | 1,803 | py | Python | indico/modules/events/static/controllers.py | tobiashuste/indico | c1e6ec0c8c84745988e38c9b1768142a6feb9e0e | [
"MIT"
] | null | null | null | indico/modules/events/static/controllers.py | tobiashuste/indico | c1e6ec0c8c84745988e38c9b1768142a6feb9e0e | [
"MIT"
] | null | null | null | indico/modules/events/static/controllers.py | tobiashuste/indico | c1e6ec0c8c84745988e38c9b1768142a6feb9e0e | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import redirect, request, session
from werkzeug.exceptions import NotFound
from indico.core.db import db
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.static.models.static import StaticSite, StaticSiteState
from indico.modules.events.static.tasks import build_static_site
from indico.modules.events.static.views import WPStaticSites
from indico.web.flask.util import url_for
| 31.631579 | 104 | 0.742651 |
f77f91aa533c688d45149adae8643805965bb2c7 | 622 | py | Python | kruptos/csapp/api.py | ashwani762/Kruptos | 9cd04ee6147c2dc14764e45c3481690ae399e664 | [
"Apache-2.0"
] | null | null | null | kruptos/csapp/api.py | ashwani762/Kruptos | 9cd04ee6147c2dc14764e45c3481690ae399e664 | [
"Apache-2.0"
] | null | null | null | kruptos/csapp/api.py | ashwani762/Kruptos | 9cd04ee6147c2dc14764e45c3481690ae399e664 | [
"Apache-2.0"
] | null | null | null | from csapp.models import Kruptos
from rest_framework import viewsets, permissions
from rest_framework.response import Response
from rest_framework import status
from .serializers import KruptosSerializer
| 29.619048 | 59 | 0.726688 |
f7809285eb96f9645d677756834951e018513264 | 7,612 | py | Python | test_autolens/simulators/imaging/instrument_util.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | test_autolens/simulators/imaging/instrument_util.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | test_autolens/simulators/imaging/instrument_util.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | from os import path
import autolens as al
import autolens.plot as aplt
from test_autogalaxy.simulators.imaging import instrument_util
# Root of the test suite: two directory levels above this file.
test_path = path.join("{}".format(path.dirname(path.realpath(__file__))), "..", "..")
def pixel_scale_from_instrument(instrument):
    """
    Returns the pixel scale from an instrument type based on real observations.

    These options are representative of VRO, Euclid, HST, over-sampled HST and
    Adaptive Optics imaging.

    Parameters
    ----------
    instrument : str
        A string giving the resolution of the desired instrument
        (vro | euclid | hst | hst_up | ao).

    Returns
    -------
    tuple
        The (y, x) pixel scales of the instrument.

    Raises
    ------
    ValueError
        If the instrument name is not recognised.
    """
    # NOTE: the original implementation tested `instrument in "vro"` etc.,
    # which is a *substring* check — e.g. "v", "ro" or "" would have matched
    # "vro". An exact dictionary lookup avoids those false positives.
    pixel_scales = {
        "vro": (0.2, 0.2),
        "euclid": (0.1, 0.1),
        "hst": (0.05, 0.05),
        "hst_up": (0.03, 0.03),
        "ao": (0.01, 0.01),
    }
    try:
        return pixel_scales[instrument]
    except KeyError:
        raise ValueError("An invalid instrument was entered - ", instrument)
def grid_from_instrument(instrument):
    """
    Returns the `Grid` from an instrument type based on real observations.

    These options are representative of VRO, Euclid, HST, over-sampled HST and
    Adaptive Optics imaging.

    Parameters
    ----------
    instrument : str
        A string giving the resolution of the desired instrument
        (vro | euclid | hst | hst_up | ao).

    Returns
    -------
    al.GridIterate
        A uniform iterate grid whose shape and pixel scale match the instrument.

    Raises
    ------
    ValueError
        If the instrument name is not recognised.
    """
    # (shape_2d, pixel_scales) per instrument. The dict lookup replaces the
    # original `instrument in "vro"` chain, which performed substring tests
    # (e.g. "v" or "" would have matched "vro") rather than exact comparisons.
    grid_settings = {
        "vro": ((80, 80), 0.2),
        "euclid": ((120, 120), 0.1),
        "hst": ((200, 200), 0.05),
        "hst_up": ((300, 300), 0.03),
        "ao": ((800, 800), 0.01),
    }
    try:
        shape_2d, pixel_scales = grid_settings[instrument]
    except KeyError:
        raise ValueError("An invalid instrument was entered - ", instrument)
    return al.GridIterate.uniform(shape_2d=shape_2d, pixel_scales=pixel_scales)
def psf_from_instrument(instrument):
    """
    Returns the *PSF* from an instrument type based on real observations.

    These options are representative of VRO, Euclid, HST, over-sampled HST and
    Adaptive Optics imaging.

    Parameters
    ----------
    instrument : str
        A string giving the resolution of the desired instrument
        (vro | euclid | hst | hst_up | ao).

    Returns
    -------
    al.Kernel
        A renormalized 31x31 Gaussian kernel sized for the instrument.

    Raises
    ------
    ValueError
        If the instrument name is not recognised.
    """
    # (sigma, pixel_scales) of the Gaussian kernel per instrument. The dict
    # lookup replaces the original `instrument in "vro"` chain, which did
    # substring tests (e.g. "v" would have matched "vro") rather than exact
    # comparisons, and collapses five near-identical from_gaussian calls.
    kernel_settings = {
        "vro": (0.5, 0.2),
        "euclid": (0.1, 0.1),
        "hst": (0.05, 0.05),
        "hst_up": (0.05, 0.03),
        "ao": (0.025, 0.01),
    }
    try:
        sigma, pixel_scales = kernel_settings[instrument]
    except KeyError:
        raise ValueError("An invalid instrument was entered - ", instrument)
    return al.Kernel.from_gaussian(
        shape_2d=(31, 31), sigma=sigma, pixel_scales=pixel_scales, renormalize=True
    )
def simulator_from_instrument(instrument):
    """
    Returns the *Simulator* from an instrument type based on real observations.

    These options are representative of VRO, Euclid, HST, over-sampled HST and
    Adaptive Optics imaging.

    Parameters
    ----------
    instrument : str
        A string giving the resolution of the desired instrument
        (vro | euclid | hst | hst_up | ao).

    Returns
    -------
    al.SimulatorImaging
        A simulator with the instrument's exposure time and PSF.

    Raises
    ------
    ValueError
        If the instrument name is not recognised.
    """
    # Exposure time (seconds) per instrument. The dict lookup replaces the
    # original `instrument in "vro"` chain, which performed substring tests
    # (e.g. "v" would have matched "vro") rather than exact comparisons.
    exposure_times = {
        "vro": 100.0,
        "euclid": 2260.0,
        "hst": 2000.0,
        "hst_up": 2000.0,
        "ao": 1000.0,
    }
    try:
        exposure_time = exposure_times[instrument]
    except KeyError:
        raise ValueError("An invalid instrument was entered - ", instrument)
    # The PSF is the only other instrument-dependent ingredient used below.
    # (The original also built a grid via grid_from_instrument() and then
    # discarded it; that dead call has been removed.)
    psf = psf_from_instrument(instrument=instrument)
    return al.SimulatorImaging(
        exposure_time=exposure_time,
        psf=psf,
        background_sky_level=1.0,
        add_poisson_noise=True,
    )
| 33.982143 | 110 | 0.633605 |
f78287f3fba8bcf7a557dbad608ff12faa053899 | 687 | py | Python | tests/test_gpcontrolset.py | waider/gopro-py-api | b18b5458f5bbe689f468842d6888104317786de8 | [
"MIT"
] | 1 | 2019-05-06T21:48:54.000Z | 2019-05-06T21:48:54.000Z | tests/test_gpcontrolset.py | waider/gopro-py-api | b18b5458f5bbe689f468842d6888104317786de8 | [
"MIT"
] | null | null | null | tests/test_gpcontrolset.py | waider/gopro-py-api | b18b5458f5bbe689f468842d6888104317786de8 | [
"MIT"
] | null | null | null | from .conftest import GoProCameraTest
from socket import timeout
from urllib import error
| 34.35 | 76 | 0.676856 |
f782e665db7375deff9d1e85d757a68033315dd2 | 1,727 | py | Python | ca_bc_abbotsford/people.py | djac/scrapers-ca | 2e16a85ff8a05ea49031a11ede66fa452631f8da | [
"MIT"
] | null | null | null | ca_bc_abbotsford/people.py | djac/scrapers-ca | 2e16a85ff8a05ea49031a11ede66fa452631f8da | [
"MIT"
] | null | null | null | ca_bc_abbotsford/people.py | djac/scrapers-ca | 2e16a85ff8a05ea49031a11ede66fa452631f8da | [
"MIT"
] | null | null | null | from utils import CanadianScraper, CanadianPerson as Person
# URL of Abbotsford's city-council listing page.
COUNCIL_PAGE = 'http://www.abbotsford.ca/city_hall/mayor_and_council/city_council.htm'
# URL of the city's general contact page (its use is not visible in this
# chunk — presumably consulted for councillor contact details; confirm).
CONTACT_PAGE = 'http://www.abbotsford.ca/contact_us.htm'
| 43.175 | 130 | 0.609728 |
f783069506127a9b55df9ae0fb7a072477dcbc3b | 32 | py | Python | tests/unit/cli/test_repo.py | tehlingchu/anchore-cli | b0df36337f443749991a49263227c1d40989debb | [
"Apache-2.0"
] | 110 | 2017-09-14T02:15:15.000Z | 2022-03-30T20:14:21.000Z | tests/unit/cli/test_repo.py | tehlingchu/anchore-cli | b0df36337f443749991a49263227c1d40989debb | [
"Apache-2.0"
] | 115 | 2017-09-22T12:15:30.000Z | 2022-01-17T12:31:21.000Z | tests/unit/cli/test_repo.py | tehlingchu/anchore-cli | b0df36337f443749991a49263227c1d40989debb | [
"Apache-2.0"
] | 56 | 2017-09-22T11:26:25.000Z | 2022-03-03T14:14:58.000Z | from anchorecli.cli import repo
| 16 | 31 | 0.84375 |
f7836ff545709d136c298d62a1c6e262234ad38c | 2,692 | py | Python | Python/Activies/Classroom10-1.py | FranciscoMends/Python_Codes | fd0b33443d67b56b092beeea0e778285be6a42a9 | [
"MIT"
] | null | null | null | Python/Activies/Classroom10-1.py | FranciscoMends/Python_Codes | fd0b33443d67b56b092beeea0e778285be6a42a9 | [
"MIT"
] | null | null | null | Python/Activies/Classroom10-1.py | FranciscoMends/Python_Codes | fd0b33443d67b56b092beeea0e778285be6a42a9 | [
"MIT"
] | null | null | null | '''
nome = input('Insira seu nome: ')
if nome == 'Mendes':
print('Que nome lindo voc tem!')
else:
print('Seu nome to normal!')
print('Bom dia {}!'.format(nome))
'''
#DESAFIO_28
'''
from random import randint
from time import sleep
x = randint(0,5)
y = int(input('Digite um nmero de 0 5: '))
print('Loading...')
sleep(2)
if x == y:
print('Parabns, voc venceu!')
else:
print('Tente novamente, voc perdeu!')
print(x)
'''
#DESAFIO_29
'''
velocity = int(input('Qual a velocidade atual do seu carro em Km/h? '))
if velocity > 80:
print('Voc foi multado por excesso de velocidade!')
print('Velocidade permitia: 80km/h')
print('Velocidade ultrapassada: {}km/h'.format(velocity))
infraction = (velocity - 80) * 7
print('Valor da multa: R${},00'.format(infraction))
'''
#DESAFIO_30
'''
number = int(input('Insira um nmero inteiro: '))
if number % 2 == 0:
print('Seu nmero PAR!')
else:
print('Seu nmero MPAR!')
'''
#DESAFIO_31
'''
distance = int(input('Qual a distncia em Km que deseja viajar? '))
if distance <= 200:
final_value = distance * 0.50
else:
final_value = distance * 0.45
print('Valor da passagem: R${:.2f}'.format(final_value))
'''
#DESAFIO_32
'''
from datetime import date
year = int(input('Insira um ano (Coloque "0" caso queira analisar a data atual): '))
if year == 0:
year = date.today().year
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
print(year, ' um ano BISSEXTO!')
else:
print(year, 'no um ano BISSEXTO!')
'''
#DESAFIO_33
'''
x = int(input('Digite o primeiro nmero: '))
y = int(input('Digite o segundo nmero: '))
z = int(input('Digite o terceiro nmero: '))
number_max = max(x,y,z)
number_min = min(x,y,z)
print('Maior nmero:',number_max)
print('Menor nmero:',number_min)
'''
#DESAFIO_34
'''
wage = float(input('Insira seu salrio: R$'))
if wage > 1250:
salary_increase = ((10/100) * wage) + wage
percent = 10
else:
salary_increase = ((15/100) * wage) + wage
percent = 15
print()
print('Salrio atual: R${:.2f}'.format(wage))
print('Aumento de {}%'.format(percent))
print('Salrio final: R${:.2f}'.format(salary_increase))
'''
#DESAFIO_35
'''
line1 = float(input('Insira o comprimento da primeira reta: '))
line2 = float(input('Insira o comprimento da segunda reta: '))
line3 = float(input('Insira o comprimento da terceira reta: '))
if line1 < line2 + line3 and line2 < line1 + line3 and line3 < line1 + line2:
print('Podem formar um tringulo!')
else:
print('No podem formar um tringulo!')
'''
#PROVA
'''
s = 'prova de python'
x = len(s)
print(x)
x = 'curso de python no cursoemvideo'
y = x[:5]
print(y)
'''
x = 3 * 5 + 4 ** 2
print(x) | 24.697248 | 84 | 0.643759 |