hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
be4201706e45a3d4dd6cd9622ea3645d54ac325f | 440 | py | Python | users/models.py | makutas/CocktailWebsite | c5192e5fc2b750a32500f5c3421ed07e89c9c7e1 | [
"MIT"
] | null | null | null | users/models.py | makutas/CocktailWebsite | c5192e5fc2b750a32500f5c3421ed07e89c9c7e1 | [
"MIT"
] | null | null | null | users/models.py | makutas/CocktailWebsite | c5192e5fc2b750a32500f5c3421ed07e89c9c7e1 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
| 31.428571 | 77 | 0.747727 |
be43dfd884e7a14b827d8c59b29470159f680616 | 5,332 | py | Python | deploy/trained_model.py | Samyak005/Multi-Hop-QG | 15cc794a48ac9df058689c410007ea52b0e12a6a | [
"MIT"
] | null | null | null | deploy/trained_model.py | Samyak005/Multi-Hop-QG | 15cc794a48ac9df058689c410007ea52b0e12a6a | [
"MIT"
] | null | null | null | deploy/trained_model.py | Samyak005/Multi-Hop-QG | 15cc794a48ac9df058689c410007ea52b0e12a6a | [
"MIT"
] | null | null | null |
import torch
import logging
# Transformer version 4.9.1 - Newer versions may not work.
from transformers import AutoTokenizer
from trained_gpt_model import get_inference2
# if __name__ == "__main__":
# review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subjectverbobject)."
# t5_supp_inference(review_text, md2, device)
| 33.534591 | 225 | 0.69036 |
be44513cd298d38b88ee6e7730ed73cc8a97d105 | 5,979 | py | Python | parlai/agents/drqa/config.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | 1 | 2017-06-26T07:46:33.000Z | 2017-06-26T07:46:33.000Z | parlai/agents/drqa/config.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | null | null | null | parlai/agents/drqa/config.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import sys
import logging
| 51.102564 | 80 | 0.616993 |
be44bd30d7e94517cda605e3c7b74f2c0cefb67c | 4,919 | py | Python | gen4service/gen4bean.py | yongli82/CodeGenerator | 4ca9255c3c4c5392e45815fd20f605ccbbfd2325 | [
"MIT"
] | null | null | null | gen4service/gen4bean.py | yongli82/CodeGenerator | 4ca9255c3c4c5392e45815fd20f605ccbbfd2325 | [
"MIT"
] | null | null | null | gen4service/gen4bean.py | yongli82/CodeGenerator | 4ca9255c3c4c5392e45815fd20f605ccbbfd2325 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
reload(sys)
sys.path.append("..")
sys.setdefaultencoding('utf-8')
from jinja2 import Environment
from jinja2 import Template
import re
from sqlalchemy import schema, types
from sqlalchemy.engine import create_engine
import yyutil
import CodeGen
project_name = "budget"
data_name = "BudgetReport"
table_name = "FC_BudgetBaseInfo"
searchBeanPackage="com.dianping.ba.finance.budget.api.beans"
searchBeanName="BudgetReportSearchBean"
searchBeanField="""
private int budgetTypeId;
private int costDepartmentId;
private String budgetOwnerNo;
private boolean exceedBudget;
private boolean withExpenseType;
private int beginYear;
private int beginMonth;
private int endYear;
private int endMonth;
"""
dataBeanPackage="com.dianping.ba.finance.budget.api.beans"
dataBeanName="BudgetYearReportDisplayBean"
dataBeanField="""
private int budgetYear;
private int budgetTypeId;
private String budgetTypeNo;
private String budgetTypeName;
private int costDepartmentId;
private String costDepartmentName;
private String budgetOwnerNo;
private String budgetOwnerName;
private int budgetStatus;
private String budgetStatusName;
private int budgetPlanId;
private String budgetPlanNo;
private int strategyId;
private int strategyPeriodType;
private String strategyPeriodTypeName;
private BigDecimal yearTotalAmount;
private BigDecimal yearAvailableAmount;
private BigDecimal yearUsedAmount;
private BigDecimal yearFrozenAmount;
private BigDecimal quarterTotalAmount1;
private BigDecimal quarterAvailableAmount1;
private BigDecimal quarterUsedAmount1;
private BigDecimal quarterFrozenAmount1;
private BigDecimal quarterTotalAmount2;
private BigDecimal quarterAvailableAmount2;
private BigDecimal quarterUsedAmount2;
private BigDecimal quarterFrozenAmount2;
private BigDecimal quarterTotalAmount3;
private BigDecimal quarterAvailableAmount3;
private BigDecimal quarterUsedAmount3;
private BigDecimal quarterFrozenAmount3;
private BigDecimal quarterTotalAmount4;
private BigDecimal quarterAvailableAmount4;
private BigDecimal quarterUsedAmount4;
private BigDecimal quarterFrozenAmount4;
private BigDecimal monthTotalAmount1;
private BigDecimal monthAvailableAmount1;
private BigDecimal monthUsedAmount1;
private BigDecimal monthFrozenAmount1;
private BigDecimal monthTotalAmount2;
private BigDecimal monthAvailableAmount2;
private BigDecimal monthUsedAmount2;
private BigDecimal monthFrozenAmount2;
private BigDecimal monthTotalAmount3;
private BigDecimal monthAvailableAmount3;
private BigDecimal monthUsedAmount3;
private BigDecimal monthFrozenAmount3;
private BigDecimal monthTotalAmount4;
private BigDecimal monthAvailableAmount4;
private BigDecimal monthUsedAmount4;
private BigDecimal monthFrozenAmount4;
private BigDecimal monthTotalAmount5;
private BigDecimal monthAvailableAmount5;
private BigDecimal monthUsedAmount5;
private BigDecimal monthFrozenAmount5;
private BigDecimal monthTotalAmount6;
private BigDecimal monthAvailableAmount6;
private BigDecimal monthUsedAmount6;
private BigDecimal monthFrozenAmount6;
private BigDecimal monthTotalAmount7;
private BigDecimal monthAvailableAmount7;
private BigDecimal monthUsedAmount7;
private BigDecimal monthFrozenAmount7;
private BigDecimal monthTotalAmount8;
private BigDecimal monthAvailableAmount8;
private BigDecimal monthUsedAmount8;
private BigDecimal monthFrozenAmount8;
private BigDecimal monthTotalAmount9;
private BigDecimal monthAvailableAmount9;
private BigDecimal monthUsedAmount9;
private BigDecimal monthFrozenAmount9;
private BigDecimal monthTotalAmount10;
private BigDecimal monthAvailableAmount10;
private BigDecimal monthUsedAmount10;
private BigDecimal monthFrozenAmount10;
private BigDecimal monthTotalAmount11;
private BigDecimal monthAvailableAmount11;
private BigDecimal monthUsedAmount11;
private BigDecimal monthFrozenAmount11;
private BigDecimal monthTotalAmount12;
private BigDecimal monthAvailableAmount12;
private BigDecimal monthUsedAmount12;
private BigDecimal monthFrozenAmount12;
"""
columns = yyutil.convert_bean_to_columns(dataBeanField)
search_columns = yyutil.convert_bean_to_columns(searchBeanField)
jinja2_env = CodeGen.getEnvironment("gen4service")
template = jinja2_env.get_template("bean_code_template.md")
#snippet = template.render(table_name=table_name, data_name=data_name, columns=columns)
snippet = template.render(locals())
print snippet
with open(data_name + "_generate.md", 'wb') as f:
f.write(snippet)
f.flush()
f.close()
os.system("open " + data_name + "_generate.md")
| 32.793333 | 87 | 0.795893 |
be451445b545eb79b0e3f43bb3bb14e581f5720c | 2,333 | py | Python | Log_tao.py | zigzax/Basic_Python | d9d3256f2ac627e6e98991f73ab67ef8fcc4172d | [
"MIT"
] | null | null | null | Log_tao.py | zigzax/Basic_Python | d9d3256f2ac627e6e98991f73ab67ef8fcc4172d | [
"MIT"
] | null | null | null | Log_tao.py | zigzax/Basic_Python | d9d3256f2ac627e6e98991f73ab67ef8fcc4172d | [
"MIT"
] | null | null | null | Python 3.9.0 (tags/v3.9.0:9cf6752, Oct 5 2020, 15:34:40) [MSC v.1927 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import turtle
>>> tao = turtle.Turtle()
>>> tao.shape('turtle')
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.reset
<bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>>
>>> tao.reset()
>>> for i in range(4)
SyntaxError: invalid syntax
>>> for i in range(4):
tao.forward(100)tao.left(90)
SyntaxError: invalid syntax
>>> for i in range(4):
tao.forward(100)
tao.left(90)
>>> range (4)
range(0, 4)
>>> list (range(4))
[0, 1, 2, 3]
>>> for i in range(5)
SyntaxError: invalid syntax
>>> for i in range(5):
print(i)
0
1
2
3
4
\
>>> for i in range(5):
print(i)
0
1
2
3
4
>>> for i in range[10,50,90]:
print(i)
Traceback (most recent call last):
File "<pyshell#28>", line 1, in <module>
for i in range[10,50,90]:
TypeError: 'type' object is not subscriptable
>>> for i in[10,50,90]:
print(i)
10
50
90
>>> range (1,10)
range(1, 10)
>>> list (range(1,10))
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> tao.reset()
>>> for i in range (4):
tao.forward(100)
tao.left(90)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
>>> tao.reset
<bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>>
>>> tao.reset()
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> for i in range (8):
tao.forward(100)
tao.left(45)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
No. 4
No. 5
No. 6
No. 7
>>> tao.reset()
>>> for i in range (8):
tao.forward(100)
tao.left(45)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
No. 4
No. 5
No. 6
No. 7
>>> tao.reset()
>>> regtangle()
>>> tao.reset()
>>> for i in range(10):
regtangle()
tao.left(36)
>>> tao.reset()
>>> | 16.089655 | 95 | 0.562366 |
be451a5cb8b5c7262021b6003b4a6ffdd2ef5a5f | 424 | py | Python | run.py | pome-ta/CodeMirror | ef39c3032ea128d988c263ed97851860db9f977c | [
"MIT"
] | null | null | null | run.py | pome-ta/CodeMirror | ef39c3032ea128d988c263ed97851860db9f977c | [
"MIT"
] | null | null | null | run.py | pome-ta/CodeMirror | ef39c3032ea128d988c263ed97851860db9f977c | [
"MIT"
] | null | null | null | """
Pythonista3 app CodeMirror
"""
import pythonista.wkwebview as wkwebview
import ui
import pathlib
uri = pathlib.Path('./main_index.html')
_view = View()
_view.present(style='fullscreen', orientations=['portrait'])
| 16.96 | 60 | 0.707547 |
be466292d2d3ccf1cddc1f8ecf7d02c60e49df95 | 1,363 | py | Python | gen_cnn_dataset.py | NPCai/graphene-py | 50163eb65f55c25a3d090bad03e34304b1cb3037 | [
"MIT"
] | 5 | 2018-09-10T15:33:51.000Z | 2020-07-28T05:46:59.000Z | gen_cnn_dataset.py | NPCai/graphene-py | 50163eb65f55c25a3d090bad03e34304b1cb3037 | [
"MIT"
] | null | null | null | gen_cnn_dataset.py | NPCai/graphene-py | 50163eb65f55c25a3d090bad03e34304b1cb3037 | [
"MIT"
] | null | null | null | import wrapper as w
from multiprocessing import Process
import atexit
import time
from queue import Queue
''' 8 Processes, 24 threads per process = 192 threads '''
NUM_PROCESSES = 8
workerList = [] # Worker processes
queues = [] # Use seperate queues to avoid waiting for locks
with open("data/all_news.txt", "r") as news:
for line in news[::len(news) / NUM_PROCESSES]:
queue = Queue()
queue.put(line.strip())
print("Queue populated")
for i in range(NUM_PROCESSES):
worker = Worker(queues[i])
worker.daemon = True
worker.start()
workerList.append(worker)
atexit.register(close_running_threads)
print("All threads registered and working.")
while True:
print(queue.qsize() " sentences remaining to be requested")
time.sleep(2) # Print every two seconds | 26.72549 | 74 | 0.726339 |
be47030ab919977e3706aa43ef448dd537100bbd | 2,702 | py | Python | torch/_prims/context.py | EikanWang/pytorch | 823ddb6e87e8111c9b5a99523503172e5bf62c49 | [
"Intel"
] | null | null | null | torch/_prims/context.py | EikanWang/pytorch | 823ddb6e87e8111c9b5a99523503172e5bf62c49 | [
"Intel"
] | 1 | 2022-01-10T18:39:28.000Z | 2022-01-10T19:15:57.000Z | torch/_prims/context.py | HaoZeke/pytorch | 4075972c2675ef34fd85efd60c9bad75ad06d386 | [
"Intel"
] | null | null | null | from typing import Callable, Sequence, Any, Dict
import functools
import torch
import torch.overrides
from torch._prims.utils import torch_function_passthrough
import torch._refs as refs
import torch._refs
import torch._refs.nn
import torch._refs.nn.functional
import torch._refs.special
import torch._prims
# TODO: automap torch operations to references
# (need to throw a good assertion if the mapping doesn't exist)
_torch_to_reference_map = {
torch.add: refs.add,
# torch.div: refs.div,
torch.mul: refs.mul,
torch.ge: refs.ge,
torch.gt: refs.gt,
torch.le: refs.le,
torch.lt: refs.lt,
}
class TorchRefsMode(torch.overrides.TorchFunctionMode):
"""
Switches the interpretation of torch.* functions and Tensor methods to
use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.)
>>> with TorchRefsMode.push():
... torch.add(x, y) # calls torch._refs.add(x, y)
By default, this context manager will fall back on the torch.* if the
ref does not exist; set strict=True to error if this occurs.
"""
| 28.442105 | 81 | 0.650259 |
be47dbc95464f47bb2c554b62349cf2699343260 | 1,868 | py | Python | search/tests/test_read_similarities.py | cotsog/pathways-backend | 9231731359fc97833dbdbca33ac23eebeac4f715 | [
"BSD-3-Clause"
] | null | null | null | search/tests/test_read_similarities.py | cotsog/pathways-backend | 9231731359fc97833dbdbca33ac23eebeac4f715 | [
"BSD-3-Clause"
] | null | null | null | search/tests/test_read_similarities.py | cotsog/pathways-backend | 9231731359fc97833dbdbca33ac23eebeac4f715 | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from search.read_similarities import build_manual_similarity_map
from common.testhelpers.random_test_values import a_string, a_float
| 32.206897 | 81 | 0.571734 |
be47eadfdaf03e7261eb7070f1efcdf27e299506 | 7,535 | py | Python | fortuna/fortuna.py | Zabamund/HackCPH18 | 3855547824c6277ca6f4e7b97c3ad0b3829e266b | [
"MIT"
] | 3 | 2018-06-09T08:03:31.000Z | 2018-11-23T20:18:06.000Z | fortuna/fortuna.py | Zabamund/HackCPH18 | 3855547824c6277ca6f4e7b97c3ad0b3829e266b | [
"MIT"
] | 1 | 2020-03-30T20:23:17.000Z | 2020-03-30T20:23:17.000Z | fortuna/fortuna.py | Zabamund/HackCPH18 | 3855547824c6277ca6f4e7b97c3ad0b3829e266b | [
"MIT"
] | 2 | 2018-06-09T06:45:53.000Z | 2018-06-09T15:36:36.000Z | """
Fortuna
Python project to visualize uncertatinty in probabilistic exploration models.
Created on 09/06/2018
@authors: Natalia Shchukina, Graham Brew, Marco van Veen, Behrooz Bashokooh, Tobias Stl, Robert Leckenby
"""
# Import libraries
import numpy as np
import glob
from matplotlib import pyplot as plt
import pandas as pd
import xarray as xr
import pyproj as proj
from scipy.stats import norm
| 34.56422 | 145 | 0.584871 |
be47eb6ac22a5716a180d2587e75ad448943ea4f | 1,104 | py | Python | resize.py | Linx3/6.867-Final-Project | 374d7093159be0bc524b291bacad52741f6bdc95 | [
"MIT"
] | 3 | 2019-12-27T12:18:29.000Z | 2020-02-10T22:40:36.000Z | resize.py | Linx3/6.867-Final-Project | 374d7093159be0bc524b291bacad52741f6bdc95 | [
"MIT"
] | null | null | null | resize.py | Linx3/6.867-Final-Project | 374d7093159be0bc524b291bacad52741f6bdc95 | [
"MIT"
] | 2 | 2019-12-29T02:11:29.000Z | 2020-02-10T19:49:41.000Z | from PIL import Image
# open an image file (.bmp,.jpg,.png,.gif) you have in the working folder
# //imageFile = "03802.png"
import os
arr=os.listdir()
for imageFile in arr:
if "png" in imageFile:
im1 = Image.open(imageFile)
# adjust width and height to your needs
width = 416
height = 416
# use one of these filter options to resize the image
im2 = im1.resize((width, height), Image.NEAREST) # use nearest neighbour
# im3 = im1.resize((width, height), Image.BILINEAR) # linear interpolation in a 2x2 environment
# im4 = im1.resize((width, height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment
# im5 = im1.resize((width, height), Image.ANTIALIAS) # best down-sizing filter
ext = ".png"
# print(imageFile.split(".")[0])
num=imageFile.split(".")[0]
print(num)
print(type(num))
im2.save(imageFile)
# im2.save(imageFile+ ext)
# im3.save("BILINEAR" + ext)
# im4.save("BICUBIC" + ext)
# im5.save("ANTIALIAS" + ext)
| 36.8 | 113 | 0.600543 |
be4ff442cd8f9b517de533a73d5af1571d1d4790 | 2,517 | py | Python | src/pipeline/sentence-retrieval/run.py | simonepri/fever-transformers | 3e9c57b0b4e781f318438d48589a56db709124c4 | [
"MIT"
] | 8 | 2020-05-03T08:40:24.000Z | 2022-01-25T18:47:44.000Z | src/pipeline/sentence-retrieval/run.py | simonepri/fever-transformers | 3e9c57b0b4e781f318438d48589a56db709124c4 | [
"MIT"
] | null | null | null | src/pipeline/sentence-retrieval/run.py | simonepri/fever-transformers | 3e9c57b0b4e781f318438d48589a56db709124c4 | [
"MIT"
] | 3 | 2020-05-02T20:21:45.000Z | 2022-01-25T18:48:28.000Z | #!/usr/bin/env python3
import argparse
import bisect
import csv
import json
import os
from collections import defaultdict
from functools import reduce
from tqdm import tqdm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scores-file", type=str)
parser.add_argument("--in-file", type=str, help="input dataset")
parser.add_argument("--out-file", type=str,
help="path to save output dataset")
parser.add_argument("--max-sentences-per-claim", type=int,
help="number of top sentences to return for each claim")
args = parser.parse_args()
main(args.scores_file, args.in_file, args.out_file, max_sentences_per_claim=args.max_sentences_per_claim)
| 40.596774 | 109 | 0.65594 |
be514c5db015a36e1e21cf77afc4f28e841509a0 | 4,455 | py | Python | bot/__main__.py | KOTBOTS/Telegram-CloneBot | 446d66ba46817f784e8de2b8bd2966865ee1965f | [
"MIT"
] | 1 | 2021-11-10T05:06:00.000Z | 2021-11-10T05:06:00.000Z | bot/__main__.py | KOTBOTS/Telegram-CloneBot | 446d66ba46817f784e8de2b8bd2966865ee1965f | [
"MIT"
] | null | null | null | bot/__main__.py | KOTBOTS/Telegram-CloneBot | 446d66ba46817f784e8de2b8bd2966865ee1965f | [
"MIT"
] | 1 | 2022-01-30T08:50:28.000Z | 2022-01-30T08:50:28.000Z | from telegram.ext import CommandHandler, run_async
from bot.gDrive import GoogleDriveHelper
from bot.fs_utils import get_readable_file_size
from bot import LOGGER, dispatcher, updater, bot
from bot.config import BOT_TOKEN, OWNER_ID, GDRIVE_FOLDER_ID
from bot.decorators import is_authorised, is_owner
from telegram.error import TimedOut, BadRequest
from bot.clone_status import CloneStatus
from bot.msg_utils import deleteMessage, sendMessage
import time
REPO_LINK = "https://t.me/KOT_BOTS"
# Soon to be used for direct updates from within the bot.
# TODO Cancel Clones with /cancel command.
def sleeper(value, enabled=True):
time.sleep(int(value))
return
main()
| 40.87156 | 265 | 0.655892 |
be520ba7720ed297f3538b6906896f4c66ca61d8 | 8,180 | py | Python | src/pyfinlab/risk_models.py | AnaSan27/pyfinlab | 509cc9544af5e1a5b2b642eca9ae02d383dd743c | [
"BSD-3-Clause"
] | 1 | 2021-10-05T19:34:34.000Z | 2021-10-05T19:34:34.000Z | src/pyfinlab/risk_models.py | AnaSan27/pyfinlab | 509cc9544af5e1a5b2b642eca9ae02d383dd743c | [
"BSD-3-Clause"
] | null | null | null | src/pyfinlab/risk_models.py | AnaSan27/pyfinlab | 509cc9544af5e1a5b2b642eca9ae02d383dd743c | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import numpy as np
from portfoliolab.utils import RiskMetrics
from portfoliolab.estimators import RiskEstimators
from pypfopt import risk_models as risk_models_
"""
Available covariance risk models in PortfolioLab library.
https://hudson-and-thames-portfoliolab-pro.readthedocs-hosted.com/en/latest/estimators/risk_estimators.html
Available covariance risk models in PyPortfolioOpt library.
https://pyportfolioopt.readthedocs.io/en/latest/RiskModels.html#
These functions bring together all covariance matrix risk models from PortfolioLab and PyPortfolioOpt into one
function for ease of use.
"""
risk_met = RiskMetrics()
risk_estimators = RiskEstimators()
risk_models = [
# PyPortfolioOpt
'sample_cov',
'semicovariance',
'exp_cov',
'ledoit_wolf_constant_variance',
'ledoit_wolf_single_factor',
'ledoit_wolf_constant_correlation',
'oracle_approximating',
# PortfolioLab
'sample_covariance',
'minimum_covariance_determinant',
'empirical_covariance',
'shrinked_covariance_basic',
'shrinked_covariance_lw',
'shrinked_covariance_oas',
'semi_covariance',
'exponential_covariance',
'constant_residual_eigenvalues_denoised',
'constant_residual_spectral_denoised',
'targeted_shrinkage_denoised',
'targeted_shrinkage_detoned',
'constant_residual_detoned',
'hierarchical_filtered_complete',
'hierarchical_filtered_single',
'hierarchical_filtered_avg'
]
def risk_model(prices, model, kde_bwidth=0.01, basic_shrinkage=0.1):
"""
Calculates the covariance matrix for a dataframe of asset prices.
:param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset.
:param model: (str) Risk model to use. Should be one of:
PyPortfolioOpt
- 'sample_cov',
- 'semicovariance',
- 'exp_cov',
- 'ledoit_wolf_constant_variance',
- 'ledoit_wolf_single_factor'
- 'ledoit_wolf_constant_correlation',
- 'oracle_approximating'
PortfolioLab
- 'sample_covariance',
- 'minimum_covariance_determinant',
- 'empirical_covariance',
- 'shrinked_covariance_basic',
- 'shrinked_covariance_lw',
- 'shrinked_covariance_oas',
- 'semi_covariance',
- 'exponential_covariance',
- 'constant_residual_eigenvalues_denoised',
- 'constant_residual_spectral_denoised',
- 'targeted_shrinkage_denoised',
- 'targeted_shrinkage_detoned',
- 'constant_residual_detoned',
- 'hierarchical_filtered_complete',
- 'hierarchical_filtered_single',
- 'hierarchical_filtered_avg'
:param kde_bwidth: (float) Optional, bandwidth of the kernel to fit KDE. (0.01 by default)
:param basic_shrinkage: (float) Optional, between 0 and 1. Coefficient in the convex combination for basic shrinkage.
(0.1 by default)
:return: (pd.DataFrame) Estimated covariance matrix.
"""
tn_relation = prices.shape[0] / prices.shape[1]
sample_cov = prices.pct_change().dropna().cov()
empirical_cov = pd.DataFrame(risk_estimators.empirical_covariance(prices, price_data=True),
index=sample_cov.index, columns=sample_cov.columns)
empirical_corr = pd.DataFrame(risk_estimators.cov_to_corr(empirical_cov ** 2),
index=sample_cov.index, columns=sample_cov.columns)
std = np.diag(empirical_cov) ** (1 / 2)
if model == 'sample_covariance':
return prices.pct_change().dropna().cov()
elif model == 'minimum_covariance_determinant':
covariance_matrix = risk_estimators.minimum_covariance_determinant(prices, price_data=True)
elif model == 'empirical_covariance':
covariance_matrix = risk_estimators.empirical_covariance(prices, price_data=True)
elif model == 'shrinked_covariance_basic':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='basic', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_lw':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='lw', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_oas':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='oas', basic_shrinkage=basic_shrinkage)
elif model == 'semi_covariance':
covariance_matrix = risk_estimators.semi_covariance(prices, price_data=True, threshold_return=0)
elif model == 'exponential_covariance':
covariance_matrix = risk_estimators.exponential_covariance(prices, price_data=True, window_span=60)
elif model == 'constant_residual_eigenvalues_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=False, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_spectral_denoised':
covariance_matrix = risk_estimators.denoise_covariance(empirical_cov, tn_relation, denoise_method='spectral')
elif model == 'targeted_shrinkage_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=False, kde_bwidth=kde_bwidth)
elif model == 'targeted_shrinkage_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=True, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=True, market_component=1,
kde_bwidth=kde_bwidth)
elif model == 'hierarchical_filtered_complete':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='complete', draw_plot=False), std)
elif model == 'hierarchical_filtered_single':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='single', draw_plot=False), std)
elif model == 'hierarchical_filtered_avg':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='average', draw_plot=False), std)
elif model == 'sample_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.sample_cov(prices)) / 252
elif model == 'semicovariance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.semicovariance(prices)) / 252
elif model == 'exp_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.exp_cov(prices, span=180)) / 252
elif model == 'ledoit_wolf_constant_variance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_single_factor':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_constant_correlation':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'oracle_approximating':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
else:
raise NameError('You must input a risk model. Check spelling. Case-Sensitive.')
if not isinstance(covariance_matrix, pd.DataFrame):
covariance_matrix = pd.DataFrame(covariance_matrix, index=sample_cov.index, columns=sample_cov.columns).round(6)
return covariance_matrix * 252
| 49.575758 | 121 | 0.718093 |
be53ecbf1f6e947fe3a12409a789c5940cb5ceed | 2,516 | py | Python | gaussian_blur/gaussian_blur.py | Soft-illusion/ComputerVision | 9afaa9eafef8ac47fdb1023c5332cff98626f1bd | [
"MIT"
] | null | null | null | gaussian_blur/gaussian_blur.py | Soft-illusion/ComputerVision | 9afaa9eafef8ac47fdb1023c5332cff98626f1bd | [
"MIT"
] | null | null | null | gaussian_blur/gaussian_blur.py | Soft-illusion/ComputerVision | 9afaa9eafef8ac47fdb1023c5332cff98626f1bd | [
"MIT"
] | null | null | null | import cv2 as cv
import sys
import numpy as np
import random as r
import os
from PIL import Image as im
img = cv.imread(cv.samples.findFile("3.png"))
if img is None:
sys.exit("Could not read the image.")
else :
width , height , depth = img.shape
img_noisy = noisy("gauss",img)
for kernal_size in range (1,71,2):
print(kernal_size)
dst = cv.GaussianBlur(img_noisy,(kernal_size,kernal_size),0)
# print( cv.getGaussianKernel(kernal_size,0))
file_name = "gaussian_blur" + str(kernal_size) + ".png"
cv.imwrite(file_name, dst)
# dst = img_noisy
# for kernal_no in range (0,200):
# print(kernal_no)
# dst = cv.GaussianBlur(dst,(3,3),1)
# # print( cv.getGaussianKernel(kernal_size,3))
# file_name = "gaussian_blur" + str(kernal_no) + ".png"
# cv.imwrite(file_name, dst)
for kernal_size in range (1,71,2):
print(kernal_size)
dst = cv.bilateralFilter(img_noisy,kernal_size,300,300)
# print( cv.getGaussianKernel(kernal_size,0))
file_name = "bilateral_blur" + str(kernal_size) + ".png"
cv.imwrite(file_name, dst)
| 32.675325 | 91 | 0.598967 |
be55e1c8b12cbd1b4bd83120c737d0990e906ce2 | 3,223 | py | Python | citywok_ms/employee/routes.py | fossabot/CityWok-Manager | ccd31eb684ddeec5c741c9520c779d98eb0e3cc6 | [
"MIT"
] | null | null | null | citywok_ms/employee/routes.py | fossabot/CityWok-Manager | ccd31eb684ddeec5c741c9520c779d98eb0e3cc6 | [
"MIT"
] | null | null | null | citywok_ms/employee/routes.py | fossabot/CityWok-Manager | ccd31eb684ddeec5c741c9520c779d98eb0e3cc6 | [
"MIT"
] | null | null | null | from citywok_ms.file.models import EmployeeFile, File
import citywok_ms.employee.messages as employee_msg
import citywok_ms.file.messages as file_msg
from citywok_ms.employee.forms import EmployeeForm
from citywok_ms.file.forms import FileForm
from flask import Blueprint, flash, redirect, render_template, url_for
from citywok_ms.employee.models import Employee
employee = Blueprint("employee", __name__, url_prefix="/employee")
| 33.926316 | 85 | 0.714552 |
be5634f2d2873fa0b75fded2fda0cc44792517a3 | 9,041 | py | Python | kitsune/customercare/cron.py | safwanrahman/Ford | 87e91dea1cc22b1759eea81cef069359ccb5cd0b | [
"BSD-3-Clause"
] | 1 | 2017-07-03T12:11:03.000Z | 2017-07-03T12:11:03.000Z | kitsune/customercare/cron.py | feer56/Kitsune1 | 0b39cbc41cb7a067699ce8401d80205dd7c5138d | [
"BSD-3-Clause"
] | 8 | 2020-06-05T18:42:14.000Z | 2022-03-11T23:26:51.000Z | kitsune/customercare/cron.py | safwanrahman/Ford | 87e91dea1cc22b1759eea81cef069359ccb5cd0b | [
"BSD-3-Clause"
] | null | null | null | import calendar
from datetime import datetime, timedelta
import json
import logging
import re
import rfc822
from django.conf import settings
from django.db.utils import IntegrityError
import cronjobs
from multidb.pinning import pin_this_thread
from statsd import statsd
from twython import Twython
from kitsune.customercare.models import Tweet, TwitterAccount, Reply
from kitsune.sumo.redis_utils import redis_client, RedisError
from kitsune.sumo.utils import chunked
LINK_REGEX = re.compile('https?\:', re.IGNORECASE)
RT_REGEX = re.compile('^rt\W', re.IGNORECASE)
ALLOWED_USERS = [
{'id': 2142731, 'username': 'Firefox'},
{'id': 150793437, 'username': 'FirefoxBrasil'},
{'id': 107272435, 'username': 'firefox_es'},
]
log = logging.getLogger('k.twitter')
def get_word_blacklist_regex():
"""
Make a regex that looks kind of like r'\b(foo|bar|baz)\b'.
This is a function so that it isn't calculated at import time,
and so can be tested more easily.
This doesn't use raw strings (r'') because the "mismatched" parens
were confusing my syntax highlighter, which was confusing me.
"""
return re.compile(
'\\b(' +
'|'.join(map(re.escape, settings.CC_WORD_BLACKLIST)) +
')\\b')
def _get_oldest_tweet(locale, n=0):
    """Returns the nth oldest tweet per locale, defaults to newest."""
    # NOTE(review): the queryset is ordered by '-created' (newest first),
    # so index n here is actually the nth *newest* tweet for the locale.
    # The function name / docstring and the ordering disagree — confirm
    # which behavior callers rely on before changing either.
    try:
        return Tweet.objects.filter(locale=locale).order_by(
            '-created')[n]
    except IndexError:
        # Fewer than n+1 tweets exist for this locale.
        return None
def _filter_tweet(item, allow_links=False):
    """
    Apply some filters to an incoming tweet.

    May modify tweet. If None is returned, tweet will be discarded.
    Used to exclude replies and such from incoming tweets.

    :arg item: decoded tweet payload (dict); must contain 'text',
        'entities' -> 'user_mentions', and 'user' -> 'screen_name'.
    :arg allow_links: when True, tweets containing links are kept.
    :returns: the item if it passes all filters, otherwise None.

    Each rejection increments a distinct statsd counter so operators can
    see *why* tweets are being dropped.
    """
    text = item['text'].lower()

    # No replies, except to ALLOWED_USERS
    allowed_user_ids = [u['id'] for u in ALLOWED_USERS]
    to_user_id = item.get('to_user_id')
    if to_user_id and to_user_id not in allowed_user_ids:
        statsd.incr('customercare.tweet.rejected.reply_or_mention')
        return None

    # No mentions, except of ALLOWED_USERS
    for user in item['entities']['user_mentions']:
        if user['id'] not in allowed_user_ids:
            statsd.incr('customercare.tweet.rejected.reply_or_mention')
            return None

    # No retweets: either an "RT"-style prefix or the older "(via @x)" form.
    if RT_REGEX.search(text) or text.find('(via ') > -1:
        statsd.incr('customercare.tweet.rejected.retweet')
        return None

    # No links (unless the caller opted in with allow_links=True).
    if not allow_links and LINK_REGEX.search(text):
        statsd.incr('customercare.tweet.rejected.link')
        return None

    screen_name = item['user']['screen_name']

    # Fresh DB query per tweet; Django's caching system will save us here.
    IGNORED_USERS = set(
        TwitterAccount.objects
        .filter(ignored=True)
        .values_list('username', flat=True)
    )

    # Exclude users explicitly marked as ignored.
    if screen_name in IGNORED_USERS:
        statsd.incr('customercare.tweet.rejected.user')
        return None

    # Exclude users with "firefox" in the handle (likely official/bot
    # accounts rather than users needing support).
    if 'firefox' in screen_name.lower():
        statsd.incr('customercare.tweet.rejected.firefox_in_handle')
        return None

    # Exclude tweets containing blacklisted words; the matched word is
    # appended to the counter name for per-word metrics.
    match = get_word_blacklist_regex().search(text)
    if match:
        bad_word = match.group(1)
        statsd.incr('customercare.tweet.rejected.blacklist_word.' + bad_word)
        return None

    return item
| 32.289286 | 79 | 0.605243 |
be5740f5f8c7bb04c4a6f3ebc3c04afbcec0a250 | 1,333 | py | Python | setup.py | nrcmedia/pdfrw | 2a3c9caded906b7ca71f1a338673a24f90eb0e5c | [
"MIT"
] | 2 | 2015-01-16T18:07:34.000Z | 2015-11-01T05:07:15.000Z | setup.py | nrcmedia/pdfrw | 2a3c9caded906b7ca71f1a338673a24f90eb0e5c | [
"MIT"
] | null | null | null | setup.py | nrcmedia/pdfrw | 2a3c9caded906b7ca71f1a338673a24f90eb0e5c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup

# Importing setuptools (when installed) monkey-patches distutils so that
# extended commands and egg metadata work; plain distutils is the fallback.
try:
    import setuptools
except ImportError:
    # setuptools is optional — distutils.core.setup above is sufficient.
    # (Was a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit; narrowed to ImportError.)
    pass

setup(
    name='pdfrw',
    version='0.1',
    description='PDF file reader/writer library',
    long_description='''
pdfrw lets you read and write PDF files, including
compositing multiple pages together (e.g. to do watermarking,
or to copy an image or diagram from one PDF to another),
and can output by itself, or in conjunction with reportlab.

pdfrw will faithfully reproduce vector formats without
rasterization, so the rst2pdf package has used pdfrw
by default for PDF and SVG images by default since
March 2010.  Several small examples are provided.
''',
    author='Patrick Maupin',
    author_email='pmaupin@gmail.com',
    platforms='Independent',
    url='http://code.google.com/p/pdfrw/',
    packages=['pdfrw', 'pdfrw.objects'],
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Multimedia :: Graphics :: Graphics Conversion',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities'
    ],
    keywords='pdf vector graphics',
)
| 31 | 65 | 0.675919 |
be5b35007ab39510b966782ec2dccb27e2f0b068 | 2,429 | py | Python | checkAnnotation.py | ZZIDZZ/pytorch-ssd | 8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9 | [
"MIT"
] | null | null | null | checkAnnotation.py | ZZIDZZ/pytorch-ssd | 8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9 | [
"MIT"
] | null | null | null | checkAnnotation.py | ZZIDZZ/pytorch-ssd | 8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9 | [
"MIT"
] | null | null | null | import argparse
import sys
import cv2
import os
import os.path as osp
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--root', help='Dataset root directory path')
args = parser.parse_args()
CLASSES = ( # always index 0
'helmet', 'vest', 'no_helmet')
annopath = osp.join('%s', 'Annotations', '%s.{}'.format("xml"))
imgpath = osp.join('%s', 'JPEGImages', '%s.{}'.format("jpg"))
# Walk every annotation file and sanity-check it against the matching image's
# actual dimensions. NOTE(review): `vocChecker` is referenced but not defined
# in this view — presumably defined earlier in the original file; confirm.
if __name__ == '__main__' :
    i = 0
    for name in sorted(os.listdir(osp.join(args.root,'Annotations'))):
        # as we have only one annotations file per image
        i += 1
        img = cv2.imread(imgpath % (args.root,name.split('.')[0]))
        height, width, channels = img.shape
        res = vocChecker((args.root, name.split('.')[0]), height, width)
        print("path : {}".format(annopath % (args.root,name.split('.')[0])))
        # NOTE(review): vocChecker is called twice per file with identical
        # arguments; the first result is discarded. Likely redundant —
        # confirm whether the double call is intentional (side effects?).
        res = vocChecker((args.root, name.split('.')[0]), height, width)
    print("Total of annotations : {}".format(i))
be5b3780be0df5ceef2f2e2a8a4f5c6573838a4e | 3,215 | py | Python | src/oci/identity_data_plane/models/password_reset_authentication_request.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/identity_data_plane/models/password_reset_authentication_request.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/identity_data_plane/models/password_reset_authentication_request.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
| 31.519608 | 245 | 0.675894 |
be5d745da0aee12618b5456e7d8cbede2e23e222 | 656 | py | Python | venv/lib/python3.7/site-packages/convertdate/dublin.py | vchiapaikeo/prophet | e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/convertdate/dublin.py | vchiapaikeo/prophet | e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/convertdate/dublin.py | vchiapaikeo/prophet | e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of convertdate.
# http://github.com/fitnr/convertdate
# Licensed under the MIT license:
# http://opensource.org/licenses/MIT
# Copyright (c) 2016, fitnr <fitnr@fakeisthenewreal>
'''Convert to and from the Dublin day count'''
from . import daycount

EPOCH = 2415020  # Julian Day Count for Dublin Count 0

# One DayCount instance anchored at EPOCH provides every conversion; the
# module-level names below re-export its bound methods so callers can use
# e.g. `dublin.to_gregorian(...)` directly, matching the sibling modules.
_dublin = daycount.DayCount(EPOCH)

to_gregorian = _dublin.to_gregorian
from_gregorian = _dublin.from_gregorian
to_jd = _dublin.to_jd
from_jd = _dublin.from_jd
from_julian = _dublin.from_julian
to_julian = _dublin.to_julian
to_datetime = _dublin.to_datetime
from_datetime = _dublin.from_datetime
be5dd7bfd950d236cdb2d9db1cde1c0dbae6c636 | 5,250 | py | Python | tests/functional/controllers/test_group_controller_superuser.py | roscisz/TensorHive | 4a680f47a0ee1ce366dc82ad9964e229d9749c4e | [
"Apache-2.0"
] | 129 | 2017-08-25T11:45:15.000Z | 2022-03-29T05:11:25.000Z | tests/functional/controllers/test_group_controller_superuser.py | roscisz/TensorHive | 4a680f47a0ee1ce366dc82ad9964e229d9749c4e | [
"Apache-2.0"
] | 251 | 2017-07-27T10:05:58.000Z | 2022-03-02T12:46:13.000Z | tests/functional/controllers/test_group_controller_superuser.py | roscisz/TensorHive | 4a680f47a0ee1ce366dc82ad9964e229d9749c4e | [
"Apache-2.0"
] | 20 | 2017-08-13T13:05:14.000Z | 2022-03-19T02:21:37.000Z | from tensorhive.models.Group import Group
from fixtures.controllers import API_URI as BASE_URI, HEADERS
from http import HTTPStatus
from importlib import reload
import json
import auth_patcher
ENDPOINT = BASE_URI + '/groups'
# POST /groups
# PUT /groups/{id}
# PUT /groups/{id} - nonexistent id
# DELETE /groups/{id}
# DELETE /groups/{id} - nonexistent id
# PUT /groups/{id}/users/{id}
# DELETE /groups/{id}/users/{id}
# PUT /groups/{id}/users/{id} - nonexistent user id
# PUT /groups/{id}/users/{id} - nonexistent group id
# DELETE /groups/{id}/users/{id} - nonexistent user id
# DELETE /groups/{id}/users/{id} - nonexistent group id
# PUT /groups/{id}
# PUT /groups/{id}
| 32.012195 | 117 | 0.717714 |
be5e4769d08439109a7dee5ae6c729de8b3ba612 | 1,232 | py | Python | code/generate_thought_vectors.py | midas-research/text2facegan | 3770333f16234fc9328d8254d1c1112fad15a16c | [
"MIT"
] | 23 | 2020-04-09T19:17:46.000Z | 2021-04-13T13:46:06.000Z | code/generate_thought_vectors.py | midas-research/text2facegan | 3770333f16234fc9328d8254d1c1112fad15a16c | [
"MIT"
] | 3 | 2020-02-16T16:21:38.000Z | 2021-05-22T13:18:57.000Z | code/generate_thought_vectors.py | midas-research/text2facegan | 3770333f16234fc9328d8254d1c1112fad15a16c | [
"MIT"
] | 7 | 2020-02-27T22:27:33.000Z | 2021-03-16T06:03:32.000Z | import os
from os.path import join, isfile
import re
import numpy as np
import pickle
import argparse
import skipthoughts
import h5py
if __name__ == '__main__':
main() | 30.8 | 115 | 0.728896 |
be5f92734068facbaab6ebcd59a70aae8bdb395f | 415 | py | Python | venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | """Parsing responses from the difficulty command."""
from mcipc.rcon.functions import boolmap
__all__ = ['parse']
SET = 'The difficulty has been set to (\\w+)'
UNCHANGED = 'The difficulty did not change; it is already set to (\\w+)'
def parse(text: str) -> bool:
    """Parse a boolean from the text returned by the difficulty command.

    True when the text matches SET ("The difficulty has been set to ..."),
    False when it matches UNCHANGED; any other text is handled by boolmap
    (behavior for non-matching text is defined there — see mcipc.rcon.functions).
    """
    return boolmap(text, true=SET, false=UNCHANGED)
| 21.842105 | 72 | 0.684337 |
be6134b8d63935100cb7803033cbd22148a4202a | 1,558 | py | Python | eth/beacon/aggregation.py | Bhargavasomu/py-evm | ee8f72d5a70805575a967cde0a43942e1526264e | [
"MIT"
] | null | null | null | eth/beacon/aggregation.py | Bhargavasomu/py-evm | ee8f72d5a70805575a967cde0a43942e1526264e | [
"MIT"
] | null | null | null | eth/beacon/aggregation.py | Bhargavasomu/py-evm | ee8f72d5a70805575a967cde0a43942e1526264e | [
"MIT"
] | null | null | null | from typing import (
Iterable,
Tuple,
)
from cytoolz import (
pipe
)
from eth._utils import bls
from eth._utils.bitfield import (
set_voted,
)
from eth.beacon.enums import SignatureDomain
from eth.beacon.typing import (
BLSPubkey,
BLSSignature,
Bitfield,
CommitteeIndex,
)
def verify_votes(
        message: bytes,
        votes: Iterable[Tuple[CommitteeIndex, BLSSignature, BLSPubkey]],
        domain: SignatureDomain
) -> Tuple[Tuple[BLSSignature, ...], Tuple[CommitteeIndex, ...]]:
    """
    Verify the given votes and keep only the valid ones.

    Each vote is a ``(committee_index, signature, public_key)`` triple.
    Returns two parallel tuples: the signatures that verified against
    ``message``/``domain``, and the committee indices of their voters.
    Both tuples are empty when no vote verifies.
    """
    valid_sigs = []
    valid_indices = []
    for committee_index, sig, public_key in votes:
        if bls.verify(message, public_key, sig, domain):
            valid_sigs.append(sig)
            valid_indices.append(committee_index)
    return tuple(valid_sigs), tuple(valid_indices)
def aggregate_votes(
        bitfield: Bitfield,
        sigs: Iterable[BLSSignature],
        voting_sigs: Iterable[BLSSignature],
        voting_committee_indices: Iterable[CommitteeIndex]
) -> Tuple[Bitfield, BLSSignature]:
    """
    Aggregate the votes.

    Combines the existing signatures with the new voting signatures,
    marks each new voter's bit in the bitfield, and returns the updated
    bitfield together with the single aggregated BLS signature.
    """
    # Update the bitfield and append the signatures
    sigs = tuple(sigs) + tuple(voting_sigs)
    # Each set_voted(index=...) is threaded through `pipe`, i.e. applied
    # in turn to the bitfield. Assumes set_voted is curried so that
    # set_voted(index=i) yields a one-argument callable — TODO confirm
    # against kitsune/eth's bitfield helper.
    bitfield = pipe(
        bitfield,
        *(
            set_voted(index=committee_index)
            for committee_index in voting_committee_indices
        )
    )

    return bitfield, bls.aggregate_signatures(sigs)
| 23.253731 | 68 | 0.662388 |
be6143e65d151cdd084aada126448567dcd0c1d7 | 7,090 | py | Python | src/server/bos/controllers/v1/components.py | Cray-HPE/bos | a4a7fc58c884d951b6051093e1a4e2aeaba6740f | [
"MIT"
] | 1 | 2022-03-15T18:17:11.000Z | 2022-03-15T18:17:11.000Z | src/server/bos/controllers/v1/components.py | Cray-HPE/bos | a4a7fc58c884d951b6051093e1a4e2aeaba6740f | [
"MIT"
] | null | null | null | src/server/bos/controllers/v1/components.py | Cray-HPE/bos | a4a7fc58c884d951b6051093e1a4e2aeaba6740f | [
"MIT"
] | 1 | 2022-03-06T12:47:06.000Z | 2022-03-06T12:47:06.000Z | # Copyright 2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
import connexion
from datetime import datetime
import logging
from bos import redis_db_utils as dbutils
LOGGER = logging.getLogger('bos.controllers.v1.components')
DB = dbutils.get_wrapper(db='components')
def get_components_data(id_list=None, enabled=None):
    """Used by the GET /components API operation

    Allows filtering using a comma separated list of ids.
    """
    if id_list:
        # Look up each requested id individually, dropping ids with no data.
        components = [c for c in (DB.get(cid) for cid in id_list) if c]
    else:
        # TODO: On large scale systems, this response may be too large
        # and require paging to be implemented
        components = DB.get_all()
    if enabled is not None:
        components = [c for c in components if _matches_filter(c, enabled)]
    return components
def _set_auto_fields(data):
    """Apply all automatically-managed fields to component data.

    Currently the only automatic field is the lastUpdated timestamp.
    """
    return _set_last_updated(data)
def _set_last_updated(data):
timestamp = datetime.utcnow().isoformat()
for section in ['actualState', 'desiredState', 'lastAction']:
if section in data and type(data[section]) == dict:
data[section]['lastUpdated'] = timestamp
return data
def _update_handler(data):
# Allows processing of data during common patch operation
return data
| 36.173469 | 82 | 0.687729 |
be629e4dd47b9de924dd51caddb573587b68e29b | 268 | py | Python | cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py | angelusualle/algorithms | 86286a49db2a755bc57330cb455bcbd8241ea6be | [
"Apache-2.0"
] | null | null | null | cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py | angelusualle/algorithms | 86286a49db2a755bc57330cb455bcbd8241ea6be | [
"Apache-2.0"
] | null | null | null | cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py | angelusualle/algorithms | 86286a49db2a755bc57330cb455bcbd8241ea6be | [
"Apache-2.0"
] | null | null | null | import unittest
from find_x_in_listy import find_x_in_listy, Listy | 38.285714 | 60 | 0.761194 |
be62c8d9e725078536f0891cffbcc08c85ff6f54 | 979 | py | Python | my_general_helpers.py | arminbahl/drosophila_phototaxis_paper | e01dc95675f835926c9104b34bf6cfd7244dee2b | [
"MIT"
] | null | null | null | my_general_helpers.py | arminbahl/drosophila_phototaxis_paper | e01dc95675f835926c9104b34bf6cfd7244dee2b | [
"MIT"
] | null | null | null | my_general_helpers.py | arminbahl/drosophila_phototaxis_paper | e01dc95675f835926c9104b34bf6cfd7244dee2b | [
"MIT"
] | null | null | null | from scipy.signal import butter,filtfilt
from numba import jit
import bisect
# def butter_lowpass(cutoff, fs, order=5):
# nyq = 0.5 * fs
# normal_cutoff = cutoff / nyq
# b, a = butter(order, normal_cutoff, btype='low', analog=False)
# return b, a
| 27.971429 | 86 | 0.684372 |
be644a96343b814a2cf63e0bf374f535055ecf7e | 6,856 | py | Python | test/mitmproxy/addons/test_proxyserver.py | KarlParkinson/mitmproxy | fd5caf40c75ca73c4b767170497abf6a5bf016a0 | [
"MIT"
] | 24,939 | 2015-01-01T17:13:21.000Z | 2022-03-31T17:50:04.000Z | test/mitmproxy/addons/test_proxyserver.py | KarlParkinson/mitmproxy | fd5caf40c75ca73c4b767170497abf6a5bf016a0 | [
"MIT"
] | 3,655 | 2015-01-02T12:31:43.000Z | 2022-03-31T20:24:57.000Z | test/mitmproxy/addons/test_proxyserver.py | KarlParkinson/mitmproxy | fd5caf40c75ca73c4b767170497abf6a5bf016a0 | [
"MIT"
] | 3,712 | 2015-01-06T06:47:06.000Z | 2022-03-31T10:33:27.000Z | import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy import exceptions
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Address
from mitmproxy.proxy import layers, server_hooks
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.test import taddons, tflow
from mitmproxy.test.tflow import tclient_conn, tserver_conn
def test_self_connect():
    """The proxy must refuse a server connection back to itself.

    Points the server connection at ("localhost", 8080) and checks that
    server_connect sets the recursion-prevention error message.
    """
    server = tserver_conn()
    client = tclient_conn()
    server.address = ("localhost", 8080)
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        # not calling .running() here to avoid unnecessary socket
        ps.options = tctx.options
        ps.server_connect(
            server_hooks.ServerConnectionHookData(server, client)
        )
        assert server.error == "Stopped mitmproxy from recursively connecting to itself."
def test_options():
    """body_size_limit / stream_large_bodies option validation.

    Malformed human-readable sizes must raise OptionsError; the valid
    value "1m" must be accepted for both options.
    """
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, body_size_limit="invalid")
        tctx.configure(ps, body_size_limit="1m")

        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, stream_large_bodies="invalid")
        tctx.configure(ps, stream_large_bodies="1m")
| 35.895288 | 111 | 0.622666 |
be64e074af6729b6171d5eed328bc46d2d983abb | 19,608 | py | Python | tensorflow_probability/python/distributions/masked.py | mederrata/probability | bc6c411b0fbd83141f303f91a27343fe3c43a797 | [
"Apache-2.0"
] | 1 | 2022-03-22T11:56:31.000Z | 2022-03-22T11:56:31.000Z | tensorflow_probability/python/distributions/masked.py | robot0102/probability | 89d248c420b8ecabfd9d6de4a1aa8d3886920049 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/masked.py | robot0102/probability | 89d248c420b8ecabfd9d6de4a1aa8d3886920049 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The MaskedIndependent distribution class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.distributions import batch_broadcast
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
def _make_masked_fn(fn_name, n_event_shapes, safe_value,
make_arg0_safe=False):
"""Implements functions like mean, variance, etc.
Args:
fn_name: Name of the method called on the underlying distribution.
n_event_shapes: Number of event shape repeats in the shape of the underlying
function's output.
safe_value: The value to be placed in invalid locations. May be
`'safe_sample'` to specify we should use the "safe sample" value.
make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the
argument passed into the underlying routine is a "safe" sample.
Returns:
fn: Callable implementing the given function.
"""
fn.__name__ = f'_{fn_name}'
return fn
_log_prob = _make_masked_fn(
'log_prob', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_prob = _make_masked_fn(
'prob', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_cdf = _make_masked_fn(
'log_cdf', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_cdf = _make_masked_fn(
'cdf', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_survival_function = _make_masked_fn(
'log_survival_function', n_event_shapes=0, safe_value=-float('inf'),
make_arg0_safe=True)
_survival_function = _make_masked_fn(
'survival_function', n_event_shapes=0, safe_value=0.,
make_arg0_safe=True)
_entropy = _make_masked_fn(
'entropy', n_event_shapes=0, safe_value=0.)
_mode = _make_masked_fn(
'mode', n_event_shapes=1, safe_value='safe_sample')
_mean = _make_masked_fn(
'mean', n_event_shapes=1, safe_value='safe_sample')
_variance = _make_masked_fn(
'variance', n_event_shapes=1, safe_value=0.)
_stddev = _make_masked_fn(
'stddev', n_event_shapes=1, safe_value=0.)
_covariance = _make_masked_fn(
'covariance', n_event_shapes=2, safe_value=0.)
_quantile = _make_masked_fn(
'quantile', n_event_shapes=1, safe_value='safe_sample')
class Masked(_Masked, distribution_lib.AutoCompositeTensorDistribution):

  def __new__(cls, *args, **kwargs):
    """Maybe return a non-`CompositeTensor` `_Masked`."""
    if cls is Masked:
      # The wrapped distribution is either the first positional argument
      # or the `distribution` keyword.
      distribution = args[0] if args else kwargs.get('distribution')
      if not isinstance(distribution, tf.__internal__.CompositeTensor):
        # A non-CompositeTensor inner distribution gets the plain wrapper.
        return _Masked(*args, **kwargs)
    return super(Masked, cls).__new__(cls)
Masked.__doc__ = _Masked.__doc__ + '\n' + (
'If `distribution` is a `CompositeTensor`, then the resulting `Masked` '
'instance is a `CompositeTensor` as well. Otherwise, a '
'non-`CompositeTensor` `_Masked` instance is created instead. Distribution '
'subclasses that inherit from `Masked` will also inherit from '
'`CompositeTensor`.')
class _NonCompositeTensorMaskedBijector(bijector_lib.Bijector):
"""Event space bijector for Masked distributions."""
class _MaskedBijector(_NonCompositeTensorMaskedBijector,
                      bijector_lib.AutoCompositeTensorBijector):
  """Event space bijector for Masked distributions."""

  def __new__(cls, *args, **kwargs):
    """Maybe return a `_NonCompositeTensorMaskedBijector`."""
    if cls is _MaskedBijector:
      # Arguments may arrive positionally or by keyword.
      masked = args[0] if args else kwargs.get('masked')
      bijector = (args[1] if len(args) > 1
                  else kwargs.get('underlying_bijector'))
      both_composite = (
          isinstance(masked, tf.__internal__.CompositeTensor)
          and isinstance(bijector, tf.__internal__.CompositeTensor))
      if not both_composite:
        # Fall back to the non-CompositeTensor variant.
        return _NonCompositeTensorMaskedBijector(*args, **kwargs)
    return super(_MaskedBijector, cls).__new__(cls)
| 41.719149 | 88 | 0.708588 |
be665281e674fbcee73480a5a06334a427283318 | 1,254 | py | Python | download.py | kaija/taiwan_stockloader | 637244c3b0bc96093cc5a7b3df093a829f9e3c2d | [
"MIT"
] | 2 | 2015-06-13T09:17:46.000Z | 2015-10-25T15:31:33.000Z | download.py | kaija/taiwan_stockloader | 637244c3b0bc96093cc5a7b3df093a829f9e3c2d | [
"MIT"
] | null | null | null | download.py | kaija/taiwan_stockloader | 637244c3b0bc96093cc5a7b3df093a829f9e3c2d | [
"MIT"
] | 3 | 2016-02-01T07:36:55.000Z | 2018-08-03T12:22:20.000Z | import datetime
import httplib
import urllib
from datetime import timedelta
#now = datetime.datetime.now();
#today = now.strftime('%Y-%m-%d')
#print today
today = datetime.date.today()
one_day = timedelta(days=1);
#start_day = datetime.date(2004, 2, 11);
start_day = datetime.date(2010, 8, 21);
print "Download from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
dl_date = start_day
while dl_date < today:
httpreq = httplib.HTTPConnection('www.twse.com.tw')
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
date_str = str(dl_date.year - 1911 ) + dl_date.strftime("/%m/%d")
form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
httpres = httpreq.getresponse()
stock_csv = httpres.read()
file_name = "data/" + dl_date.strftime("%Y%m%d") + ".csv"
print "downloading " + file_name
f = open(file_name, "w")
f.write(stock_csv)
dl_date += one_day
print "Download Finish!"
| 23.660377 | 93 | 0.692185 |
be665e63998c0015bc21386a7c5b3385196a6cfb | 5,403 | py | Python | heuristic/improvement/reopt/disruption_updater.py | annalunde/master | 2552d43713e8ebca0b0e57bc5bebd1eaeeac1875 | [
"MIT"
] | 1 | 2022-03-17T15:40:00.000Z | 2022-03-17T15:40:00.000Z | heuristic/improvement/reopt/disruption_updater.py | annalunde/master | 2552d43713e8ebca0b0e57bc5bebd1eaeeac1875 | [
"MIT"
] | null | null | null | heuristic/improvement/reopt/disruption_updater.py | annalunde/master | 2552d43713e8ebca0b0e57bc5bebd1eaeeac1875 | [
"MIT"
] | null | null | null | import copy
import pandas as pd
from decouple import config
from heuristic.construction.construction import ConstructionHeuristic
from config.construction_config import *
from simulation.simulator import Simulator
from heuristic.improvement.reopt.new_request_updater import NewRequestUpdater
| 40.931818 | 117 | 0.626504 |
be687c8fd20a0765459343471aaeb0dc60aa0c2b | 666 | py | Python | evennia/scripts/migrations/0013_auto_20191025_0831.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 1,544 | 2015-01-01T22:16:31.000Z | 2022-03-31T19:17:45.000Z | evennia/scripts/migrations/0013_auto_20191025_0831.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 1,686 | 2015-01-02T18:26:31.000Z | 2022-03-31T20:12:03.000Z | evennia/scripts/migrations/0013_auto_20191025_0831.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 867 | 2015-01-02T21:01:54.000Z | 2022-03-29T00:28:27.000Z | # Generated by Django 2.2.6 on 2019-10-25 12:31
from django.db import migrations, models
| 28.956522 | 150 | 0.587087 |
be6ac11cc08ea3cf2a70097fa4537b051b80fea9 | 834 | py | Python | tests/test_pyqrcodeng_issue13.py | dbajar/segno | f7d5669537b12d3ebb914ae6d0a0a1e14f8d25f5 | [
"BSD-3-Clause"
] | 254 | 2016-09-25T21:32:00.000Z | 2022-03-30T09:56:14.000Z | tests/test_pyqrcodeng_issue13.py | dbajar/segno | f7d5669537b12d3ebb914ae6d0a0a1e14f8d25f5 | [
"BSD-3-Clause"
] | 102 | 2016-08-04T12:18:44.000Z | 2022-03-23T09:09:51.000Z | tests/test_pyqrcodeng_issue13.py | dbajar/segno | f7d5669537b12d3ebb914ae6d0a0a1e14f8d25f5 | [
"BSD-3-Clause"
] | 34 | 2016-09-25T21:34:42.000Z | 2022-03-30T08:19:03.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.
The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.
Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| 21.947368 | 72 | 0.681055 |
be6b1f866bc5d3fdc38f4e9b6fd3e9f0bcf0235f | 384 | py | Python | qiskit/quantum_info/operators/__init__.py | jagunnels/qiskit-sdk-py | 153cdde972e65c0f23675bbe17c93e18be27bd51 | [
"Apache-2.0"
] | null | null | null | qiskit/quantum_info/operators/__init__.py | jagunnels/qiskit-sdk-py | 153cdde972e65c0f23675bbe17c93e18be27bd51 | [
"Apache-2.0"
] | null | null | null | qiskit/quantum_info/operators/__init__.py | jagunnels/qiskit-sdk-py | 153cdde972e65c0f23675bbe17c93e18be27bd51 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Quantum Operators."""
from .operator import Operator
from .unitary import Unitary
from .pauli import Pauli, pauli_group
from .channel import Choi, SuperOp, Kraus, Stinespring, Chi, PTM
| 27.428571 | 77 | 0.742188 |
be6d27c87017d3ff2b758a9a1954cf3e265b550c | 554 | py | Python | iocms/iocms/urls.py | Gaurav-Zaiswal/iw-acad-iocms-be | a133f120eed93433925608f08c5145d2d0d1db39 | [
"MIT"
] | null | null | null | iocms/iocms/urls.py | Gaurav-Zaiswal/iw-acad-iocms-be | a133f120eed93433925608f08c5145d2d0d1db39 | [
"MIT"
] | null | null | null | iocms/iocms/urls.py | Gaurav-Zaiswal/iw-acad-iocms-be | a133f120eed93433925608f08c5145d2d0d1db39 | [
"MIT"
] | 2 | 2021-09-16T04:44:59.000Z | 2021-09-16T05:45:31.000Z | from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
# Root URL configuration: each app's routes are delegated to its own
# urls module under a distinct prefix.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('class/', include('classroom.urls')),
    path('assignment-api/', include('assignment.urls', namespace='assignment')),
    path('feed/', include('feed.urls', namespace='feed')),
    path('users/', include('users.urls'), name="user-register")
]

# Serve user-uploaded media through Django itself in development only;
# in production the web server should serve MEDIA_URL.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.933333 | 80 | 0.720217 |
be6f16523ef2463524119c42f75567ed0f66d560 | 1,905 | py | Python | src/security/__init__.py | slippers/blogging_security_flatpage | 53644978b798c66369416b1e5625cc04d89c0a87 | [
"MIT"
] | 1 | 2018-12-31T05:30:13.000Z | 2018-12-31T05:30:13.000Z | src/security/__init__.py | slippers/blogging_security_flatpage | 53644978b798c66369416b1e5625cc04d89c0a87 | [
"MIT"
] | null | null | null | src/security/__init__.py | slippers/blogging_security_flatpage | 53644978b798c66369416b1e5625cc04d89c0a87 | [
"MIT"
] | null | null | null | from src import app, db
from .models import User, Role, RoleUsers
from .security_admin import UserAdmin, RoleAdmin
from flask_security import Security, SQLAlchemyUserDatastore, \
login_required, roles_accepted
from flask_security.utils import encrypt_password
# Setup Flask-Security
# Wire Flask-Security to the SQLAlchemy-backed User/Role models.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)

# Create any database tables that don't exist yet.
# NOTE(review): runs at package import time — confirm this is intended in
# all deployment paths (e.g. when a migration tool manages the schema).
db.create_all()
| 40.531915 | 83 | 0.752756 |
be6f25ab250ddab2ab944a4c759bdf74b87010ce | 12,251 | py | Python | usaspending_api/download/lookups.py | lenjonemcse/usaspending-api | cbffc4e0a0c2b1339c7a8bfe6b0d687b3731b6ce | [
"CC0-1.0"
] | 1 | 2022-01-28T16:08:04.000Z | 2022-01-28T16:08:04.000Z | usaspending_api/download/lookups.py | lenjonemcse/usaspending-api | cbffc4e0a0c2b1339c7a8bfe6b0d687b3731b6ce | [
"CC0-1.0"
] | null | null | null | usaspending_api/download/lookups.py | lenjonemcse/usaspending-api | cbffc4e0a0c2b1339c7a8bfe6b0d687b3731b6ce | [
"CC0-1.0"
] | null | null | null | """
This file defines a series of constants that represent the values used in
the API's "helper" tables.
Rather than define the values in the db setup scripts and then make db calls to
lookup the surrogate keys, we'll define everything here, in a file that can be
used by the db setup scripts *and* the application code.
"""
from collections import namedtuple, OrderedDict
from usaspending_api.accounts.models import AppropriationAccountBalances
from usaspending_api.accounts.v2.filters.account_download import account_download_filter
from usaspending_api.awards.models import Award, TransactionNormalized
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.download.helpers.elasticsearch_download_functions import (
AwardsElasticsearchDownload,
TransactionsElasticsearchDownload,
)
from usaspending_api.download.helpers.disaster_filter_functions import disaster_filter_function
from usaspending_api.search.models import AwardSearchView, TransactionSearch, SubawardView
from usaspending_api.awards.v2.filters.idv_filters import (
idv_order_filter,
idv_transaction_filter,
idv_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.award_filters import (
awards_transaction_filter,
awards_subaward_filter,
awards_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.search import (
universal_award_matview_filter,
transaction_search_filter,
)
from usaspending_api.awards.v2.filters.sub_award import subaward_download
from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass
from usaspending_api.download.helpers.download_annotation_functions import (
transaction_search_annotations,
universal_award_matview_annotations,
subaward_annotations,
idv_order_annotations,
idv_transaction_annotations,
)
# Row shape for the API's "helper" lookup tables (see module docstring):
# a numeric surrogate key, a short machine name, and a human description.
LookupType = namedtuple("LookupType", ["id", "name", "desc"])
# Lifecycle states of a download job.  Per the module docstring these values
# are shared with the db setup scripts, so existing ids should not be
# renumbered.
JOB_STATUS = [
    LookupType(1, "ready", "job is ready to be run"),
    LookupType(2, "running", "job is currently in progress"),
    LookupType(3, "finished", "job is complete"),
    LookupType(4, "failed", "job failed to complete"),
    LookupType(5, "queued", "job sent to queue for async processing"),
    LookupType(6, "resumed", "job is being reprocessed after a failure"),
    LookupType(7, "created", "job product has been created and stored locally"),
    LookupType(8, "uploading", "job is being uploaded to public storage"),
]
# Convenience map: status name -> status id (e.g. JOB_STATUS_DICT["failed"] == 4).
JOB_STATUS_DICT = {item.name: item.id for item in JOB_STATUS}
VALUE_MAPPINGS = {
# Award Level
"awards": {
"source_type": "award",
"table": AwardSearchView,
"table_name": "award",
"type_name": "PrimeAwardSummaries",
"download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": universal_award_matview_filter,
"annotations_function": universal_award_matview_annotations,
},
# Elasticsearch Award Level
"elasticsearch_awards": {
"source_type": "award",
"table": AwardSearchView,
"table_name": "award",
"type_name": "PrimeAwardSummaries",
"download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": AwardsElasticsearchDownload.query,
"annotations_function": universal_award_matview_annotations,
},
# Transaction Level
"transactions": {
"source_type": "award",
"table": TransactionSearch,
"table_name": "transaction",
"type_name": "PrimeTransactions",
"download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
"contract_data": "transaction__contract_data",
"assistance_data": "transaction__assistance_data",
"filter_function": transaction_search_filter,
"annotations_function": transaction_search_annotations,
},
# Elasticsearch Transaction Level
"elasticsearch_transactions": {
"source_type": "award",
"table": TransactionSearch,
"table_name": "transaction",
"type_name": "PrimeTransactions",
"download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
"contract_data": "transaction__contract_data",
"assistance_data": "transaction__assistance_data",
"filter_function": TransactionsElasticsearchDownload.query,
"annotations_function": transaction_search_annotations,
},
# SubAward Level
"sub_awards": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"type_name": "Subawards",
"download_name": "{agency}{type}_Subawards_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": subaward_download,
"annotations_function": subaward_annotations,
},
# Appropriations Account Data
"account_balances": {
"source_type": "account",
"table": AppropriationAccountBalances,
"table_name": "account_balances",
"download_name": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
"filter_function": account_download_filter,
},
# Object Class Program Activity Account Data
"object_class_program_activity": {
"source_type": "account",
"table": FinancialAccountsByProgramActivityObjectClass,
"table_name": "object_class_program_activity",
"download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
"filter_function": account_download_filter,
},
"award_financial": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
"filter_function": account_download_filter,
},
"idv_orders": {
"source_type": "award",
"table": Award,
"table_name": "idv_orders",
"download_name": "IDV_{piid}_Orders",
"contract_data": "latest_transaction__contract_data",
"filter_function": idv_order_filter,
"is_for_idv": True,
"annotations_function": idv_order_annotations,
},
"idv_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "IDV_{piid}_FederalAccountFunding",
"filter_function": idv_treasury_account_funding_filter,
"is_for_idv": True,
},
"idv_transaction_history": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "idv_transaction_history",
"download_name": "IDV_{piid}_TransactionHistory",
"contract_data": "contract_data",
"filter_function": idv_transaction_filter,
"is_for_idv": True,
"annotations_function": idv_transaction_annotations,
},
"contract_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "Contract_{piid}_FederalAccountFunding",
"filter_function": awards_treasury_account_funding_filter,
"is_for_contract": True,
},
"assistance_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "Assistance_{assistance_id}_FederalAccountFunding",
"filter_function": awards_treasury_account_funding_filter,
"is_for_assistance": True,
},
"sub_contracts": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"download_name": "Contract_{piid}_Sub-Awards",
"contract_data": "award__latest_transaction__contract_data",
"filter_function": awards_subaward_filter,
"is_for_contract": True,
"annotations_function": subaward_annotations,
},
"sub_grants": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"download_name": "Assistance_{assistance_id}_Sub-Awards",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": awards_subaward_filter,
"is_for_assistance": True,
"annotations_function": subaward_annotations,
},
"contract_transactions": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "idv_transaction_history",
"download_name": "Contract_{piid}_TransactionHistory",
"contract_data": "contract_data",
"filter_function": awards_transaction_filter,
"is_for_contract": True,
"annotations_function": idv_transaction_annotations,
},
"assistance_transactions": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "assistance_transaction_history",
"download_name": "Assistance_{assistance_id}_TransactionHistory",
"assistance_data": "assistance_data",
"filter_function": awards_transaction_filter,
"is_for_assistance": True,
"annotations_function": idv_transaction_annotations,
},
"disaster_recipient": {
"source_type": "disaster",
"table": AwardSearchView,
"table_name": "recipient",
"download_name": "COVID-19_Recipients_{award_category}_{timestamp}",
"filter_function": disaster_filter_function,
"base_fields": ["recipient_name", "recipient_unique_id"],
},
}
# Bulk Download still uses "prime awards" instead of "transactions"
VALUE_MAPPINGS["prime_awards"] = VALUE_MAPPINGS["transactions"]
# List of CFO CGACS for list agencies viewset in the correct order, names included for reference
# TODO: Find a solution that marks the CFO agencies in the database AND have the correct order
CFO_CGACS_MAPPING = OrderedDict(
[
("012", "Department of Agriculture"),
("013", "Department of Commerce"),
("097", "Department of Defense"),
("091", "Department of Education"),
("089", "Department of Energy"),
("075", "Department of Health and Human Services"),
("070", "Department of Homeland Security"),
("086", "Department of Housing and Urban Development"),
("015", "Department of Justice"),
("1601", "Department of Labor"),
("019", "Department of State"),
("014", "Department of the Interior"),
("020", "Department of the Treasury"),
("069", "Department of Transportation"),
("036", "Department of Veterans Affairs"),
("068", "Environmental Protection Agency"),
("047", "General Services Administration"),
("080", "National Aeronautics and Space Administration"),
("049", "National Science Foundation"),
("031", "Nuclear Regulatory Commission"),
("024", "Office of Personnel Management"),
("073", "Small Business Administration"),
("028", "Social Security Administration"),
("072", "Agency for International Development"),
]
)
CFO_CGACS = list(CFO_CGACS_MAPPING.keys())
# Supported download file formats: the delimiter used in the body, the file
# extension, and the SQL "WITH CSV ..." options string that produces it.
FILE_FORMATS = {
    "csv": {"delimiter": ",", "extension": "csv", "options": "WITH CSV HEADER"},
    "tsv": {"delimiter": "\t", "extension": "tsv", "options": r"WITH CSV DELIMITER E'\t' HEADER"},
    "pstxt": {"delimiter": "|", "extension": "txt", "options": "WITH CSV DELIMITER '|' HEADER"},
}
# Account-download submission types; each is also a key of VALUE_MAPPINGS above.
VALID_ACCOUNT_SUBMISSION_TYPES = ("account_balances", "object_class_program_activity", "award_financial")
| 42.835664 | 105 | 0.691862 |
be70bab0d740612dff3c9c4f650b4e73f95cd9c5 | 1,985 | py | Python | python/modules/mysql_server.py | 91-jinrong/-91_monitor | e0325229bffbb0df20d9337925b591eee8ac0289 | [
"Apache-2.0"
] | 1 | 2015-03-30T06:25:59.000Z | 2015-03-30T06:25:59.000Z | python/modules/mysql_server.py | 91-jinrong/91_monitor | e0325229bffbb0df20d9337925b591eee8ac0289 | [
"Apache-2.0"
] | null | null | null | python/modules/mysql_server.py | 91-jinrong/91_monitor | e0325229bffbb0df20d9337925b591eee8ac0289 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
#-*-coding:utf-8-*-
import os
import sys
import string
import time
import datetime
import MySQLdb
| 31.507937 | 156 | 0.624181 |
be71f6b56c912c07678325e23f7389ad744e9921 | 149 | py | Python | Ethan File/Carrentsystem/Carrentsystem/test.py | hklhfong/Car-Rental-System | 3a4844eea8e9dbf85f4ce62b5115772f48277240 | [
"Apache-2.0"
] | null | null | null | Ethan File/Carrentsystem/Carrentsystem/test.py | hklhfong/Car-Rental-System | 3a4844eea8e9dbf85f4ce62b5115772f48277240 | [
"Apache-2.0"
] | null | null | null | Ethan File/Carrentsystem/Carrentsystem/test.py | hklhfong/Car-Rental-System | 3a4844eea8e9dbf85f4ce62b5115772f48277240 | [
"Apache-2.0"
] | null | null | null | import sqlite3
# Smoke test: print the first five rows of the CAR_ID table from the local
# "db" SQLite file.
conn = sqlite3.connect("db")
try:
    cur = conn.cursor()
    cur.execute("select * from CAR_ID limit 5;")
    results = cur.fetchall()
    print(results)
finally:
    # Fix: the connection was never closed; release it even if the query fails.
    conn.close()
| 21.285714 | 44 | 0.724832 |
be72c9a20697c3fb3a739104db43d4e053b51e7c | 249 | py | Python | tests/integration/hub_usage/dummyhub_slow/__init__.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | 2 | 2021-01-22T07:34:35.000Z | 2021-01-23T04:36:41.000Z | tests/integration/hub_usage/dummyhub_slow/__init__.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | 4 | 2020-09-01T17:47:27.000Z | 2021-04-16T23:11:57.000Z | tests/integration/hub_usage/dummyhub_slow/__init__.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | null | null | null | import time
from jina.executors.crafters import BaseCrafter
from .helper import foo
| 19.153846 | 47 | 0.678715 |
be7401e08d215565703c4b9fa33b7d5e7ca05a69 | 8,827 | py | Python | src/evaluation_utils.py | philipp-hess/deep-learning-for-heavy-rainfall | dbec03245dd8db0c5f2f53af014b8dd8d80f245c | [
"MIT"
] | null | null | null | src/evaluation_utils.py | philipp-hess/deep-learning-for-heavy-rainfall | dbec03245dd8db0c5f2f53af014b8dd8d80f245c | [
"MIT"
] | null | null | null | src/evaluation_utils.py | philipp-hess/deep-learning-for-heavy-rainfall | dbec03245dd8db0c5f2f53af014b8dd8d80f245c | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from sklearn.metrics import f1_score, precision_score, recall_score
from IPython.display import display, clear_output
from sklearn.metrics import confusion_matrix
import scipy.stats as st
def continuous_to_categorical_with_quantiles(data: np.ndarray, quantiles: list) -> np.ndarray:
    """ Converts continuous data into binary classes using quantiles

    Args:
        data: shape [n_time, n_lat, n_lon]
        quantiles:
            list containing quantiles in [0, 1]; each quantile is computed
            over the whole `data` array and a cell is 1 iff it exceeds it
    Returns:
        tmp: shape [n_quantiles, n_time, n_lat, n_lon]
            binary data (one mask per quantile)
    """
    # Fix: the old docstring claimed a flattened return shape; the function
    # has always returned [n_quantiles, n_time, n_lat, n_lon].
    tmp = np.zeros((len(quantiles),) + data.shape)
    for i, quantile in enumerate(quantiles):
        threshold = np.quantile(data, quantile)
        # np.where preserves data's shape, so the old reshape(...).squeeze()
        # round-trip was redundant.
        tmp[i] = np.where(data > threshold, 1, 0)
    return tmp
def continuous_to_categorical_with_thresholds(data: np.ndarray, thresholds: list) -> np.ndarray:
    """ Converts continuous data into binary classes using thresholds

    Args:
        data: shape [n_time, n_lat, n_lon]
        thresholds:
            list containing absolute thresholds; a cell is 1 iff its value
            exceeds the corresponding threshold
    Returns:
        tmp: shape [n_thresholds, n_time, n_lat, n_lon]
            binary data (one mask per threshold)
    """
    # Fix: the old docstring claimed a flattened return shape; the function
    # has always returned [n_thresholds, n_time, n_lat, n_lon].
    tmp = np.zeros((len(thresholds),) + data.shape)
    for i, threshold in enumerate(thresholds):
        # np.where preserves data's shape, so no reshape/squeeze is needed.
        tmp[i] = np.where(data > threshold, 1, 0)
    return tmp
def categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str, mask=None) -> list:
    """
    Evaluates a categorical prediction class-by-class with the requested
    ForecastSkill metric (accessed via `get_<metric_name>`).

    Args:
        prediction: shape [n_classes, X]
        target: shape [n_classes, X]
            X can be any other number of dimensions > 0
        metric_name: suffix of the `ForecastSkill.get_<metric_name>` accessor
        mask: optional mask forwarded to `ForecastSkill.compute_categories`

    Returns:
        scores (list):
            List with an element per class
    """
    # Fix: the return annotation said pd.DataFrame, but the function returns
    # a plain list (one score per class).
    n_classes = prediction.shape[0]
    # Flatten everything behind the class axis so each class is a 1-D series.
    prediction = prediction.reshape(n_classes, -1)
    target = target.reshape(n_classes, -1)
    scores = []
    for c in range(n_classes):
        forecast_skill = ForecastSkill(prediction[c], target[c])
        forecast_skill.compute_categories(mask=mask)
        scores.append(getattr(forecast_skill, f'get_{metric_name}')())
    return scores
def geographic_categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str) -> np.ndarray:
    """
    Evaluates a categorical prediction per grid cell with the requested
    ForecastSkill metric (accessed via `get_<metric_name>`).

    Args:
        prediction: shape [n_classes, n_time, n_lat, n_lon]
        target: shape [n_classes, n_time, n_lat, n_lon]
        metric_name: suffix of the `ForecastSkill.get_<metric_name>` accessor

    Returns:
        scores: shape [n_classes, n_lat, n_lon]; cells whose prediction and
        target time series are both all-zero hold the sentinel -999.
    """
    n_classes = prediction.shape[0]
    n_lat = prediction.shape[2]
    n_lon = prediction.shape[3]
    scores = np.zeros((n_classes, n_lat, n_lon))
    for c in range(n_classes):
        for lat in range(n_lat):
            for lon in range(n_lon):
                grid_cell_prediction = prediction[c, :, lat, lon]
                grid_cell_target = target[c, :, lat, lon]
                if sum(grid_cell_prediction) == 0 and sum(grid_cell_target) == 0:
                    # No events in either series: the metric is undefined here.
                    scores[c, lat, lon] = -999
                else:
                    forecast_skill = ForecastSkill(grid_cell_prediction, grid_cell_target)
                    forecast_skill.compute_categories()
                    scores[c, lat, lon] = getattr(forecast_skill, f'get_{metric_name}')()

                # Fix: completed-cell count is lat*n_lon + lon, not lat*lon
                # (the old formula reported 0% for the entire first row and
                # never reached 100%).
                print(f'Progress {int((lat * n_lon + lon)/(n_lat*n_lon)*100):2d}%')
                clear_output(wait=True)
    return scores
| 32.814126 | 116 | 0.605528 |
be74846aa8bb878ca4aaee267b213fd10335d381 | 1,709 | py | Python | poloniex_apis/api_models/deposit_withdrawal_history.py | xJuggl3r/anapolo | 5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b | [
"MIT"
] | null | null | null | poloniex_apis/api_models/deposit_withdrawal_history.py | xJuggl3r/anapolo | 5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b | [
"MIT"
] | null | null | null | poloniex_apis/api_models/deposit_withdrawal_history.py | xJuggl3r/anapolo | 5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b | [
"MIT"
] | null | null | null | from collections import defaultdict
from poloniex_apis.api_models.ticker_price import TickerData
| 39.744186 | 87 | 0.599181 |
be748f98db9ba8c29d78f47f7af4dd25c01061b7 | 7,320 | py | Python | app/handler.py | vnrag/aws-pipeline-dashboard | 679af73f8e777990840bc829a014e205f0c94ac0 | [
"BSD-3-Clause"
] | null | null | null | app/handler.py | vnrag/aws-pipeline-dashboard | 679af73f8e777990840bc829a014e205f0c94ac0 | [
"BSD-3-Clause"
] | null | null | null | app/handler.py | vnrag/aws-pipeline-dashboard | 679af73f8e777990840bc829a014e205f0c94ac0 | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime,timezone
import sys
import boto3
import json
# Return the state from the event iff it's one of SUCCEEDED or FAILED
# Return the execution summary for a given execution id
# Return the execution summary for the most prior final execution before a given execution id
if __name__ == '__main__':
dashboard_event(None, None)
| 35.882353 | 396 | 0.593579 |
be74f9e10e7b3e7db834044fe7d0389031a09884 | 4,507 | py | Python | cogs/commands.py | sudo-do/discord-chatbot | 970af7d8b9275a518396648ebe5c33c291370d6a | [
"MIT"
] | 1 | 2021-05-14T08:01:53.000Z | 2021-05-14T08:01:53.000Z | cogs/commands.py | sudo-do/discord-chatbot | 970af7d8b9275a518396648ebe5c33c291370d6a | [
"MIT"
] | null | null | null | cogs/commands.py | sudo-do/discord-chatbot | 970af7d8b9275a518396648ebe5c33c291370d6a | [
"MIT"
] | null | null | null | import discord
import sqlite3
from discord.ext import commands
conn= sqlite3.connect("dbs/main.db")
| 36.942623 | 292 | 0.676503 |
be75b53bc3cf75e488408e710557a7588ee69c9c | 6,210 | py | Python | poetry/console/commands/self/update.py | mgasner/poetry | 44221689e05feb0cc93c231096334f8eefbf86fc | [
"MIT"
] | null | null | null | poetry/console/commands/self/update.py | mgasner/poetry | 44221689e05feb0cc93c231096334f8eefbf86fc | [
"MIT"
] | null | null | null | poetry/console/commands/self/update.py | mgasner/poetry | 44221689e05feb0cc93c231096334f8eefbf86fc | [
"MIT"
] | null | null | null | import hashlib
import os
import shutil
import subprocess
import sys
import tarfile
from functools import cmp_to_key
from gzip import GzipFile
try:
from urllib.error import HTTPError
from urllib.request import urlopen
except ImportError:
from urllib2 import HTTPError
from urllib2 import urlopen
from cleo import argument
from cleo import option
from ..command import Command
| 27.972973 | 90 | 0.54847 |
be75c777b16f1617c2f87efa99ed969f4c41aed6 | 1,192 | py | Python | osp/test/corpus/syllabus/test_text.py | davidmcclure/open-syllabus-project | 078cfd4c5a257fbfb0901d43bfbc6350824eed4e | [
"Apache-2.0"
] | 220 | 2016-01-22T21:19:02.000Z | 2022-01-25T04:33:55.000Z | osp/test/corpus/syllabus/test_text.py | davidmcclure/open-syllabus-project | 078cfd4c5a257fbfb0901d43bfbc6350824eed4e | [
"Apache-2.0"
] | 14 | 2016-01-23T14:34:39.000Z | 2016-09-19T19:58:37.000Z | osp/test/corpus/syllabus/test_text.py | davidmcclure/open-syllabus-project | 078cfd4c5a257fbfb0901d43bfbc6350824eed4e | [
"Apache-2.0"
] | 14 | 2016-02-03T13:47:48.000Z | 2019-03-27T13:09:05.000Z |
from osp.corpus.syllabus import Syllabus
from osp.test.utils import requires_tika
def test_empty(mock_osp):
    """
    Should return None if the file is empty.
    """
    path = mock_osp.add_file(content='', ftype='plain')
    syllabus = Syllabus(path)
    # Fix: compare to None with `is` (identity), not `==` (PEP 8 E711).
    assert syllabus.text is None
def test_plaintext(mock_osp):
    """
    Should extract text from vanilla text files.
    """
    file_path = mock_osp.add_file(content='text', ftype='plain')
    assert Syllabus(file_path).text == 'text'
def test_html(mock_osp):
    """
    Should extract text from HTML files.
    """
    file_path = mock_osp.add_file(content='<p>text</p>', ftype='html')
    extracted = Syllabus(file_path).text
    assert extracted == 'text'
def test_pdf(mock_osp):
    """
    Should extract text from PDF files.
    """
    file_path = mock_osp.add_file(content='text', ftype='pdf')
    assert Syllabus(file_path).text.strip() == 'text'
| 18.060606 | 65 | 0.645134 |
be763dff688768c2aba41209e3bec63f50ee2a53 | 19,099 | py | Python | boa_test/tests/test_ico_template.py | mixbee/neo-boa | da7366c26c7b8e60afb9ac27439a1da37b0be355 | [
"MIT"
] | 4 | 2018-08-22T03:30:34.000Z | 2019-04-16T10:54:08.000Z | boa_test/tests/test_ico_template.py | mixbee/neo-boa | da7366c26c7b8e60afb9ac27439a1da37b0be355 | [
"MIT"
] | 3 | 2018-09-03T09:19:26.000Z | 2019-01-24T00:06:29.000Z | boa_test/tests/test_ico_template.py | mixbee/neo-boa | da7366c26c7b8e60afb9ac27439a1da37b0be355 | [
"MIT"
] | 12 | 2018-07-19T06:36:44.000Z | 2019-05-13T05:45:58.000Z | from boa_test.tests.boa_test import BoaFixtureTest
from boa.compiler import Compiler
from neo.Core.TX.Transaction import Transaction
from neo.Prompt.Commands.BuildNRun import TestBuild
from neo.EventHub import events
from neo.SmartContract.SmartContractEvent import SmartContractEvent, NotifyEvent
from neo.Settings import settings
from neo.Prompt.Utils import parse_param
from neo.Core.FunctionCode import FunctionCode
from neocore.Fixed8 import Fixed8
from boa_test.example.demo.nex.token import *
import shutil
import os
from logzero import logger
settings.USE_DEBUG_STORAGE = True
settings.DEBUG_STORAGE_PATH = './fixtures/debugstorage'
| 51.899457 | 202 | 0.673857 |
be76f999496b5e5961109377d7a8e9bebf2c7e1e | 2,576 | py | Python | regexem.py | lvijay/ilc | 1c3b1381e7e5a5064bda829e3d34bfaf24745d1a | [
"BSD-3-Clause-No-Nuclear-Warranty"
] | 1 | 2019-01-03T17:44:11.000Z | 2019-01-03T17:44:11.000Z | regexem.py | lvijay/ilc | 1c3b1381e7e5a5064bda829e3d34bfaf24745d1a | [
"BSD-3-Clause-No-Nuclear-Warranty"
] | null | null | null | regexem.py | lvijay/ilc | 1c3b1381e7e5a5064bda829e3d34bfaf24745d1a | [
"BSD-3-Clause-No-Nuclear-Warranty"
] | null | null | null | #!/usr/bin/python
# -*- mode: python; -*-
## This file is part of Indian Language Converter
## Copyright (C) 2006 Vijay Lakshminarayanan <liyer.vijay@gmail.com>
## Indian Language Converter is free software; you can redistribute it
## and/or modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2 of
## the License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.
## $Id: regexem.py,v 1.4 2006-03-26 03:15:24 vijay Exp $
## Author: Vijay Lakshminarayanan
## $Date: 2006-03-26 03:15:24 $
import sys
from re import escape
def regexem(strlst):
    """Returns a single string which is the regular expression to
    identify any single word in the given argument.

    See the Examples given at the end of this file."""
    escaped_words = [escape(word) for word in strlst]
    return regexem_internal(escaped_words)
if __name__ == '__main__':
    # CLI usage: each command-line argument is one word; the combined regex
    # is printed to stdout (Python 2 print statement).
    print ''.join(regexem(sys.argv[1:]))
## Examples
#
# $ ./regexem.py emacs vi ed
# (ed)|(emacs)|(vi)
#
# $ ./regexem.py batsman bats well
# (well)|(bats(man)?)
#
# $ ./regexem.py houses housefly
# (houses)|(housefly) ## Note that they aren't grouped together
#
## a slightly complicated example
# $ ./regexem.py an anteater and an ant
# (an((d)|(t(eater)?))?)
| 33.025641 | 77 | 0.632376 |
be7730b08647563bbdf351876a21f2fa9df7d7f9 | 3,765 | py | Python | main.py | rohit-k-das/crowdstrike-alerts | 48c23357f819f90134f76cefb58f1355967363d4 | [
"MIT"
] | 3 | 2019-07-10T17:05:56.000Z | 2019-10-18T22:34:08.000Z | main.py | rohit-k-das/crowdstrike-alerts | 48c23357f819f90134f76cefb58f1355967363d4 | [
"MIT"
] | 1 | 2020-01-09T14:43:58.000Z | 2020-02-06T11:24:04.000Z | main.py | rohit-k-das/crowdstrike-alerts | 48c23357f819f90134f76cefb58f1355967363d4 | [
"MIT"
] | 2 | 2019-07-10T17:05:57.000Z | 2019-10-18T22:34:09.000Z | import requests
import crowdstrike_detection as crowdstrike
import logging
import click
import urllib.parse
import ConfigParser
import os
# Root logger: timestamped records with aligned logger-name/level columns.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# Read credentials/settings from the `Crowdstrike_creds` INI file that sits
# next to this script (Python 2 ConfigParser module).
Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Crowdstrike_creds'))
# Create your own slackbot
# Slack incoming-webhook URL that alerts are posted to.
hubot_webhook_url = Config.get('Settings', 'Slackbot_Url')
# Send slack alert via hubot for each high or critical detection in crowdstrike
if __name__ == '__main__':
main()
| 41.373626 | 176 | 0.661355 |
be7756c046d0e49be191bd99222501f37d6b8b9a | 92 | py | Python | connexion/http_facts.py | lumikanta/connexion | b6530d32aaee92ebbdfef501540d642a26185174 | [
"Apache-2.0"
] | null | null | null | connexion/http_facts.py | lumikanta/connexion | b6530d32aaee92ebbdfef501540d642a26185174 | [
"Apache-2.0"
] | null | null | null | connexion/http_facts.py | lumikanta/connexion | b6530d32aaee92ebbdfef501540d642a26185174 | [
"Apache-2.0"
] | 1 | 2019-03-21T18:21:32.000Z | 2019-03-21T18:21:32.000Z | FORM_CONTENT_TYPES = [
'application/x-www-form-urlencoded',
'multipart/form-data'
]
| 18.4 | 40 | 0.695652 |
be775d3a62274c2c57f452dafb16e1035b3dff0c | 4,593 | py | Python | Test3/yandexAPI3.py | klepik1990/YandexTestAPI | ded41ff607c0b209b51efbcaa13c8008156a5e0a | [
"MIT"
] | null | null | null | Test3/yandexAPI3.py | klepik1990/YandexTestAPI | ded41ff607c0b209b51efbcaa13c8008156a5e0a | [
"MIT"
] | null | null | null | Test3/yandexAPI3.py | klepik1990/YandexTestAPI | ded41ff607c0b209b51efbcaa13c8008156a5e0a | [
"MIT"
] | null | null | null | import requests
import json
# NOTE(review): a hard-coded OAuth token is a leaked credential -- revoke it
# and load the token from an environment variable or config file instead.
HEADERS = {"Authorization": "OAuth AgAAAAA00Se2AAW1W1yCegavqkretMXBGkoUUQk", "Accept": "*/*"}
# Base URL of the Yandex Disk REST API.
URL = "https://cloud-api.yandex.net:443/v1/disk/"
def get_folder_info(folder_name_1, folder_name_2, url=None, headers=None):
    """Fetch the path of a nested folder on Yandex Disk.

    Args:
        folder_name_1: top-level folder name.
        folder_name_2: subfolder name inside `folder_name_1`.
        url: API base URL; defaults to the module-level URL.
        headers: request headers; defaults to the module-level HEADERS.

    Returns:
        The folder's disk path, or the API error description when the
        folder does not exist (HTTP 404).
    """
    # Fix: url/headers were previously accepted but silently ignored in
    # favour of the module globals.
    url = URL if url is None else url
    headers = HEADERS if headers is None else headers
    info = requests.get(url=url + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "&fields=path",
                        headers=headers)
    dict_response = json.loads(info.content)
    if info.status_code == 404:
        return dict_response["description"]
    return dict_response["path"]
def get_file_info(folder_name_1, folder_name_2, file_name, url=None, headers=None):
    """Fetch the path of a .jpg file stored on Yandex Disk.

    Args:
        folder_name_1: top-level folder name.
        folder_name_2: subfolder name inside `folder_name_1`.
        file_name: file name without the ".jpg" extension.
        url: API base URL; defaults to the module-level URL.
        headers: request headers; defaults to the module-level HEADERS.

    Returns:
        The file's disk path, or the API error description when the file
        does not exist (HTTP 404).
    """
    # Fix: url/headers were previously accepted but silently ignored in
    # favour of the module globals.
    url = URL if url is None else url
    headers = HEADERS if headers is None else headers
    file_info_json = requests.get(url=url + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "/" +
                                  file_name + ".jpg&fields=path", headers=headers)
    file_info_dict = json.loads(file_info_json.content)
    if file_info_json.status_code == 404:
        return file_info_dict["description"]
    return file_info_dict["path"]
def create_folder(folder_name_1, folder_name_2, url=None, headers=None):
    """Create a fresh nested folder on Yandex Disk.

    If `folder_name_1` already exists it is permanently deleted and
    recreated, so any previous contents are lost.

    Args:
        folder_name_1: top-level folder name.
        folder_name_2: subfolder to create inside `folder_name_1`.
        url: unused; the module-level URL is always used.
        headers: unused; the module-level HEADERS are always used.

    Returns:
        The created folder's disk path (see `get_folder_info`).
    """
    # Status codes the delete call may answer with: 202 (accepted, async)
    # or 204 (completed) -- presumably per the Yandex Disk API; confirm.
    response_code = [202, 204]
    new_folder = requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
    # 409 means the folder already exists: wipe it and recreate from scratch.
    if new_folder.status_code == 409:
        new_folder = requests.delete(url= URL + "resources?path=" + folder_name_1 + "&permanently=true", headers=HEADERS)
        if new_folder.status_code in response_code:
            requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
    requests.put(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2, headers=HEADERS)
    return get_folder_info(folder_name_1, folder_name_2)
def create_file(folder_name_1, folder_name_2, file_name, url=None, headers=None):
    """Upload a .jpg file (empty body) to a nested folder on Yandex Disk.

    Args:
        folder_name_1: top-level folder name.
        folder_name_2: subfolder name inside `folder_name_1`.
        file_name: file name without the ".jpg" extension; must be non-empty.
        url: API base URL; defaults to the module-level URL.
        headers: request headers; defaults to the module-level HEADERS.

    Returns:
        The uploaded file's disk path (see `get_file_info`).

    Raises:
        ValueError: if `file_name` is empty.
    """
    # Fix: `assert` is stripped under `python -O`, so it must not be used
    # for input validation -- raise a real exception instead.
    if not file_name:
        raise ValueError("file_name must be a non-empty string")
    url = URL if url is None else url
    headers = HEADERS if headers is None else headers
    new_file = requests.get(url=url + "resources/upload?path=" + folder_name_1 + "/" + folder_name_2 + "/" +
                            file_name + ".jpg&overwrite=true", headers=headers)
    # The API answers with a one-time upload href; a PUT to it performs the upload.
    link = json.loads(new_file.content)
    requests.put(url=link["href"])
    return get_file_info(folder_name_1, folder_name_2, file_name)
def move_to_bucket(folder_name, url=None, headers=None):
    """Move a folder to the Yandex Disk trash.

    Args:
        folder_name: name of the folder to trash.
        url: unused; kept for signature compatibility.
        headers: unused; kept for signature compatibility.

    Returns:
        The href of the resulting delete operation.
    """
    delete_url = URL + "resources?path=" + folder_name
    response = requests.delete(url=delete_url, headers=HEADERS)
    payload = json.loads(response.content)
    return payload["href"]
def get_status(link, headers=None):
    """Return the status of an asynchronous Yandex Disk operation.

    Args:
        link: operation URL returned by a previous API call.
        headers: unused; the module-level HEADERS are sent.

    Returns:
        The operation's status string.
    """
    response = requests.get(url=link, headers=HEADERS)
    payload = json.loads(response.content)
    return payload["status"]
def clean_bucket():
    """Empty the Yandex Disk trash.

    Returns:
        The href of the resulting purge operation.
    """
    response = requests.delete(url=URL + "trash/resources", headers=HEADERS)
    payload = json.loads(response.content)
    return payload["href"]
| 33.282609 | 125 | 0.674069 |
be78c46e8b283fc835a189209cd53b3fea610e40 | 3,208 | py | Python | app/users/operator/views.py | trinanda/AQUR | 2a415b05ba4c0113b05b6fa14fb454af2bad52ec | [
"MIT"
] | null | null | null | app/users/operator/views.py | trinanda/AQUR | 2a415b05ba4c0113b05b6fa14fb454af2bad52ec | [
"MIT"
] | null | null | null | app/users/operator/views.py | trinanda/AQUR | 2a415b05ba4c0113b05b6fa14fb454af2bad52ec | [
"MIT"
] | null | null | null | import os
from collections import defaultdict
from flask import render_template
from flask_login import login_required
from sqlalchemy import and_
from app import db
from app.decorators import operator_required
from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule
from app.users.operator import operator
| 40.1 | 118 | 0.711658 |
be7b321e4983f3461ae58d22d3131016ec26c37d | 5,936 | py | Python | arvet/core/metric.py | jskinn/arvet | 742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9 | [
"BSD-2-Clause"
] | 2 | 2021-05-27T21:48:34.000Z | 2021-06-12T02:58:44.000Z | arvet/core/metric.py | jskinn/arvet | 742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9 | [
"BSD-2-Clause"
] | null | null | null | arvet/core/metric.py | jskinn/arvet | 742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2017, John Skinner
import abc
import typing
import bson
import pymodm
import pymodm.fields as fields
import arvet.database.pymodm_abc as pymodm_abc
from arvet.database.reference_list_field import ReferenceListField
import arvet.core.trial_result
def check_trial_collection(trial_results: typing.Iterable['arvet.core.trial_result.TrialResult']) \
        -> typing.Union[str, None]:
    """
    A helper function to check that all the given trial results come from the same system and image source.
    Call this at the start of Metric.measure_results
    :param trial_results: A collection of trial results passed to Metric.measure_results
    :return: None if all the trials are OK, string explaining the problem if they are not
    """
    first_trial = None
    for idx, trial in enumerate(trial_results):
        if not trial.success:
            # Fix: the pk placeholder was written "(1)" instead of "({1})",
            # so the trial id never appeared in the message.
            return "Trial {0} ({1}) is failed".format(idx, trial.pk)
        if first_trial is None:
            first_trial = trial
        else:
            if trial.image_source != first_trial.image_source:
                return "Trial {0} ({1}) does not have the same image source as the first trial".format(idx, trial.pk)
            if trial.system != first_trial.system:
                return "Trial {0} ({1}) does not have the same system as the first trial".format(idx, trial.pk)
    # All trials succeeded and agree with the first one.
    return None
| 37.56962 | 117 | 0.657008 |
be7e9dc9b18c9759a533f45fd2110a059eb361f0 | 19,192 | py | Python | pfile/accessor.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 4 | 2016-12-17T20:06:10.000Z | 2021-11-19T04:45:29.000Z | pfile/accessor.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 11 | 2021-01-06T05:35:11.000Z | 2022-03-11T23:28:31.000Z | pfile/accessor.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 3 | 2015-06-12T10:44:16.000Z | 2021-07-26T18:39:47.000Z | """File access utils"""
__author__ = 'thorwhalen'
# from ut.datapath import datapath
import pickle
import os
from ut.util.importing import get_environment_variable
import pandas as pd
import ut.pfile.to as file_to
import ut.pfile.name as pfile_name
import ut.pstr.to as pstr_to
from ut.serialize.local import Local
from ut.serialize.s3 import S3
from os import environ # does this load the whole array? Can we just take MS_DATA instead?
import ut.pstr.trans as pstr_trans
import shutil
try:
MS_DATA = get_environment_variable('MS_DATA')
except KeyError:
MS_DATA = ''
LOCATION_LOCAL = 'LOCAL'
LOCATION_S3 = 'S3'
####################################################################################################################
# Quick Utils
####################################################################################################################
# FACTORIES
####################################################################################################################
####################################################################################################################
# OTHER UTILS
def _make_a_file_loc_proc_and_location_from_string_specifications(file_loc_proc, location):
    """
    Build a (file_loc_proc, location) pair from string specifications.

    Exactly one of the two arguments should be a string:
      * location only: the location string itself (plus a trailing slash) becomes
        the relative root, and the returned location is None.
      * file_loc_proc only: the location is guessed from the highest-level folder
        of file_loc_proc (which must be LOCATION_LOCAL or LOCATION_S3) and that
        folder is stripped from the path.

    :param file_loc_proc: file path specification string, or None
    :param location: location name (LOCATION_LOCAL or LOCATION_S3), or None
    :return: tuple (file_loc_proc, location) where file_loc_proc is a
        FilepathHandler.process callable
    :raises ValueError: if the pair cannot be resolved from the given arguments
    """
    if file_loc_proc is None and isinstance(location, str):
        # Only a location was given: use it (with a trailing slash) as the root
        file_loc_proc = location + "/"
        location = None
    elif location is None and isinstance(file_loc_proc, str):
        # Only a path was given: guess the location from its top-level folder.
        # Bug fix: the folder must be taken from file_loc_proc — location is
        # None in this branch, so the original call could never succeed.
        first_folder = pfile_name.get_highest_level_folder(file_loc_proc)
        if first_folder in [LOCATION_LOCAL, LOCATION_S3]:
            location = first_folder  # set the location to first_folder
            # Bug fix: str.replace returns a new string (the original discarded
            # the result); strip only the leading location folder.
            file_loc_proc = file_loc_proc.replace(location + "/", "", 1)
        else:
            raise ValueError("location was not specified and couldn't be guessed from the file_loc_proc")
    else:
        raise ValueError("you've neither specified a file_loc_proc (as a file_loc_proc) nor a location")
    # make a file accessor for the (location, target_relative_root).
    # Bug fix: location may be None here (first branch), in which case
    # file_loc_proc already contains the full relative root; joining with ""
    # keeps it unchanged and avoids the TypeError os.path.join(None, ...) raises.
    file_loc_proc = FilepathHandler(relative_root=os.path.join(location or "", file_loc_proc)).process
    return (file_loc_proc, location)
def file_loc_proc_from_full_path(fullpath):
    """Return the `process` accessor of a FilepathHandler rooted at the given full path."""
    handler = FilepathHandler(relative_root=fullpath)
    return handler.process
def fullpath_to_s3_kargs(filename):
    """Split a full path into S3 keyword arguments: the bucket (top-level folder) and the key."""
    # drop a leading slash if present (because self.sound_file_root_folder ends with / already)
    path = filename[1:] if filename.startswith('/') else filename
    bucket = pfile_name.get_highest_level_folder(path)
    # everything after the first "<bucket>/" prefix is the object key
    key = path.replace(bucket + '/', '', 1)
    return {
        'bucket_name': bucket,
        'key_name': key
    }
##### LOCAL METHODS
##### S3 METHODS
| 41.90393 | 125 | 0.634275 |
be7ea94dc71a3948ab59fd9c3e80bde2599bb1f1 | 4,309 | py | Python | scripts/statistics.py | cstenkamp/MastersThesisText | d026f9c19819c83d99dfff12b594db9d061bfb31 | [
"CC0-1.0"
] | null | null | null | scripts/statistics.py | cstenkamp/MastersThesisText | d026f9c19819c83d99dfff12b594db9d061bfb31 | [
"CC0-1.0"
] | null | null | null | scripts/statistics.py | cstenkamp/MastersThesisText | d026f9c19819c83d99dfff12b594db9d061bfb31 | [
"CC0-1.0"
] | null | null | null | import subprocess
import git
from os.path import dirname, join, abspath
import pandas as pd
from matplotlib import pyplot as plt
import requests
import io
import zipfile
import tempfile
from datetime import timedelta
FILENAME = join(dirname(__file__), "..", "thesis.tex")
DISP_PAGESMAX = 80
DISP_WORDSMAX = 10000
if __name__ == "__main__":
#history
df = create_history_df(dirname(FILENAME), "thesis.tex")
date_pages = github_get_npages("cstenkamp", "MastersThesisText", "thesis.pdf")
df = merge_page_df(df, date_pages)
plot_df(df)
#current
n_words = int(return_piped_cmd(f"detex {FILENAME} | wc -w"))
n_pages = get_npages(FILENAME)
n_todos = get_todos(FILENAME)
print(f"Words: {n_words}, Pages: {n_pages}, Todos: {n_todos}") | 38.132743 | 144 | 0.637503 |
be7ef9e5cafc81c92530c829cae514f567ffa39a | 1,966 | py | Python | setup.py | TheFraserLab/enrich_pvalues | 6c5065da5e6367cc39a045afbdfa1e78322857a6 | [
"MIT"
] | 1 | 2019-03-25T17:38:47.000Z | 2019-03-25T17:38:47.000Z | setup.py | TheFraserLab/enrich_pvalues | 6c5065da5e6367cc39a045afbdfa1e78322857a6 | [
"MIT"
] | null | null | null | setup.py | TheFraserLab/enrich_pvalues | 6c5065da5e6367cc39a045afbdfa1e78322857a6 | [
"MIT"
] | null | null | null | """Installation instructions for enrich_pvalues."""
import os
from setuptools import setup
import enrich_pvalues # For version
VERSION=enrich_pvalues.__version__
GITHUB='https://github.com/MikeDacre/enrich_pvalues'
with open('requirements.txt') as fin:
REQUIREMENTS = [
i[0] for i in [j.split('>=') for j in fin.read().strip().split('\n')]
]
def read(fname):
    """Read and return the contents of a file that sits next to this script."""
    here = os.path.dirname(__file__)
    with open(os.path.join(here, fname)) as fin:
        return fin.read()
# Actual setup instructions
setup(
    name = 'enrich_pvalues',
    version = VERSION,  # taken from enrich_pvalues.__version__ above
    author = 'Mike Dacre',
    author_email = 'mike.dacre@gmail.com',
    description = (
        "Compare one dataset to another at a variety of p-value cutoffs"
    ),
    keywords = (
        "statistics p-values biology molecular-biology console"
    ),
    # The README is used verbatim as the PyPI long description
    long_description = read('README.rst'),
    license = 'MIT',
    # URLs
    url = GITHUB,
    # Source tarball for this exact release, derived from the git tag name
    download_url='{0}/archive/v{1}.tar.gz'.format(GITHUB, VERSION),
    # Single-module distribution: everything lives in enrich_pvalues.py
    py_modules=['enrich_pvalues'],
    # Installs an `enrich_pvalues` console command mapped to enrich_pvalues.main
    entry_points = {
        'console_scripts': [
            'enrich_pvalues = enrich_pvalues:main',
        ],
    },
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        # 'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Utilities',
    ],
    # Requirements
    # NOTE(review): `requires` is a legacy distutils metadata field that pip
    # ignores; `install_requires` below is what drives installation — confirm
    # whether `requires` can simply be dropped.
    requires=REQUIREMENTS,
    install_requires=REQUIREMENTS
)
| 28.085714 | 77 | 0.61648 |
be7fa8fa9510f2347bc60a9ff146e619c5f6dc1c | 11,457 | py | Python | homeschool/students/tests/test_forms.py | brandonmcclure/homeschool | 6ba2e35014740e952222535e9492cde0d41338b4 | [
"MIT"
] | null | null | null | homeschool/students/tests/test_forms.py | brandonmcclure/homeschool | 6ba2e35014740e952222535e9492cde0d41338b4 | [
"MIT"
] | null | null | null | homeschool/students/tests/test_forms.py | brandonmcclure/homeschool | 6ba2e35014740e952222535e9492cde0d41338b4 | [
"MIT"
] | null | null | null | import datetime
from homeschool.courses.tests.factories import (
CourseFactory,
CourseTaskFactory,
GradedWorkFactory,
)
from homeschool.schools.tests.factories import GradeLevelFactory
from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm
from homeschool.students.models import Coursework, Grade
from homeschool.students.tests.factories import (
CourseworkFactory,
EnrollmentFactory,
GradeFactory,
StudentFactory,
)
from homeschool.test import TestCase
| 36.141956 | 87 | 0.639696 |
be7fc184a7b92d4ec6db9908dc208989d6e4f546 | 23,144 | py | Python | Mining_Projects/getAllProjects_Parallel.py | ai-se/heroes_compsci | 613fd623a6da073b2c62c773ed902acb0c756809 | [
"MIT"
] | null | null | null | Mining_Projects/getAllProjects_Parallel.py | ai-se/heroes_compsci | 613fd623a6da073b2c62c773ed902acb0c756809 | [
"MIT"
] | 12 | 2019-12-17T04:04:19.000Z | 2019-12-26T20:23:02.000Z | Mining_Projects/getAllProjects_Parallel.py | ai-se/heroes_compsci | 613fd623a6da073b2c62c773ed902acb0c756809 | [
"MIT"
] | 1 | 2020-03-12T22:19:48.000Z | 2020-03-12T22:19:48.000Z | """ @Author Jchakra"""
""" This code is to download project information using GitHub API (Following Amrit's Hero paper criteria of how to find good projects) """
from multiprocessing import Process,Lock
import time
import json
import requests
## Downloading all the projects
if __name__ == '__main__':
    # Launch the four project-downloading workers in parallel and wait for
    # every one of them to finish before exiting.
    lock = Lock()
    workers = [Process(target=fn) for fn in (func1, func2, func3, func4)]
    for proc in workers:
        proc.start()
    for proc in workers:
        proc.join()
| 29.407878 | 169 | 0.527523 |
be8016a800ed48d86a67fbff5afe5ec6d0a2e6a3 | 2,173 | py | Python | examples/source/benchmarks/googlenet_model.py | ably77/dcos-tensorflow-tools | d434ff6c0cee6db9f62be583723dc2bee46ebbf2 | [
"Apache-2.0"
] | 7 | 2017-11-02T18:21:37.000Z | 2019-06-20T20:46:51.000Z | scripts/tf_cnn_benchmarks/googlenet_model.py | Aetf/tf_benchmarks | b473961620de1b03cb34902960c820e195bea678 | [
"Apache-2.0"
] | 7 | 2017-10-19T20:45:25.000Z | 2020-03-24T15:28:52.000Z | scripts/tf_cnn_benchmarks/googlenet_model.py | Aetf/tf_benchmarks | b473961620de1b03cb34902960c820e195bea678 | [
"Apache-2.0"
] | 4 | 2017-10-19T09:57:17.000Z | 2019-01-22T05:33:25.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Googlenet model configuration.
References:
Szegedy, Christian, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich
Going deeper with convolutions
arXiv preprint arXiv:1409.4842 (2014)
"""
import model
| 37.465517 | 80 | 0.61942 |
be802497e70c37700eec284c7ee0e2b8f03f6401 | 60 | py | Python | demos/prey-predator/prey_predator_abm/sim_params.py | neo-empresarial/covid-19 | cef10ee79d955c9e84148c3c8da542788a1f7395 | [
"MIT"
] | 3 | 2020-05-26T12:17:48.000Z | 2020-06-25T12:03:37.000Z | demos/prey-predator/prey_predator_abm/sim_params.py | neo-empresarial/covid-19 | cef10ee79d955c9e84148c3c8da542788a1f7395 | [
"MIT"
] | 4 | 2020-05-26T21:03:44.000Z | 2020-06-30T12:13:15.000Z | demos/prey-predator/prey_predator_abm/sim_params.py | neo-empresarial/epidemiological-analysis | cef10ee79d955c9e84148c3c8da542788a1f7395 | [
"MIT"
] | 1 | 2021-11-22T23:10:45.000Z | 2021-11-22T23:10:45.000Z | """
Simulation parameters.
"""
SIMULATION_TIME_STEPS = 300
| 10 | 27 | 0.733333 |
be82ffa5bc528b97777e4e4160bb45aca2d0d6ec | 12,669 | py | Python | process_ops.py | gcosne/generative_inpainting | 1ae50277e5815a4f0c1e339ede0dbfae8e5036d1 | [
"MIT"
] | 11 | 2018-11-16T04:29:06.000Z | 2019-07-25T08:11:47.000Z | process_ops.py | Yukariin/PEPSI | 91aea1ae6f528d92ee19007ed132d3482b3a98cc | [
"MIT"
] | null | null | null | process_ops.py | Yukariin/PEPSI | 91aea1ae6f528d92ee19007ed132d3482b3a98cc | [
"MIT"
] | 1 | 2019-07-16T18:52:49.000Z | 2019-07-16T18:52:49.000Z | import cv2
import numpy as np
try:
import scipy
# scipy.ndimage cannot be accessed until explicitly imported
from scipy import ndimage
except ImportError:
scipy = None
def random_rotation(x, rg, row_axis=0, col_axis=1, channel_axis=2,
                    fill_mode='nearest', cval=0., interpolation_order=1):
    """Rotate a 3D Numpy image tensor by a random angle.

    # Arguments
        x: Input tensor. Must be 3D.
        rg: Rotation range, in degrees; the angle is drawn uniformly
            from [-rg, rg].
        row_axis: Index of the rows axis in the input tensor.
        col_axis: Index of the columns axis in the input tensor.
        channel_axis: Index of the channels axis in the input tensor.
        fill_mode: How points outside the input boundaries are filled,
            one of `{'constant', 'nearest', 'reflect', 'wrap'}`.
        cval: Fill value for out-of-boundary points when
            `fill_mode='constant'`.
        interpolation_order: Order of the spline interpolation, passed on
            to `ndimage.interpolation.affine_transform`.

    # Returns
        The rotated Numpy image tensor.
    """
    angle = np.random.uniform(-rg, rg)
    return apply_affine_transform(
        x,
        theta=angle,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
def random_shift(x, wrg, hrg, row_axis=0, col_axis=1, channel_axis=2,
                 fill_mode='nearest', cval=0., interpolation_order=1):
    """Shift a 3D Numpy image tensor by a random spatial offset.

    # Arguments
        x: Input tensor. Must be 3D.
        wrg: Width shift range, as a float fraction of the width.
        hrg: Height shift range, as a float fraction of the height.
        row_axis: Index of the rows axis in the input tensor.
        col_axis: Index of the columns axis in the input tensor.
        channel_axis: Index of the channels axis in the input tensor.
        fill_mode: How points outside the input boundaries are filled,
            one of `{'constant', 'nearest', 'reflect', 'wrap'}`.
        cval: Fill value for out-of-boundary points when
            `fill_mode='constant'`.
        interpolation_order: Order of the spline interpolation, passed on
            to `ndimage.interpolation.affine_transform`.

    # Returns
        The shifted Numpy image tensor.
    """
    height, width = x.shape[row_axis], x.shape[col_axis]
    # NB: the row offset is drawn first, matching the original RNG stream order
    shift_rows = np.random.uniform(-hrg, hrg) * height
    shift_cols = np.random.uniform(-wrg, wrg) * width
    return apply_affine_transform(
        x,
        tx=shift_rows,
        ty=shift_cols,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
def random_shear(x, intensity, row_axis=0, col_axis=1, channel_axis=2,
                 fill_mode='nearest', cval=0., interpolation_order=1):
    """Shear a 3D Numpy image tensor by a random angle.

    # Arguments
        x: Input tensor. Must be 3D.
        intensity: Transformation intensity in degrees; the shear angle is
            drawn uniformly from [-intensity, intensity].
        row_axis: Index of the rows axis in the input tensor.
        col_axis: Index of the columns axis in the input tensor.
        channel_axis: Index of the channels axis in the input tensor.
        fill_mode: How points outside the input boundaries are filled,
            one of `{'constant', 'nearest', 'reflect', 'wrap'}`.
        cval: Fill value for out-of-boundary points when
            `fill_mode='constant'`.
        interpolation_order: Order of the spline interpolation, passed on
            to `ndimage.interpolation.affine_transform`.

    # Returns
        The sheared Numpy image tensor.
    """
    slant = np.random.uniform(-intensity, intensity)
    return apply_affine_transform(
        x,
        shear=slant,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
def random_zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2,
                fill_mode='nearest', cval=0., interpolation_order=1):
    """Zoom a 3D Numpy image tensor by a random factor.

    # Arguments
        x: Input tensor. Must be 3D.
        zoom_range: Two floats giving the zoom range for width and height.
        row_axis: Index of the rows axis in the input tensor.
        col_axis: Index of the columns axis in the input tensor.
        channel_axis: Index of the channels axis in the input tensor.
        fill_mode: How points outside the input boundaries are filled,
            one of `{'constant', 'nearest', 'reflect', 'wrap'}`.
        cval: Fill value for out-of-boundary points when
            `fill_mode='constant'`.
        interpolation_order: Order of the spline interpolation, passed on
            to `ndimage.interpolation.affine_transform`.

    # Returns
        The zoomed Numpy image tensor.

    # Raises
        ValueError: if `zoom_range` is not a two-element tuple or list.
    """
    if len(zoom_range) != 2:
        raise ValueError('`zoom_range` should be a tuple or list of two'
                         ' floats. Received: %s' % (zoom_range,))
    lower, upper = zoom_range
    if lower == 1 and upper == 1:
        # A unit range always yields the identity zoom — skip the RNG draw
        zx = zy = 1
    else:
        zx, zy = np.random.uniform(lower, upper, 2)
    return apply_affine_transform(
        x,
        zx=zx,
        zy=zy,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
# For curving soybean pods. L.C.Uzal
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
                           row_axis=0, col_axis=1, channel_axis=2,
                           fill_mode='nearest', cval=0., order=1):
    """Applies an affine transformation specified by the parameters given.

    Each requested transformation (rotation, shift, shear, zoom) is built as a
    3x3 homogeneous-coordinate matrix; the matrices are combined into a single
    transform which is then applied once per channel via scipy.ndimage.

    # Arguments
        x: 3D numpy array, single image (rows, columns and a channel axis).
        theta: Rotation angle in degrees.
        tx: Width shift.
        ty: Height shift.
        shear: Shear angle in degrees.
        zx: Zoom in x direction.
        zy: Zoom in y direction.
        row_axis: Index of axis for rows in the input image.
        col_axis: Index of axis for columns in the input image.
        channel_axis: Index of axis for channels in the input image.
        fill_mode: Points outside the boundaries of the input
            are filled according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `mode='constant'`.
        order int: order of spline interpolation

    # Returns
        The transformed version of the input (unchanged if every parameter
        is left at its identity default).
    """
    # scipy is optional: the module-top import leaves `scipy = None` on failure
    if scipy is None:
        raise ImportError('Image transformations require SciPy. '
                          'Install SciPy.')
    # Accumulates the combined transform; stays None when all parameters are
    # identities (theta == 0, tx == ty == 0, shear == 0, zx == zy == 1).
    transform_matrix = None
    if theta != 0:
        theta = np.deg2rad(theta)
        rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                    [np.sin(theta), np.cos(theta), 0],
                                    [0, 0, 1]])
        transform_matrix = rotation_matrix
    if tx != 0 or ty != 0:
        shift_matrix = np.array([[1, 0, tx],
                                 [0, 1, ty],
                                 [0, 0, 1]])
        if transform_matrix is None:
            transform_matrix = shift_matrix
        else:
            # each additional transform is right-multiplied onto the
            # accumulated matrix, in rotation/shift/shear/zoom order
            transform_matrix = np.dot(transform_matrix, shift_matrix)
    if shear != 0:
        shear = np.deg2rad(shear)
        shear_matrix = np.array([[1, -np.sin(shear), 0],
                                 [0, np.cos(shear), 0],
                                 [0, 0, 1]])
        if transform_matrix is None:
            transform_matrix = shear_matrix
        else:
            transform_matrix = np.dot(transform_matrix, shear_matrix)
    if zx != 1 or zy != 1:
        zoom_matrix = np.array([[zx, 0, 0],
                                [0, zy, 0],
                                [0, 0, 1]])
        if transform_matrix is None:
            transform_matrix = zoom_matrix
        else:
            transform_matrix = np.dot(transform_matrix, zoom_matrix)
    if transform_matrix is not None:
        h, w = x.shape[row_axis], x.shape[col_axis]
        # NOTE(review): transform_matrix_offset_center is not defined in this
        # module — presumably provided elsewhere in the project; from its name
        # it re-anchors the transform at the image center. TODO confirm.
        transform_matrix = transform_matrix_offset_center(
            transform_matrix, h, w)
        # Move the channel axis to the front so we can iterate per channel
        x = np.rollaxis(x, channel_axis, 0)
        # Split the homogeneous matrix into its linear part and offset vector
        final_affine_matrix = transform_matrix[:2, :2]
        final_offset = transform_matrix[:2, 2]
        # NOTE(review): scipy.ndimage.interpolation is a deprecated alias in
        # newer SciPy releases; scipy.ndimage.affine_transform is the modern path.
        channel_images = [ndimage.interpolation.affine_transform(
            x_channel,
            final_affine_matrix,
            final_offset,
            order=order,
            mode=fill_mode,
            cval=cval) for x_channel in x]
        # Reassemble the channels and restore the original axis order
        x = np.stack(channel_images, axis=0)
        x = np.rollaxis(x, 0, channel_axis + 1)
    return x
if __name__ == "__main__":
    # Simple CLI smoke test: load an image, apply a random transformation,
    # and write the result to --imageOut.
    import argparse
    from PIL import Image
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', default='', type=str)
    parser.add_argument('--imageOut', default='result.png', type=str)
    args = parser.parse_args()
    # Load the input image into a numpy array
    im = np.array(Image.open(args.image))
    # NOTE(review): `random_transform` is not defined in this module and is not
    # imported above — presumably it existed in an earlier revision or lives
    # elsewhere; running this entry point as-is would raise NameError. Confirm.
    img = random_transform(im, rotation_range=10, shear_range=.5, zoom_range=.2, channel_shift_range=10., horizontal_flip=True)
    # Cast back to uint8 before saving
    Image.fromarray(np.uint8(img)).save(args.imageOut)
| 38.861963 | 127 | 0.591207 |
be831484dedc63eae50e233ddb777cdbd9a06d19 | 1,093 | py | Python | keystone/tests/unit/token/test_provider.py | maestro-hybrid-cloud/keystone | a597a86b854215835a4d54885daeb161d7b0efb8 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/token/test_provider.py | maestro-hybrid-cloud/keystone | a597a86b854215835a4d54885daeb161d7b0efb8 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/token/test_provider.py | maestro-hybrid-cloud/keystone | a597a86b854215835a4d54885daeb161d7b0efb8 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from six.moves import urllib
from keystone.tests import unit
from keystone.token import provider
| 35.258065 | 75 | 0.751144 |
be84323ccf5c7d5239ba7b3bf5eba0ad7152ce2f | 2,927 | py | Python | fasm2bels/database/connection_db_utils.py | mithro/symbiflow-xc-fasm2bels | 9ed029558bedca4e726969427dc4e62ecd6d5733 | [
"ISC"
] | null | null | null | fasm2bels/database/connection_db_utils.py | mithro/symbiflow-xc-fasm2bels | 9ed029558bedca4e726969427dc4e62ecd6d5733 | [
"ISC"
] | null | null | null | fasm2bels/database/connection_db_utils.py | mithro/symbiflow-xc-fasm2bels | 9ed029558bedca4e726969427dc4e62ecd6d5733 | [
"ISC"
] | null | null | null | import functools
| 21.364964 | 86 | 0.618039 |
be84a1cf98701b670f1ef999229373bd7e2f389c | 2,443 | py | Python | ppr-api/src/services/payment_service.py | bcgov/ppr-deprecated | c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3 | [
"Apache-2.0"
] | 1 | 2019-11-15T19:07:25.000Z | 2019-11-15T19:07:25.000Z | ppr-api/src/services/payment_service.py | bryan-gilbert/ppr | c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3 | [
"Apache-2.0"
] | 6 | 2021-03-03T05:18:35.000Z | 2022-02-10T21:55:45.000Z | ppr-api/src/services/payment_service.py | bcgov/ppr-deprecated | c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3 | [
"Apache-2.0"
] | null | null | null | """A module that provides functionality for accessing the Payments API."""
import enum
import http
import logging
import requests
from fastapi import Depends, Header, HTTPException
from fastapi.security.http import HTTPAuthorizationCredentials
import auth.authentication
import config
import schemas.payment
logger = logging.getLogger(__name__)
CORP_TYPE = 'PPR'
| 35.405797 | 110 | 0.677855 |
be84bdd8bc7a0db1a7baadae4ae6c5d55cf356e0 | 168 | py | Python | SmerekaRoman/HW_6/HW 6.3.py | kolyasalubov/Lv-639.pythonCore | 06f10669a188318884adb00723127465ebdf2907 | [
"MIT"
] | null | null | null | SmerekaRoman/HW_6/HW 6.3.py | kolyasalubov/Lv-639.pythonCore | 06f10669a188318884adb00723127465ebdf2907 | [
"MIT"
] | null | null | null | SmerekaRoman/HW_6/HW 6.3.py | kolyasalubov/Lv-639.pythonCore | 06f10669a188318884adb00723127465ebdf2907 | [
"MIT"
] | null | null | null |
a = numb_of_char(str(input("Input the word please: ")))
print(a)
| 16.8 | 55 | 0.577381 |
be876cf3ef298b948a6559bdc7b9b04da2062463 | 589 | py | Python | 0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py | jiadaizhao/LeetCode | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | [
"MIT"
] | 49 | 2018-05-05T02:53:10.000Z | 2022-03-30T12:08:09.000Z | 0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 11 | 2017-12-15T22:31:44.000Z | 2020-10-02T12:42:49.000Z | 0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 28 | 2017-12-05T10:56:51.000Z | 2022-01-26T18:18:27.000Z |
# Your Vector2D object will be instantiated and called as such:
# obj = Vector2D(v)
# param_1 = obj.next()
# param_2 = obj.hasNext()
| 22.653846 | 63 | 0.519525 |
be87bd0c5c2ff868bb6a502f0a693e022ddbbafe | 1,049 | py | Python | logger_decorator.py | jbhayback/reconciliation-manager | 5de10a0ec89e397a4937d1764976c94cde06beee | [
"MIT"
] | null | null | null | logger_decorator.py | jbhayback/reconciliation-manager | 5de10a0ec89e397a4937d1764976c94cde06beee | [
"MIT"
] | null | null | null | logger_decorator.py | jbhayback/reconciliation-manager | 5de10a0ec89e397a4937d1764976c94cde06beee | [
"MIT"
] | null | null | null | from datetime import datetime
import inspect
| 25.585366 | 84 | 0.530029 |
be8915c20c303761d43a0098702f7e241e75e9c4 | 40 | py | Python | lf3py/di/__init__.py | rog-works/lf3py | e89937f7aa133ed54d85764f06101ab9abf6b960 | [
"CNRI-Python"
] | null | null | null | lf3py/di/__init__.py | rog-works/lf3py | e89937f7aa133ed54d85764f06101ab9abf6b960 | [
"CNRI-Python"
] | 48 | 2020-12-19T13:47:26.000Z | 2021-01-07T22:27:56.000Z | lf3py/di/__init__.py | rog-works/lf3py | e89937f7aa133ed54d85764f06101ab9abf6b960 | [
"CNRI-Python"
] | null | null | null | from lf3py.di.di import DI # noqa F401
| 20 | 39 | 0.725 |
be8a2d82d13baa6e60ff4dbca25351bcb2190394 | 1,418 | py | Python | critical/tasks.py | lenarother/django-critical-css | 15c12ea02f7ea049e59efba4d963c35f41f26d78 | [
"MIT"
] | 2 | 2020-06-06T06:50:38.000Z | 2022-02-03T08:54:28.000Z | critical/tasks.py | lenarother/django-critical-css | 15c12ea02f7ea049e59efba4d963c35f41f26d78 | [
"MIT"
] | 5 | 2018-12-17T11:12:20.000Z | 2020-11-27T10:28:51.000Z | critical/tasks.py | lenarother/django-critical-css | 15c12ea02f7ea049e59efba4d963c35f41f26d78 | [
"MIT"
] | 1 | 2021-08-19T06:02:44.000Z | 2021-08-19T06:02:44.000Z | import logging
from django.utils.safestring import mark_safe
from django_rq import job
from inline_static.css import transform_css_urls
logger = logging.getLogger(__name__)
| 37.315789 | 94 | 0.74189 |
be8c87105d1db21be6f93eb2ae080ad460d99a47 | 1,837 | py | Python | test.py | wei2912/bce-simulation | 65c19051417c871bce4585481eb06c5ba986a96f | [
"MIT"
] | null | null | null | test.py | wei2912/bce-simulation | 65c19051417c871bce4585481eb06c5ba986a96f | [
"MIT"
] | 1 | 2016-11-06T11:50:45.000Z | 2016-11-06T11:53:49.000Z | test.py | wei2912/bce-simulation | 65c19051417c871bce4585481eb06c5ba986a96f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""
This script tests the simulations of the experiments.
"""
import math
from utils import coin_var, needle_var
main()
| 25.873239 | 108 | 0.491018 |
be8cef6fbad82834998e279653a3e939a968c9d8 | 2,244 | py | Python | instructions/instructions.py | fernandozanutto/PyNES | cb8d589ceb55cd7df0e114e726c6b6bbbc556172 | [
"Apache-2.0"
] | null | null | null | instructions/instructions.py | fernandozanutto/PyNES | cb8d589ceb55cd7df0e114e726c6b6bbbc556172 | [
"Apache-2.0"
] | null | null | null | instructions/instructions.py | fernandozanutto/PyNES | cb8d589ceb55cd7df0e114e726c6b6bbbc556172 | [
"Apache-2.0"
] | null | null | null | from addressing import *
from instructions.base_instructions import SetBit, ClearBit
from instructions.generic_instructions import Instruction
from status import Status
# set status instructions
# clear status instructions
| 25.213483 | 72 | 0.69385 |
be8d24f272fa353fa6c9d0869d13de96b4754241 | 1,960 | py | Python | python/530.minimum-absolute-difference-in-bst.py | vermouth1992/Leetcode | 0d7dda52b12f9e01d88fc279243742cd8b4bcfd1 | [
"MIT"
] | null | null | null | python/530.minimum-absolute-difference-in-bst.py | vermouth1992/Leetcode | 0d7dda52b12f9e01d88fc279243742cd8b4bcfd1 | [
"MIT"
] | null | null | null | python/530.minimum-absolute-difference-in-bst.py | vermouth1992/Leetcode | 0d7dda52b12f9e01d88fc279243742cd8b4bcfd1 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=530 lang=python3
#
# [530] Minimum Absolute Difference in BST
#
# https://leetcode.com/problems/minimum-absolute-difference-in-bst/description/
#
# algorithms
# Easy (55.23%)
# Total Accepted: 115.5K
# Total Submissions: 209K
# Testcase Example: '[4,2,6,1,3]'
#
# Given the root of a Binary Search Tree (BST), return the minimum absolute
# difference between the values of any two different nodes in the tree.
#
#
# Example 1:
#
#
# Input: root = [4,2,6,1,3]
# Output: 1
#
#
# Example 2:
#
#
# Input: root = [1,0,48,null,null,12,49]
# Output: 1
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [2, 10^4].
# 0 <= Node.val <= 10^5
#
#
#
# Note: This question is the same as 783:
# https://leetcode.com/problems/minimum-distance-between-bst-nodes/
#
#
# Definition for a binary tree node.
from typing import List
| 24.5 | 79 | 0.625 |
be8d50256f2d9fce8a7ed11893b6cad92bc5a14b | 2,769 | py | Python | tensorflow/python/eager/remote_cloud_tpu_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 26 | 2019-11-10T15:33:34.000Z | 2022-03-24T19:56:57.000Z | tensorflow/python/eager/remote_cloud_tpu_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 6 | 2022-01-15T07:17:47.000Z | 2022-02-14T15:28:22.000Z | tensorflow/python/eager/remote_cloud_tpu_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 6 | 2020-03-29T11:10:53.000Z | 2021-06-14T05:39:14.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that we can connect to a real Cloud TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
EXPECTED_DEVICES_PRE_CONNECT = [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:XLA_CPU:0'
]
EXPECTED_DEVICES_AFTER_CONNECT = [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:XLA_CPU:0',
'/job:worker/replica:0/task:0/device:CPU:0',
'/job:worker/replica:0/task:0/device:XLA_CPU:0',
'/job:worker/replica:0/task:0/device:TPU_SYSTEM:0',
'/job:worker/replica:0/task:0/device:TPU:0',
'/job:worker/replica:0/task:0/device:TPU:1',
'/job:worker/replica:0/task:0/device:TPU:2',
'/job:worker/replica:0/task:0/device:TPU:3',
'/job:worker/replica:0/task:0/device:TPU:4',
'/job:worker/replica:0/task:0/device:TPU:5',
'/job:worker/replica:0/task:0/device:TPU:6',
'/job:worker/replica:0/task:0/device:TPU:7',
]
if __name__ == '__main__':
absltest.main()
| 36.434211 | 80 | 0.717949 |
be8db6395c3bc7d6f2f0df95f16ef512dceb29b7 | 1,418 | py | Python | test/functional/bsv-blocksize-params.py | gbtn/bitcoin-sv-gbtn | 8b09d1aa072da819fb3309b0be85dae0f1ac9549 | [
"MIT"
] | 3 | 2018-12-03T03:55:08.000Z | 2019-08-13T07:50:45.000Z | test/functional/bsv-blocksize-params.py | Chihuataneo/bitcoin-sv | d9b12a23dbf0d2afc5f488fa077d762b302ba873 | [
"MIT"
] | 1 | 2020-02-09T11:35:45.000Z | 2020-02-09T11:35:45.000Z | test/functional/bsv-blocksize-params.py | Chihuataneo/bitcoin-sv | d9b12a23dbf0d2afc5f488fa077d762b302ba873 | [
"MIT"
] | 1 | 2018-11-25T03:18:52.000Z | 2018-11-25T03:18:52.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that the blockmaxsize and excessiveblocksize parameters are also
settable via the bitcoin.conf file.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.cdefs import (ONE_MEGABYTE)
import os
if __name__ == '__main__':
BSVBlockSizeParams().main()
| 34.585366 | 107 | 0.715797 |
be8eb4d6e0f2ba30a5412f64a491cd5cc3dcacad | 1,750 | py | Python | yotta/test/cli/outdated.py | headlessme/yotta | 947ab074b629c8f18ca91ab84ebaa29096b011c6 | [
"Apache-2.0"
] | null | null | null | yotta/test/cli/outdated.py | headlessme/yotta | 947ab074b629c8f18ca91ab84ebaa29096b011c6 | [
"Apache-2.0"
] | null | null | null | yotta/test/cli/outdated.py | headlessme/yotta | 947ab074b629c8f18ca91ab84ebaa29096b011c6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
# internal modules:
from . import util
from . import cli
Test_Outdated = {
'module.json':'''{
"name": "test-outdated",
"version": "0.0.0",
"description": "Test yotta outdated",
"author": "James Crosby <james.crosby@arm.com>",
"license": "Apache-2.0",
"dependencies":{
"test-testing-dummy": "*"
}
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
# test-testing-dummy v0.0.1 (a newer version is available from the registry,
# and will be installed by yt up)
'yotta_modules/test-testing-dummy/module.json':'''{
"name": "test-testing-dummy",
"version": "0.0.1",
"description": "Test yotta's compilation of tests.",
"author": "James Crosby <james.crosby@arm.com>",
"license": "Apache-2.0"
}
'''
}
| 27.777778 | 94 | 0.646286 |
be8eb9841690585b80bc1d8c7ae03dcd42ff539a | 208 | py | Python | geoposition/tests/urls.py | Starcross/django-geoposition | b2b3af2a1b73e0ce99e76f19b7f63f1a91f3e093 | [
"MIT"
] | null | null | null | geoposition/tests/urls.py | Starcross/django-geoposition | b2b3af2a1b73e0ce99e76f19b7f63f1a91f3e093 | [
"MIT"
] | 1 | 2020-02-03T17:10:55.000Z | 2020-02-03T17:10:55.000Z | geoposition/tests/urls.py | Starcross/django-geoposition | b2b3af2a1b73e0ce99e76f19b7f63f1a91f3e093 | [
"MIT"
] | 1 | 2019-12-22T12:17:19.000Z | 2019-12-22T12:17:19.000Z | from django.urls import path, include
from django.contrib import admin
from example.views import poi_list
admin.autodiscover()
urlpatterns = [
path('', poi_list),
path('admin/', admin.site.urls),
]
| 18.909091 | 37 | 0.725962 |
be8fca7576bb080c666d1d705dca421abd5cb1da | 2,453 | py | Python | A_Stocker/Stocker.py | Allen1218/Python_Project_Interesting | 55d5e58e70e21d45c4bb9dc4d4c219f3a8385834 | [
"Apache-2.0"
] | 1 | 2021-02-03T12:08:06.000Z | 2021-02-03T12:08:06.000Z | A_Stocker/Stocker.py | Allen1218/Python_Project_Interesting | 55d5e58e70e21d45c4bb9dc4d4c219f3a8385834 | [
"Apache-2.0"
] | null | null | null | A_Stocker/Stocker.py | Allen1218/Python_Project_Interesting | 55d5e58e70e21d45c4bb9dc4d4c219f3a8385834 | [
"Apache-2.0"
] | null | null | null | import threading
import tushare as ts
import pandas as pd
import datetime
STOCK = {#'002594':[1,170.15], ## /
'601012':[11,99.9], ##
'002340':[12,8.72], ##
'603259':[1,141.7], ##
'002346':[10,10.68], ##
#'600438':[9,42.96], ##
#'002475':[3,59.51], ##
#'603308':[1,33.49], ##
#'002415': [3, 66.40], ##
# '600559':[3,35.3], ##
# '601100':[1, 114.5], ##
# '603466':[6, 22.40] ##
}
TimerNum = 20.0 # s
Total = 0
# #rodo
def get_all_price():
'''process all stock'''
stockCode = list(STOCK.keys())
df = ts.get_realtime_quotes(stockCode)
lp = list(STOCK.values())
stockNum = []
stockCostPrice = []
for i in range(len(lp)):
stockNum.append(lp[i][0])
stockCostPrice.append(lp[i][1])
df['num'] = stockNum
df['stockCostPrice'] = stockCostPrice
#
# profit and lost ratio
plRatio = round((df['price'].astype(float) / df['stockCostPrice'] - 1)*100,2)
# profit and lost
df['plRatio'] = plRatio
df['stockNum'] = stockNum
pl = round(df['plRatio'].astype(float) * df['stockNum'] * df['stockCostPrice'].astype(float),2)
df['pl'] = pl
# Rise and fall
currentRF = round((df['price'].astype(float) / df['pre_close'].astype(float) - 1)*100,2)
df['currentRF'] = currentRF
df1 = df[[ 'open', 'price', 'stockCostPrice', 'plRatio', 'num','pl', 'currentRF','name']]
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
pd.set_option('display.width', 180) # (****)
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)
sss = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f)")[:-4]
print('\n')
print("----------------" + sss +"------------------")
print(df1)
sum_int = round(df['pl'].sum(),2)
print("total profit and lost is " + sum_int.astype(str))
print('\n')
# df.to_csv('stock_data.csv', encoding='utf_8_sig', index=None)
global timer
timer = threading.Timer(TimerNum, get_all_price, [])
timer.start()
if __name__ == '__main__':
print(STOCK)
get_all_price()
timer = threading.Timer(TimerNum, get_all_price, [])
timer.start()
| 30.6625 | 99 | 0.565838 |
be9026a8dcf2d835f2e8c702efdeeb3e278299c1 | 1,011 | py | Python | tests/extractors/test_etrade.py | mkazin/StatementRenamer | ef03c71f0e627a15a4bba08e45bfa90ecacd28fc | [
"Apache-2.0"
] | null | null | null | tests/extractors/test_etrade.py | mkazin/StatementRenamer | ef03c71f0e627a15a4bba08e45bfa90ecacd28fc | [
"Apache-2.0"
] | 15 | 2018-05-01T12:48:30.000Z | 2021-05-14T02:52:48.000Z | tests/extractors/test_etrade.py | mkazin/StatementRenamer | ef03c71f0e627a15a4bba08e45bfa90ecacd28fc | [
"Apache-2.0"
] | 1 | 2019-07-09T22:59:50.000Z | 2019-07-09T22:59:50.000Z | from datetime import datetime
from statement_renamer.extractors.etrade import ETradeDateExtractor as EXTRACTOR_UNDER_TEST
from statement_renamer.extractors.factory import ExtractorFactory
TESTDATA = (
"""
PAGE 1 OF 6 February 1, 2019 - March 31, 2019AccountNumber:####-####AccountType:ROTH IRA
PAGE 5 OF 6Account Number: ####-####Statement Period : February 1, 2019 - March 31, 2019Account Type
TolearnmoreabouttheRSDAProgram,pleasereviewyourRSDAProgramCustomerAgreement,visitwww.etrade.com,orcallusat1-800-387-2331
"""
)
| 32.612903 | 124 | 0.75272 |
be917ccdfeb7754dd0eabc0327954755752723d8 | 425 | py | Python | Estrutura_Decisao/who.py | M3nin0/supreme-broccoli | 186c1ea3b839ba3139f9301660dec8fbd27a162e | [
"Apache-2.0"
] | null | null | null | Estrutura_Decisao/who.py | M3nin0/supreme-broccoli | 186c1ea3b839ba3139f9301660dec8fbd27a162e | [
"Apache-2.0"
] | null | null | null | Estrutura_Decisao/who.py | M3nin0/supreme-broccoli | 186c1ea3b839ba3139f9301660dec8fbd27a162e | [
"Apache-2.0"
] | null | null | null | prod1 = float(input("Insira o valor do produto A: "))
prod2 = float(input("Insira o valor do produto B: "))
prod3 = float(input("Insira o valor do produto C: "))
if prod1 < prod2 and prod1 < prod3:
print ("Escolha o produto A o mais barato")
elif prod2 < prod1 and prod2 < prod3:
print ("Escolha o produto B o mais barato")
elif prod3 < prod1 and prod3 < prod2:
print ("Escolha o produto C o mais barato")
| 38.636364 | 53 | 0.68 |
be98084b654d84cf6a197790eaa2f280fb68a68e | 800 | py | Python | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/train/experimental/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | 2 | 2020-09-30T00:11:09.000Z | 2021-10-04T13:00:38.000Z | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/train/experimental/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | null | null | null | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/train/experimental/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | 1 | 2021-01-28T01:57:41.000Z | 2021-01-28T01:57:41.000Z | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.train.experimental namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.training.experimental.loss_scale import DynamicLossScale
from tensorflow.python.training.experimental.loss_scale import FixedLossScale
from tensorflow.python.training.experimental.loss_scale import LossScale
from tensorflow.python.training.experimental.mixed_precision import disable_mixed_precision_graph_rewrite
from tensorflow.python.training.experimental.mixed_precision import enable_mixed_precision_graph_rewrite
from tensorflow.python.training.tracking.python_state import PythonState
del _print_function
| 44.444444 | 105 | 0.86875 |
be986d230ef62a7e44ef6996ed58eb548aa4181b | 4,004 | py | Python | SciDataTool/Methods/VectorField/plot_3D_Data.py | BenjaminGabet/SciDataTool | 7994441de4c54921d43750cacd8df761ba4bd421 | [
"Apache-2.0"
] | null | null | null | SciDataTool/Methods/VectorField/plot_3D_Data.py | BenjaminGabet/SciDataTool | 7994441de4c54921d43750cacd8df761ba4bd421 | [
"Apache-2.0"
] | null | null | null | SciDataTool/Methods/VectorField/plot_3D_Data.py | BenjaminGabet/SciDataTool | 7994441de4c54921d43750cacd8df761ba4bd421 | [
"Apache-2.0"
] | null | null | null | def plot_3D_Data(
self,
*arg_list,
is_norm=False,
unit="SI",
component_list=None,
save_path=None,
x_min=None,
x_max=None,
y_min=None,
y_max=None,
z_min=None,
z_max=None,
z_range=None,
is_auto_ticks=True,
is_auto_range=False,
is_2D_view=False,
is_same_size=False,
N_stem=100,
fig=None,
ax=None,
is_show_fig=None,
is_logscale_x=False,
is_logscale_y=False,
is_logscale_z=False,
thresh=0.02,
is_switch_axes=False,
colormap="RdBu_r",
win_title=None,
font_name="arial",
font_size_title=12,
font_size_label=10,
font_size_legend=8,
):
"""Plots a field as a function of time
Parameters
----------
self : Output
an Output object
Data_str : str
name of the Data Object to plot (e.g. "mag.Br")
*arg_list : list of str
arguments to specify which axes to plot
is_norm : bool
boolean indicating if the field must be normalized
unit : str
unit in which to plot the field
save_path : str
full path including folder, name and extension of the file to save if save_path is not None
x_min : float
minimum value for the x-axis
x_max : float
maximum value for the x-axis
y_min : float
minimum value for the y-axis
y_max : float
maximum value for the y-axis
z_min : float
minimum value for the z-axis
z_max : float
maximum value for the z-axis
is_auto_ticks : bool
in fft, adjust ticks to freqs (deactivate if too close)
is_auto_range : bool
in fft, display up to 1% of max
is_2D_view : bool
True to plot Data in xy plane and put z as colormap
is_same_size : bool
True to have all color blocks with same size in 2D view
N_stem : int
number of harmonics to plot (only for stem plots)
fig : Matplotlib.figure.Figure
existing figure to use if None create a new one
ax : Matplotlib.axes.Axes object
ax on which to plot the data
is_show_fig : bool
True to show figure after plot
is_logscale_x : bool
boolean indicating if the x-axis must be set in logarithmic scale
is_logscale_y : bool
boolean indicating if the y-axis must be set in logarithmic scale
is_logscale_z : bool
boolean indicating if the z-axis must be set in logarithmic scale
thresh : float
threshold for automatic fft ticks
is_switch_axes : bool
to switch x and y axes
"""
# Call the plot on each component
if component_list is None: # default: extract all components
component_list = self.components.keys()
for i, comp in enumerate(component_list):
if save_path is not None and len(component_list) > 1:
save_path_comp = (
save_path.split(".")[0] + "_" + comp + "." + save_path.split(".")[1]
)
else:
save_path_comp = save_path
self.components[comp].plot_3D_Data(
arg_list,
is_norm=is_norm,
unit=unit,
save_path=save_path_comp,
x_min=x_min,
x_max=x_max,
y_min=y_min,
y_max=y_max,
z_min=z_min,
z_max=z_max,
colormap=colormap,
is_auto_ticks=is_auto_ticks,
is_auto_range=is_auto_range,
is_2D_view=is_2D_view,
is_same_size=is_same_size,
N_stem=N_stem,
fig=fig,
ax=ax,
is_show_fig=is_show_fig,
is_logscale_x=is_logscale_x,
is_logscale_y=is_logscale_y,
is_logscale_z=is_logscale_z,
thresh=thresh,
is_switch_axes=is_switch_axes,
win_title=win_title,
font_name=font_name,
font_size_title=font_size_title,
font_size_label=font_size_label,
font_size_legend=font_size_legend,
)
| 29.880597 | 99 | 0.610889 |
be995dc30a4b39d65ba03829daf98d9b834c9449 | 37,788 | py | Python | tests/unittests/plotting/test_plotly_backend.py | obilaniu/orion | bc886daf791d66490b59e43657f6f6db45d34ea8 | [
"BSD-3-Clause"
] | 1 | 2021-04-10T16:18:03.000Z | 2021-04-10T16:18:03.000Z | tests/unittests/plotting/test_plotly_backend.py | obilaniu/orion | bc886daf791d66490b59e43657f6f6db45d34ea8 | [
"BSD-3-Clause"
] | null | null | null | tests/unittests/plotting/test_plotly_backend.py | obilaniu/orion | bc886daf791d66490b59e43657f6f6db45d34ea8 | [
"BSD-3-Clause"
] | null | null | null | """Collection of tests for :mod:`orion.plotting.backend_plotly`."""
import copy
import numpy
import pandas
import plotly
import pytest
import orion.client
from orion.analysis.partial_dependency_utils import partial_dependency_grid
from orion.core.worker.experiment import Experiment
from orion.plotting.base import (
lpi,
parallel_coordinates,
partial_dependencies,
rankings,
regret,
regrets,
)
from orion.testing import create_experiment
from orion.testing.plotting import (
assert_lpi_plot,
assert_parallel_coordinates_plot,
assert_partial_dependencies_plot,
assert_rankings_plot,
assert_regret_plot,
assert_regrets_plot,
)
config = dict(
name="experiment-name",
space={"x": "uniform(0, 200)"},
metadata={
"user": "test-user",
"orion_version": "XYZ",
"VCS": {
"type": "git",
"is_dirty": False,
"HEAD_sha": "test",
"active_branch": None,
"diff_sha": "diff",
},
},
version=1,
pool_size=1,
max_trials=10,
working_dir="",
algorithms={"random": {"seed": 1}},
producer={"strategy": "NoParallelStrategy"},
)
trial_config = {
"experiment": 0,
"status": "completed",
"worker": None,
"start_time": None,
"end_time": None,
"heartbeat": None,
"results": [],
"params": [],
}
def mock_space(x="uniform(0, 6)", y="uniform(0, 3)", **kwargs):
"""Build a mocked space"""
mocked_config = copy.deepcopy(config)
mocked_config["space"] = {"x": x}
if y is not None:
mocked_config["space"]["y"] = y
mocked_config["space"].update(kwargs)
return mocked_config
def mock_experiment(
monkeypatch, ids=None, x=None, y=None, z=None, objectives=None, status=None
):
"""Mock experiment to_pandas to return given data (or default one)"""
if ids is None:
ids = ["a", "b", "c", "d"]
if x is None:
x = [0, 1, 2, 4]
if y is None:
y = [3, 2, 0, 1]
if objectives is None:
objectives = [0.1, 0.2, 0.3, 0.5]
if status is None:
status = ["completed", "completed", "completed", "completed"]
data = {
"id": ids,
"x": x,
"objective": objectives,
"status": status,
"suggested": ids,
}
if not isinstance(y, str):
data["y"] = y
if z is not None:
data["z"] = z
monkeypatch.setattr(Experiment, "to_pandas", to_pandas)
def mock_model():
"""Return a mocked regressor which just predict iterated integers"""
return Model()
def mock_train_regressor(monkeypatch, assert_model=None, assert_model_kwargs=None):
"""Mock the train_regressor to return the mocked regressor instead"""
def train_regressor(model, data, **kwargs):
"""Return the mocked model, and then model argument if requested"""
if assert_model:
assert model == assert_model
if assert_model_kwargs:
assert kwargs == assert_model_kwargs
return mock_model()
monkeypatch.setattr(
"orion.analysis.partial_dependency_utils.train_regressor", train_regressor
)
| 35.716446 | 87 | 0.59196 |
be99d62141111a8ad89510bea1e2a527e33cf08b | 478 | py | Python | autodiff/debug_vjp.py | Jakob-Unfried/msc-legacy | 2c41f3f714936c25dd534bd66da802c26176fcfa | [
"MIT"
] | 1 | 2021-03-22T14:16:43.000Z | 2021-03-22T14:16:43.000Z | autodiff/debug_vjp.py | Jakob-Unfried/msc-legacy | 2c41f3f714936c25dd534bd66da802c26176fcfa | [
"MIT"
] | null | null | null | autodiff/debug_vjp.py | Jakob-Unfried/msc-legacy | 2c41f3f714936c25dd534bd66da802c26176fcfa | [
"MIT"
] | null | null | null | import pdb
import warnings
from jax import custom_vjp
# noinspection PyUnusedLocal
debug_identity.defvjp(_debug_fwd, _debug_bwd)
| 17.071429 | 67 | 0.713389 |
be9aae87c4295f41e5dad9ea47ddb818dd41be55 | 1,246 | py | Python | mileage.py | vwfinley/mileage | eb880107c8c38d33706eac74d01a0d0516716cc7 | [
"MIT"
] | null | null | null | mileage.py | vwfinley/mileage | eb880107c8c38d33706eac74d01a0d0516716cc7 | [
"MIT"
] | null | null | null | mileage.py | vwfinley/mileage | eb880107c8c38d33706eac74d01a0d0516716cc7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Some helpful links
# https://docs.python.org/3/library/tkinter.html
# https://www.python-course.eu/tkinter_entry_widgets.php
import tkinter as tk
app = Application(root=tk.Tk())
app.mainloop()
| 28.318182 | 90 | 0.578652 |
be9bd5f7d840a39915f5c547fcf6ced95fe85e75 | 1,087 | py | Python | rankings/elo.py | ulternate/table_tennis_league | 1762c5b606f149b27d9c06c82e825c948c47b56f | [
"MIT"
] | null | null | null | rankings/elo.py | ulternate/table_tennis_league | 1762c5b606f149b27d9c06c82e825c948c47b56f | [
"MIT"
] | 7 | 2017-08-18T04:15:16.000Z | 2017-08-28T00:54:25.000Z | rankings/elo.py | mohamed-yahya-zakria/table-tennis-league | 07cc6fe46100a4d4279c8a6ae5eea26984df4664 | [
"MIT"
] | 1 | 2017-08-18T11:24:00.000Z | 2017-08-18T11:24:00.000Z | def elo(winner_rank, loser_rank, weighting):
"""
:param winner: The Player that won the match.
:param loser: The Player that lost the match.
:param weighting: The weighting factor to suit your comp.
:return: (winner_new_rank, loser_new_rank) Tuple.
This follows the ELO ranking method.
"""
winner_rank_transformed = 10 ** (winner_rank / 400)
opponent_rank_transformed = 10 ** (loser_rank / 400)
transformed_sum = winner_rank_transformed + opponent_rank_transformed
winner_score = winner_rank_transformed / transformed_sum
loser_score = opponent_rank_transformed / transformed_sum
winner_rank = winner_rank + weighting * (
1 - winner_score)
loser_rank = loser_rank - weighting * loser_score
# Set a floor of 100 for the rankings.
winner_rank = 100 if winner_rank < 100 else winner_rank
loser_rank = 100 if loser_rank < 100 else loser_rank
winner_rank = float('{result:.2f}'.format(result=winner_rank))
loser_rank = float('{result:.2f}'.format(result=loser_rank))
return winner_rank, loser_rank
| 37.482759 | 73 | 0.720331 |
be9c9dcbecf6ee782a06508d51f148623da5f942 | 3,766 | py | Python | src/samplics/regression/glm.py | samplics-org/samplics | b5f49d075194cc24208f567e6a00e86aa24bec26 | [
"MIT"
] | 14 | 2021-05-03T19:59:58.000Z | 2022-03-27T18:58:36.000Z | src/samplics/regression/glm.py | samplics-org/samplics | b5f49d075194cc24208f567e6a00e86aa24bec26 | [
"MIT"
] | 8 | 2021-06-17T01:13:01.000Z | 2022-03-27T18:31:15.000Z | src/samplics/regression/glm.py | samplics-org/samplics | b5f49d075194cc24208f567e6a00e86aa24bec26 | [
"MIT"
] | 1 | 2022-03-28T06:58:55.000Z | 2022-03-28T06:58:55.000Z | from __future__ import annotations
from typing import Any, Callable, Optional, Union
import numpy as np
# import pandas as pd
import statsmodels.api as sm
from samplics.estimation.expansion import TaylorEstimator
from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans
from samplics.utils.types import Array, Number, Series, StringNumber
| 32.747826 | 97 | 0.573022 |
be9e12d7ef9f5aeb6611304d96bd16eabcc64477 | 2,563 | py | Python | tests/test_scopes.py | leg100/scopes | 6a31908acf44b9f65f25668230197ed13229a80d | [
"MIT"
] | null | null | null | tests/test_scopes.py | leg100/scopes | 6a31908acf44b9f65f25668230197ed13229a80d | [
"MIT"
] | 1 | 2021-11-15T17:47:40.000Z | 2021-11-15T17:47:40.000Z | tests/test_scopes.py | leg100/scopes | 6a31908acf44b9f65f25668230197ed13229a80d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `scopes` package."""
import os
print(os.getenv('PYTHONPATH'))
import pytest
from click.testing import CliRunner
from scopes.tasks import tasks, bolt, spout, builder
from scopes.graph import G, build, topological_sort, traverse
from scopes import cli
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'scopes.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
# t1---
# | |
# v v
# t2 t3
# \ / t4
# v |
# t5<----/
| 22.286957 | 78 | 0.536871 |
be9e3afec2b413ef97912bf7c25f3305c1a3ab7c | 1,055 | py | Python | timeparse/LunarSolarConverter/__init__.py | tornadoyi/timeparse | 1e44dbc6acdb07d6c023806d55034642c7ec0de9 | [
"Apache-2.0"
] | null | null | null | timeparse/LunarSolarConverter/__init__.py | tornadoyi/timeparse | 1e44dbc6acdb07d6c023806d55034642c7ec0de9 | [
"Apache-2.0"
] | null | null | null | timeparse/LunarSolarConverter/__init__.py | tornadoyi/timeparse | 1e44dbc6acdb07d6c023806d55034642c7ec0de9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'isee15'
import LunarSolarConverter
converter = LunarSolarConverter.LunarSolarConverter()
| 28.513514 | 81 | 0.694787 |
be9f7ef00ae244d09a69281d387b6fc00e3b787b | 4,345 | py | Python | examples/hello-pt/custom/cifar10validator.py | ArnovanHilten/NVFlare | bb45e7d606849c6bc8f7542347459c6ba1be00c4 | [
"Apache-2.0"
] | 155 | 2021-08-05T18:05:09.000Z | 2022-03-27T15:32:56.000Z | examples/hello-pt/custom/cifar10validator.py | ArnovanHilten/NVFlare | bb45e7d606849c6bc8f7542347459c6ba1be00c4 | [
"Apache-2.0"
] | 216 | 2021-12-01T06:07:12.000Z | 2022-03-30T23:34:02.000Z | examples/hello-pt/custom/cifar10validator.py | ArnovanHilten/NVFlare | bb45e7d606849c6bc8f7542347459c6ba1be00c4 | [
"Apache-2.0"
] | 44 | 2021-11-24T16:03:29.000Z | 2022-03-24T23:28:39.000Z | # Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, ToTensor, Normalize
from nvflare.apis.dxo import from_shareable, DataKind, DXO
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from simple_network import SimpleNetwork
| 40.231481 | 114 | 0.643268 |
be9f9cd98cdf38a09e9b5c7bf41b9142f3bd6c42 | 4,220 | py | Python | lambda/enable-traffic-mirroring.py | wrharding/aws-infra | 5e913f8342b3a3b3a4599648c4a914f828b5bc18 | [
"MIT"
] | 1 | 2022-01-14T18:03:29.000Z | 2022-01-14T18:03:29.000Z | lambda/enable-traffic-mirroring.py | wrharding/aws-infra | 5e913f8342b3a3b3a4599648c4a914f828b5bc18 | [
"MIT"
] | null | null | null | lambda/enable-traffic-mirroring.py | wrharding/aws-infra | 5e913f8342b3a3b3a4599648c4a914f828b5bc18 | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2020-2021 Chris Farris (https://www.chrisfarris.com)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import boto3
from botocore.exceptions import ClientError
import json
import os
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
TAG_KEY=os.getenv('TAG_KEY', default='WireShark')
## END OF FUNCTION ##
if __name__ == '__main__':
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
ch = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
# create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
try:
handler(None, None)
except KeyboardInterrupt:
exit(1)
| 39.074074 | 129 | 0.679147 |
be9fea8e8fc13061760196f0e3818adcd5989d77 | 9,088 | py | Python | src/value_function.py | wu6u3/async_trpo | b6e3dd56775464b58f7433773e8b04d88cf3fdbc | [
"MIT"
] | 6 | 2018-02-02T19:53:08.000Z | 2021-12-06T19:48:19.000Z | src/value_function.py | wu6u3/async_trpo | b6e3dd56775464b58f7433773e8b04d88cf3fdbc | [
"MIT"
] | null | null | null | src/value_function.py | wu6u3/async_trpo | b6e3dd56775464b58f7433773e8b04d88cf3fdbc | [
"MIT"
] | 2 | 2018-07-26T06:22:04.000Z | 2019-03-06T10:05:18.000Z | """
State-Value Function
Written by Patrick Coady (pat-coady.github.io)
Modified by Tin-Yin Lai (wu6u3) into asynchronous version
"""
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
#import os
| 42.666667 | 121 | 0.575704 |
bea166ed0dc38a3bddb60dc5fe5709a4f52a15f3 | 168 | py | Python | mdepub/actions/__init__.py | bkidwell/mdepub | af9e7d2065fb8251b6767e827ac2cff059ce7668 | [
"0BSD"
] | 35 | 2015-01-14T22:15:35.000Z | 2021-05-23T06:04:34.000Z | mdepub/actions/__init__.py | bkidwell/mdepub | af9e7d2065fb8251b6767e827ac2cff059ce7668 | [
"0BSD"
] | null | null | null | mdepub/actions/__init__.py | bkidwell/mdepub | af9e7d2065fb8251b6767e827ac2cff059ce7668 | [
"0BSD"
] | 7 | 2015-07-23T11:28:18.000Z | 2021-02-09T17:07:06.000Z | """mdepub actions -- these modules do the actual work."""
import archive
import clean
import create
import epub
import extract
import html
import newid
import version
| 15.272727 | 57 | 0.791667 |
bea186d9537f0999c2f3875648b97a7c001cd71a | 10,439 | py | Python | gbe/views/make_bid_view.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | 1 | 2021-03-14T11:56:47.000Z | 2021-03-14T11:56:47.000Z | gbe/views/make_bid_view.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | 180 | 2019-09-15T19:52:46.000Z | 2021-11-06T23:48:01.000Z | gbe/views/make_bid_view.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | null | null | null | from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import (
get_object_or_404,
render,
)
from gbe.models import (
Conference,
UserMessage,
)
from gbe_logging import log_func
from gbe.functions import (
validate_profile,
)
from gbe.email.functions import notify_reviewers_on_bid_change
from gbetext import (
no_login_msg,
fee_instructions,
full_login_msg,
payment_needed_msg,
payment_details_error,
)
from gbe_utils.text import no_profile_msg
from gbe.ticketing_idd_interface import (
get_payment_details,
get_ticket_form,
fee_paid,
)
| 36.121107 | 78 | 0.560111 |
bea1d1375a8d223083e55cf97bff2f2ce8f4f7ba | 6,977 | py | Python | epicteller/core/dao/character.py | KawashiroNitori/epicteller | 264b11e7e6eb58beb0f67ecbbb811d268a533f7a | [
"MIT"
] | null | null | null | epicteller/core/dao/character.py | KawashiroNitori/epicteller | 264b11e7e6eb58beb0f67ecbbb811d268a533f7a | [
"MIT"
] | null | null | null | epicteller/core/dao/character.py | KawashiroNitori/epicteller | 264b11e7e6eb58beb0f67ecbbb811d268a533f7a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from collections import defaultdict
from typing import List, Optional, Iterable, Dict
import base62
from sqlalchemy import select, and_
from sqlalchemy.dialects.mysql import insert as mysql_insert
from epicteller.core.model.character import Character
from epicteller.core.tables import table
from epicteller.core.util import ObjectDict
from epicteller.core.util.enum import ExternalType
from epicteller.core.util.seq import get_id
| 36.528796 | 116 | 0.642826 |
bea22b520ab74130906570943260ba5b3628befe | 4,313 | py | Python | examples/sentence_classfication/task_sentiment_classification_roformer_v2.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | 49 | 2022-03-15T07:28:16.000Z | 2022-03-31T07:16:15.000Z | examples/sentence_classfication/task_sentiment_classification_roformer_v2.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | null | null | null | examples/sentence_classfication/task_sentiment_classification_roformer_v2.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | null | null | null | #! -*- coding:utf-8 -*-
# RoPE
# https://github.com/ZhuiyiTechnology/roformer-v2
# pytorchhttps://github.com/JunnYu/RoFormer_pytorch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 128
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#
tokenizer = Tokenizer(dict_path, do_lower_case=True)
#
#
train_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# bert
model = Model().to(device)
# lossoptimizer
model.compile(
loss=nn.CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5), #
metrics=['accuracy']
)
#
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=20, steps_per_epoch=500, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
| 38.855856 | 176 | 0.703455 |
bea3fce840a92d3dac26a2f605494f57192e6efe | 1,217 | py | Python | pyscf/nao/test/test_0037_aos.py | fdmalone/pyscf | 021b17ac721e292b277d2b740e2ff8ab38bb6a4a | [
"Apache-2.0"
] | 1 | 2019-07-01T12:39:45.000Z | 2019-07-01T12:39:45.000Z | pyscf/nao/test/test_0037_aos.py | fdmalone/pyscf | 021b17ac721e292b277d2b740e2ff8ab38bb6a4a | [
"Apache-2.0"
] | null | null | null | pyscf/nao/test/test_0037_aos.py | fdmalone/pyscf | 021b17ac721e292b277d2b740e2ff8ab38bb6a4a | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
# Run this module's unit tests when executed directly. (The TestCase classes
# appear to have been stripped from this chunk of the dump -- only the license
# header, imports, and this guard survive.)
if __name__ == "__main__": unittest.main()
| 38.03125 | 102 | 0.739523 |
bea43752768259680c29953a0cec72ec71c5a8eb | 1,329 | py | Python | code_week12_713_719/is_graph_bipartite_hard.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week12_713_719/is_graph_bipartite_hard.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week12_713_719/is_graph_bipartite_hard.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | '''
Given an undirected graph, return true if and only if it is bipartite.

A graph is bipartite if its set of nodes can be divided into two independent
subsets A and B such that every edge in the graph connects a node in A and a
node in B.

The graph is given as follows: graph[i] is a list of indexes j for which the
edge between nodes i and j exists. Each node is an integer between 0 and
graph.length - 1. There are no self edges or parallel edges: graph[i] does not
contain i, and it does not contain any element twice.

Example 1:
Input: [[1,3], [0,2], [1,3], [0,2]]
Output: true
Explanation:
The graph looks like this:
0----1
|    |
|    |
3----2
We can divide the vertices into two groups: {0, 2} and {1, 3}.

Example 2:
Input: [[1,2,3], [0,2], [0,1,3], [0,2]]
Output: false
Explanation:
The graph looks like this:
0----1
| \  |
|  \ |
3----2
We cannot find a way to divide the set of nodes into two independent subsets.

Source: LeetCode
Link: https://leetcode-cn.com/problems/is-graph-bipartite
| 21.435484 | 117 | 0.51392 |
bea493d4dc7e2d4506520e5f797ce4cb0a9a2a6e | 1,417 | py | Python | data_preprocessing/decision_tree_regression.py | Frost199/Machine_Learning | 8cf77c6cbbae7781ac6f2ffcc9218ad79472d287 | [
"MIT"
] | null | null | null | data_preprocessing/decision_tree_regression.py | Frost199/Machine_Learning | 8cf77c6cbbae7781ac6f2ffcc9218ad79472d287 | [
"MIT"
] | null | null | null | data_preprocessing/decision_tree_regression.py | Frost199/Machine_Learning | 8cf77c6cbbae7781ac6f2ffcc9218ad79472d287 | [
"MIT"
] | 1 | 2020-05-23T16:46:52.000Z | 2020-05-23T16:46:52.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 06:44:47 2018
@author: Eleam Emmanuel
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
# take all the columns but leave the last one(-1)
# always make sure our independent variable is a matrix not a vector and
# dependent variable can be a vector
X = dataset.iloc[:, 1:-1].values
Y = dataset.iloc[:, 2].values
# splitting the dataset into a training set and a test set
# x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# feature scaling
"""sc_X = StandardScaler()
x_train = sc_X.fit_transform(x_train)
x_test = sc_X.transform(x_test)
sc_Y = StandardScaler()
x_train = sc_X.fit_transform(x_train)"""
# fitting the Decision Tree regression Model to the dataset
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, Y)
# predicting a new result
y_pred = regressor.predict(6.5)
# Visualizing the Decision tree regression result (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, Y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title("Truth or Bluff (Regression Model)")
plt.xlabel("Position Level")
plt.ylabel("Salary")
plt.show() | 30.804348 | 92 | 0.740296 |
bea4e663116d7a61eb7a7e77d69904ecfbbff62c | 1,786 | py | Python | user_messages/apps.py | everaccountable/django-user-messages | 101d539b785bdb440bf166fb16ad25eb66e4174a | [
"MIT"
] | null | null | null | user_messages/apps.py | everaccountable/django-user-messages | 101d539b785bdb440bf166fb16ad25eb66e4174a | [
"MIT"
] | null | null | null | user_messages/apps.py | everaccountable/django-user-messages | 101d539b785bdb440bf166fb16ad25eb66e4174a | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.conf import settings
from django.core import checks
from django.template import engines
from django.template.backends.django import DjangoTemplates
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
| 33.698113 | 79 | 0.647816 |