hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2258db01c670eb29b690517709324afbc74e8b71 | 8,582 | py | Python | Kernel/kernel.py | y11en/BranchMonitoringProject | 5d3ca533338da919a1757562f3810d49296ebf48 | [
"MIT"
] | 122 | 2017-03-08T00:45:03.000Z | 2022-03-01T03:05:21.000Z | Kernel/kernel.py | y11en/BranchMonitoringProject | 5d3ca533338da919a1757562f3810d49296ebf48 | [
"MIT"
] | 3 | 2017-03-08T01:16:54.000Z | 2017-03-22T22:59:26.000Z | Kernel/kernel.py | y11en/BranchMonitoringProject | 5d3ca533338da919a1757562f3810d49296ebf48 | [
"MIT"
] | 42 | 2017-03-08T21:28:48.000Z | 2022-02-20T15:24:46.000Z | # Kernel introspection module to enrich branch collected data
# This code is part of BranchMonitoring framework
# Written by: Marcus Botacin - 2017
# Federal University of Parana (UFPR)
from xml.etree.ElementTree import ElementTree # Parse XML
import subprocess # Run dump tools
import win32file as w # Use windows API
import time # Wait for data
import signal # Interrupt endless loop
# Monitoring class - retrieves branch data
# Dumper: the introspection class
# "main"
if __name__ == '__main__':
    # Introspect the kernel first to learn loaded modules and their exports.
    # NOTE(review): Dumper and Monitor are not defined in this fragment --
    # presumably stripped from this dump; confirm against the full file.
    dumper = Dumper()
    dumper.dump_modules()
    modules, module_exports = dumper.parse_modules()
    # Then run the monitor forever, feeding it the introspected data.
    monitor = Monitor(save="save.log")
    monitor.loop(modules, module_exports, True)
else:
    # Importing this file as a library module is not supported.
    print("No module import support yet!")
| 34.465863 | 105 | 0.53519 |
2258e4decef3126cb93f24dd49680df54adc84dc | 243 | py | Python | config/environments/__init__.py | mihail-ivanov/flask-init | 47f634f70bb8bd02db8f0a0a3a1955b08a249254 | [
"MIT"
] | null | null | null | config/environments/__init__.py | mihail-ivanov/flask-init | 47f634f70bb8bd02db8f0a0a3a1955b08a249254 | [
"MIT"
] | null | null | null | config/environments/__init__.py | mihail-ivanov/flask-init | 47f634f70bb8bd02db8f0a0a3a1955b08a249254 | [
"MIT"
] | null | null | null |
from .development import DevelopmentConfig
from .testing import TestingConfig
from .production import ProductionConfig

# Lookup table mapping each environment name to its Flask config class.
app_config = dict(
    development=DevelopmentConfig,
    testing=TestingConfig,
    production=ProductionConfig,
)
| 20.25 | 42 | 0.773663 |
225b7caf45db6cf9057062f56f08950fb1b441f2 | 5,026 | py | Python | dialogs.py | rdbende/Sun-Valley-messageboxes | d6f2b0849caf63c609fc22ecd3909491e2f3ffcf | [
"MIT"
] | 5 | 2021-12-29T11:58:37.000Z | 2022-03-06T15:13:08.000Z | dialogs.py | rdbende/Sun-Valley-messageboxes | d6f2b0849caf63c609fc22ecd3909491e2f3ffcf | [
"MIT"
] | 1 | 2022-02-05T10:30:08.000Z | 2022-02-05T16:04:06.000Z | dialogs.py | rdbende/Sun-Valley-messageboxes | d6f2b0849caf63c609fc22ecd3909491e2f3ffcf | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import ttk
from functools import partial
if __name__ == "__main__":
    # Demo: apply the Sun Valley dark theme and show a sample message box.
    root = tk.Tk()
    root.tk.call("source", "sun-valley.tcl")
    root.tk.call("set_theme", "dark")
    root.geometry("600x600")
    # NOTE(review): show_message is not defined in this fragment --
    # presumably provided elsewhere in the full module; confirm.
    show_message("No WiFi connection", "Check your connection and try again.")
    root.mainloop()
| 27.615385 | 87 | 0.591922 |
225c724f4896f9bddbbf401bf1a3929af43df247 | 94 | py | Python | enthought/endo/docerror.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/endo/docerror.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/endo/docerror.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from __future__ import absolute_import
from etsdevtools.endo.docerror import *
| 23.5 | 39 | 0.840426 |
225d1d06e227d6a8a3242fe225e574042e91441e | 12,591 | py | Python | troposphere/kendra.py | marinpurgar/troposphere | ec35854000ddfd5e2eecd251d5ecaf31979bd2d1 | [
"BSD-2-Clause"
] | null | null | null | troposphere/kendra.py | marinpurgar/troposphere | ec35854000ddfd5e2eecd251d5ecaf31979bd2d1 | [
"BSD-2-Clause"
] | null | null | null | troposphere/kendra.py | marinpurgar/troposphere | ec35854000ddfd5e2eecd251d5ecaf31979bd2d1 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2020, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 18.6.0
from . import AWSObject
from . import AWSProperty
from . import Tags
from .validators import boolean
from .validators import integer
| 30.194245 | 76 | 0.669685 |
225fee4b672c69f3b564170c5c438a29025400e1 | 3,046 | py | Python | cv2/wxPython-CV-widget/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | cv2/wxPython-CV-widget/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | cv2/wxPython-CV-widget/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | import wx
import cv2
#----------------------------------------------------------------------
# Panel to display image from camera
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Main Window
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Open the default system camera (device index 0) via OpenCV.
camera = cv2.VideoCapture(0)
# Start the wxPython application and hand the camera to the main window.
app = wx.App()
# NOTE(review): MainWindow is not defined in this fragment -- presumably the
# frame class was stripped from this dump; confirm against the full file.
MainWindow(camera)
app.MainLoop()
| 30.158416 | 114 | 0.520026 |
2260413d47cac288ecaeb49a5d64f3b2f805bd94 | 580 | py | Python | src/inputbox.py | creikey/nuked-dashboard | 250f8af29570bca69394fd1328343917fa067543 | [
"MIT"
] | 1 | 2019-01-17T14:20:32.000Z | 2019-01-17T14:20:32.000Z | src/inputbox.py | creikey/nuked-dashboard | 250f8af29570bca69394fd1328343917fa067543 | [
"MIT"
] | 3 | 2019-01-19T01:33:10.000Z | 2019-01-19T01:35:35.000Z | src/inputbox.py | creikey/doomsdash | 250f8af29570bca69394fd1328343917fa067543 | [
"MIT"
] | null | null | null | import pynk
from pynk.nkpygame import NkPygame
| 34.117647 | 90 | 0.575862 |
226058992d51da3d32320a685665a445a8020b91 | 1,454 | py | Python | 01_demo/MLP_test.py | wwww666/Tensorflow2.0 | 4df3a3784482bb8db7943ffb402b5822d5111ab9 | [
"Apache-2.0"
] | 2 | 2020-04-24T10:20:18.000Z | 2021-02-25T03:53:07.000Z | 01_demo/MLP_test.py | wwww666/Tensorflow2.0 | 4df3a3784482bb8db7943ffb402b5822d5111ab9 | [
"Apache-2.0"
] | null | null | null | 01_demo/MLP_test.py | wwww666/Tensorflow2.0 | 4df3a3784482bb8db7943ffb402b5822d5111ab9 | [
"Apache-2.0"
] | null | null | null | '''
Relu
'''
import tensorflow as tf
import numpy as np
import sys
sys.path.append("..")
from softmax_test import train_che3
from tensorflow.keras.datasets.fashion_mnist import load_data
# Load Fashion-MNIST and scale pixel values into [0, 1].
(x_train,y_train),(x_test,y_test)=load_data()
batch_size=256
x_train=tf.cast(x_train,tf.float32)
x_test=tf.cast(x_test,tf.float32)
x_train=x_train/255.
x_test=x_test/255.
train_iter=tf.data.Dataset.from_tensor_slices((x_train,y_train)).batch(batch_size)
test_iter=tf.data.Dataset.from_tensor_slices((x_test,y_test)).batch(batch_size)
# Model parameters: 784 inputs -> 256 hidden units -> 10 output classes.
num_inputs,num_outputs,num_hiddens=784,10,256
W1=tf.Variable(tf.random.normal(shape=(num_inputs,num_hiddens),mean=0.0,stddev=0.01,dtype=tf.float32))
b1=tf.Variable(tf.zeros(num_hiddens,dtype=tf.float32))
W2=tf.Variable(tf.random.normal(shape=(num_hiddens,num_outputs),mean=0.0,stddev=0.01,dtype=tf.float32))
b2=tf.Variable(tf.random.normal([num_outputs],stddev=0.1))
# NOTE(review): the forward pass `net` and loss function `loss` used below
# are not defined in this fragment -- presumably they were defined where the
# "# relu" / "# softmax" markers sit; confirm against the full file.
# relu
# softmax
# Train for 5 epochs with learning rate 0.1.
num_epochs,lr=5,0.1
params=[W1,b1,W2,b2]
# Run the training loop from the sibling softmax_test module.
train_che3(net,train_iter,test_iter,loss,num_epochs,batch_size,params,lr)
| 29.08 | 104 | 0.751719 |
22613f6d8ef797b79ce3c0bf426040fa5c8d5f9b | 1,704 | py | Python | ticketnum/ticket_numbering.py | phizzl3/PrintShopScripts | 26cf12d189836907370fd8671ef0d8eba7cd3411 | [
"MIT"
] | 1 | 2021-01-19T20:36:35.000Z | 2021-01-19T20:36:35.000Z | ticketnum/ticket_numbering.py | phizzl3/counter-calculator | 26cf12d189836907370fd8671ef0d8eba7cd3411 | [
"MIT"
] | null | null | null | ticketnum/ticket_numbering.py | phizzl3/counter-calculator | 26cf12d189836907370fd8671ef0d8eba7cd3411 | [
"MIT"
] | null | null | null | """
A simple script for numbering nUp tickets for the print shop.
"""
def numbering_main() -> None:
    """
    Gets numbering sequences for nUp ticket numbering.

    Prompts for the total ticket count, how many tickets fit on a sheet
    (n_up), and the starting ticket number, then prints the starting and
    ending number for every ticket position on the sheet.
    """
    print('[ Ticket Numbering Assist ]'.center(40))

    # Gather ticket, sheet and numbering info from the user.
    total_requested = int(input('\n How many tickets do you need in total?: '))
    n_up = int(input(' How many tickets will fit on a sheet?: '))
    starting_number = int(input(' What number should we start with?: '))

    # Sheets needed; round up (and pad the ticket count) for a partial sheet.
    total_sheets, remainder = divmod(total_requested, n_up)
    final_tickets = total_requested
    if remainder > 0:
        total_sheets += 1
        final_tickets = total_sheets * n_up

    # Show the computed totals.
    print('\n Final totals...')
    print(f' Total tickets Printed: {final_tickets}')
    print(f' Tickets per sheet: {n_up}')
    print(f' Total Sheets needed: {total_sheets}\n')
    print(' Here are your numbers...\n')

    # Each sheet position numbers a consecutive run of `total_sheets` tickets.
    ending_number = starting_number + total_sheets - 1
    for position in range(1, n_up + 1):
        print(
            f' #{position:2}: Starting Number - {starting_number:4} | Ending Number - {ending_number:4}')
        starting_number = ending_number + 1
        ending_number = starting_number + total_sheets - 1

    input('\n Press ENTER to return...')


if __name__ == '__main__':
    numbering_main()
| 33.411765 | 111 | 0.662559 |
2261d6d71d2909cadfc80285de61e3b9d29b7970 | 2,539 | py | Python | emergent ferromagnetism near three-quarters filling in twisted bilayer graphene/scripts/myTerrain.py | aaronsharpe/publication_archives | aabf1a7899b81c43fc27bdd05094f5a84e509e90 | [
"MIT"
] | null | null | null | emergent ferromagnetism near three-quarters filling in twisted bilayer graphene/scripts/myTerrain.py | aaronsharpe/publication_archives | aabf1a7899b81c43fc27bdd05094f5a84e509e90 | [
"MIT"
] | null | null | null | emergent ferromagnetism near three-quarters filling in twisted bilayer graphene/scripts/myTerrain.py | aaronsharpe/publication_archives | aabf1a7899b81c43fc27bdd05094f5a84e509e90 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 15 21:32:17 2017
@author: Aaron Sharpe
"""
import numpy as np
import os
from matplotlib.colors import LinearSegmentedColormap
| 30.590361 | 86 | 0.54549 |
2262f6ba6d8c2278a63ea0e571aa7725d2647bf8 | 11,843 | py | Python | plugin/taskmage2/project/projects.py | willjp/vim-taskmage | adcf809ccf1768753eca4dadaf6279b34e8d5699 | [
"BSD-2-Clause"
] | 1 | 2017-11-28T14:12:03.000Z | 2017-11-28T14:12:03.000Z | plugin/taskmage2/project/projects.py | willjp/vim-taskmage | adcf809ccf1768753eca4dadaf6279b34e8d5699 | [
"BSD-2-Clause"
] | 16 | 2017-08-13T18:01:26.000Z | 2020-11-17T04:55:43.000Z | plugin/taskmage2/project/projects.py | willjp/vim-taskmage | adcf809ccf1768753eca4dadaf6279b34e8d5699 | [
"BSD-2-Clause"
] | null | null | null | import os
import shutil
import tempfile
from taskmage2.utils import filesystem, functional
from taskmage2.asttree import asttree, renderers
from taskmage2.parser import iostream, parsers
from taskmage2.project import taskfiles
def archive_completed(self, filepath=None):
    """ Archive every fully-completed task-branch.

    A branch is only archived when the task and all of its subtasks are
    completed; a branch with any unfinished child is left in place.

    Example:

        .. code-block:: ReStructuredText

            ## a,b, and c will be archived (entire branch completed)
            x a
              x b
              x c

            ## nothing archived (branch not entirely completed)
            x a
              x b
              * c

    Args:
        filepath (str, optional): ``(ex: '/src/project/file.mtask' )``
            When given, only this mtask file is processed. Otherwise the
            whole project would be processed (not implemented yet).
    """
    if filepath is None:
        # project-wide archiving has not been implemented yet
        raise NotImplementedError('todo - archive completed tasks from all mtask files')
    self._archive_completed(filepath)
def is_project_path(self, filepath):
    """ Return True when `filepath` lives under this project's root directory. """
    project_prefix = '{}/'.format(self.root)
    return filepath.startswith(project_prefix)
def is_archived_path(self, filepath):
    """ Return True when `filepath` is inside the project's `.taskmage` archive dir. """
    archive_prefix = '{}/.taskmage/'.format(self.root)
    return filepath.startswith(archive_prefix)
def is_active_path(self, filepath):
    """ Return True for mtask files in the project that are not archived. """
    return self.is_project_path(filepath) and not self.is_archived_path(filepath)
def get_archived_path(self, filepath):
    """ Return the archived counterpart of an active mtask filepath.

    Archived paths are returned unchanged.

    Raises:
        RuntimeError: when `filepath` is not inside this project.
    """
    if not self.is_project_path(filepath):
        raise RuntimeError(
            ('filepath not within current taskmage project. \n'
             'project "{}"\n'
             'filepath "{}\n').format(self.root, filepath)
        )
    if self.is_archived_path(filepath):
        return filepath

    # Re-root the project-relative portion under the hidden archive dir.
    normalized = filesystem.format_path(filepath)
    relative = normalized[len(self.root) + 1:]
    return '{}/.taskmage/{}'.format(self.root, relative)
def get_active_path(self, filepath):
    """ Return the active (non-archived) counterpart of an archived mtask filepath.

    Active paths are returned unchanged.

    Raises:
        RuntimeError: when `filepath` is not inside this project.
    """
    if not self.is_project_path(filepath):
        msg = ('filepath not within current taskmage project. \n'
               'project "{}"\n'
               'filepath "{}\n').format(self.root, filepath)
        raise RuntimeError(msg)
    if not self.is_archived_path(filepath):
        return filepath

    # Strip the hidden archive dir prefix and re-root under the project.
    normalized = filesystem.format_path(filepath)
    archive_dir = '{}/.taskmage'.format(self.root)
    relative = normalized[len(archive_dir) + 1:]
    return '{}/{}'.format(self.root, relative)
def get_counterpart(self, filepath):
    """ Map an archived path to its active path, or an active path to its archived path.

    Raises:
        RuntimeError: when `filepath` is not inside this project.
    """
    if not self.is_project_path(filepath):
        raise RuntimeError(
            ('filepath not within current taskmage project. \n'
             'project "{}"\n'
             'filepath "{}\n').format(self.root, filepath)
        )
    if self.is_archived_path(filepath):
        return self.get_active_path(filepath)
    return self.get_archived_path(filepath)
def filter_taskfiles(self, filters):
    """ Return the project's taskfiles with all `filters` applied.

    Args:
        filters (list):
            Functions accepting a :py:obj:`taskmage2.project.taskfiles.TaskFile`
            and returning True (keep) or False (remove).

    Returns:
        Iterable:
            The project taskfiles that survive every filter, e.g.::

                [
                    TaskFile('/path/to/todos/file1.mtask'),
                    TaskFile('/path/to/todos/file2.mtask'),
                    ...
                ]
    """
    all_taskfiles = self.iter_taskfiles()
    return functional.multifilter(filters, all_taskfiles)
def _archive_completed(self, filepath):
    """ Archive completed task-branches of a single mtask file on disk.

    Writes the updated active/archive trees to tempfiles first, and only
    overwrites the real files once both writes succeed, so a failure
    mid-write cannot corrupt the originals.

    Args:
        filepath (str):
            absolute path to a .mtask file.
    """
    (active_ast, archive_ast) = self._archive_completed_as_ast(filepath)
    archive_path = self.get_archived_path(filepath)
    tempdir = tempfile.mkdtemp()
    try:
        # create tempfile objects
        active_taskfile = taskfiles.TaskFile('{}/active.mtask'.format(tempdir))
        archive_taskfile = taskfiles.TaskFile('{}/archive.mtask'.format(tempdir))
        # write tempfiles first -- the real files stay untouched on failure
        active_taskfile.write(active_ast)
        archive_taskfile.write(archive_ast)
        # (if successful) overwrite real files
        active_taskfile.copyfile(filepath)
        archive_taskfile.copyfile(archive_path)
    finally:
        # always remove the scratch directory
        if os.path.isdir(tempdir):
            shutil.rmtree(tempdir)
def _archive_completed_as_ast(self, filepath):
    """ Compute the post-archive ASTs for an mtask file without touching disk.

    Returns:

        .. code-block:: python

            (
                asttree.AbstractSyntaxTree(),  # new active AST
                asttree.AbstractSyntaxTree(),  # new archive AST
            )
    """
    active_ast = self._get_mtaskfile_ast(filepath)
    archive_ast = self._get_mtaskfile_ast(self.get_archived_path(filepath))

    # Move fully-completed branches from the active tree into the archive tree.
    archive_ast = active_ast.archive_completed(archive_ast)
    return (active_ast, archive_ast)
def format_rootpath(path):
    """ Formats a project-directory path.

    Ensures path ends with the `.taskmage` dir, and uses forward slashes
    exclusively.

    Returns:
        str:
            a new formatted path
    """
    steps = [
        _ensure_path_ends_with_dot_taskmage,
        filesystem.format_path,
    ]
    return functional.pipeline(path, steps)
def _ensure_path_ends_with_dot_taskmage(path):
    # Only trailing-slash paths (empty basename) get `.taskmage` appended;
    # any path that already names a final component is returned unchanged.
    if not os.path.basename(path):
        return '{}/.taskmage'.format(path)
    return path
| 31.248021 | 125 | 0.548679 |
2263a0daf4d65f69a2ef1044b98efa275d27150f | 1,611 | py | Python | discord/ext/vbu/cogs/utils/converters/filtered_user.py | 6days9weeks/Novus | a21157f15d7a07669cb75b3f023bd9eedf40e40e | [
"MIT"
] | 2 | 2022-01-22T16:05:42.000Z | 2022-01-22T16:06:07.000Z | discord/ext/vbu/cogs/utils/converters/filtered_user.py | 6days9weeks/Novus | a21157f15d7a07669cb75b3f023bd9eedf40e40e | [
"MIT"
] | null | null | null | discord/ext/vbu/cogs/utils/converters/filtered_user.py | 6days9weeks/Novus | a21157f15d7a07669cb75b3f023bd9eedf40e40e | [
"MIT"
] | null | null | null | from discord.ext import commands
| 38.357143 | 82 | 0.664184 |
226437962414de4509b79b7a803dd031ebb02932 | 361 | py | Python | py/2017/3B.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | py/2017/3B.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | py/2017/3B.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | a = 289326
coords = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]
x,y = (0,0)
dx,dy = (1,0)
M = {(x,y):1}
while M[(x, y)] < a:
x, y = x+dx, y+dy
M[(x, y)] = sum([M[(x+ox, y+oy)] for ox,oy in coords if (x+ox,y+oy) in M])
if (x == y) or (x > 0 and x == 1-y) or (x < 0 and x == -y):
dx, dy = -dy, dx
print M[(x, y)] | 24.066667 | 79 | 0.382271 |
226749a06c765ec39cc633d7c553b9c567992420 | 811 | py | Python | q.py | Akatsuki1910/tokuron | 2f5b05dc1c1395f30e738a0d5749ac32d46e5379 | [
"MIT"
] | null | null | null | q.py | Akatsuki1910/tokuron | 2f5b05dc1c1395f30e738a0d5749ac32d46e5379 | [
"MIT"
] | null | null | null | q.py | Akatsuki1910/tokuron | 2f5b05dc1c1395f30e738a0d5749ac32d46e5379 | [
"MIT"
] | null | null | null | """ Q learning """
import numpy as np
import plot
Q = np.array(np.zeros([11, 3]))
GAMMA = 0.9
ALPHA = 0.1
def action_select(s_s):
    """Pick a uniformly random legal action (1-3) that keeps state s_s below 11."""
    legal_actions = [a for a in range(1, 4) if a + s_s < 11]
    return np.random.choice(legal_actions)
# Train over 10000 episodes. The agent advances from state 0 toward the
# terminal state 10; landing on 10 itself costs -10, while the random
# opponent reply landing on 10 rewards +10 (small living reward otherwise).
for episode in range(10000):
    state = 0
    while state != 10:
        action = action_select(state)
        reward = 0.001
        next_state = state + action
        if next_state == 10:
            reward = -10
        else:
            # Opponent replies with a random legal move.
            next_state = action_select(next_state) + next_state
            if next_state == 10:
                reward = 10
        # Standard Q-learning update toward the greedy next-state value.
        best_next = Q[next_state, np.argmax(Q[next_state, ])]
        Q[state, action - 1] += ALPHA * (reward + GAMMA * best_next - Q[state, action - 1])
        state = next_state
plot.plot_func(Q)
| 21.918919 | 69 | 0.557337 |
226990cee4efe4dbfe653dc0472db81ab56d2396 | 390 | py | Python | videogame_project/videogame_app/models.py | cs-fullstack-fall-2018/django-form-post1-R3coTh3Cod3r | 3e44b737425fe347757a50f30aa5df021057bfde | [
"Apache-2.0"
] | null | null | null | videogame_project/videogame_app/models.py | cs-fullstack-fall-2018/django-form-post1-R3coTh3Cod3r | 3e44b737425fe347757a50f30aa5df021057bfde | [
"Apache-2.0"
] | null | null | null | videogame_project/videogame_app/models.py | cs-fullstack-fall-2018/django-form-post1-R3coTh3Cod3r | 3e44b737425fe347757a50f30aa5df021057bfde | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.utils import timezone | 26 | 60 | 0.707692 |
226a5ca3cf4445179f1951c272dd77866530bcb2 | 4,296 | py | Python | tests/test_estimate_r.py | lo-hfk/epyestim | ca2ca928b744f324dade248c24a40872b69a5222 | [
"MIT"
] | 11 | 2021-01-10T22:37:26.000Z | 2022-03-14T10:46:21.000Z | tests/test_estimate_r.py | lo-hfk/epyestim | ca2ca928b744f324dade248c24a40872b69a5222 | [
"MIT"
] | null | null | null | tests/test_estimate_r.py | lo-hfk/epyestim | ca2ca928b744f324dade248c24a40872b69a5222 | [
"MIT"
] | 4 | 2021-03-26T23:43:03.000Z | 2021-11-21T15:16:05.000Z | import unittest
from datetime import date
import numpy as np
import pandas as pd
from numpy.testing import assert_array_almost_equal
from scipy.stats import gamma
from epyestim.estimate_r import overall_infectivity, sum_by_split_dates, estimate_r, gamma_quantiles
if __name__ == '__main__':
unittest.main()
| 34.926829 | 106 | 0.571927 |
226bbbb2f75ccc059e2118af7b3e40bfe68eb6e9 | 3,355 | py | Python | tests/imagenet_classification_test.py | SanggunLee/edgetpu | d3cf166783265f475c1ddba5883e150ee84f7bfe | [
"Apache-2.0"
] | 2 | 2020-05-07T22:34:16.000Z | 2020-09-03T20:30:37.000Z | tests/imagenet_classification_test.py | SanggunLee/edgetpu | d3cf166783265f475c1ddba5883e150ee84f7bfe | [
"Apache-2.0"
] | null | null | null | tests/imagenet_classification_test.py | SanggunLee/edgetpu | d3cf166783265f475c1ddba5883e150ee84f7bfe | [
"Apache-2.0"
] | 1 | 2020-01-08T05:55:58.000Z | 2020-01-08T05:55:58.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests image classification accuracy with ImageNet validation data set.
Please download the validation image data from to edgetpu/test_data/imagenet/
"""
import unittest
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image
from . import test_utils
if __name__ == '__main__':
unittest.main()
| 37.696629 | 80 | 0.701937 |
226d9e6adcc58d1700424bf4cff15de32eb71005 | 1,781 | py | Python | 11_testing_best_practices/generate_maze_faster.py | krother/maze_run | 2860198a2af7d05609d043de1b582cc0070aac09 | [
"MIT"
] | 7 | 2017-05-02T12:23:03.000Z | 2020-04-07T07:01:52.000Z | 11_testing_best_practices/generate_maze_faster.py | fengshao007P/maze_run | 2860198a2af7d05609d043de1b582cc0070aac09 | [
"MIT"
] | null | null | null | 11_testing_best_practices/generate_maze_faster.py | fengshao007P/maze_run | 2860198a2af7d05609d043de1b582cc0070aac09 | [
"MIT"
] | 21 | 2016-02-26T10:26:16.000Z | 2021-12-04T23:38:00.000Z |
# Improved version of the code from chapter 03
# created in chapter 11 to accelerate execution
import random
XMAX, YMAX = 19, 16
def create_grid_string(dots, xsize, ysize):
    """
    Creates a grid of size (xsize, ysize) with the
    given dot positions marked '.' and walls '#'.
    """
    rows = []
    for y in range(ysize):
        row = "".join("." if (x, y) in dots else "#" for x in range(xsize))
        rows.append(row + "\n")
    return "".join(rows)
def get_all_dot_positions(xsize, ysize):
    """Returns a list of (x, y) tuples covering all interior positions in a grid"""
    positions = []
    for x in range(1, xsize - 1):
        for y in range(1, ysize - 1):
            positions.append((x, y))
    return positions
def get_neighbors(x, y):
    """Returns a list with the 8 neighbor positions of (x, y)"""
    # Offsets listed edge-first, then diagonals, preserving the original order.
    offsets = ((0, -1), (0, 1), (-1, 0), (1, 0),
               (-1, -1), (1, -1), (-1, 1), (1, 1))
    return [(x + dx, y + dy) for dx, dy in offsets]
def generate_dot_positions(xsize, ysize):
    """Creates positions of dots for a random maze"""
    candidates = get_all_dot_positions(xsize, ysize)
    random.shuffle(candidates)
    dots = set()
    for pos in candidates:
        # Keep a cell open only while fewer than 5 of its 8 neighbours
        # are already open -- this keeps wall corridors intact.
        occupied_neighbors = sum(1 for nb in get_neighbors(*pos) if nb in dots)
        if occupied_neighbors < 5:
            dots.add(pos)
    return dots
def create_maze(xsize, ysize):
    """Returns a xsize*ysize maze as a string"""
    return create_grid_string(generate_dot_positions(xsize, ysize), xsize, ysize)
if __name__ == '__main__':
    # Demo output: a fixed dot pattern, every interior position,
    # the 8-neighborhood of (3, 2), and finally a random 12x7 maze.
    demo_dots = set(((1,1), (1,2), (1,3), (2,2), (3,1), (3,2), (3,3)))
    print(create_grid_string(demo_dots, 5, 5))
    print(create_grid_string(get_all_dot_positions(5, 5), 5, 5))
    print(create_grid_string(get_neighbors(3, 2), 5, 5))
    print(create_maze(12, 7))
| 25.811594 | 74 | 0.601909 |
226ee0a94d2c674c5419d2b1671a6c420a52ce80 | 98 | py | Python | flask-backend/create_database.py | amlannandy/OpenMF | da5f474bb3002084f3e5bc9ceb18b32efdf34107 | [
"Apache-2.0"
] | null | null | null | flask-backend/create_database.py | amlannandy/OpenMF | da5f474bb3002084f3e5bc9ceb18b32efdf34107 | [
"Apache-2.0"
] | null | null | null | flask-backend/create_database.py | amlannandy/OpenMF | da5f474bb3002084f3e5bc9ceb18b32efdf34107 | [
"Apache-2.0"
] | null | null | null | from api.models.models import User
from api import db, create_app
db.create_all(app=create_app()) | 24.5 | 34 | 0.806122 |
226f3f6717063fd8afff828ee410784d07c44bf7 | 1,820 | py | Python | src/UQpy/Distributions/baseclass/DistributionContinuous1D.py | marrov/UQpy | b04a267b3080e3d4d38e876547ba0d3b979734f3 | [
"MIT"
] | 132 | 2018-03-13T13:56:33.000Z | 2022-03-21T13:59:17.000Z | src/UQpy/Distributions/baseclass/DistributionContinuous1D.py | marrov/UQpy | b04a267b3080e3d4d38e876547ba0d3b979734f3 | [
"MIT"
] | 140 | 2018-05-21T13:40:01.000Z | 2022-03-29T14:18:01.000Z | src/UQpy/Distributions/baseclass/DistributionContinuous1D.py | marrov/UQpy | b04a267b3080e3d4d38e876547ba0d3b979734f3 | [
"MIT"
] | 61 | 2018-05-02T13:40:05.000Z | 2022-03-06T11:31:21.000Z | import numpy as np
import scipy.stats as stats
from UQpy.Distributions.baseclass.Distribution import Distribution
| 45.5 | 108 | 0.647253 |
226fb3b836b4a323bba46bf26d01dbf892dfb882 | 1,666 | py | Python | bridge/models/basic/layers.py | JTT94/schrodinger_bridge | 71841f2789c180a23d4b4641f160da5c0288a337 | [
"MIT"
] | null | null | null | bridge/models/basic/layers.py | JTT94/schrodinger_bridge | 71841f2789c180a23d4b4641f160da5c0288a337 | [
"MIT"
] | null | null | null | bridge/models/basic/layers.py | JTT94/schrodinger_bridge | 71841f2789c180a23d4b4641f160da5c0288a337 | [
"MIT"
] | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
import math
from functools import partial
| 34.708333 | 121 | 0.630852 |
2270789e36e09bf77f3225fa068413436f325de3 | 10,920 | py | Python | chessbot.py | UbiLabsChessbot/tensorflow_chessbot | 5112d9213d0224dc7acc373a7048167b7e6da6ce | [
"MIT"
] | null | null | null | chessbot.py | UbiLabsChessbot/tensorflow_chessbot | 5112d9213d0224dc7acc373a7048167b7e6da6ce | [
"MIT"
] | null | null | null | chessbot.py | UbiLabsChessbot/tensorflow_chessbot | 5112d9213d0224dc7acc373a7048167b7e6da6ce | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Finds submissions with chessboard images in them,
# use a tensorflow convolutional neural network to predict pieces and return
# a lichess analysis link and FEN diagram of chessboard
import praw
import collections
import os
import time
from datetime import datetime
from praw.helpers import submission_stream
import requests
import socket
import re
from helper_functions_chessbot import *
import auth_config # for PRAW
import tensorflow_chessbot # For neural network model
#########################################################
# Setup
# Set up praw
chess_fen_bot = "ChessFenBot"
# Login
r = praw.Reddit(auth_config.USER_AGENT)
# Login old-style due to Reddit politics
r.login(auth_config.USERNAME, auth_config.PASSWORD, disable_warning=True)
# Get accessor to subreddit
subreddit = r.get_subreddit('chess+chessbeginners+AnarchyChess+betterchess')
# How many submissions to read from initially
submission_read_limit = 100
# How long to wait after replying to a post before continuing
reply_wait_time = 10 # minimum seconds to wait between replies, will also rate-limit safely
# Filename containing list of submission ids that
# have already been processed, updated at end of program
processed_filename = "submissions_already_processed.txt"
# Submissions computer vision or prediction failed on
failures_filename = "submission_failures.txt"
# All responses id, fen + certainty
responses_filename = "submission_responses.txt"
# Response message template
message_template = """[ _ ]^*
I attempted to generate a [chessboard layout]({unaligned_fen_img_link}) from the posted image,
with a certainty of **{certainty:.3f}%**. *{pithy_message}*
-
White to play : [Analysis]({lichess_analysis_w}) | [Editor]({lichess_editor_w})
`{fen_w}`
-
Black to play : [Analysis]({lichess_analysis_b}) | [Editor]({lichess_editor_b})
`{fen_b}`
-
> Links for when pieces are inverted on the board:
>
> White to play : [Analysis]({inverted_lichess_analysis_w}) | [Editor]({inverted_lichess_editor_w})
> `{inverted_fen_w}`
>
> Black to play : [Analysis]({inverted_lichess_analysis_b}) | [Editor]({inverted_lichess_editor_b})
> `{inverted_fen_b}`
-
---
^(Yes I am a machine learning bot | )
[^(`How I work`)](http://github.com/Elucidation/tensorflow_chessbot 'Must go deeper')
^( | Reply with a corrected FEN to add to my next training dataset)
"""
#########################################################
# ChessBot Message Generation Functions
def isPotentialChessboardTopic(sub):
    """Return True if the submission's URL may contain a chessboard image.

    A submission qualifies when it links to imgur or when its URL ends with a
    common image extension (.png/.jpg/.gif, case-insensitive). Returns False
    when the submission has no URL at all.
    """
    if sub.url is None:  # identity comparison for None (PEP 8); was `== None`
        return False
    # endswith accepts a tuple of suffixes, replacing the any([...]) loop.
    return 'imgur' in sub.url or sub.url.lower().endswith(('.png', '.jpg', '.gif'))
def generateMessage(fen, certainty, side):
    """Build the full reddit reply from a predicted FEN, certainty, and side.

    Args:
        fen: predicted FEN placement string, aligned with the screenshot.
        certainty: model confidence in [0, 1].
        side: 'w' or 'b'; 'b' means the screenshot is assumed flipped, so
            the FEN is inverted before any links are generated.

    Returns:
        The reply string produced by filling ``message_template``.
    """
    vals = {
        # Board-image link stays aligned with the screenshot, not side to play.
        'unaligned_fen_img_link': 'http://www.fen-to-image.com/image/30/%s.png' % fen,
        'certainty': certainty * 100.0,  # as a percentage
        'pithy_message': getPithyMessage(certainty),
    }

    # Assume the board image is flipped when black is to play.
    if side == 'b':
        fen = invert(fen)
    inverted_fen = invert(fen)

    # Castling availability inferred from pieces on their initial squares.
    castling = getCastlingStatus(fen)
    inverted_castling = getCastlingStatus(inverted_fen)

    # Fill the normal and piece-inverted variants of every FEN/link field.
    for prefix, board, castle in (('', fen, castling),
                                  ('inverted_', inverted_fen, inverted_castling)):
        vals[prefix + 'fen_w'] = "%s w %s -" % (board, castle)
        vals[prefix + 'fen_b'] = "%s b %s -" % (board, castle)
        for page in ('analysis', 'editor'):
            vals['%slichess_%s_w' % (prefix, page)] = 'http://www.lichess.org/%s/%s_w_%s' % (page, board, castle)
            vals['%slichess_%s_b' % (prefix, page)] = 'http://www.lichess.org/%s/%s_b_%s' % (page, board, castle)

    return message_template.format(**vals)
#########################################################
# PRAW Helper Functions
def waitWithComments(sleep_time, segment=60):
    """Sleep for sleep_time seconds, printing progress every `segment` seconds."""
    remaining = sleep_time
    print("\t%s - %s seconds to go..." % (datetime.now(), remaining))
    while remaining > segment:
        time.sleep(segment)  # sleep in increments of one segment (1 minute)
        remaining -= segment
        print("\t%s - %s seconds to go..." % (datetime.now(), remaining))
    time.sleep(remaining)
    # Reset the 'static' last-log timestamp kept on logInfoPerSubmission.
    logInfoPerSubmission.last = time.time()
#########################################################
# Main Script
# Track comment ids that have already been processed successfully
# Load list of already processed comment ids
already_processed = loadProcessed()
print("%s - Starting with %d already processed\n==========\n\n" % (datetime.now(), len(already_processed)))
count = 0
count_actual = 0
running = True
# Start up Tensorflow CNN with trained model
predictor = tensorflow_chessbot.ChessboardPredictor()
while running:
    # get submission stream
    try:
        submissions = submission_stream(r, subreddit, limit=submission_read_limit)
        # for each submission
        for submission in submissions:
            count += 1
            # print out some debug info
            is_processed = submission.id in already_processed
            logInfoPerSubmission(submission, count, count_actual, is_processed)
            # Skip if already processed
            if is_processed:
                continue
            # check if submission title is a question
            if isPotentialChessboardTopic(submission):
                # Use CNN to make a prediction
                print("\n---\nImage URL: %s" % submission.url)
                fen, certainty = predictor.makePrediction(submission.url)
                if fen is None:
                    # Prediction failed: record the failure so we never retry it.
                    print("> %s - Couldn't generate FEN, skipping..." % datetime.now())
                    # update & save list
                    already_processed.add(submission.id)
                    saveProcessed(already_processed)
                    addSubmissionToFailures(submission)
                    print("\n---\n")
                    continue
                fen = shortenFEN(fen) # ex. '111pq11r' -> '3pq2r'
                print("Predicted FEN: %s" % fen)
                print("Certainty: %.4f%%" % (certainty*100))
                # Get side from title or fen
                side = getSideToPlay(submission.title, fen)
                # Generate response message
                msg = generateMessage(fen, certainty, side)
                print("fen: %s\nside: %s\n" % (fen, side))
                # respond, keep trying till success
                while True:
                    try:
                        print("> %s - Responding to %s: %s" % (datetime.now(), submission.id, submission))
                        # Reply with comment
                        submission.add_comment(msg)
                        # update & save list
                        already_processed.add(submission.id)
                        saveProcessed(already_processed)
                        addSubmissionToResponses(submission, fen, certainty, side)
                        count_actual += 1
                        print("\n---\n")
                        # Wait after submitting to not overload
                        waitWithComments(reply_wait_time)
                        break
                    except praw.errors.AlreadySubmitted as e:
                        # Comment already exists (e.g. after a restart); move on.
                        print("> %s - Already submitted skipping..." % datetime.now())
                        break
                    except praw.errors.RateLimitExceeded as e:
                        # Reddit throttled us: wait the requested time, then retry.
                        print("> {} - Rate Limit Error for commenting on {}, sleeping for {} before retrying...".format(datetime.now(), submission.id, e.sleep_time))
                        waitWithComments(e.sleep_time)
    # Handle errors
    except (socket.error, requests.exceptions.ReadTimeout, requests.packages.urllib3.exceptions.ReadTimeoutError, requests.exceptions.ConnectionError) as e:
        # Transient network failure: back off briefly and restart the stream.
        print("> %s - Connection error, resetting accessor, waiting 30 and trying again: %s" % (datetime.now(), e))
        # saveProcessed(already_processed)
        time.sleep(30)
        continue
    except Exception as e:
        print("Unknown Error, continuing after 30:",e)
        time.sleep(30)
        continue
    except KeyboardInterrupt:
        # Reachable despite the clause above: KeyboardInterrupt derives from
        # BaseException, not Exception, so it is not swallowed there.
        print("Exiting...")
        running = False
    finally:
        # Persist progress on every pass, whether it ended cleanly or not.
        saveProcessed(already_processed)
        print("%s - %d Processed total." % (datetime.now(),len(already_processed)))
print("%s - Program Ended. %d replied / %d read in this session" % (datetime.now(), count_actual, count))
| 36.891892 | 154 | 0.677747 |
2271553668c1d9c135110d311fde305c56e23bd6 | 1,557 | py | Python | Tools/english_word/src/spider.py | pynickle/awesome-python-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | [
"BSD-2-Clause"
] | 21 | 2019-06-02T01:55:14.000Z | 2022-01-08T22:35:31.000Z | Tools/english_word/src/spider.py | code-nick-python/awesome-daily-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | [
"BSD-2-Clause"
] | 3 | 2019-06-02T01:55:17.000Z | 2019-06-14T12:32:06.000Z | Tools/english_word/src/spider.py | code-nick-python/awesome-daily-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | [
"BSD-2-Clause"
] | 16 | 2019-06-23T13:00:04.000Z | 2021-09-18T06:09:58.000Z | import requests
import re
import time
import random
import pprint
import os
# Default request headers: present a desktop Chrome user agent.
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3858.0 Safari/537.36"}
if __name__ == "__main__":
    # NOTE(review): main() is defined elsewhere in the original module and is
    # not visible in this chunk.
    main()
| 32.4375 | 173 | 0.552987 |
22727c318ff129b6243d715f6523dbfa7a528208 | 769 | py | Python | NFCow/malls/migrations/0001_initial.py | jojoriveraa/titulacion-NFCOW | 643f7f2cbe9c68d9343f38d12629720b12e9ce1e | [
"Apache-2.0"
] | null | null | null | NFCow/malls/migrations/0001_initial.py | jojoriveraa/titulacion-NFCOW | 643f7f2cbe9c68d9343f38d12629720b12e9ce1e | [
"Apache-2.0"
] | 11 | 2016-01-09T06:27:02.000Z | 2016-01-10T05:21:05.000Z | NFCow/malls/migrations/0001_initial.py | jojoriveraa/titulacion-NFCOW | 643f7f2cbe9c68d9343f38d12629720b12e9ce1e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-23 08:44
from __future__ import unicode_literals
from django.db import migrations, models
| 27.464286 | 87 | 0.572172 |
22749bf06e02c8354fb9677be5d2215d3d9afe0c | 16,406 | py | Python | mmdnn/conversion/examples/tensorflow/extractor.py | ferriswym/MMdnn | dc204cdba58a6cba079816715ac766d94bd87732 | [
"MIT"
] | null | null | null | mmdnn/conversion/examples/tensorflow/extractor.py | ferriswym/MMdnn | dc204cdba58a6cba079816715ac766d94bd87732 | [
"MIT"
] | null | null | null | mmdnn/conversion/examples/tensorflow/extractor.py | ferriswym/MMdnn | dc204cdba58a6cba079816715ac766d94bd87732 | [
"MIT"
] | null | null | null | #----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from __future__ import absolute_import
import os
import tensorflow as tf
from tensorflow.contrib.slim.nets import vgg
from tensorflow.contrib.slim.nets import inception
from tensorflow.contrib.slim.nets import resnet_v1
from tensorflow.contrib.slim.nets import resnet_v2
from mmdnn.conversion.examples.tensorflow.models import inception_resnet_v2
from mmdnn.conversion.examples.tensorflow.models import mobilenet_v1
from mmdnn.conversion.examples.tensorflow.models import nasnet
from mmdnn.conversion.examples.tensorflow.models.mobilenet import mobilenet_v2
from mmdnn.conversion.examples.tensorflow.models import inception_resnet_v1
from mmdnn.conversion.examples.tensorflow.models import test_rnn
slim = tf.contrib.slim
from mmdnn.conversion.examples.imagenet_test import TestKit
from mmdnn.conversion.examples.extractor import base_extractor
from mmdnn.conversion.common.utils import download_file
# Avoid TensorFlow reserving all GPU memory up front; see
# https://github.com/tensorflow/tensorflow/issues/24496
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
| 50.018293 | 146 | 0.581007 |
22752bc484df5df4799f2011d63b6f5871537908 | 2,546 | py | Python | nostrint/command_line.py | zevtyardt/no-strint | 47583d55e3c4cd12f00f46902d2fd7d5138c3275 | [
"MIT"
] | 13 | 2019-03-13T04:14:45.000Z | 2020-04-05T09:13:21.000Z | nostrint/command_line.py | zevtyardt/no-strint | 47583d55e3c4cd12f00f46902d2fd7d5138c3275 | [
"MIT"
] | null | null | null | nostrint/command_line.py | zevtyardt/no-strint | 47583d55e3c4cd12f00f46902d2fd7d5138c3275 | [
"MIT"
] | 6 | 2019-03-22T04:48:59.000Z | 2020-08-07T17:09:20.000Z | from redat import __version__
import argparse as _argparse
import sys as _sys
| 72.742857 | 193 | 0.710919 |
22754cccad56e7c435e32fdb50e3fc9c09afbc92 | 15,435 | py | Python | simple_lib.py | rcmorehead/simplanets | 3d9b3d1273a4f1a32ce656bdf5e9d6c6c38e3f7b | [
"MIT"
] | null | null | null | simple_lib.py | rcmorehead/simplanets | 3d9b3d1273a4f1a32ce656bdf5e9d6c6c38e3f7b | [
"MIT"
] | null | null | null | simple_lib.py | rcmorehead/simplanets | 3d9b3d1273a4f1a32ce656bdf5e9d6c6c38e3f7b | [
"MIT"
] | null | null | null | """
Useful classes and functions for SIMPLE.
"""
import numpy as np
import warnings
import math
from scipy import integrate
r_sun_au = 0.004649       # solar radius expressed in AU
r_earth_r_sun = 0.009155  # Earth radius expressed in solar radii
day_hrs = 24.0            # hours per day
#@profile
def impact_parameter(a, e, i, w, r_star):
    """
    Impact parameter of a transiting planet, in units of the stellar radius.

    Parameters
    ----------
    a : int, float or numpy array
        Semimajor axis of the orbit [AU].
    e : int, float or numpy array
        Eccentricity. WARNING: the expression breaks down for e >> 0.9.
    i : int, float or numpy array
        Inclination [degrees]; 90 is edge-on.
    w : int, float or numpy array
        Longitude of ascending node with respect to the sky plane [degrees].
    r_star : int, float or numpy array
        Stellar radius [solar radii].

    Returns
    -------
    float or numpy array
        Transit latitude in units of the stellar radius.

    Notes
    -----
    Eqn. (7), Chap. 4, p. 56 of Exoplanets, ed. S. Seager, Univ. of
    Arizona Press, 2011, ISBN 978-0-8165-2945-2.
    """
    # Build the product step by step, preserving the original evaluation order.
    b = a / (r_star * r_sun_au)
    b = b * np.cos(np.radians(i))
    b = b * (1 - e**2)
    b = b / (1 + e * np.sin(np.radians(w)))
    return abs(b)
#@profile
def inclination(fund_plane, mutual_inc, node):
    """
    Sky-plane inclination of a planet's orbit, in degrees.

    Applies the spherical law of cosines to combine the system's
    fundamental-plane inclination with the planet's mutual inclination
    and nodal angle.

    Parameters
    ----------
    fund_plane : int, float or numpy array
        Inclination of the system's fundamental plane w.r.t. the sky
        plane [degrees]; 90 is edge-on.
    mutual_inc : int, float or numpy array
        Angle of the planet's orbital plane w.r.t. the fundamental plane
        [degrees].
    node : int, float or numpy array
        Rotation of the orbit about the fundamental plane's normal
        [degrees].

    Returns
    -------
    float or numpy array
        Inclination w.r.t. the sky plane [degrees].

    Examples
    --------
    >>> inclination(90, 3, 0)
    87.0
    """
    i0, delta, omega = (np.radians(v) for v in (fund_plane, mutual_inc, node))
    cos_i = np.cos(i0) * np.cos(delta) + np.sin(i0) * np.sin(delta) * np.cos(omega)
    return np.degrees(np.arccos(cos_i))
#@profile
def semimajor_axis(period, mass):
    """
    Semimajor axis from Kepler's third law.

    Parameters
    ----------
    period : int, float or numpy array
        Orbital period [days].
    mass : int, float or array-like
        Central mass (or mass sum) [solar masses].

    Returns
    -------
    float or numpy array
        Semimajor axis [AU].

    Examples
    --------
    >>> semimajor_axis(365.256363, 1.00)
    0.999985270598628
    """
    # 2.959E-4 is GM_sun in AU^3 / day^2.
    gm_over_4pi2 = (2.959E-4 * mass) / (4 * np.pi**2)
    return (gm_over_4pi2 * period**2.0) ** (1.0 / 3.0)
#@profile
def transit_depth(r_star, r_planet):
    """
    Transit depth in parts per million.

    Parameters
    ----------
    r_star : int, float or numpy array
        Stellar radius [solar radii].
    r_planet : int, float or numpy array
        Planet radius [Earth radii].

    Returns
    -------
    float or numpy array
        Squared radius ratio, scaled to ppm.
    """
    radius_ratio = (r_planet * r_earth_r_sun) / r_star
    return radius_ratio**2 * 1e6
#@profile
def transit_duration(p, a, e, i, w, b, r_star, r_planet):
    """
    Compute the full (Q1-Q4) transit duration in hours.

    Parameters
    ----------
    p : int, float or numpy array
        Period of planet orbit in days
    a : int, float or numpy array
        Semimajor axis of planet's orbit in AU
    e : int, float or numpy array
        Eccentricity of planet. WARNING! This function breaks down at
        high eccentricity (>> 0.9), so be careful!
    i : int, float or numpy array
        Inclination of planet in degrees. 90 degrees is edge-on.
    w : int, float or numpy array
        Longitude of ascending node defined with respect to sky-plane.
    b : int, float or numpy array
        Impact parameter of planet.
    r_star : int, float or numpy array
        Radius of star in solar radii.
    r_planet : int, float or numpy array
        Radius of planet in Earth radii

    Returns
    -------
    T : float or numpy array
        The Q1-Q4 (full) transit duration of the planet in hours.
        Elements with e >= 1 are set to 0.

    Notes
    -----
    Using Eqns. (15) and (16), Chap. 4, Page 58 of Exoplanets, edited by S.
    Seager. Tucson, AZ: University of Arizona Press, 2011, 526 pp.
    ISBN 978-0-8165-2945-2.

    np.where evaluates the duration expression for every element, including
    those with e >= 1, so runtime warnings may be emitted for such inputs.
    For b > 1 (no transit) the sqrt argument goes negative and yields NaN.
    """
    #TODO Make this robust against b > 1
    duration = np.where(e < 1.0, (p / np.pi *
        np.arcsin((r_star * r_sun_au) / a * 1 / np.sin(np.radians(i)) *
        np.sqrt((1 - (r_planet * r_earth_r_sun) / r_star)**2
        - b**2)) *
        1 / (1 + e*np.sin(np.radians(w))) * np.sqrt(1 - e**2)) * day_hrs, 0)
    return duration
#@profile
def snr(catalog):
    """
    Signal-to-noise ratio of a planet transit for each catalog row.

    Depth is compared against the star's CDPP noise, scaled by the square
    root of the in-transit time: (observing span / period) transits, each
    lasting T hours, normalized by 6.0 — presumably the 6-hour CDPP
    timescale (confirm against the catalog definition).

    Parameters
    ----------
    catalog : mapping or numpy structured array
        Must provide 'depth', 'cdpp6', 'days_obs', 'period' and 'T'.

    Returns
    -------
    float or numpy array
        Transit SNR.
    """
    transit_time_factor = (catalog['days_obs'] / catalog['period']) * catalog['T'] / 6.0
    return catalog['depth'] / catalog['cdpp6'] * np.sqrt(transit_time_factor)
#@profile
def xi(catalog):
    """
    Log xi statistic for adjacent planet pairs and its non-negative fraction.

    Sorts the catalog in place by (star id, period), pairs each planet with
    the previous row of the same star, and computes
    log10(T_inner/T_outer * (P_outer/P_inner)**(1/3)).

    NOTE(review): np.roll wraps around, so the first row pairs with the
    last row; for a single-star catalog that wrap-around pair passes the
    same-star filter — confirm this is intended.

    Parameters
    ----------
    catalog : numpy structured array
        Must provide 'ktc_kepler_id', 'period' and 'T'. Sorted in place.

    Returns
    -------
    (numpy array, float)
        The log10(xi) values and the fraction that are >= 0
        (0.0 when there are no pairs).
    """
    catalog.sort(order=['ktc_kepler_id', 'period'])
    prev_period = np.roll(catalog['period'], 1)
    prev_duration = np.roll(catalog['T'], 1)
    prev_star = np.roll(catalog['ktc_kepler_id'], 1)
    # Keep only rows whose predecessor belongs to the same star.
    same_star = np.where(catalog['ktc_kepler_id'] == prev_star)
    period_ratio = catalog['period'][same_star] / prev_period[same_star]
    duration_ratio = prev_duration[same_star] / catalog['T'][same_star]
    logxi = np.log10(duration_ratio * period_ratio**(1./3.))
    if logxi.size < 1:
        xi_fraction = 0.0
    else:
        xi_fraction = logxi[logxi >= 0.0].size / float(logxi.size)
    return logxi, xi_fraction
#@profile
def multi_count(catalog, stars):
    """
    Planet multiplicity counts, one entry per star row.

    Counts catalog rows per kepler id via bincount, drops the zero bins,
    and writes the non-zero counts into the leading entries of a zero
    array sized like `stars`.

    NOTE(review): the counts are placed positionally, not matched to the
    star ids in `stars` — confirm downstream code only uses the count
    distribution, not the per-star alignment.

    Parameters
    ----------
    catalog, stars : mappings or numpy structured arrays
        Both must provide 'ktc_kepler_id'.

    Returns
    -------
    numpy array
        Float array of length len(stars) with the multiplicities first
        and zeros for planetless stars.
    """
    per_star = np.bincount(catalog['ktc_kepler_id'])
    per_star = per_star[per_star > 0]
    counts = np.zeros(stars['ktc_kepler_id'].size)
    counts[:per_star.size] = per_star
    return counts
#@profile
def duration_anomaly(catalog):
    """
    Duration anomaly T / T_nu, where T_nu is the duration of a circular
    (e = 0), central (b = 0) transit.

    The result is stored in catalog['T_nu'] (the field must already exist)
    and the catalog is returned.

    Parameters
    ----------
    catalog : numpy structured array
        Must provide 'T', 'radius', 'period', 'a' and a writable 'T_nu'.

    Returns
    -------
    numpy structured array
        The same catalog, mutated in place.
    """
    circular_duration = ((catalog['radius'] * r_sun_au * catalog['period'])
                         / (np.pi * catalog['a']) * day_hrs)
    catalog['T_nu'] = catalog['T'] / circular_duration
    return catalog
#@profile
def normed_duration(catalog):
    """
    Transit duration (converted from hours to days) normalized by
    period**(1/3).

    Parameters
    ----------
    catalog : mapping or numpy structured array
        Must provide 'T' (hours) and 'period' (days).

    Returns
    -------
    float or numpy array
    """
    duration_days = catalog['T'] / day_hrs
    return duration_days / (catalog['period'])**(1/3.0)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
    """
    Compute A2akN, equation 7 of Scholz and Stephens (1987): the midrank
    variant of the k-sample Anderson-Darling statistic, applicable to
    continuous and discrete populations.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample arrays.
    Z : array_like
        Sorted array of all observations.
    Zstar : array_like
        Sorted array of unique observations.
    k : int
        Number of samples.
    n : array_like
        Number of observations in each sample.
    N : int
        Total number of observations.

    Returns
    -------
    A2aKN : float
        The A2aKN statistic of Scholz and Stephens 1987.
    """
    A2akN = 0.
    Z_ssorted_left = Z.searchsorted(Zstar, 'left')
    if N == Zstar.size:
        # No ties at all: every distinct value occurs exactly once.
        lj = 1.
    else:
        lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
    Bj = Z_ssorted_left + lj / 2.
    for i in np.arange(0, k):
        s = np.sort(samples[i])
        s_ssorted_right = s.searchsorted(Zstar, side='right')
        # np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # use the explicit float64 alias instead.
        Mij = s_ssorted_right.astype(np.float64)
        fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
        Mij -= fij / 2.
        inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / \
            (Bj * (N - Bj) - N * lj / 4.)
        A2akN += inner.sum() / n[i]
    A2akN *= (N - 1.) / N
    return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
    """
    Compute A2kN, equation 6 of Scholz & Stephens (1987): the
    right-side-EDF variant of the k-sample Anderson-Darling statistic.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample arrays.
    Z : array_like
        Sorted array of all observations.
    Zstar : array_like
        Sorted array of unique observations.
    k : int
        Number of samples.
    n : array_like
        Number of observations in each sample.
    N : int
        Total number of observations.

    Returns
    -------
    A2KN : float
        The A2KN statistic of Scholz and Stephens 1987.
    """
    # Evaluate at every distinct value except the largest.
    boundaries = Zstar[:-1]
    lj = Z.searchsorted(boundaries, 'right') - Z.searchsorted(boundaries, 'left')
    Bj = lj.cumsum()
    A2kN = 0.
    for i in np.arange(0, k):
        ordered = np.sort(samples[i])
        Mij = ordered.searchsorted(boundaries, side='right')
        inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
        A2kN += inner.sum() / n[i]
    return A2kN
def anderson_ksamp(samples, midrank=True):
    """The Anderson-Darling test for k-samples.

    The k-sample Anderson-Darling test is a modification of the
    one-sample Anderson-Darling test. It tests the null hypothesis
    that k-samples are drawn from the same population without having
    to specify the distribution function of that population.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample data in arrays.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. Default
        (True) is the midrank test applicable to continuous and
        discrete populations. If False, the right side empirical
        distribution is used.

    Returns
    -------
    A2 : float
        Normalized k-sample Anderson-Darling test statistic. Unlike
        scipy's version of this routine, critical values and a
        significance level are NOT computed or returned here.

    Raises
    ------
    ValueError
        If less than 2 samples are provided, a sample is empty, or no
        distinct observations are in the samples.

    See Also
    --------
    ks_2samp : 2 sample Kolmogorov-Smirnov test
    anderson : 1 sample Anderson-Darling test

    Notes
    -----
    [1]_ Defines three versions of the k-sample Anderson-Darling test:
    one for continuous distributions and two for discrete
    distributions, in which ties between samples may occur. The
    default of this routine is to compute the version based on the
    midrank empirical distribution function. This test is applicable
    to continuous and discrete data. If midrank is set to False, the
    right side empirical distribution is used for a test for discrete
    data. According to [1]_, the two discrete test statistics differ
    only slightly if a few collisions due to round-off errors occur in
    the test not adjusted for ties between samples.

    References
    ----------
    .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
           Anderson-Darling Tests, Journal of the American Statistical
           Association, Vol. 82, pp. 918-924.
    """
    k = len(samples)
    if (k < 2):
        raise ValueError("anderson_ksamp needs at least two samples")
    samples = list(map(np.asarray, samples))
    # Pooled, sorted observations and the distinct values among them.
    Z = np.sort(np.hstack(samples))
    N = Z.size
    Zstar = np.unique(Z)
    if Zstar.size < 2:
        raise ValueError("anderson_ksamp needs more than one distinct "
                         "observation")
    n = np.array([sample.size for sample in samples])
    if any(n == 0):
        raise ValueError("anderson_ksamp encountered sample without "
                         "observations")
    if midrank:
        A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
    else:
        A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
    # Normalization constants h, H and g from Scholz & Stephens (1987).
    h = (1. / np.arange(1, N)).sum()
    H = (1. / n).sum()
    g = 0
    # NOTE(review): this double sum over pairs is O(N^2); fine for modest N.
    for l in np.arange(1, N-1):
        inner = np.array([1. / ((N - l) * m) for m in np.arange(l+1, N)])
        g += inner.sum()
    # Coefficients of the variance polynomial in N (Scholz & Stephens 1987).
    a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
    b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
    c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
    d = (2*h + 6)*k**2 - 4*h*k
    sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
    m = k - 1
    # Standardize: (statistic - mean) / standard deviation.
    A2 = (A2kN - m) / math.sqrt(sigmasq)
    return A2
def hellinger_funct(x, P, Q):
    """
    Integrand of the Bhattacharyya coefficient: sqrt(P(x) * Q(x)).

    P and Q are callables (e.g. scipy.stats.gaussian_kde objects)
    evaluated at x.
    """
    product = P(x) * Q(x)
    return np.sqrt(product)
def hellinger_cont(P, Q):
    """
    Distance between two continuous densities: one minus the
    Bhattacharyya coefficient, integrated over the whole real line.

    P and Q are callables (e.g. scipy.stats.gaussian_kde objects);
    hellinger_funct supplies the integrand.
    """
    bhattacharyya, _abserr = integrate.quad(
        hellinger_funct, -np.inf, np.inf, args=(P, Q))
    return 1 - bhattacharyya
def hellinger_disc(P, Q):
    """
    Distance between two binned densities: one minus the discrete
    Bhattacharyya coefficient sum(sqrt(P_i * Q_i)).

    P and Q are numpy histogram tuples (density array first, bin edges
    second) produced with density=True.
    """
    if P[0].size == Q[0].size:
        pass
    else:
        # Pad the shorter density array with zeros so the bins line up.
        # NOTE(review): ndarray.resize mutates the caller's array in place
        # and raises ValueError when other references to it exist
        # (refcheck); a copying np.resize may be safer — confirm callers
        # do not rely on the mutation.
        if P[0].size > Q[0].size:
            Q[0].resize(P[0].size)
        else:
            P[0].resize(Q[0].size)
    return 1 - np.sum(np.sqrt(P[0]*Q[0]))
| 27.464413 | 94 | 0.579981 |
2275883293755489ec49ada67afba2a65cceb970 | 179 | py | Python | y10m/join.lines.py | goodagood/story | 99dd959f4be44070144fe87313cf51595d928a11 | [
"Apache-2.0"
] | 3 | 2019-12-03T02:08:55.000Z | 2021-05-30T14:02:21.000Z | y10m/join.lines.py | goodagood/story | 99dd959f4be44070144fe87313cf51595d928a11 | [
"Apache-2.0"
] | null | null | null | y10m/join.lines.py | goodagood/story | 99dd959f4be44070144fe87313cf51595d928a11 | [
"Apache-2.0"
] | 1 | 2020-08-07T23:09:45.000Z | 2020-08-07T23:09:45.000Z |
#inputFile = 'sand.407'
inputFile = 'sand.407'
outputFile= 'sand.out'
with open(inputFile) as OF:
lines = OF.readlines()
print(lines[0:3])
| 11.1875 | 27 | 0.631285 |
227598bb20ab74c029eb76f22348999ce40f32c0 | 718 | py | Python | web_programming/fetch_github_info.py | JB1959/Python | b6ca263983933c3ecc06ed0083dd11b6faf870c8 | [
"MIT"
] | 14 | 2020-10-03T05:43:48.000Z | 2021-11-01T21:02:26.000Z | web_programming/fetch_github_info.py | JB1959/Python | b6ca263983933c3ecc06ed0083dd11b6faf870c8 | [
"MIT"
] | 3 | 2020-06-08T07:03:15.000Z | 2020-06-08T08:41:22.000Z | web_programming/fetch_github_info.py | JB1959/Python | b6ca263983933c3ecc06ed0083dd11b6faf870c8 | [
"MIT"
] | 12 | 2020-10-03T05:44:19.000Z | 2022-01-16T05:37:54.000Z | #!/usr/bin/env python3
"""
Created by sarathkaul on 14/11/19
Basic authentication using an API password is deprecated and will soon no longer work.
Visit https://developer.github.com/changes/2020-02-14-deprecating-password-auth
for more information around suggested workarounds and removal dates.
"""
import requests
_GITHUB_API = "https://api.github.com/user"
def fetch_github_info(auth_user: str, auth_pass: str) -> dict:
"""
Fetch GitHub info of a user using the requests module
"""
return requests.get(_GITHUB_API, auth=(auth_user, auth_pass)).json()
if __name__ == "__main__":
for key, value in fetch_github_info("<USER NAME>", "<PASSWORD>").items():
print(f"{key}: {value}")
| 26.592593 | 86 | 0.71727 |
2275d1ae20d552ba2f46265e141e463daa5307b3 | 1,362 | py | Python | ACM ICPC/Sorting/Merge Sort/Python/Merge_Sort.py | shreejitverma/GeeksforGeeks | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 2 | 2022-02-18T05:14:28.000Z | 2022-03-08T07:00:08.000Z | ACM ICPC/Sorting/Merge Sort/Python/Merge_Sort.py | shivaniverma1/Competitive-Programming-1 | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 6 | 2022-01-13T04:31:04.000Z | 2022-03-12T01:06:16.000Z | ACM ICPC/Sorting/Merge Sort/Python/Merge_Sort.py | shivaniverma1/Competitive-Programming-1 | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 2 | 2022-02-14T19:53:53.000Z | 2022-02-18T05:14:30.000Z |
if __name__ == "__main__":
i = MergeSort([5, 4, 3, 2, 1])
i.sort()
print(i.show())
| 30.266667 | 68 | 0.509545 |
2277a209f052632755ba80cff0004cf66a4c0551 | 3,952 | py | Python | nightly.py | insolar/insolar-jepsen | f95e05fdf0b3d28756f60de9aef1b8c44ef0d030 | [
"Apache-2.0"
] | 6 | 2019-03-26T10:02:54.000Z | 2019-09-13T15:31:39.000Z | nightly.py | insolar/insolar-jepsen | f95e05fdf0b3d28756f60de9aef1b8c44ef0d030 | [
"Apache-2.0"
] | 17 | 2019-06-04T10:55:42.000Z | 2020-03-10T09:22:52.000Z | nightly.py | insolar/insolar-jepsen | f95e05fdf0b3d28756f60de9aef1b8c44ef0d030 | [
"Apache-2.0"
] | 3 | 2019-11-22T10:41:00.000Z | 2021-02-18T12:03:38.000Z | #!/usr/bin/env python3
# vim: set ai et ts=4 sw=4:
import os
import subprocess
import argparse
import time
import calendar
import re
parser = argparse.ArgumentParser(
description='Run nightly Insolar Jepsen-like tests')
parser.add_argument(
'-b', '--branch', metavar='B', type=str, default='master',
help='git branch name (default: master)')
parser.add_argument(
'-r', '--repeat', metavar='N', type=int, default=100,
help='number of times to repeat tests (default: 100)')
parser.add_argument(
'-c', '--channel', metavar='C', type=str, default='#dev-backend',
help='slack channel (default: #dev-backend)')
parser.add_argument(
'-e', '--emoji', metavar='E', type=str, default='aphyr',
help='message emoji (default: aphyr)')
parser.add_argument(
'-s', '--slack', metavar='H', type=str, required=True,
help='slack hook string (it looks like base64 string)')
parser.add_argument(
'-l', '--logdir', metavar='DIR', type=str, required=True,
help='path to the directory where logfiles will be saved')
parser.add_argument(
'-u', '--url', metavar='URL', type=str, required=True,
help='URL where saved logfiles will be accessible')
args = parser.parse_args()
tests_passed = False
date = "FAILED_TO_GET_DATE"
try:
date = get_output('date +%Y%m%d%H%M00')
except Exception as e:
print("ERROR:")
print(str(e))
logfile_name = 'jepsen-' + date + '.txt'
logfile_fullname = args.logdir + '/' + logfile_name
try:
run('echo "=== BUILDING BRANCH '+args.branch +
' ===" | tee -a '+logfile_fullname)
run('./build-docker.py '+args.branch+' 2>&1 | tee -a '+logfile_fullname)
run('echo "==== RUNNING TESTS '+str(args.repeat) +
' TIMES ===" | tee -a '+logfile_fullname)
run('./run-test.py -i insolar-jepsen:latest -r ' +
str(args.repeat)+' 2>&1 | tee -a '+logfile_fullname)
tests_passed = True
except Exception as e:
print("ERROR:")
print(str(e))
podlogs_name = 'jepsen-' + date + '.tgz'
podlogs_fullname = args.logdir + '/' + podlogs_name
try:
run('echo "=== AGGREGATING LOGS TO ' +
podlogs_fullname+' ===" | tee -a '+logfile_fullname)
run('./aggregate-logs.py /tmp/jepsen-'+date)
run('gunzip /tmp/jepsen-'+date+'/*/*.log.gz || true')
run('tar -cvzf '+podlogs_fullname+' /tmp/jepsen-'+date)
run('rm -r /tmp/jepsen-'+date)
run('echo "=== CLEANING UP '+args.logdir+' ===" | tee -a '+logfile_fullname)
now = int(time.time())
os.chdir(args.logdir)
for fname in os.listdir("."):
m = re.search("jepsen-(\d{4}\d{2}\d{2})", fname)
if m is None:
run(' echo "File: ' + fname + ' - skipped" | tee -a '+logfile_fullname)
continue
ftime = calendar.timegm(time.strptime(m.group(1), "%Y%m%d"))
ndays = int((now - ftime) / (60 * 60 * 24))
delete = ndays > 15
run(' echo "File: ' + fname + ', ndays: ' + str(ndays) +
', delete: ' + str(delete) + '" | tee -a '+logfile_fullname)
if delete:
os.unlink(fname)
except Exception as e:
print("ERROR:")
print(str(e))
print("Test passed: "+str(tests_passed))
message = 'PASSED' if tests_passed else 'FAILED'
message = 'Nightly Jepsen-like tests '+message +\
'. Log: '+args.url+'/'+logfile_name +\
' Pod logs: '+args.url+'/'+podlogs_name
cmd = 'curl -X POST --data-urlencode \'payload={"channel": "'+args.channel +\
'", "username": "aphyr", "text": "'+message +\
'", "icon_emoji": ":'+args.emoji +\
':"}\' https://hooks.slack.com/services/'+args.slack
print("EXECUTING: "+cmd)
run(cmd)
| 34.365217 | 83 | 0.605263 |
2277e005db07cac1472613b25b5759c8831551c6 | 7,195 | py | Python | ecart/ecart.py | micael-grilo/E-Cart | 76e86b4c7ea5bd2becda23ef8c69470c86630c5e | [
"MIT"
] | null | null | null | ecart/ecart.py | micael-grilo/E-Cart | 76e86b4c7ea5bd2becda23ef8c69470c86630c5e | [
"MIT"
] | null | null | null | ecart/ecart.py | micael-grilo/E-Cart | 76e86b4c7ea5bd2becda23ef8c69470c86630c5e | [
"MIT"
] | null | null | null | import redis
import copy
from functools import wraps
from .exception import ErrorMessage
from .decorators import raise_exception
from .serializer import Serializer
TTL = 604800  # 7 days in seconds; presumably the cart's Redis expiry — confirm where it is applied
def __get_user_redis_key_prefix(self):
    """
    Generate the prefix for the user's redis key.

    Joins the configured hash token with the literal "USER_ID",
    yielding "<token>:USER_ID".
    """
    return ":".join([self.__redis_user_hash_token, "USER_ID"])
def __get_user_redis_key(self, user_id):
    """
    Generate the name of the Redis hash used for storing the user's cart.

    Raises ErrorMessage when user_id is falsy (e.g. None or empty).
    """
    if user_id:
        return self.__get_user_redis_key_prefix() + ":"+str(user_id)
    else:
        raise ErrorMessage("user_id can't be null")
def __get_raw_cart(self):
    # Fetch the entire cart hash (field -> stored product value) from Redis.
    return self.redis_connection.hgetall(
        self.user_redis_key)
def __quantities(self):
    # Quantity of every product in the cart.
    # NOTE(review): on Python 3, map() returns a lazy iterator, not a list —
    # confirm callers expect that.
    return map(lambda product_dict: product_dict.get('quantity'), self.get_product_dicts())
def __product_price(self, product_dict):
    """
    Return the total price of one line item: quantity times unit_cost.
    """
    return product_dict['quantity'] * product_dict['unit_cost']
def __price_list(self):
    """
    Per-product total costs (quantity * unit_cost) for the whole cart.

    NOTE(review): despite the name, map() returns a lazy iterator on
    Python 3, not a list — confirm callers expect that.
    """
    return map(lambda product_dict: self.__product_price(product_dict), self.get_product_dicts())
def __del__(self):
    """
    Delete the user's cart hash from Redis when this object is reclaimed.

    NOTE(review): relying on __del__ for cleanup is fragile — garbage
    collection timing is not guaranteed, and the connection may already
    be closed at interpreter shutdown.
    """
    self.redis_connection.delete(self.user_redis_key)
| 34.927184 | 120 | 0.624739 |
2278e9e4e492d486947a4dea8110d0c980581f65 | 1,172 | py | Python | app/tests/test_transaction.py | geometry-labs/icon-filter-registration | 5ac93268465a529be453a51447805a65f2e23415 | [
"Apache-2.0"
] | null | null | null | app/tests/test_transaction.py | geometry-labs/icon-filter-registration | 5ac93268465a529be453a51447805a65f2e23415 | [
"Apache-2.0"
] | 1 | 2021-03-02T22:41:58.000Z | 2021-03-11T16:44:26.000Z | app/tests/test_transaction.py | geometry-labs/icon-filter-registration | 5ac93268465a529be453a51447805a65f2e23415 | [
"Apache-2.0"
] | null | null | null | import pytest
from app.main import app
from app.models import TransactionRegistration
from app.settings import settings
from httpx import AsyncClient
from tests.conftest import RequestCache
# Shared fixture: a transaction-filter registration pairing two placeholder
# contract addresses, reused by the tests in this module.
registration = TransactionRegistration(
    to_address="cx0000000000000000000000000000000000000001",
    from_address="cx0000000000000000000000000000000000000002",
)
| 28.585366 | 69 | 0.729522 |
22793242be75fa797dece7e56ce733139032b7be | 33,565 | py | Python | oslo_messaging/tests/rpc/test_server.py | ox12345/oslo.messaging | bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c | [
"Apache-1.1"
] | null | null | null | oslo_messaging/tests/rpc/test_server.py | ox12345/oslo.messaging | bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c | [
"Apache-1.1"
] | null | null | null | oslo_messaging/tests/rpc/test_server.py | ox12345/oslo.messaging | bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c | [
"Apache-1.1"
] | null | null | null |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import warnings
import eventlet
import fixtures
from oslo_config import cfg
from six.moves import mock
import testscenarios
import oslo_messaging
from oslo_messaging import rpc
from oslo_messaging.rpc import dispatcher
from oslo_messaging.rpc import server as rpc_server_module
from oslo_messaging import server as server_module
from oslo_messaging.tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
def test_no_target_server(self):
    """A topic-only target is rejected: starting an RPC server whose
    target lacks a server name must raise InvalidTarget carrying the
    topic."""
    transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
    server = oslo_messaging.get_rpc_server(
        transport,
        oslo_messaging.Target(topic='testtopic'),
        [])
    try:
        server.start()
    except Exception as ex:
        # Third argument is the assertion message shown on failure.
        self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
        self.assertEqual('testtopic', ex.target.topic)
    else:
        # start() succeeding means the invalid target was not detected.
        self.assertTrue(False)
def test_no_server_topic(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
target = oslo_messaging.Target(server='testserver')
server = oslo_messaging.get_rpc_server(transport, target, [])
try:
server.start()
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertEqual('testserver', ex.target.server)
else:
self.assertTrue(False)
class TestMultipleServers(test_utils.BaseTestCase, ServerSetupMixin):
    """Exercise two RPC servers side by side across every combination of
    exchange / topic / server / fanout / call-vs-cast / endpoint layout.

    Each ``_xxx`` list below is one axis of test scenarios: a list of
    ``(scenario_name, params_dict)`` pairs. The testscenarios machinery
    combines the axes into a cross-product of test cases.
    """

    # Axis: both servers on the same exchange vs. two distinct exchanges.
    _exchanges = [
        ('same_exchange', dict(exchange1=None, exchange2=None)),
        ('diff_exchange', dict(exchange1='x1', exchange2='x2')),
    ]

    # Axis: shared topic vs. distinct topics.
    _topics = [
        ('same_topic', dict(topic1='t', topic2='t')),
        ('diff_topic', dict(topic1='t1', topic2='t2')),
    ]

    # Axis: unnamed (default) server vs. two explicitly named servers.
    _server = [
        ('same_server', dict(server1=None, server2=None)),
        ('diff_server', dict(server1='s1', server2='s2')),
    ]

    # Axis: regular delivery vs. fanout delivery to all servers.
    _fanout = [
        ('not_fanout', dict(fanout1=None, fanout2=None)),
        ('fanout', dict(fanout1=True, fanout2=True)),
    ]

    # Axis: synchronous call vs. fire-and-forget cast.
    _method = [
        ('call', dict(call1=True, call2=True)),
        ('cast', dict(call1=False, call2=False)),
    ]

    # Axis: one shared endpoint on both servers vs. one endpoint each;
    # expect1/expect2 are the endpoint names expected to be dispatched to.
    _endpoints = [
        ('one_endpoint',
         dict(multi_endpoints=False,
              expect1=['ds1', 'ds2'],
              expect2=['ds1', 'ds2'])),
        ('two_endpoints',
         dict(multi_endpoints=True,
              expect1=['ds1'],
              expect2=['ds2'])),
    ]
TestMultipleServers.generate_scenarios()
| 36.404555 | 79 | 0.601847 |
227a7fa4744296865be9c842b020f4d289542d47 | 3,683 | py | Python | 3.03-pdDataOps.py | pgiardiniere/notes-PythonDataScienceHandbook | ddb6662d2fbeedd5b6b09ce4d8ddee55813ec589 | [
"MIT"
] | 2 | 2019-05-01T02:23:02.000Z | 2019-05-04T03:26:39.000Z | 3.03-pdDataOps.py | pgiardiniere/notes-PythonDataScienceHandbook | ddb6662d2fbeedd5b6b09ce4d8ddee55813ec589 | [
"MIT"
] | null | null | null | 3.03-pdDataOps.py | pgiardiniere/notes-PythonDataScienceHandbook | ddb6662d2fbeedd5b6b09ce4d8ddee55813ec589 | [
"MIT"
] | null | null | null | # Recall material on NP Universal Functions from Ch2
# PD builds on ufuncs functionality a few ways:
# first, for unary operations (negation / trig funcs), ufuncs preserve
# index and column labels in the output
# second, for binary operations (addition / multiplication) PD aligns
# indices when passing objects to the ufunc
# the automatic handling of these makes error-prone NP ufuncs, PD-bulletproof
# additionally, there are operations when crossing Series/DataFrame structs
##############################
### Ufuncs: Index Preservation
# As PD designed to work with NP, NP Ufuncs work on PD Series/DataFrame
import pandas as pd
import numpy as np
rng = np.random.RandomState(42)
ser = pd.Series(rnd.randint(0, 10, 4))
ser
df = pd.DataFrame(rng.randint(0, 10, (3, 4)), columns=['A', 'B', 'C', 'D'])
df
# applying a NP ufunc on either of these objects,
# result with be another PD object with the indeces preserved:
np.exp(ser)
np.sin(df * np.pi / 4)
##############################
### UFuncs: Index Alignment
## Index Alignment in Series
# suppose we are combining two differnce data sources, want top 3 us states
# by area, and top 3 by population
area = pd.Series({'Alaska': 1723337, 'Texas': 695662,
'California': 423967}, name='area')
population = pd.Series({'California': 38332521, 'Texas': 26448193,
'New York': 19651127}, name='population')
# now, divide to compute population density
population / area
# we see the resulting array contains the Union of indeces of two input arrs
# we can verify that using standard Python set arithmetic on the indices
area.index | population.index
# any item for which one or the other doesn't have an entry is marked "NaN"
A = pd.Series([2, 4, 6], index=[0, 1, 2])
B = pd.Series([1, 3, 5], index=[1, 2, 3])
A + B
# if NaN vals isn't desired, fill val can be modified using object methods
# in place of the operators (with attribute "fill_value" used)
A.add(B, fill_value=0)
## Index Alignment in DataFrame
# similar alignment on both columns AND indices when using DataFrames:
A = pd.DataFrame(rng.randint(0, 20, (2, 2)), columns=list('AB'))
A
B = pd.DataFrame(rng.randint(0, 10, (3, 3)), columns=list('BAC'))
B
A + B
# note that indices are aligned correctly irrespective of order in objects,
# and indices in the result are sorted
# as before, can use object method with "fill_value" attribute to replace NaN
# here, we fill with the mean of all values stored in "A" instead of 0
fill = A.stack().mean()
A.add(B, fill_value=fill)
# Table: Python operators and equivalent PD Object methods:
# + add()
# - sub(), subtract()
# * mul(), multiply()
# / truediv(), div(), divide()
# // floordiv()
# % mod()
# ** pow()
##############################
### Ufuncs: Operations Between DataFrame and Series
# index & col alignment is similar when crossing DF and Series
# Remember: as DF is to Series in Pandas
# 1D arr is to 2d Arr in NumPy
# Find difference between a two-dimensional array and one of its rows:
A = rng.randint(10, size=(3, 4))
A
A - A[0]
# Per NP broadcasting rules, subtraction b/w 2D arr and row is done row-wise
# In Pandas, convention similarly operates row-wise by default:
df = pd.DataFrame(A, columns=list('QRST'))
df - df.iloc[0]
# to operate column-wise, use object methods and specify "axis" keywork
df.subtract(df['R'], axis=0)
# as before, indices are automatically aligned between 2 elements:
halfrow = df.iloc[0, ::2]
halfrow
df - halfrow
# as mentioned, automatic preservation + alignment of indices/cols means
# operations on data in Pandas will maintain data context
# more seamlessly than NP arrs | 32.59292 | 77 | 0.688298 |
227b1e1bc0c209d9e1b5a1176eb8edcc2f765f16 | 1,286 | py | Python | cogs/feelings.py | Surice/dc_sophie | fa42f457b7b9d68a156a4b6db41e3d849238384c | [
"MIT"
] | null | null | null | cogs/feelings.py | Surice/dc_sophie | fa42f457b7b9d68a156a4b6db41e3d849238384c | [
"MIT"
] | null | null | null | cogs/feelings.py | Surice/dc_sophie | fa42f457b7b9d68a156a4b6db41e3d849238384c | [
"MIT"
] | null | null | null | from itertools import chain
from components.config import getConfig
from components.convert import fetchUser, pretRes
import discord
from discord import channel
from discord.ext import commands
| 33.842105 | 105 | 0.681182 |
227bae7ab6f777a68303a1c49a615d5f64a02cfd | 2,400 | py | Python | Solutions/arrays/Median_of_Two_Sorted_Arrays.py | HuitingZhengAvery/Leetcode-solutions | ac21cef395717abab188e76895ad83cf212fd60f | [
"MIT"
] | 1 | 2019-06-21T16:28:59.000Z | 2019-06-21T16:28:59.000Z | Solutions/arrays/Median_of_Two_Sorted_Arrays.py | HuitingZhengAvery/Leetcode-solutions | ac21cef395717abab188e76895ad83cf212fd60f | [
"MIT"
] | null | null | null | Solutions/arrays/Median_of_Two_Sorted_Arrays.py | HuitingZhengAvery/Leetcode-solutions | ac21cef395717abab188e76895ad83cf212fd60f | [
"MIT"
] | null | null | null | '''
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
'''
### Nature: by definition, the MEDIAN splits the combined elements so that the
### number of elements less than it equals the number of elements greater than it:
### len(left) == len(right)
### It does NOT matter whether these two parts are themselves sorted.
## Time: O(log(min(m, n))), Space: O(1) --> we need fixed number of variables
# Iterative approach
# Central logics: there exists i, j where i+j = (m+n+1) // 2 AND
# A[i-1] (leftmax of A) < B[j] (rightmin of B) AND B[j-1] < A[i]
# (in general, all left <= all right)
| 36.923077 | 97 | 0.542083 |
227c213f9c9f02d257d21830222edf425fe68721 | 781 | py | Python | carl/envs/mario/mario_game.py | automl/genRL | b7382fec9006d7da768ad7252194c6c5f1b2bbd7 | [
"Apache-2.0"
] | 27 | 2021-09-13T21:50:10.000Z | 2022-03-30T15:35:38.000Z | carl/envs/mario/mario_game.py | automl/genRL | b7382fec9006d7da768ad7252194c6c5f1b2bbd7 | [
"Apache-2.0"
] | 35 | 2021-09-15T07:20:29.000Z | 2022-03-02T15:14:31.000Z | carl/envs/mario/mario_game.py | automl/genRL | b7382fec9006d7da768ad7252194c6c5f1b2bbd7 | [
"Apache-2.0"
] | 2 | 2022-01-13T11:13:12.000Z | 2022-03-14T06:11:13.000Z | from abc import ABC, abstractmethod
| 19.525 | 85 | 0.62484 |
227c29400ceb467de41b94027e6c73ad4c909b28 | 16,832 | py | Python | sec5.2/train.py | Z-T-WANG/ConvergentDQN | 1b7f1857e33bc0a41b16ed6fe3251cb78220c691 | [
"MIT"
] | 1 | 2021-08-20T11:38:58.000Z | 2021-08-20T11:38:58.000Z | sec5.2/train.py | Z-T-WANG/ConvergentDQN | 1b7f1857e33bc0a41b16ed6fe3251cb78220c691 | [
"MIT"
] | null | null | null | sec5.2/train.py | Z-T-WANG/ConvergentDQN | 1b7f1857e33bc0a41b16ed6fe3251cb78220c691 | [
"MIT"
] | null | null | null | import torch
import torch.optim as optim
import torch.nn.functional as F
import optimizers
import time, os, random
import numpy as np
import math, copy
from collections import deque
from common.utils import epsilon_scheduler, beta_scheduler, update_target, print_log, load_model, print_args
from model import DQN
from common.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
#from matplotlib import pyplot
# Module-level accumulators shared across calls to compute_td_loss().
i_count=0  # number of training (gradient) steps taken so far
accu1, accu2 = 0., 0.  # running sums of gradient norm and squared gradient norm (for mean/std report)
accu_loss = 0.  # running sum of the reported loss since the last log write
def compute_td_loss(current_model, target_model, replay_buffer, optimizer, args, beta=None):
    """
    Sample one minibatch from the replay buffer, compute the TD loss for the
    algorithm selected by ``args.currentTask`` ("DQN", "CDQN" or "Residual"),
    and take a single optimizer step on ``current_model``.

    Args:
        current_model: online Q-network being optimized.
        target_model: target Q-network providing bootstrap targets.
        replay_buffer: replay buffer; prioritized when ``args.prioritized_replay``.
        optimizer: optimizer over ``current_model``'s parameters.
        args: hyper-parameter namespace (batch_size, gamma, device, ...).
        beta: importance-sampling exponent, used only with prioritized replay.

    Returns:
        (report_loss, off_policy_rate): the weighted mean squared TD error and
        the weighted fraction of sampled transitions whose stored action
        differs from the current greedy action.
    """
    global i_count, accu1, accu2, accu_loss
    # sample data
    if args.prioritized_replay:
        state_next_state, action_, reward_, done, weights_, true_weights, indices = replay_buffer.sample(args.batch_size, beta)
        weights = torch.from_numpy(weights_).to(args.device, non_blocking=True)
    else:
        state_next_state, action_, reward_, done, indices = replay_buffer.sample(args.batch_size)
        # uniform replay: all importance weights are 1
        weights = torch.ones(args.batch_size); weights_ = weights.numpy(); true_weights = weights_
        weights = weights.to(args.device, non_blocking=True)
    # we move data to GPU in chunks; pixel values are rescaled from [0, 255] to [0, 1]
    state_next_state = torch.from_numpy(state_next_state).to(args.device, non_blocking=True).float().div_(255)
    state, next_state = state_next_state
    action = torch.from_numpy(action_).to(args.device, non_blocking=True)
    # gamma * (1 - done): zero for terminal transitions so no bootstrap is added
    gamma_mul_one_minus_done_ = (args.gamma * (1. - done)).astype(np.float32)
    if args.currentTask == "DQN":
        # in some cases these data do not really need to be copied to GPU
        reward, gamma_mul_one_minus_done = torch.from_numpy(np.stack((reward_, gamma_mul_one_minus_done_))).to(args.device, non_blocking=True)
    ##### start training #####
    optimizer.zero_grad()
    # we use "values" to refer to Q values for all state-actions, and use "value" to refer to Q values for states
    if args.currentTask == "DQN":
        if args.double:
            # Double DQN: action selected by the online net, evaluated by the target net
            with torch.no_grad():
                next_q_values = current_model(next_state)
                next_q_action = next_q_values.max(1)[1].unsqueeze(1) # keep dim for gather below
                target_next_q_values = target_model(next_state)
                next_q_value = target_next_q_values.gather(1, next_q_action).squeeze()
                next_q_action = next_q_action.squeeze()
        else:
            with torch.no_grad():
                next_q_value, next_q_action = target_model(next_state).max(1)
        # expected_q_value = reward + gamma * (1 - done) * next_q_value
        expected_q_value = torch.addcmul(reward, tensor1=next_q_value, tensor2=gamma_mul_one_minus_done)
        q_values = current_model(state)
        q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
        loss = F.mse_loss(q_value, expected_q_value, reduction='none')
        if args.prioritized_replay:
            diff = (q_value.detach() - expected_q_value).cpu().numpy()
            prios = np.abs(diff) + args.prio_eps  # priority = |TD error| + eps
        loss = (loss * weights).mean()/2.  # /2 so that d(loss)/d(q_value) = weight * diff
        loss.backward()
        # we report the mean squared error instead of the (halved) training loss
        with torch.no_grad():
            report_loss = (F.mse_loss(q_value, expected_q_value, reduction='none')*weights).mean().item()
    if args.currentTask == "CDQN":
        # compute the current and next state values in a single pass
        size = list(state_next_state.size())
        current_and_next_states = state_next_state.view([-1]+size[2:])
        # compute the q values and the gradient
        all_q_values = current_model(current_and_next_states)
        with torch.no_grad():
            q_values, next_q_values = all_q_values[:args.batch_size], all_q_values[args.batch_size:2*args.batch_size]
            q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
            next_q_value, next_q_action = next_q_values.max(1)
            q_value, next_q_value = torch.stack((q_value, next_q_value)).cpu().numpy()
        next_q_values_target = target_model(next_state)
        if args.double:
            next_q_value_target = next_q_values_target.gather(1, next_q_action.unsqueeze(1)).squeeze().cpu().numpy()
        else:
            next_q_value_target = np.max(next_q_values_target.cpu().numpy(), axis=1)
        # Convergent DQN: per sample, use whichever target (self-bootstrap or
        # target-network bootstrap) gives the LARGER TD error
        expected_q_value_self = reward_ + gamma_mul_one_minus_done_ * next_q_value
        expected_q_value_target = reward_ + gamma_mul_one_minus_done_ * next_q_value_target
        target_mask = (np.abs(q_value - expected_q_value_target) >= np.abs(q_value - expected_q_value_self))
        expected_q_value = np.where(target_mask, expected_q_value_target, expected_q_value_self)
        target_mask = target_mask.astype(np.float32)
        diff = q_value - expected_q_value
        if args.prioritized_replay:
            prio_diff = diff
            prios = np.abs(prio_diff) + args.prio_eps
        # gradient of the weighted squared error w.r.t. q_value
        # (no Huber clipping is applied here, despite the original comment)
        weighted_diff = weights_ * diff
        q_value_grad = 1./args.batch_size *weighted_diff
        all_grads = torch.zeros_like(all_q_values)
        # manually backpropagate the gradient through the term "expected_q_value"
        # (only for samples that used the self-bootstrap target)
        next_q_value_grad = - (1.-target_mask) * q_value_grad
        next_q_value_grad = next_q_value_grad * gamma_mul_one_minus_done_
        grads = torch.from_numpy(np.concatenate([q_value_grad, next_q_value_grad], axis=0)).unsqueeze(1).to(args.device)
        all_grads.scatter_(1, torch.cat([action, next_q_action], dim=0).unsqueeze(1), grads)
        all_q_values.backward(all_grads) # this method makes it run faster
        report_loss = np.dot(diff, weights_ * diff)/args.batch_size
    if args.currentTask == "Residual":
        # compute the current and next state values in a single pass
        size = list(state_next_state.size())
        current_and_next_states = state_next_state.view([-1]+size[2:])
        # compute the q values and the gradient
        all_q_values = current_model(current_and_next_states)
        with torch.no_grad():
            q_values, next_q_values = all_q_values[:args.batch_size], all_q_values[args.batch_size:2*args.batch_size]
            q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
            next_q_value, next_q_action = next_q_values.max(1)
            q_value, next_q_value = torch.stack((q_value, next_q_value)).cpu().numpy()
        expected_q_value = reward_ + gamma_mul_one_minus_done_ * next_q_value
        # then compute the q values and the loss
        diff = q_value - expected_q_value
        if args.prioritized_replay:
            prio_diff = diff
            prios = np.abs(prio_diff) + args.prio_eps
        # gradient of the weighted squared error w.r.t. q_value (no Huber clipping)
        weighted_diff = weights_ * diff
        q_value_grad = 1./args.batch_size *weighted_diff
        all_grads = torch.zeros_like(all_q_values)
        # manually backpropagate the gradient through the term "expected_q_value"
        # (residual algorithm: the bootstrap term always receives gradient)
        next_q_value_grad = - q_value_grad
        next_q_value_grad = next_q_value_grad * gamma_mul_one_minus_done_
        grads = torch.from_numpy(np.concatenate([q_value_grad, next_q_value_grad], axis=0)).unsqueeze(1).to(args.device)
        all_grads.scatter_(1, torch.cat([action, next_q_action], dim=0).unsqueeze(1), grads)
        all_q_values.backward(all_grads) # this method makes it run faster
        report_loss = np.dot(diff, weights_ * diff)/args.batch_size
    if args.prioritized_replay:
        replay_buffer.update_priorities(indices, prios)
    # gradient clipping
    if args.grad_clip > 0.:
        grad_norm = torch.nn.utils.clip_grad_norm_(current_model.parameters(), max_norm = args.grad_clip)
        accu1 += grad_norm
        accu2 += grad_norm**2
    if args.do_update_target: update_target(current_model, target_model); args.do_update_target=False
    optimizer.step()
    # fraction of samples where the stored action is no longer greedy
    # (fixed: np.float was removed in NumPy 1.24; use the builtin float)
    off_policy_rate = np.mean((np.argmax(q_values.detach().cpu().numpy(), axis=1)!=action_).astype(float)*true_weights)
    i_count += 1
    accu_loss += report_loss
    report_period = math.ceil(args.evaluation_interval/args.train_freq)
    if i_count % report_period == 0 and accu1 != 0.:
        print("gradient norm {:.3f} +- {:.3f}".format(accu1/report_period, math.sqrt(accu2/report_period-(accu1/report_period)**2)))
        accu1, accu2 = 0., 0.
        if not args.silent:
            with open(os.path.join(args.env, '{}mse_{}.txt'.format(args.currentTask, args.comment)), 'a') as f:
                f.write('{:.0f}\t{}\n'.format((i_count*args.train_freq+args.learning_start)*4, accu_loss/report_period))
        accu_loss = 0.
    return report_loss, off_policy_rate
| 49.798817 | 172 | 0.649774 |
227d0c74c72ef68b3f928e3787684e5cdd3c8d18 | 6,290 | py | Python | tests/broker/test_update_chassis.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | tests/broker/test_update_chassis.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | tests/broker/test_update_chassis.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update chassis command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
from chassistest import VerifyChassisMixin
if __name__ == '__main__':
    # Allow running this test module directly (outside the full test suite).
    suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateChassis)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 43.082192 | 109 | 0.592687 |
97d3b4402a419951038455ac0b5764d606d2b2b1 | 11,289 | py | Python | label_propagation/label_propagation.py | lujiaxuan0520/NAIC-ReID-2020-contest | 51953a6927afb71733e39845fec9723210d37a1b | [
"MIT"
] | 1 | 2020-12-13T12:39:30.000Z | 2020-12-13T12:39:30.000Z | label_propagation/label_propagation.py | lujiaxuan0520/NAIC-ReID-2020-contest | 51953a6927afb71733e39845fec9723210d37a1b | [
"MIT"
] | null | null | null | label_propagation/label_propagation.py | lujiaxuan0520/NAIC-ReID-2020-contest | 51953a6927afb71733e39845fec9723210d37a1b | [
"MIT"
] | null | null | null | #########################################################################################
# semi-supervised learning: use label propagation to make pseudo labels for no label data
# This is not the parallel implement of label propagation, may requires a lot of time
# Author: Jiaxuan Lu
#########################################################################################
import time
import numpy as np
import math
import os, sys
import os.path as osp
sys.path.append("..")
sys.path.extend([os.path.join(root, name) for root, dirs, _ in os.walk("../") for name in dirs])
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchreid.dataset_loader import ImageDataset
from torchreid import transforms as T
from torchreid import models
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.torchtools import count_num_param
# --- run configuration (edited in place; this script takes no CLI arguments) ---
gpu_devices = "7" # gpu devices (value assigned to CUDA_VISIBLE_DEVICES)
extended_data = False # whether to use extended data (the *_extended_list.txt files)
model_weight = "./log/resnet50-xent/vmgn_hgnn13/checkpoint_ep65.pth.tar"  # checkpoint to load
arch = "vmgn_hgnn"  # model architecture name passed to models.init_model
test_batch = 500  # batch size for feature extraction
dataset_name = "pclreid"  # dataset identifier (informational)
global_branch = True  # enable the model's global branch
dist_metric = "cosine"  # distance metric; appears unused below -- TODO confirm
root = "./"  # dataset root directory
height = 256  # input image height after resize
width = 128  # input image width after resize
seed = 1  # RNG seed for torch
workers = 4  # number of DataLoader worker processes
# return k neighbors index
# build a big graph (normalized weight matrix)
# label propagation
# main function
if __name__ == "__main__":
torch.manual_seed(seed)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_devices
use_gpu = torch.cuda.is_available()
if use_gpu:
print("Currently using GPU {}".format(gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(seed)
else:
print("Currently using CPU (GPU is highly recommended)")
print("Initializing dataset {}".format(dataset_name))
dataset_dir = osp.join(root, 'PCL_ReID')
list_label_path = osp.join(dataset_dir, 'train_extended_list.txt') if extended_data else \
osp.join(dataset_dir, 'train_list.txt')
list_unlabel_path = osp.join(dataset_dir, 'no_label_extended_list.txt') if extended_data else \
osp.join(dataset_dir, 'no_label_list.txt')
label_data, num_label_pids, num_label_imgs = process_dir_label(list_label_path, cam=0)
unlabel_data, num_unlabel_imgs = process_dir_unlabel(list_unlabel_path, cam=1)
transform_test = T.Compose([
T.Resize((height, width)),
T.ToTensor(),
# T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
T.Normalize(mean=[0.3495, 0.3453, 0.3941], std=[0.2755, 0.2122, 0.2563]),
])
pin_memory = True if use_gpu else False
labelloader = DataLoader(
ImageDataset(label_data, transform=transform_test),
batch_size=test_batch, shuffle=False, num_workers=workers,
pin_memory=pin_memory, drop_last=False
)
unlabelloader = DataLoader(
ImageDataset(unlabel_data, transform=transform_test, isFinal=True),
batch_size=test_batch, shuffle=False, num_workers=workers,
pin_memory=pin_memory, drop_last=False
)
print("Initializing model: {}".format(arch))
'''
vmgn_hgnn model, arch chosen from {'resnet50','resnet101','resnet152'}
efficientnet_hgnn model, arch chosen from {'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3',
'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7','efficientnet-b8'}
'''
model = models.init_model(name=arch,
num_classes=29626, # 29626 or 34394
# num_classes=19658,
isFinal=False,
global_branch=global_branch,
arch="resnet101")
print("Model size: {:.3f} M".format(count_num_param(model)))
checkpoint = torch.load(model_weight)
pretrain_dict = checkpoint['state_dict']
# model_dict = model.state_dict()
# pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
# model_dict.update(pretrain_dict)
model.load_state_dict(pretrain_dict)
if use_gpu:
model = nn.DataParallel(model).cuda()
print("Evaluate only")
Mat_Label, Mat_Unlabel, labels, unlabel_img_path = test(model, labelloader, unlabelloader, use_gpu)
# num_unlabel_samples = 800
# Mat_Label, labels, Mat_Unlabel = loadBandData(num_unlabel_samples)
# Mat_Label, labels, Mat_Unlabel = loadCircleData(num_unlabel_samples)
## Notice: when use 'rbf' as our kernel, the choice of hyper parameter 'sigma' is very import! It should be
## chose according to your dataset, specific the distance of two data points. I think it should ensure that
## each point has about 10 knn or w_i,j is large enough. It also influence the speed of converge. So, may be
## 'knn' kernel is better!
# unlabel_data_labels = labelPropagation(Mat_Label, Mat_Unlabel, labels, kernel_type = 'rbf', rbf_sigma = 0.2)
print("start label propagation")
unlabel_data_labels = labelPropagation(Mat_Label, Mat_Unlabel, labels, kernel_type='knn', knn_num_neighbors=5,
max_iter=400)
# show(Mat_Label, labels, Mat_Unlabel, unlabel_data_labels)
for idx in range(len(unlabel_img_path)):
unlabel_img_path[idx] += ':' + str(unlabel_data_labels[idx])
np.savetxt("pseudo_label_for_no_label.txt", unlabel_img_path) | 37.257426 | 129 | 0.652582 |
97d3d479f4d7bb607ee11ef3af9de4bcb2b193c7 | 12,781 | py | Python | tests/helpers/test_file.py | Centaurioun/PyFunceble | 59b809f3322118f7824195752c6015220738d4a0 | [
"Apache-2.0"
] | null | null | null | tests/helpers/test_file.py | Centaurioun/PyFunceble | 59b809f3322118f7824195752c6015220738d4a0 | [
"Apache-2.0"
] | null | null | null | tests/helpers/test_file.py | Centaurioun/PyFunceble | 59b809f3322118f7824195752c6015220738d4a0 | [
"Apache-2.0"
] | null | null | null | """
The tool to check the availability or syntax of domain, IP or URL.
::
Tests of the file helper.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021, 2021 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import secrets
import tempfile
import unittest
from PyFunceble.helpers.file import FileHelper
from PyFunceble.utils.platform import PlatformUtility
if __name__ == "__main__":
unittest.main()
| 26.627083 | 88 | 0.606838 |
97d574a37c2dcf1ccbae57ff4f4d838393dd694f | 1,938 | py | Python | malaya_speech/supervised/unet.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 111 | 2020-08-31T04:58:54.000Z | 2022-03-29T15:44:18.000Z | malaya_speech/supervised/unet.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 14 | 2020-12-16T07:27:22.000Z | 2022-03-15T17:39:01.000Z | malaya_speech/supervised/unet.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 29 | 2021-02-09T08:57:15.000Z | 2022-03-12T14:09:19.000Z | from malaya_speech.utils import (
check_file,
load_graph,
generate_session,
nodes_session,
)
from malaya_speech.model.tf import UNET, UNETSTFT, UNET1D
| 25.168831 | 69 | 0.609391 |
97d6b1b1207de186f313949afee6fd694df16691 | 4,618 | py | Python | scripts/GUI_restart.py | zainamir-98/bioradar | b826ed869a58778a321153dae3c93f17f40d2f7a | [
"MIT"
] | null | null | null | scripts/GUI_restart.py | zainamir-98/bioradar | b826ed869a58778a321153dae3c93f17f40d2f7a | [
"MIT"
] | null | null | null | scripts/GUI_restart.py | zainamir-98/bioradar | b826ed869a58778a321153dae3c93f17f40d2f7a | [
"MIT"
] | null | null | null | # Use this command if numpy import fails: sudo apt-get install python-dev libatlas-base-dev
# If this doesn't work, uninstall both numpy and scipy. Thonny will keep an older default version of numpy.
# Install an older version of scipy that corresponds to the correct version of numpy.
from guizero import App, PushButton, Slider, Text, ButtonGroup, Picture, Box, CheckBox
import sys
import time
import subprocess
import os
DEBUG_MODE = False  # when True, the window is not forced fullscreen
#CONT_REALTIME_MONITORING = False
app = App(title="BioRadar (Prototype)", width=480, height=320, bg="#141414")
if not DEBUG_MODE:
    app.full_screen = True

# --- Start menu: logo, title and mode-selection buttons ---
start_menu_box = Box(app, width="fill")
pad_1 = Box(start_menu_box, width="fill", height=20)
box_1 = Box(start_menu_box, width="fill")
pad_1_2 = Box(box_1, width=140, height=1, align="left")
picture = Picture(box_1, image="images/brlogo.png", width=51, height=40, align="left") # W:H = 1.277
pad_1_2 = Box(box_1, width=10, height=1, align="left")
message = Text(box_1, text="BioRadar", color="#FFFFFF", size=20, align="left")
pad_2 = Box(start_menu_box, width="fill", height=40)
message = Text(start_menu_box, text="Select how you want to monitor your vitals.", color="#FFFFFF", size=15)
pad_3 = Box(start_menu_box, width="fill", height=18)
button1 = PushButton(start_menu_box, text="Online mode", command=gui_go_to_connect)
button1.bg = "#6ED3A9"
pad_4 = Box(start_menu_box, width="fill", height=10)
button2 = PushButton(start_menu_box, text="Manual mode", command=gui_go_to_manual)
button2.bg = "#6ED3A9"
start_menu_box.hide()

# --- Online ("connect") screen ---
connect_menu_box = Box(app, width="fill")
pad_1 = Box(connect_menu_box, width="fill", height=100)
connect_menu_text = Text(connect_menu_box, text="Connecting to MyVitals...", color="#FFFFFF", size=20)
pad_2 = Box(connect_menu_box, width="fill", height=30)
connect_menu_text2 = Text(connect_menu_box, text="Waiting for online commands...", color="#FFFFFF", size=16)
connect_menu_box.hide()

# Manual mode
manual_menu_box = Box(app, width="fill")
pad = Box(manual_menu_box, width="fill", height=20)
manual_menu_text = Text(manual_menu_box, text="Manual Mode", color="#FFFFFF", size=20)
pad = Box(manual_menu_box, width="fill", height=50)
button_box = Box(manual_menu_box, width=460, height=90)
button1 = PushButton(button_box, text="Respiration Rate\nHeart Rate", command=gui_open_rr_hr, align="left")
pad = Box(button_box, width=10, height=90, align="left")
button2 = PushButton(button_box, text="Heart Rate Variability\nHeart Rate*", command=gui_open_hrv_hr, align="right")
button1.text_size = 16
button2.text_size = 16
button1.bg = "#6ED3A9"
button2.bg = "#6ED3A9"
pad = Box(manual_menu_box, width="fill", height=30)
pad = Box(manual_menu_box, width="fill", height=6)
txt = Text(manual_menu_box, text="* You will need to hold your breath for 10 seconds for\nheart rate variability measurements.", color="#C8C8C8", size=11)

# Footers
start_footer_box = Box(app, width="fill", align="bottom")
# NOTE(review): a character (likely a copyright sign) appears to have been
# lost before "2021" in this label -- confirm against the original source.
fyp_text = Text(start_footer_box, text=" 2021 Final-Year Project, SEECS, NUST", color="#C8C8C8", size=11, align="left")
exit_button = PushButton(start_footer_box, text="Exit", align="right", command=exit)
exit_button.bg = "#6ED3A9"
start_footer_box.hide()
other_footer_box = Box(app, width="fill", align="bottom")
exit_button = PushButton(other_footer_box, text="Exit", align="right", command=exit)
exit_button.bg = "#6ED3A9"
back_button = PushButton(other_footer_box, text="Back", align="right", command=gui_go_back_to_menu)
back_button.bg = "#6ED3A9"

# fixed: the final line had dataset-metadata junk fused onto it
# ("app.display() | 39.47... |"), which is a syntax error
app.display()
97da085bfcfa86877a3a5eae743b983ac785a5f4 | 1,182 | py | Python | pyFileFixity/lib/distance/distance/_lcsubstrings.py | hadi-f90/pyFileFixity | 2cb3dd6225a6b062a98fa2d61c4a0a29d8010428 | [
"MIT"
] | null | null | null | pyFileFixity/lib/distance/distance/_lcsubstrings.py | hadi-f90/pyFileFixity | 2cb3dd6225a6b062a98fa2d61c4a0a29d8010428 | [
"MIT"
] | 1 | 2022-01-19T13:46:55.000Z | 2022-01-19T13:46:55.000Z | pyFileFixity/lib/distance/distance/_lcsubstrings.py | hadi-f90/pyFileFixity | 2cb3dd6225a6b062a98fa2d61c4a0a29d8010428 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from array import array
def lcsubstrings(seq1, seq2, positions=False):
	"""Find the longest common substring(s) in the sequences `seq1` and `seq2`.

	If positions evaluates to `True` only their positions will be returned,
	together with their length, in a tuple:

		(length, ((start pos in seq1, start pos in seq2), ...))

	Otherwise, the substrings themselves will be returned, in a set.

	Example:

		>>> lcsubstrings("sedentar", "dentist")
		{'dent'}
		>>> lcsubstrings("sedentar", "dentist", positions=True)
		(4, ((2, 0),))
	"""
	L1, L2 = len(seq1), len(seq2)
	ms = []
	mlen = last = 0
	# Work on (longer, shorter) so the DP column is as small as possible.
	# Remember whether we swapped so that reported positions can be mapped
	# back to the caller's argument order (bug fix: the original returned
	# positions relative to the swapped order, contradicting the docstring).
	swapped = False
	if L1 < L2:
		seq1, seq2 = seq2, seq1
		L1, L2 = L2, L1
		swapped = True
	# column[j] holds the length of the common suffix ending at (i, j);
	# `last` carries the previous row's value at column j-1, i.e. DP(i-1, j-1).
	column = array('L', [0]) * L2
	for i in range(L1):
		for j in range(L2):
			old = column[j]
			if seq1[i] == seq2[j]:
				column[j] = 1 if (i == 0 or j == 0) else last + 1
				if column[j] > mlen:
					mlen = column[j]
					ms = [(i, j)]
				elif column[j] == mlen:
					ms.append((i, j))
			else:
				column[j] = 0
			last = old
	if positions:
		if swapped:
			# map (pos in swapped-seq1, pos in swapped-seq2) back to caller order
			return (mlen, tuple((j - mlen + 1, i - mlen + 1) for i, j in ms))
		return (mlen, tuple((i - mlen + 1, j - mlen + 1) for i, j in ms))
	return {seq1[i - mlen + 1:i + 1] for i, _ in ms}
| 22.730769 | 76 | 0.583756 |
97db509debe2b8503920910c68f09fde1efdca62 | 6,072 | py | Python | colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py | JGoldstone/colour | 6829b363d5f0682bff0f4826995e7ceac189ff28 | [
"BSD-3-Clause"
] | null | null | null | colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py | JGoldstone/colour | 6829b363d5f0682bff0f4826995e7ceac189ff28 | [
"BSD-3-Clause"
] | null | null | null | colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py | JGoldstone/colour | 6829b363d5f0682bff0f4826995e7ceac189ff28 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.rgb.transfer_functions.\
panasonic_vlog` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
log_encoding_VLog,
log_decoding_VLog,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestLogEncoding_VLog',
'TestLogDecoding_VLog',
]
if __name__ == '__main__':
unittest.main()
| 31.138462 | 78 | 0.640316 |
97db587e34c2af72ba15568d5a03261d228ebb29 | 3,546 | py | Python | test/IECoreRI/All.py | gcodebackups/cortex-vfx | 72fa6c6eb3327fce4faf01361c8fcc2e1e892672 | [
"BSD-3-Clause"
] | 5 | 2016-07-26T06:09:28.000Z | 2022-03-07T03:58:51.000Z | test/IECoreRI/All.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | null | null | null | test/IECoreRI/All.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | 3 | 2015-03-25T18:45:24.000Z | 2020-02-15T15:37:18.000Z | ##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import unittest
import IECore
import IECoreRI
from SLOReader import *
from Renderer import *
from Instancing import *
from PTCParticleReader import *
from PTCParticleWriter import *
from ArchiveRecord import *
from DoubleSided import *
from Orientation import *
from MultipleContextsTest import *
from Camera import *
from CurvesTest import *
from TextureOrientationTest import *
from ArrayPrimVarTest import *
from CoordinateSystemTest import *
from IlluminateTest import *
from SubsurfaceTest import *
from PatchMeshTest import *
from RIBWriterTest import *
from ParameterisedProcedural import *
from MotionTest import MotionTest
from PythonProceduralTest import PythonProceduralTest
from DetailTest import DetailTest
from ProceduralThreadingTest import ProceduralThreadingTest
from StringArrayParameterTest import StringArrayParameterTest
from CoshaderTest import CoshaderTest
from GroupTest import GroupTest
from DspyTest import DspyTest
from RerenderingTest import RerenderingTest
# Some test modules depend on optional IECoreRI/IECore features; import each
# one only when the corresponding capability was compiled into the build.
if hasattr( IECoreRI, "SXRenderer" ) :
    from SXRendererTest import SXRendererTest
if hasattr( IECoreRI, "GXEvaluator" ) :
    from GXEvaluatorTest import GXEvaluatorTest
if hasattr( IECoreRI, "DTEXDeepImageReader" ) :
    from DTEXDeepImageReaderTest import TestDTEXDeepImageReader
    from DTEXDeepImageWriterTest import TestDTEXDeepImageWriter
if hasattr( IECoreRI, "SHWDeepImageReader" ) :
    from SHWDeepImageReaderTest import TestSHWDeepImageReader
    from SHWDeepImageWriterTest import TestSHWDeepImageWriter
if IECore.withFreeType() :
    from TextTest import *
# Run every imported TestCase, mirroring the output both to stderr and to a
# results file so CI can pick it up.
unittest.TestProgram(
    testRunner = unittest.TextTestRunner(
        stream = IECore.CompoundStream(
            [
                sys.stderr,
                open( "test/IECoreRI/resultsPython.txt", "w" )
            ]
        ),
        verbosity = 2
    )
)
| 36.183673 | 76 | 0.758037 |
97dd0689130d6bd5ed6a18fd645d0dcff177ddd3 | 2,164 | py | Python | molecool/tests/test_measure.py | pavankum/molecool | 0aa4fe5423aa91cb59fb603e3293d89741cb87a6 | [
"MIT"
] | null | null | null | molecool/tests/test_measure.py | pavankum/molecool | 0aa4fe5423aa91cb59fb603e3293d89741cb87a6 | [
"MIT"
] | null | null | null | molecool/tests/test_measure.py | pavankum/molecool | 0aa4fe5423aa91cb59fb603e3293d89741cb87a6 | [
"MIT"
] | null | null | null | """
Unit tests for measure
"""
# Import package, test suite, and other packages as needed
import numpy as np
import molecool
import pytest
def test_calculate_distance():
    """Check that calculate_distance returns the Euclidean distance between two points."""
    point_a = np.array([1, 0, 0])
    point_b = np.array([3, 0, 0])
    # Points two units apart along the x axis.
    assert molecool.calculate_distance(point_a, point_b) == 2
def test_calculate_angle():
    """Check that calculate_angle measures a right angle, in degrees."""
    along_x = np.array([1, 0, 0])
    vertex = np.array([0, 0, 0])
    along_y = np.array([0, 1, 0])
    # The x and y unit vectors meet at the origin at 90 degrees.
    assert molecool.calculate_angle(along_x, vertex, along_y, degrees=True) == 90
97dd106f5157a62375f9741a6b7c0edb0c3a8dee | 1,240 | py | Python | tests/test_util_matrix.py | PeerHerholz/pyrsa | 994007086c59de93d86b982f1fff73fe6a8ea929 | [
"MIT"
] | 4 | 2015-08-10T18:34:21.000Z | 2018-05-15T20:43:15.000Z | tests/test_util_matrix.py | PeerHerholz/pyrsa | 994007086c59de93d86b982f1fff73fe6a8ea929 | [
"MIT"
] | null | null | null | tests/test_util_matrix.py | PeerHerholz/pyrsa | 994007086c59de93d86b982f1fff73fe6a8ea929 | [
"MIT"
] | 2 | 2018-03-26T03:02:07.000Z | 2021-11-10T21:09:48.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
test_util_matrix
@author: jdiedrichsen
"""
import unittest
import pyrsa.util as rsu
import numpy as np
if __name__ == '__main__':
unittest.main()
| 24.313725 | 50 | 0.592742 |
97de7958e0a043ea00870086f0a3a9e86192755c | 6,999 | py | Python | custom_components/smartthinq_washer/wideq/washer.py | Golab/ha-smartthinq-washer | 92e4589a9be143f9b167853e2b5a1607631c1c42 | [
"Apache-2.0"
] | 1 | 2020-04-13T14:09:28.000Z | 2020-04-13T14:09:28.000Z | custom_components/smartthinq_washer/wideq/washer.py | Golab/ha-smartthinq-washer | 92e4589a9be143f9b167853e2b5a1607631c1c42 | [
"Apache-2.0"
] | null | null | null | custom_components/smartthinq_washer/wideq/washer.py | Golab/ha-smartthinq-washer | 92e4589a9be143f9b167853e2b5a1607631c1c42 | [
"Apache-2.0"
] | null | null | null | """------------------for Washer"""
import datetime
import enum
import time
import logging
from typing import Optional
from .device import (
Device,
DeviceStatus,
STATE_UNKNOWN,
STATE_OPTIONITEM_ON,
STATE_OPTIONITEM_OFF,
)
from .washer_states import (
STATE_WASHER,
STATE_WASHER_ERROR,
WASHERSTATES,
WASHERWATERTEMPS,
WASHERSPINSPEEDS,
WASHREFERRORS,
WASHERERRORS,
)
_LOGGER = logging.getLogger(__name__)
| 27.555118 | 84 | 0.603658 |
97df4a022eaff541facbf55fa41d937b36722e9a | 375 | py | Python | year2020/day17/reader.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | year2020/day17/reader.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | year2020/day17/reader.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | from pathlib import Path
| 22.058824 | 48 | 0.597333 |
97e1339259b947d5c260266bb5a742c74a8323da | 4,644 | py | Python | squad/base/argument_parser.py | uwnlp/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
] | 89 | 2018-08-25T07:59:07.000Z | 2021-05-04T06:37:27.000Z | squad/base/argument_parser.py | seominjoon/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
] | 11 | 2018-09-28T17:33:27.000Z | 2019-11-27T23:34:45.000Z | squad/base/argument_parser.py | uwnlp/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
] | 10 | 2018-09-19T06:48:06.000Z | 2020-04-14T20:42:06.000Z | import argparse
import os
| 50.478261 | 120 | 0.644703 |
97e32ebe567c88c97e005c959868e8ed6406d1eb | 2,210 | py | Python | getml/loss_functions.py | srnnkls/getml-python-api | 032b2fec19a0e0a519eab480ee61e0d422d63993 | [
"MIT"
] | null | null | null | getml/loss_functions.py | srnnkls/getml-python-api | 032b2fec19a0e0a519eab480ee61e0d422d63993 | [
"MIT"
] | null | null | null | getml/loss_functions.py | srnnkls/getml-python-api | 032b2fec19a0e0a519eab480ee61e0d422d63993 | [
"MIT"
] | null | null | null | # Copyright 2019 The SQLNet Company GmbH
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
This module contains the loss functions for the getml library.
"""
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| 32.985075 | 80 | 0.619005 |
97e4ff9556a184829362cc46861ffd16d6689ddb | 870 | py | Python | transit/helpers.py | moredatarequired/python-stitch-client | 222ba24e34614d3acecab41cd78a5c78ab8ea782 | [
"Apache-2.0"
] | 71 | 2015-01-03T07:55:33.000Z | 2021-10-30T16:52:09.000Z | transit/helpers.py | moredatarequired/python-stitch-client | 222ba24e34614d3acecab41cd78a5c78ab8ea782 | [
"Apache-2.0"
] | 27 | 2015-01-02T06:10:25.000Z | 2022-02-20T21:54:13.000Z | transit/helpers.py | moredatarequired/python-stitch-client | 222ba24e34614d3acecab41cd78a5c78ab8ea782 | [
"Apache-2.0"
] | 20 | 2015-01-05T04:07:52.000Z | 2022-02-20T19:08:15.000Z | ## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import itertools
from transit.pyversion import imap, izip
cycle = itertools.cycle
| 27.1875 | 75 | 0.725287 |
97e737d9c2d51a5e35ef3bbd28e5bc15aadb06de | 1,779 | py | Python | part4/matplotlib/seoul_to_cn_gb_kw.py | tls1403/PythonTest | 069f23b25ec655aa199d13aef9c14d2e33366861 | [
"MIT"
] | null | null | null | part4/matplotlib/seoul_to_cn_gb_kw.py | tls1403/PythonTest | 069f23b25ec655aa199d13aef9c14d2e33366861 | [
"MIT"
] | null | null | null | part4/matplotlib/seoul_to_cn_gb_kw.py | tls1403/PythonTest | 069f23b25ec655aa199d13aef9c14d2e33366861 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
#
from matplotlib import font_manager,rc
font_path ="D:/5674-833_4th/part4/malgun.ttf"
font_name = font_manager.FontProperties(fname=font_path).get_name()
rc('font',family = font_name)
df = pd.read_excel('D:/5674-833_4th/part4/ .xlsx',engine = 'openpyxl',header =0)
df = df.fillna(method='ffill') #
#
mask = (df[''] == '') & (df[''] != '')
df_seoul = df[mask]
df_seoul = df_seoul.drop([''],axis= 1) # column
df_seoul.rename({'':''},axis=1,inplace=True) # column
df_seoul.set_index('',inplace = True)
print(df_seoul)
# , ,
col_years = list(map(str,range(1970,2018)))
df_3 = df_seoul.loc[['','',''],col_years]
#
plt.style.use('ggplot')
# (figure 1 )
fig = plt.figure(figsize=(20,5))
ax =fig.add_subplot(1,1,1)
#axe plot
ax.plot(col_years,df_3.loc['',:],marker = 'o',markerfacecolor = 'green',
markersize = 10,color = 'olive',linewidth = 2, label = ' -> ')
ax.plot(col_years,df_3.loc['',:],marker = 'o',markerfacecolor = 'blue',
markersize = 10, color = 'skyblue', linewidth = 2 , label = ' -> ')
ax.plot(col_years,df_3.loc['',:],marker = 'o',markerfacecolor = 'red',
markersize =10, color = 'magenta',linewidth = 2, label = ' -> ')
#
ax.legend(loc = 'best')
#
ax.set_title(' -> , , ',size = 20 )
#
ax.set_xlabel('',size =12)
ax.set_ylabel(' ',size =12)
# 90
ax.set_xticklabels(col_years,rotation = 90)
#
ax.tick_params(axis = "x", labelsize =10)
ax.tick_params(axis = "y", labelsize= 10)
plt.show() | 30.672414 | 90 | 0.675098 |
97e73f20826e580f553c50fa8510c0e35ee9a048 | 365 | py | Python | blsqpy/query.py | BLSQ/blsqpy | 52fcbd655780e78eccceb2a61280262194c2416c | [
"MIT"
] | null | null | null | blsqpy/query.py | BLSQ/blsqpy | 52fcbd655780e78eccceb2a61280262194c2416c | [
"MIT"
] | 7 | 2018-12-18T10:11:34.000Z | 2019-03-27T07:09:38.000Z | blsqpy/query.py | BLSQ/blsqpy | 52fcbd655780e78eccceb2a61280262194c2416c | [
"MIT"
] | 2 | 2018-12-12T12:31:40.000Z | 2019-02-25T12:34:48.000Z | import os
from jinja2 import Environment, FileSystemLoader
QUERIES_DIR = os.path.dirname(os.path.abspath(__file__))
| 36.5 | 94 | 0.715068 |
97e7b0008c9dde06dac12b270121649a12a1ff61 | 8,507 | py | Python | SINE.py | EduardoMCF/SINE | 061960b65164ae612a5cb63c540eb8a488505073 | [
"MIT"
] | null | null | null | SINE.py | EduardoMCF/SINE | 061960b65164ae612a5cb63c540eb8a488505073 | [
"MIT"
] | null | null | null | SINE.py | EduardoMCF/SINE | 061960b65164ae612a5cb63c540eb8a488505073 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pyaudio, wave
import numpy as np
from collections import OrderedDict as OD
from struct import pack
from math import fmod
from os import system
pi = np.pi
p = pyaudio.PyAudio()
octaves = {
'C0': 16.35, 'C#0': 17.32, 'D0': 18.35, 'D#0': 19.45, 'E0': 20.6, 'F0': 21.83, 'F#0': 23.12, 'G0': 24.5, 'G#0': 25.96, 'A0': 27.5, 'A#0': 29.14, 'B0': 30.87,
'C1': 32.70, 'C#1': 34.65, 'D1': 36.71, 'D#1': 38.89, 'E1': 41.20, 'F1': 43.65, 'F#1': 46.25, 'G1': 49.0, 'G#1': 51.91, 'A1': 55.0, 'A#1': 58.27, 'B1': 61.74,
'C2': 65.41, 'C#2': 69.3, 'D2': 73.42, 'D#2': 77.78, 'E2': 82.41, 'F2': 87.31, 'F#2': 92.5, 'G2': 98.0, 'G#2': 103.83, 'A2': 110.0, 'A#2': 116.54, 'B2': 123.47,
'C3': 130.81, 'C#3': 138.59, 'D3': 146.83, 'D#3': 155.56, 'E3': 164.81, 'F3': 174.62, 'F#3': 185.0, 'G3': 196.0, 'G#3': 207.65, 'A3': 220.0, 'A#3': 233.08, 'B3': 246.94,
'C4': 261.62, 'C#4': 277.19, 'D4': 293.67, 'D#4': 311.12, 'E4': 329.62, 'F4': 349.23, 'F#4': 370.0, 'G4': 392.0, 'G#4': 415.31, 'A4': 440.0, 'A#4': 466.17, 'B4': 493.88,
'C5': 523.25, 'C#5': 554.37, 'D5': 587.33, 'D#5': 622.25, 'E5': 659.25, 'F5': 698.46, 'F#5': 739.99, 'G5': 783.99, 'G#5': 830.61, 'A5': 880.0, 'A#5': 932.33, 'B5': 987.77,
'C6': 1046.5, 'C#6': 1108.74, 'D6': 1174.66, 'D#6': 1244.5, 'E6': 1318.5, 'F6': 1396.92, 'F#6': 1479.98, 'G6': 1567.98, 'G#6': 1661.22, 'A6': 1760.0, 'A#6': 1864.66,'B6': 1975.54,
'C7': 2093.0, 'C#7': 2217.48, 'D7': 2349.32, 'D#7': 2489.0, 'E7': 2637.0, 'F7': 2793.84, 'F#7': 2959.96, 'G7': 3135.96, 'G#7': 3322.44,'A7': 3520.0, 'A#7': 3729.32, 'B7': 3951.08,
'C8': 4186.0, 'C#8': 4434.96, 'D8': 4698.64, 'D#8': 4978.0, 'E8': 5274.0, 'F8': 5587.68, 'F#8': 5919.92, 'G8': 6271.92, 'G#8': 6644.88, 'A8': 7040.0, 'A#8': 7458.64, 'B8': 7902.16,
'.': 0
}
# Main interactive menu. NOTE(review): the helper functions referenced below
# (getParamsSineWave, getParamsSong, getParamsFile, generateSineWave,
# generateSong, playAudio, plot, saveFile, playAudioFromFile, plotFromFile)
# are defined elsewhere in the original module and are not visible in this
# excerpt -- confirm before editing.
choice1 = int(input('Select an option:\n1 - Generate sine wave\n2 - Generate song\n3 - Load wav file\n\nYour choice (1,2 or 3): '))
if choice1 not in [1,2,3]: raise ValueError('Invalid choice: %i' %choice1)
# Dispatch table: prompt the user for the parameters of the chosen mode.
options = {1: getParamsSineWave, 2:getParamsSong, 3:getParamsFile}
param = options[choice1]()
system('cls||clear')  # clear the terminal on Windows (cls) and Unix (clear)
# Action menus; saving (option 3 of dialog) only applies to generated audio.
dialog = 'Select an option:\n1 - Play\n2 - Plot\n3 - Save\n4 - Exit\n\nYour choice (1,2,3 or 4): '
dialog2 = 'Select an option:\n1 - Play\n2 - Plot\n3 - Exit\n\nYour choice (1,2 or 3): '
# Action loop: regenerate the audio each pass, then play/plot/save it.
while True:
    choice2 = int(input(dialog)) if choice1 in [1,2] else int(input(dialog2))
    if choice1 in [1,2]:
        # Re-generate the waveform for the selected mode on every iteration.
        dataSine = generateSineWave(*param.values()) if choice1 == 1 else None
        dataSong = generateSong(*param.values()) if choice1 == 2 else None
        if choice2 == 1:
            playAudio(dataSine, param['samplingFreq']) if choice1 == 1 else playAudio(dataSong,param['samplingFreq'])
        elif choice2 == 2:
            plot(dataSine, samplingFreq = param['samplingFreq']) if choice1 == 1 else plot(dataSong, samplingFreq = param['samplingFreq'])
        elif choice2 == 3:
            fileName = input('File name: ')
            saveFile(fileName,dataSine if choice1 == 1 else dataSong,param['samplingFreq'])
        elif choice2 == 4:
            break
    elif choice1 == 3:
        # Mode 3 works directly on the parameters loaded from the wav file.
        if choice2 == 1:
            playAudioFromFile(param)
        elif choice2 == 2:
            plotFromFile(param)
        elif choice2 == 3:
            break
    system("cls||clear")
p.terminate()  # release the PyAudio instance created at module import
97e7c3ef3fb80b92eda0926518e235c327df3ae0 | 1,603 | py | Python | setup.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | setup.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | setup.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | from setuptools import setup
from setuptools import find_packages
from lagom.version import __version__
# Read the README so it can be published as the PyPI long description.
with open('README.md', 'r') as f:
    long_description = f.read()

setup(name='lagom',
      version=__version__,
      author='Xingdong Zuo',
      author_email='zuoxingdong@hotmail.com',
      description='lagom: A light PyTorch infrastructure to quickly prototype reinforcement learning algorithms.',
      # Long description from README markdown, shown on the Python Package Index
      long_description=long_description,
      long_description_content_type='text/markdown',
      url='https://github.com/zuoxingdong/lagom',
      # Runtime dependencies installed alongside the package
      install_requires=['numpy',
                        'scipy',
                        'pandas',
                        'matplotlib',
                        'seaborn',
                        'scikit-image',
                        'jupyterlab',
                        'gym',
                        'cma'],
      tests_require=['pytest'],
      # Only Python 3+
      python_requires='>=3',
      # List all lagom packages (folders with __init__.py), needed to distribute a release
      packages=find_packages(),
      # tell pip some metadata (e.g. Python version, OS etc.)
      classifiers=['Programming Language :: Python :: 3',
                   'License :: OSI Approved :: MIT License',
                   'Operating System :: OS Independent',
                   'Natural Language :: English',
                   'Topic :: Scientific/Engineering :: Artificial Intelligence']
      )
97e804ef9c7c1c0635aab0477304f63f5daafe96 | 2,046 | py | Python | plugins_inactive/plugin_wikipediasearch.py | ademaro/Irene-Voice-Assistant | 34a71892258d993dc227e6653281444f091e86ae | [
"MIT"
] | null | null | null | plugins_inactive/plugin_wikipediasearch.py | ademaro/Irene-Voice-Assistant | 34a71892258d993dc227e6653281444f091e86ae | [
"MIT"
] | null | null | null | plugins_inactive/plugin_wikipediasearch.py | ademaro/Irene-Voice-Assistant | 34a71892258d993dc227e6653281444f091e86ae | [
"MIT"
] | null | null | null | import os
import time
import pyautogui
# from voiceassmain import play_voice_assistant_speech
from vacore import VACore
# based on EnjiRouz realization https://github.com/EnjiRouz/Voice-Assistant-App/blob/master/app.py
#
| 33.540984 | 118 | 0.655425 |
97e922fd511e37dd6ba6caa81bbded4c80d22dc7 | 316 | py | Python | todo/management/urls.py | Sanguet/todo-challenge | 8eabc02081e7ce6b33408558d4a4a39edee3944c | [
"MIT"
] | null | null | null | todo/management/urls.py | Sanguet/todo-challenge | 8eabc02081e7ce6b33408558d4a4a39edee3944c | [
"MIT"
] | null | null | null | todo/management/urls.py | Sanguet/todo-challenge | 8eabc02081e7ce6b33408558d4a4a39edee3944c | [
"MIT"
] | null | null | null | # Django
from django.urls import include, path
# Django REST Framework
from rest_framework.routers import DefaultRouter
# Views
from .views import tasks as task_views
# DRF router auto-generates the CRUD routes for the Task viewset
# (list/retrieve/create/update/destroy under the 'tasks' prefix).
router = DefaultRouter()
router.register(r'tasks', task_views.TaskViewSet, basename='task')
# Expose the router-generated URLs at the app root.
urlpatterns = [
    path('', include(router.urls))
]
| 19.75 | 66 | 0.762658 |
97e9830408b6514215e19bea044829eb96f15f7c | 7,936 | py | Python | dnd5e/items.py | MegophrysNasuta/dnd5e | 431c0c219052ddf5c62a500bd14f17fab3574648 | [
"MIT"
] | null | null | null | dnd5e/items.py | MegophrysNasuta/dnd5e | 431c0c219052ddf5c62a500bd14f17fab3574648 | [
"MIT"
] | null | null | null | dnd5e/items.py | MegophrysNasuta/dnd5e | 431c0c219052ddf5c62a500bd14f17fab3574648 | [
"MIT"
] | null | null | null | import enum
from typing import Any, List, Optional, Tuple
RangeIncrement = Tuple[int, int]
    def __str__(self):
        """Return a representation like '<ClassName: name [range] damage (type)>'.

        The range segment is included only when the weapon has a range
        (``self.has_range``).
        """
        str_rep = ['<%s: %s']  # format fragments, joined and %-filled at the end
        str_rep_contents = [self.__class__.__name__, self.name]
        if self.has_range:
            # Ranged weapons additionally show their range increment.
            str_rep.append(' %s')
            str_rep_contents.append(self.range_increment)
        str_rep.append(' %s (%s)>')
        # damage plus the damage type's label (presumably an enum's .value -- TODO confirm)
        str_rep_contents.extend([self.damage, self.damage_type.value])
        return ''.join(str_rep) % tuple(str_rep_contents)
class SimpleWeapon(Weapon):
    """Marker subclass of Weapon; defines no additional behavior of its own."""
    pass
class MartialWeapon(Weapon):
    """Marker subclass of Weapon; defines no additional behavior of its own."""
    pass
| 33.344538 | 79 | 0.606981 |
97eb5eb44132b5d87929c59ff9f174afa27e84b4 | 7,094 | py | Python | dbd/cli/dbdcli.py | AlexRogalskiy/dbd | ac2c6fb673861321b23fbf2a57d9e39fa5cb5352 | [
"BSD-3-Clause"
] | 33 | 2022-01-09T09:32:17.000Z | 2022-03-05T18:52:11.000Z | dbd/cli/dbdcli.py | zsvoboda/dbd | ac2c6fb673861321b23fbf2a57d9e39fa5cb5352 | [
"BSD-3-Clause"
] | 2 | 2022-02-16T19:14:13.000Z | 2022-02-16T19:14:34.000Z | dbd/cli/dbdcli.py | zsvoboda/dbd | ac2c6fb673861321b23fbf2a57d9e39fa5cb5352 | [
"BSD-3-Clause"
] | null | null | null | import importlib.metadata
import logging
import os
import shutil
from typing import Dict, Any, List
import click
from sqlalchemy import text
from dbd.log.dbd_exception import DbdException
from dbd.config.dbd_profile import DbdProfile
from dbd.config.dbd_project import DbdProject
from dbd.executors.model_executor import ModelExecutor, InvalidModelException
from dbd.log.dbd_logger import setup_logging
log = logging.getLogger(__name__)
this_script_dir = os.path.dirname(__file__)
def print_version():
    """
    Prints the installed DBD package version to stdout via click.

    The version string is read from the installed package metadata
    (importlib.metadata), not from a hard-coded constant.
    """
    click.echo(f"You're using DBD version {importlib.metadata.version('dbd')}.")
# noinspection PyUnusedLocal
def __echo_validation_errors(validation_errors: Dict[str, Any]):
    """
    Top level function for printing validation errors.

    :param validation_errors: nested dict of validation results; values may
        be strings, lists, or further dicts (printed recursively, indented)
    :return: None (output is written to stdout via click)
    """
    __echo_validation_level(validation_errors)
class InvalidValidationErrorStructure(DbdException):
    """Raised when a validation-result value is not a str, list, or dict."""
    pass
def __echo_validation_level(level_validation_errors: Dict[str, Any], indent: int = 0):
    """
    Print one level of a validation-result dict, recursing into nested dicts.

    Each line is prefixed with two spaces per nesting level.

    :param level_validation_errors: dict whose values are strings, lists, or
        nested dicts of the same shape
    :param indent: current nesting depth
    """
    padding = ' ' * (indent * 2)
    for key, value in level_validation_errors.items():
        if isinstance(value, str):
            click.echo(f"{padding}{key}:{value}")
        elif isinstance(value, Dict):
            # Print the key alone, then recurse one level deeper.
            click.echo(f"{padding}{key}:")
            __echo_validation_level(value, indent + 1)
        elif isinstance(value, List):
            click.echo(f"{padding}{key}:{str(value)}")
        else:
            raise InvalidValidationErrorStructure(f"Invalid validation result: '{value}' isn't supported type.")
| 34.436893 | 116 | 0.623203 |
97eb87e8a632182f8518b1d3afd5e6530ac981a5 | 9,901 | py | Python | bestiary/serializers.py | Itori/swarfarm | 7192e2d8bca093b4254023bbec42b6a2b1887547 | [
"Apache-2.0"
] | 66 | 2017-09-11T04:46:00.000Z | 2021-03-13T00:02:42.000Z | bestiary/serializers.py | Itori/swarfarm | 7192e2d8bca093b4254023bbec42b6a2b1887547 | [
"Apache-2.0"
] | 133 | 2017-09-24T21:28:59.000Z | 2021-04-02T10:35:31.000Z | bestiary/serializers.py | Itori/swarfarm | 7192e2d8bca093b4254023bbec42b6a2b1887547 | [
"Apache-2.0"
] | 28 | 2017-08-30T19:04:32.000Z | 2020-11-16T04:09:00.000Z | from rest_framework import serializers
from bestiary import models
| 31.233438 | 122 | 0.620644 |
97ec6821afa2d1990aea0fcfa7884edc560b6cc4 | 56,761 | py | Python | Code/ConvNetAbel.py | abel-gr/AbelNN | e9f54a6a3844a504ff82e4bae97d43064834e90a | [
"MIT"
] | 1 | 2021-11-05T16:01:15.000Z | 2021-11-05T16:01:15.000Z | Code/ConvNetAbel.py | abel-gr/AbelNN | e9f54a6a3844a504ff82e4bae97d43064834e90a | [
"MIT"
] | null | null | null | Code/ConvNetAbel.py | abel-gr/AbelNN | e9f54a6a3844a504ff82e4bae97d43064834e90a | [
"MIT"
] | null | null | null | # Copyright Abel Garcia. All Rights Reserved.
# https://github.com/abel-gr/AbelNN
import numpy as np
import copy as copy
import random
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pylab import text
import math
| 37.590066 | 233 | 0.448847 |
97ef61709a2ecbbabd5edf5fdc1f79875ed56c5b | 1,365 | py | Python | trading_ig/config.py | schwankner/ig-markets-api-python-library | 7a6add860e0abefcc252da232524e8ad0be86692 | [
"BSD-3-Clause"
] | 1 | 2021-03-01T09:51:59.000Z | 2021-03-01T09:51:59.000Z | trading_ig/config.py | schwankner/ig-markets-api-python-library | 7a6add860e0abefcc252da232524e8ad0be86692 | [
"BSD-3-Clause"
] | null | null | null | trading_ig/config.py | schwankner/ig-markets-api-python-library | 7a6add860e0abefcc252da232524e8ad0be86692 | [
"BSD-3-Clause"
] | 1 | 2022-01-04T21:17:10.000Z | 2022-01-04T21:17:10.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import logging
ENV_VAR_ROOT = "IG_SERVICE"               # prefix for configuration environment variables
CONFIG_FILE_NAME = "trading_ig_config.py" # optional user-supplied config module

logger = logging.getLogger(__name__)

# Configuration is resolved in two steps: first try to import a user-provided
# `trading_ig_config.py`; if that fails, fall back to environment variables
# prefixed with `IG_SERVICE_...`.
try:
    from trading_ig_config import config
    logger.info("import config from %s" % CONFIG_FILE_NAME)
except Exception:
    logger.warning("can't import config from config file")
    try:
        # ConfigEnvVar is defined earlier in this module.
        config = ConfigEnvVar(ENV_VAR_ROOT)
        logger.info("import config from environment variables '%s_...'"
                    % ENV_VAR_ROOT)
    except Exception:
        logger.warning("can't import config from environment variables")
        # BUG FIX: the original did `raise("""...""" % ...)`, i.e. tried to
        # raise a plain string, which itself fails with "TypeError: exceptions
        # must derive from BaseException" and hides the intended message.
        # Raise a real exception carrying the same message instead.
        raise ImportError("""Can't import config - you might create a '%s' filename or use
    environment variables such as '%s_...'""" % (CONFIG_FILE_NAME, ENV_VAR_ROOT))
| 29.673913 | 78 | 0.650549 |
97ef67beb062520b730797c508d9465eec545451 | 6,434 | py | Python | train.py | jmlipman/MedicDeepLabv3Plus | 4eb5c6c21765db24502d434d01c0ee9b9fd66b27 | [
"MIT"
] | 1 | 2021-11-23T16:41:24.000Z | 2021-11-23T16:41:24.000Z | train.py | jmlipman/MedicDeepLabv3Plus | 4eb5c6c21765db24502d434d01c0ee9b9fd66b27 | [
"MIT"
] | null | null | null | train.py | jmlipman/MedicDeepLabv3Plus | 4eb5c6c21765db24502d434d01c0ee9b9fd66b27 | [
"MIT"
] | 1 | 2021-09-08T02:02:11.000Z | 2021-09-08T02:02:11.000Z | # Example usage:
# python train.py --device cuda --epochs 10 --input /home/miguelv/data/in/train/ --output /home/miguelv/data/out/delete/test/25/
import os, time, torch, json
import numpy as np
import nibabel as nib
from lib.utils import *
from lib.losses import Loss
from torch.utils.data import DataLoader
from datetime import datetime
from lib.models.MedicDeepLabv3Plus import MedicDeepLabv3Plus
from lib.data.DataWrapper import DataWrapper
def get_arguments():
    """Parses and validates the command-line arguments.

    Side effects:
        * Creates a new numbered run subfolder inside --output and rewrites
          `parsed.output` to point at it.
        * Normalizes --gpu into `parsed.device` ("cuda:N" or "cpu").
        * Splits --val_metrics into a list.

    Returns:
        argparse.Namespace with the validated options.

    Raises:
        Exception: if a provided path, GPU id, or validation metric is invalid.
    """
    parser = argparse.ArgumentParser()

    # Data
    parser.add_argument("--input", type=str, required=True,
            help="Directory with the data for optimizing MedicDeepLabv3+")

    # Training
    parser.add_argument("--epochs", type=int, default=300,
            help="Epochs. If 0: only evaluate")
    parser.add_argument("--batch_size", type=int, default=1,
            help="Batch size")
    parser.add_argument("--lr", type=float, default="1e-4",
            help="Learning rate")
    parser.add_argument("--wd", type=float, default="0",
            help="Weight decay")
    parser.add_argument("--filters", type=int, default=32,
            help="Number of filters (fewer filters -> lower GPU requirements)")

    # Validation
    parser.add_argument("--validation", type=str, default="",
            help="Directory with the data for validation")
    parser.add_argument("--val_interval", type=int, default=1,
            help="After how many epochs data is validated")
    parser.add_argument("--val_metrics", type=str, default="dice",
            help="List of metrics to measure during validation")

    # Other
    parser.add_argument("--output", type=str, required=True,
            help="Output directory (if it doesn't exist, it will create it)")
    parser.add_argument("--gpu", type=int, default=0, dest="device",
            help="GPU Device. Write -1 if no GPU is available")
    parser.add_argument("--model_state", type=str, default="",
            help="File that contains the saved parameters of the model")

    parsed = parser.parse_args()

    # --input must be an existing folder
    if not os.path.isdir(parsed.input):
        raise Exception("The input folder `" + parsed.input + "` does not exist")

    # --output: results go into a new numbered subfolder of the given path
    if os.path.exists(parsed.output):
        if os.path.isfile(parsed.output):
            raise Exception("The provided path for the --output `" + parsed.output + "` corresponds to an existing file. Provide a non-existing path or a folder.")
        elif os.path.isdir(parsed.output):
            # FIX: use max()+1 instead of len()+1 so that gaps in the
            # numbering (e.g. a deleted run folder) cannot make the new
            # folder collide with an existing one.
            runs = [int(f) for f in os.listdir(parsed.output) if f.isdigit()]
            parsed.output = os.path.join(parsed.output, str(max(runs, default=0) + 1), "")
            os.makedirs(parsed.output)
        else:
            raise Exception("The provided path for the --output `" + parsed.output + "` is invalid. Provide a non-existing path or a folder.")
    else:
        parsed.output = os.path.join(parsed.output, "1", "")
        os.makedirs(parsed.output)

    # --validation is optional; when empty, validation is skipped entirely
    if parsed.validation != "" and not os.path.isdir(parsed.validation):
        raise Exception("The validation folder `" + parsed.validation + "` does not exist")
    if parsed.validation == "":
        print("> Note: No validation data was provided, so validation won't be done during MedicDeepLabv3+ optimization")

    # --gpu: translate the GPU index into a torch device string
    if parsed.device >= torch.cuda.device_count():
        if torch.cuda.device_count() == 0:
            print("> No available GPUs. Add --gpu -1 to not use GPU. NOTE: This may take FOREVER to run.")
        else:
            print("> Available GPUs:")
            for i in range(torch.cuda.device_count()):
                print("  > GPU #"+str(i)+" ("+torch.cuda.get_device_name(i)+")")
        raise Exception("The GPU #"+str(parsed.device)+" does not exist. Check available GPUs.")

    if parsed.device > -1:
        parsed.device = "cuda:"+str(parsed.device)
    else:
        parsed.device = "cpu"

    # Metrics that can be computed during validation
    allowed_metrics = ["dice", "HD", "compactness"]

    # --val_metrics is a comma-separated list; every entry must be allowed
    parsed.val_metrics = parsed.val_metrics.split(",")
    for m in parsed.val_metrics:
        if not m in allowed_metrics:
            raise Exception("Wrong --val_metrics: "+str(m)+". Only allowed: "+str(allowed_metrics))

    return parsed
if __name__ == "__main__":
    # Parse and validate the command-line arguments (this also creates the
    # numbered output folder and normalizes the --gpu option into a device).
    args = get_arguments()

    # Run the training entry point. NOTE(review): `main` is not defined in
    # this excerpt -- presumably provided via `from lib.utils import *` or
    # defined elsewhere in this module; confirm.
    main(args)
| 38.526946 | 164 | 0.63553 |
97efd3b3f7f5f7bf285460221c0433426399a499 | 2,053 | py | Python | src/graph_util.py | oonat/inverse-distance-weighted-trust-based-recommender | 3f559f3e7dbc565da373f6297362ddf307b2d0ec | [
"BSD-3-Clause"
] | null | null | null | src/graph_util.py | oonat/inverse-distance-weighted-trust-based-recommender | 3f559f3e7dbc565da373f6297362ddf307b2d0ec | [
"BSD-3-Clause"
] | null | null | null | src/graph_util.py | oonat/inverse-distance-weighted-trust-based-recommender | 3f559f3e7dbc565da373f6297362ddf307b2d0ec | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from toml_parser import Parser
from scipy.sparse.csgraph import dijkstra, csgraph_from_dense
from sklearn.metrics.pairwise import nan_euclidean_distances
from math import sqrt
| 26.320513 | 113 | 0.754506 |
97efd442d5baa89669000d346b5c499ecd9f4c0b | 203 | py | Python | qtapps/skrf_qtwidgets/analyzers/analyzer_rs_zva.py | mike0164/scikit-rf | 0af25754b097ee24089ea7e0eacde426a51df563 | [
"BSD-3-Clause"
] | 379 | 2015-01-25T12:19:19.000Z | 2022-03-29T14:01:07.000Z | qtapps/skrf_qtwidgets/analyzers/analyzer_rs_zva.py | mike0164/scikit-rf | 0af25754b097ee24089ea7e0eacde426a51df563 | [
"BSD-3-Clause"
] | 456 | 2015-01-06T19:15:55.000Z | 2022-03-31T06:42:57.000Z | qtapps/skrf_qtwidgets/analyzers/analyzer_rs_zva.py | mike0164/scikit-rf | 0af25754b097ee24089ea7e0eacde426a51df563 | [
"BSD-3-Clause"
] | 211 | 2015-01-06T17:14:06.000Z | 2022-03-31T01:36:00.000Z | from skrf.vi.vna import rs_zva
| 20.3 | 44 | 0.665025 |
97efe95631dbd9f43d8fc44a21511eb903a34116 | 1,507 | py | Python | rules/taxonomic_classification/utils.py | dahak-metagenomics/taco-taxonomic-classification | 854cae4f1b2427746a1faa6a0e0aefbfb11c5523 | [
"BSD-3-Clause"
] | null | null | null | rules/taxonomic_classification/utils.py | dahak-metagenomics/taco-taxonomic-classification | 854cae4f1b2427746a1faa6a0e0aefbfb11c5523 | [
"BSD-3-Clause"
] | null | null | null | rules/taxonomic_classification/utils.py | dahak-metagenomics/taco-taxonomic-classification | 854cae4f1b2427746a1faa6a0e0aefbfb11c5523 | [
"BSD-3-Clause"
] | null | null | null | def container_image_is_external(biocontainers, app):
"""
Return a boolean: is this container going to be run
using an external URL (quay.io/biocontainers),
or is it going to use a local, named Docker image?
"""
d = biocontainers[app]
if (('use_local' in d) and (d['use_local'] is True)):
# This container does not use an external url
return False
else:
# This container uses a quay.io url
return True
def container_image_name(biocontainers, app):
"""
Get the name of a container image for app,
using params dictionary biocontainers.
Verification:
- Check that the user provides 'local' if 'use_local' is True
- Check that the user provides both 'quayurl' and 'version'
"""
if container_image_is_external(biocontainers,app):
try:
qurl = biocontainers[k]['quayurl']
qvers = biocontainers[k]['version']
quayurls.append(qurl + ":" + qvers)
return quayurls
except KeyError:
err = "Error: quay.io URL for %s biocontainer "%(k)
err += "could not be determined"
raise Exception(err)
else:
try:
return biocontainers[app]['local']
except KeyError:
err = "Error: the parameters provided specify a local "
err += "container image should be used for %s, but none "%(app)
err += "was specified using the 'local' key."
raise Exception(err)
| 33.488889 | 75 | 0.606503 |
97f060a2b95bbc614a022bf67e45afe532ebb45d | 37,531 | py | Python | Contents/Libraries/Shared/guessit/rules/properties/episodes.py | slvxstar/Kinopoisk.bundle | dcb96c870c3a96fcf33b8d13d79d47f0a7cbf5fb | [
"MIT"
] | 7 | 2021-02-11T08:03:00.000Z | 2022-01-23T22:33:32.000Z | Contents/Libraries/Shared/guessit/rules/properties/episodes.py | slvxstar/Kinopoisk.bundle | dcb96c870c3a96fcf33b8d13d79d47f0a7cbf5fb | [
"MIT"
] | null | null | null | Contents/Libraries/Shared/guessit/rules/properties/episodes.py | slvxstar/Kinopoisk.bundle | dcb96c870c3a96fcf33b8d13d79d47f0a7cbf5fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
episode, season, disc, episode_count, season_count and episode_details properties
"""
import copy
from collections import defaultdict
from rebulk import Rebulk, RemoveMatch, Rule, AppendMatch, RenameMatch
from rebulk.match import Match
from rebulk.remodule import re
from rebulk.utils import is_iterable
from .title import TitleFromPosition
from ..common import dash, alt_dash, seps, seps_no_fs
from ..common.formatters import strip
from ..common.numeral import numeral, parse_numeral
from ..common.pattern import is_disabled
from ..common.validators import compose, seps_surround, seps_before, int_coercable
from ...reutils import build_or_pattern
def episodes(config):
"""
Builder for rebulk object.
:param config: rule configuration
:type config: dict
:return: Created Rebulk object
:rtype: Rebulk
"""
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
def is_season_episode_disabled(context):
"""Whether season and episode rules should be enabled."""
return is_disabled(context, 'episode') or is_disabled(context, 'season')
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True)
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator', 'episodeMarker', 'seasonMarker'])
episode_max_range = config['episode_max_range']
season_max_range = config['season_max_range']
def episodes_season_chain_breaker(matches):
"""
Break chains if there's more than 100 offset between two neighbor values.
:param matches:
:type matches:
:return:
:rtype:
"""
eps = matches.named('episode')
if len(eps) > 1 and abs(eps[-1].value - eps[-2].value) > episode_max_range:
return True
seasons = matches.named('season')
if len(seasons) > 1 and abs(seasons[-1].value - seasons[-2].value) > season_max_range:
return True
return False
rebulk.chain_defaults(chain_breaker=episodes_season_chain_breaker)
def season_episode_conflict_solver(match, other):
"""
Conflict solver for episode/season patterns
:param match:
:param other:
:return:
"""
if match.name != other.name:
if match.name == 'episode' and other.name == 'year':
return match
if match.name in ('season', 'episode'):
if other.name in ('video_codec', 'audio_codec', 'container', 'date'):
return match
if (other.name == 'audio_channels' and 'weak-audio_channels' not in other.tags
and not match.initiator.children.named(match.name + 'Marker')) or (
other.name == 'screen_size' and not int_coercable(other.raw)):
return match
if other.name in ('season', 'episode') and match.initiator != other.initiator:
if (match.initiator.name in ('weak_episode', 'weak_duplicate')
and other.initiator.name in ('weak_episode', 'weak_duplicate')):
return '__default__'
for current in (match, other):
if 'weak-episode' in current.tags or 'x' in current.initiator.raw.lower():
return current
return '__default__'
season_words = config['season_words']
episode_words = config['episode_words']
of_words = config['of_words']
all_words = config['all_words']
season_markers = config['season_markers']
season_ep_markers = config['season_ep_markers']
disc_markers = config['disc_markers']
episode_markers = config['episode_markers']
range_separators = config['range_separators']
weak_discrete_separators = list(sep for sep in seps_no_fs if sep not in range_separators)
strong_discrete_separators = config['discrete_separators']
discrete_separators = strong_discrete_separators + weak_discrete_separators
max_range_gap = config['max_range_gap']
def ordering_validator(match):
"""
Validator for season list. They should be in natural order to be validated.
episode/season separated by a weak discrete separator should be consecutive, unless a strong discrete separator
or a range separator is present in the chain (1.3&5 is valid, but 1.3-5 is not valid and 1.3.5 is not valid)
"""
values = match.children.to_dict()
if 'season' in values and is_iterable(values['season']):
# Season numbers must be in natural order to be validated.
if not list(sorted(values['season'])) == values['season']:
return False
if 'episode' in values and is_iterable(values['episode']):
# Season numbers must be in natural order to be validated.
if not list(sorted(values['episode'])) == values['episode']:
return False
def is_consecutive(property_name):
"""
Check if the property season or episode has valid consecutive values.
:param property_name:
:type property_name:
:return:
:rtype:
"""
previous_match = None
valid = True
for current_match in match.children.named(property_name):
if previous_match:
match.children.previous(current_match,
lambda m: m.name == property_name + 'Separator')
separator = match.children.previous(current_match,
lambda m: m.name == property_name + 'Separator', 0)
if separator.raw not in range_separators and separator.raw in weak_discrete_separators:
if not 0 < current_match.value - previous_match.value <= max_range_gap + 1:
valid = False
if separator.raw in strong_discrete_separators:
valid = True
break
previous_match = current_match
return valid
return is_consecutive('episode') and is_consecutive('season')
# S01E02, 01x02, S01S02S03
rebulk.chain(formatter={'season': int, 'episode': int},
tags=['SxxExx'],
abbreviations=[alt_dash],
children=True,
private_parent=True,
validate_all=True,
validator={'__parent__': ordering_validator},
conflict_solver=season_episode_conflict_solver,
disabled=is_season_episode_disabled) \
.regex(build_or_pattern(season_markers, name='seasonMarker') + r'(?P<season>\d+)@?' +
build_or_pattern(episode_markers + disc_markers, name='episodeMarker') + r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}).repeater('+') \
.regex(build_or_pattern(episode_markers + disc_markers + discrete_separators + range_separators,
name='episodeSeparator',
escape=True) +
r'(?P<episode>\d+)').repeater('*') \
.chain() \
.regex(r'(?P<season>\d+)@?' +
build_or_pattern(season_ep_markers, name='episodeMarker') +
r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.chain() \
.regex(r'(?P<season>\d+)@?' +
build_or_pattern(season_ep_markers, name='episodeMarker') +
r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.regex(build_or_pattern(season_ep_markers + discrete_separators + range_separators,
name='episodeSeparator',
escape=True) +
r'(?P<episode>\d+)').repeater('*') \
.chain() \
.regex(build_or_pattern(season_markers, name='seasonMarker') + r'(?P<season>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.regex(build_or_pattern(season_markers + discrete_separators + range_separators,
name='seasonSeparator',
escape=True) +
r'(?P<season>\d+)').repeater('*')
# episode_details property
for episode_detail in ('Special', 'Pilot', 'Unaired', 'Final'):
rebulk.string(episode_detail, value=episode_detail, name='episode_details',
disabled=lambda context: is_disabled(context, 'episode_details'))
def validate_roman(match):
"""
Validate a roman match if surrounded by separators
:param match:
:type match:
:return:
:rtype:
"""
if int_coercable(match.raw):
return True
return seps_surround(match)
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator', 'episodeMarker', 'seasonMarker'],
validate_all=True, validator={'__parent__': seps_surround}, children=True, private_parent=True,
conflict_solver=season_episode_conflict_solver)
rebulk.chain(abbreviations=[alt_dash],
formatter={'season': parse_numeral, 'count': parse_numeral},
validator={'__parent__': compose(seps_surround, ordering_validator),
'season': validate_roman,
'count': validate_roman},
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'season')) \
.defaults(validator=None) \
.regex(build_or_pattern(season_words, name='seasonMarker') + '@?(?P<season>' + numeral + ')') \
.regex(r'' + build_or_pattern(of_words) + '@?(?P<count>' + numeral + ')').repeater('?') \
.regex(r'@?' + build_or_pattern(range_separators + discrete_separators + ['@'],
name='seasonSeparator', escape=True) +
r'@?(?P<season>\d+)').repeater('*')
rebulk.regex(build_or_pattern(episode_words, name='episodeMarker') + r'-?(?P<episode>\d+)' +
r'(?:v(?P<version>\d+))?' +
r'(?:-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4
abbreviations=[dash], formatter={'episode': int, 'version': int, 'count': int},
disabled=lambda context: context.get('type') == 'episode' or is_disabled(context, 'episode'))
rebulk.regex(build_or_pattern(episode_words, name='episodeMarker') + r'-?(?P<episode>' + numeral + ')' +
r'(?:v(?P<version>\d+))?' +
r'(?:-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4
abbreviations=[dash],
validator={'episode': validate_roman},
formatter={'episode': parse_numeral, 'version': int, 'count': int},
disabled=lambda context: context.get('type') != 'episode' or is_disabled(context, 'episode'))
rebulk.regex(r'S?(?P<season>\d+)-?(?:xE|Ex|E|x)-?(?P<other>' + build_or_pattern(all_words) + ')',
tags=['SxxExx'],
abbreviations=[dash],
validator=None,
formatter={'season': int, 'other': lambda match: 'Complete'},
disabled=lambda context: is_disabled(context, 'season'))
# 12, 13
rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int},
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'(?P<episode>\d{2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{2})').repeater('*')
# 012, 013
rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int},
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'0(?P<episode>\d{1,2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])0(?P<episode>\d{1,2})').repeater('*')
# 112, 113
rebulk.chain(tags=['weak-episode'],
formatter={'episode': int, 'version': int},
name='weak_episode',
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'(?P<episode>\d{3,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{3,4})').repeater('*')
# 1, 2, 3
rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int},
disabled=lambda context: context.get('type') != 'episode' or is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'(?P<episode>\d)') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{1,2})').repeater('*')
# e112, e113, 1e18, 3e19
# TODO: Enhance rebulk for validator to be used globally (season_episode_validator)
rebulk.chain(formatter={'season': int, 'episode': int, 'version': int},
disabled=lambda context: is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'(?P<season>\d{1,2})?(?P<episodeMarker>e)(?P<episode>\d{1,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>e|x|-)(?P<episode>\d{1,4})').repeater('*')
# ep 112, ep113, ep112, ep113
rebulk.chain(abbreviations=[dash], formatter={'episode': int, 'version': int},
disabled=lambda context: is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'ep-?(?P<episode>\d{1,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>ep|e|x|-)(?P<episode>\d{1,4})').repeater('*')
# cap 112, cap 112_114
rebulk.chain(abbreviations=[dash],
tags=['see-pattern'],
formatter={'season': int, 'episode': int},
disabled=is_season_episode_disabled) \
.defaults(validator=None) \
.regex(r'(?P<seasonMarker>cap)-?(?P<season>\d{1,2})(?P<episode>\d{2})') \
.regex(r'(?P<episodeSeparator>-)(?P<season>\d{1,2})(?P<episode>\d{2})').repeater('?')
# 102, 0102
rebulk.chain(tags=['weak-episode', 'weak-duplicate'],
formatter={'season': int, 'episode': int, 'version': int},
name='weak_duplicate',
conflict_solver=season_episode_conflict_solver,
disabled=lambda context: (context.get('episode_prefer_number', False) or
context.get('type') == 'movie') or is_season_episode_disabled(context)) \
.defaults(validator=None) \
.regex(r'(?P<season>\d{1,2})(?P<episode>\d{2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>x|-)(?P<episode>\d{2})').repeater('*')
rebulk.regex(r'v(?P<version>\d+)', children=True, private_parent=True, formatter=int,
disabled=lambda context: is_disabled(context, 'version'))
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'])
# TODO: List of words
# detached of X count (season/episode)
rebulk.regex(r'(?P<episode>\d+)-?' + build_or_pattern(of_words) +
r'-?(?P<count>\d+)-?' + build_or_pattern(episode_words) + '?',
abbreviations=[dash], children=True, private_parent=True, formatter=int,
disabled=lambda context: is_disabled(context, 'episode'))
rebulk.regex(r'Minisodes?', name='episode_format', value="Minisode",
disabled=lambda context: is_disabled(context, 'episode_format'))
rebulk.rules(WeakConflictSolver, RemoveInvalidSeason, RemoveInvalidEpisode,
SeePatternRange(range_separators + ['_']),
EpisodeNumberSeparatorRange(range_separators),
SeasonSeparatorRange(range_separators), RemoveWeakIfMovie, RemoveWeakIfSxxExx,
RemoveWeakDuplicate, EpisodeDetailValidator, RemoveDetachedEpisodeNumber, VersionValidator,
RemoveWeak, RenameToAbsoluteEpisode, CountValidator, EpisodeSingleDigitValidator, RenameToDiscMatch)
return rebulk
| 43.640698 | 119 | 0.588154 |
97f09a874f39695917154d611858caf14ea0be1a | 76,767 | py | Python | cwinpy/heterodyne/heterodyne.py | nigeltrc72/cwinpy | f90cf46e20c4d5abd09dc0540d4694ca6d5d9b42 | [
"MIT"
] | 5 | 2021-02-25T13:04:43.000Z | 2022-01-15T22:37:33.000Z | cwinpy/heterodyne/heterodyne.py | nigeltrc72/cwinpy | f90cf46e20c4d5abd09dc0540d4694ca6d5d9b42 | [
"MIT"
] | 4 | 2021-02-24T12:17:50.000Z | 2021-12-09T16:41:33.000Z | cwinpy/heterodyne/heterodyne.py | nigeltrc72/cwinpy | f90cf46e20c4d5abd09dc0540d4694ca6d5d9b42 | [
"MIT"
] | 1 | 2021-02-24T11:40:32.000Z | 2021-02-24T11:40:32.000Z | """
Run heterodyne pre-processing of gravitational-wave data.
"""
import ast
import configparser
import copy
import os
import shutil
import signal
import sys
import tempfile
from argparse import ArgumentParser
import cwinpy
import numpy as np
from bilby_pipe.bilbyargparser import BilbyArgParser
from bilby_pipe.job_creation.dag import Dag
from bilby_pipe.utils import (
BilbyPipeError,
check_directory_exists_and_if_not_mkdir,
parse_args,
)
from configargparse import ArgumentError
from ..condor.hetnodes import HeterodyneInput, HeterodyneNode, MergeHeterodyneNode
from ..data import HeterodynedData
from ..info import (
ANALYSIS_SEGMENTS,
CVMFS_GWOSC_DATA_SERVER,
CVMFS_GWOSC_DATA_TYPES,
CVMFS_GWOSC_FRAME_CHANNELS,
HW_INJ,
HW_INJ_RUNTIMES,
HW_INJ_SEGMENTS,
RUNTIMES,
)
from ..parfile import PulsarParameters
from ..utils import (
LAL_BINARY_MODELS,
LAL_EPHEMERIS_TYPES,
check_for_tempo2,
initialise_ephemeris,
sighandler,
)
from .base import Heterodyne, generate_segments, remote_frame_cache
def create_heterodyne_parser():
"""
Create the argument parser.
"""
description = """\
A script to heterodyne raw gravitational-wave strain data based on the \
expected evolution of the gravitational-wave signal from a set of pulsars."""
parser = BilbyArgParser(
prog=sys.argv[0],
description=description,
ignore_unknown_config_file_keys=False,
allow_abbrev=False,
)
parser.add("--config", type=str, is_config_file=True, help="Configuration ini file")
parser.add(
"--version",
action="version",
version="%(prog)s {version}".format(version=cwinpy.__version__),
)
parser.add(
"--periodic-restart-time",
default=14400,
type=int,
help=(
"Time after which the job will be self-evicted with code 130. "
"After this, condor will restart the job. Default is 14400s. "
"This is used to decrease the chance of HTCondor hard evictions."
),
)
parser.add(
"--overwrite",
action="store_true",
default=False,
help=(
"Set this flag to make sure any previously generated heterodyned "
'files are overwritten. By default the analysis will "resume" '
"from where it left off (by checking whether output files, as set "
'using "--output" and "--label" arguments, already exist), such '
"as after forced Condor eviction for checkpointing purposes. "
"Therefore, this flag is needs to be explicitly given (the "
"default is False) if not wanting to use resume and overwrite "
"existing files."
),
)
dataparser = parser.add_argument_group("Data inputs")
dataparser.add(
"--starttime",
required=True,
type=int,
help=("The start time of the data to be heterodyned in GPS seconds."),
)
dataparser.add(
"--endtime",
required=True,
type=int,
help=("The end time of the data to be heterodyned in GPS seconds."),
)
dataparser.add(
"--stride",
default=3600,
type=int,
help=(
"The number of seconds to stride through the data (i.e., this "
"number of seconds of data will be read in in one go), Defaults "
"to 3600."
),
)
dataparser.add(
"--detector",
required=True,
type=str,
help=("The name of the detectors for which the data is to be heterodyned."),
)
dataparser.add(
"--frametype",
type=str,
help=(
'The "frame type" name of the data to be heterodyned. If this '
"is not given the correct data set will be attempted to be found "
"using the channel name."
),
)
dataparser.add(
"--channel",
required=True,
type=str,
help=(
'The "channel" within the gravitational-wave data file(s) '
'(either a GW frame ".gwf", or HDF5 file) containing the strain '
"data to be heterodyned. The channel name should contain the "
"detector name prefix as the first two characters followed by a "
'colon, e.g., "L1:GWOSC-4KHZ_R1_STRAIN"'
),
)
dataparser.add(
"--host",
type=str,
help=(
"The server name for finding the gravitational-wave data files. "
'Use "datafind.ligo.org:443" for open data available via CVMFS. '
"To use open data available from the GWOSC use "
'"https://www.gw-openscience.org".'
),
)
dataparser.add(
"--outputframecache",
type=str,
help=(
"If given this should give a file path to which a list of "
"gravitational-wave data file paths, as found by the code, will "
"be written. If not given then the file list will not be output."
),
)
dataparser.add(
"--appendframecache",
action="store_true",
default=False,
help=(
"If writing out the frame cache to a file, set this to True to "
"append to the file rather than overwriting. Default is False."
),
)
dataparser.add(
"--framecache",
help=(
"Provide a pregenerated cache of gravitational-wave files, either "
"as a single file, or a list of files. Alternatively, you can "
"supply a directory containing the files (which will be "
"searched recursively for gwf and then hdf5 files), which should "
'be used in conjunction with the "frametype" argument. If giving '
"a list, this should be in the form of a Python list, surrounded "
"by quotation marks, e.g., \"['file1.lcf','file2.lcf']\"."
),
)
dataparser.add(
"--heterodyneddata",
help=(
"A string, or dictionary of strings, containing the full file "
"path, or directory path, pointing the the location of "
"pre-heterodyned data. For a single pulsar a file path can be "
"given. For multiple pulsars a directory containing heterodyned "
"files (in HDF5 or txt format) can be given provided that within "
"it the file names contain the pulsar names as supplied in the "
'file input with "--pulsarfiles". Alternatively, a dictionary '
"can be supplied, keyed on the pulsar name, containing a single "
"file path or a directory path as above. If supplying a "
"directory, it can contain multiple heterodyned files for a each "
"pulsar and all will be used. If giving a dictionary it should be "
"surrounded by quotation marks."
),
)
segmentparser = parser.add_argument_group("Analysis segment inputs")
segmentparser.add(
"--segmentlist",
help=(
"Provide a list of data segment start and end times, as "
"list/tuple pairs in the list, or an ASCII text file containing "
"the segment start and end times in two columns. If a list, this "
"should be in the form of a Python list, surrounded by quotation "
'marks, e.g., "[(900000000,900086400),(900100000,900186400)]".'
),
)
segmentparser.add(
"--includeflags",
help=(
"If not providing a segment list then give a string, or list of "
"strings, giving the data DQ flags that will be used to generate "
"a segment list. Lists should be surrounded by quotation marks, "
"e.g., \"['L1:DMT-ANALYSIS_READY:1']\"."
),
)
segmentparser.add(
"--excludeflags",
help=(
"A string, or list of strings, giving the data DQ flags to "
"when generating a segment list. Lists should be surrounded by "
"quotation marks."
),
)
segmentparser.add(
"--outputsegmentlist",
type=str,
help=(
"If generating a segment list it will be output to the file "
"specified by this argument."
),
)
segmentparser.add(
"--appendsegmentlist",
action="store_true",
default=False,
help=(
"If generating a segment list set this to True to append to the "
'file specified by "--outputsegmentlist" rather than '
"overwriting. Default is False."
),
)
segmentparser.add("--segmentserver", type=str, help=("The segment database URL."))
pulsarparser = parser.add_argument_group("Pulsar inputs")
pulsarparser.add(
"--pulsarfiles",
action="append",
help=(
"This specifies the pulsars for which to heterodyne the data. It "
"can be either i) a string giving the path to an individual "
"pulsar Tempo(2)-style parameter file, ii) a string giving the "
"path to a directory containing multiple Tempo(2)-style parameter "
"files (the path will be recursively searched for any file with "
'the extension ".par"), iii) a list of paths to individual '
"pulsar parameter files, iv) a dictionary containing paths to "
"individual pulsars parameter files keyed to their names. If "
"instead, pulsar names are given rather than parameter files it "
"will attempt to extract an ephemeris for those pulsars from the "
"ATNF pulsar catalogue. If such ephemerides are available then "
"they will be used (notification will be given when this is "
"these cases). If providing a list or dictionary it should be "
"surrounded by quotation marks."
),
)
pulsarparser.add(
"--pulsars",
action="append",
help=(
"You can analyse only particular pulsars from those specified by "
'parameter files found through the "--pulsarfiles" argument by '
"passing a string, or list of strings, with particular pulsars "
"names to use."
),
)
outputparser = parser.add_argument_group("Data output inputs")
outputparser.add(
"--output",
help=(
"The base directory into which the heterodyned results will be "
"output. To specify explicit directory paths for individual "
"pulsars this can be a dictionary of directory paths keyed to the "
'pulsar name (in which case the "--label" argument will be used '
"to set the file name), or full file paths, which will be used in "
'place of the "--label" argument. If not given then the current'
"working directory will be used."
),
)
outputparser.add(
"--label",
help=(
"The output format for the heterodyned data files. These can be "
'format strings containing the keywords "psr" for the pulsar '
'name, "det" for the detector, "freqfactor" for the rotation '
'frequency scale factor used, "gpsstart" for the GPS start '
'time, and "gpsend" for the GPS end time. The extension should '
'be given as ".hdf", ".h5", or ".hdf5". E.g., the default '
'is "heterodyne_{psr}_{det}_{freqfactor}_{gpsstart}-{gpsend}.hdf".'
),
)
heterodyneparser = parser.add_argument_group("Heterodyne inputs")
heterodyneparser.add(
"--filterknee",
type=float,
help=(
"The knee frequency (Hz) of the low-pass filter applied after "
"heterodyning the data. This should only be given when "
"heterodying raw strain data and not if re-heterodyning processed "
"data. Default is 0.5 Hz."
),
)
heterodyneparser.add(
"--resamplerate",
type=float,
required=True,
help=(
"The rate in Hz at which to resample the data (via averaging) "
"after application of the heterodyne (and filter if applied)."
),
)
heterodyneparser.add(
"--freqfactor",
type=float,
help=(
"The factor applied to the pulsars rotational parameters when "
"defining the gravitational-wave phase evolution. For example, "
"the default value of 2 multiplies the phase evolution by 2 under "
"the assumption of a signal emitted from the l=m=2 quadrupole "
"mode of a rigidly rotating triaxial neutron star."
),
)
heterodyneparser.add(
"--crop",
type=int,
help=(
"The number of seconds to crop from the start and end of data "
"segments to remove filter impulse effects and issues prior to "
"lock-loss. Default is 60 seconds."
),
)
heterodyneparser.add(
"--includessb",
action="store_true",
default=False,
help=(
"Set this flag to include removing the modulation of the signal due to "
"Solar System motion and relativistic effects (e.g., Roemer, "
"Einstein, and Shapiro delay) during the heterodyne."
),
)
heterodyneparser.add(
"--includebsb",
action="store_true",
default=False,
help=(
"Set this flag to include removing the modulation of the signal "
"due to binary system motion and relativistic effects during the "
'heterodyne. To use this "--includessb" must also be set.'
),
)
heterodyneparser.add(
"--includeglitch",
action="store_true",
default=False,
help=(
"Set this flag to include removing the effects of the phase "
"evolution of any modelled pulsar glitches during the heterodyne."
),
)
heterodyneparser.add(
"--includefitwaves",
action="store_true",
default=False,
help=(
"Set this to True to include removing the phase evolution of a "
"series of sinusoids designed to model low-frequency timing noise "
"in the pulsar signal during the heterodyne."
),
)
heterodyneparser.add(
"--usetempo2",
action="store_true",
default=False,
help=(
"Set this to True to use Tempo2 (via libstempo) to calculate the "
"signal phase evolution. For this to be used v2.4.2 or greater of "
"libstempo must be installed. When using Tempo2 the "
'"--earthephemeris", "--sunephemeris" and "--timeephemeris" '
"arguments do not need to be supplied. This can only be used when "
"running the full heterodyne in one stage, but not for "
're-heterodyning previous data, as such all the "--include..." '
"arguments will be assumed to be True."
),
)
ephemerisparser = parser.add_argument_group("Solar system ephemeris inputs")
ephemerisparser.add(
"--earthephemeris",
help=(
'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing '
"to the location of a file containing that ephemeris for the "
"Earth. The dictionary must be supplied within quotation marks, "
"e.g., \"{'DE436':'earth_DE436.txt'}\". If a pulsar requires a "
"specific ephemeris that is not provided in this dictionary, then "
"the code will automatically attempt to find or download the "
"required file if available."
),
)
ephemerisparser.add(
"--sunephemeris",
help=(
'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing '
"to the location of a file containing that ephemeris for the "
"Sun. If a pulsar requires a specific ephemeris that is not "
"provided in this dictionary, then the code will automatically "
"attempt to find or download the required file if available."
),
)
ephemerisparser.add(
"--timeephemeris",
help=(
"A dictionary, keyed to time system name, which can be either "
'"TCB" or "TDB", pointing to the location of a file containing '
"that ephemeris for that time system. If a pulsar requires a "
"specific ephemeris that is not provided in this dictionary, then "
"the code will automatically attempt to find or download the "
"required file if available."
),
)
cfparser = parser.add_argument_group("Configuration inputs")
cfparser.add(
"--cwinpy-heterodyne-dag-config-file",
help=(
"A path to the cwinpy_heterodyne_dag configuration file can be "
"supplied if this was has been used to setup the heterodyne job."
),
)
return parser
def heterodyne(**kwargs):
"""
Run heterodyne within Python. See the
`class::~cwinpy.heterodyne.Heterodyne` class for the required arguments.
Returns
-------
het: `class::~cwinpy.heterodyne.Heterodyne`
The heterodyning class object.
"""
if "cli" in kwargs or "config" in kwargs:
if "cli" in kwargs:
kwargs.pop("cli")
# get command line arguments
parser = create_heterodyne_parser()
# parse config file or command line arguments
if "config" in kwargs:
cliargs = ["--config", kwargs["config"]]
else:
cliargs = sys.argv[1:]
try:
args, _ = parse_args(cliargs, parser)
except BilbyPipeError as e:
raise IOError("{}".format(e))
# convert args to a dictionary
hetkwargs = vars(args)
if "config" in kwargs:
# update with other keyword arguments
hetkwargs.update(kwargs)
else:
hetkwargs = kwargs
# check non-standard arguments that could be Python objects
nsattrs = [
"framecache",
"heterodyneddata",
"segmentlist",
"includeflags",
"excludeflags",
"pulsarfiles",
"pulsars",
"output",
"earthephemeris",
"sunephemeris",
"timeephemeris",
]
for attr in nsattrs:
value = hetkwargs.pop(attr, None)
if isinstance(value, str):
# check whether the value can be evaluated as a Python object
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
# if the value was a string within a string, e.g., '"[2.3]"',
# evaluate again just in case it contains a Python object!
if isinstance(value, str):
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
hetkwargs[attr] = value
elif value is not None:
hetkwargs[attr] = value
# check if pulsarfiles is a single entry list containing a dictionary
if isinstance(hetkwargs["pulsarfiles"], list):
if len(hetkwargs["pulsarfiles"]) == 1:
try:
value = ast.literal_eval(hetkwargs["pulsarfiles"][0])
if isinstance(value, dict):
# switch to passing the dictionary
hetkwargs["pulsarfiles"] = value
except SyntaxError:
pass
signal.signal(signal.SIGALRM, handler=sighandler)
signal.alarm(hetkwargs.pop("periodic_restart_time", 14400))
# remove any None values
for key in hetkwargs.copy():
if hetkwargs[key] is None:
hetkwargs.pop(key)
# convert "overwrite" to "resume"
hetkwargs["resume"] = not hetkwargs.pop("overwrite", False)
# remove "config" from hetkwargs
if "config" in hetkwargs:
hetkwargs.pop("config")
# set up the run
het = Heterodyne(**hetkwargs)
# heterodyne the data
het.heterodyne()
return het
def heterodyne_cli(**kwargs): # pragma: no cover
"""
Entry point to ``cwinpy_heterodyne`` script. This just calls
:func:`cwinpy.heterodyne.heterodyne`, but does not return any objects.
"""
kwargs["cli"] = True # set to show use of CLI
_ = heterodyne(**kwargs)
def create_heterodyne_merge_parser():
"""
Create the argument parser for merging script.
"""
description = "A script to merge multiple heterodyned data files."
parser = BilbyArgParser(
prog=sys.argv[0],
description=description,
ignore_unknown_config_file_keys=False,
allow_abbrev=False,
)
parser.add("--config", type=str, is_config_file=True, help="Configuration ini file")
parser.add(
"--version",
action="version",
version="%(prog)s {version}".format(version=cwinpy.__version__),
)
parser.add(
"--heterodynedfiles",
action="append",
type=str,
help=("A path, or list of paths, to heterodyned data files to merge together."),
)
parser.add(
"--output",
type=str,
help=("The output file for the merged heterodyned data."),
)
parser.add(
"--overwrite",
action="store_true",
help=("Set if wanting to overwrite an existing merged file."),
)
parser.add(
"--remove",
action="store_true",
help=("Set if wanting to delete individual files being merged."),
)
return parser
def heterodyne_merge(**kwargs):
"""
Merge the output of multiple heterodynes for a specific pulsar.
Parameters
----------
heterodynedfiles: str, list
A string, or list of strings, giving the paths to heterodyned data
files to be read in and merged
output: str
The output file name to write the data to. If not given then the data
will not be output.
overwrite: bool
Set whether to overwrite an existing file. Defaults to False.
remove: bool
Set whether to remove the individual files that form the merged file.
Defaults to False.
Returns
-------
het: `class::~cwinpy.heterodyne.Heterodyne`
The merged heterodyning class object.
"""
if "cli" in kwargs:
# get command line arguments
parser = create_heterodyne_merge_parser()
cliargs = sys.argv[1:]
try:
args, _ = parse_args(cliargs, parser)
except BilbyPipeError as e:
raise IOError("{}".format(e))
# convert args to a dictionary
mergekwargs = vars(args)
else:
mergekwargs = kwargs
if "heterodynedfiles" not in mergekwargs:
raise ArgumentError("'heterodynedfiles' is a required argument")
heterodynedfiles = mergekwargs["heterodynedfiles"]
filelist = (
heterodynedfiles if isinstance(heterodynedfiles, list) else [heterodynedfiles]
)
filelist = [hf for hf in filelist if os.path.isfile(hf)]
if len(filelist) == 0:
raise ValueError("None of the heterodyned files given exists!")
# read in and merge all the files
het = HeterodynedData.read(filelist)
# write out the merged data file
if "output" in mergekwargs:
het.write(mergekwargs["output"], overwrite=mergekwargs.get("overwrite", False))
if mergekwargs.get("remove", False):
# remove the inidividual files
for hf in filelist:
os.remove(hf)
return het
def heterodyne_merge_cli(**kwargs): # pragma: no cover
"""
Entry point to ``cwinpy_heterodyne_merge`` script. This just calls
:func:`cwinpy.heterodyne.heterodyne_merge`, but does not return any
objects.
"""
kwargs["cli"] = True # set to show use of CLI
_ = heterodyne_merge(**kwargs)
def heterodyne_dag(**kwargs):
"""
Run heterodyne_dag within Python. This will create a `HTCondor <https://htcondor.readthedocs.io/>`_
DAG for running multiple ``cwinpy_heterodyne`` instances on a computer cluster. Optional
parameters that can be used instead of a configuration file (for "quick setup") are given in
the "Other parameters" section.
Parameters
----------
config: str
A configuration file, or :class:`configparser:ConfigParser` object,
for the analysis.
Other parameters
----------------
run: str
The name of an observing run for which open data exists, which will be
heterodyned, e.g., "O1".
detector: str, list
The detector, or list of detectors, for which the data will be
heterodyned. If not set then all detectors available for a given run
will be used.
hwinj: bool
Set this to True to analyse the continuous hardware injections for a
given run. No ``pulsar`` argument is required in this case.
samplerate: str:
Select the sample rate of the data to use. This can either be 4k or
16k for data sampled at 4096 or 16384 Hz, respectively. The default
is 4k, except if running on hardware injections for O1 or later, for
which 16k will be used due to being requred for the highest frequency
source. For the S5 and S6 runs only 4k data is avaialble from GWOSC,
so if 16k is chosen it will be ignored.
pulsar: str, list
The path to, or list of paths to, a Tempo(2)-style pulsar parameter
file(s), or directory containing multiple parameter files, to
heterodyne. If a pulsar name is given instead of a parameter file
then an attempt will be made to find the pulsar's ephemeris from the
ATNF pulsar catalogue, which will then be used.
osg: bool
Set this to True to run on the Open Science Grid rather than a local
computer cluster.
output: str,
The location for outputting the heterodyned data. By default the
current directory will be used. Within this directory, subdirectories
for each detector will be created.
joblength: int
The length of data (in seconds) into which to split the individual
analysis jobs. By default this is set to 86400, i.e., one day. If this
is set to 0, then the whole dataset is treated as a single job.
accounting_group_tag: str
For LVK users this sets the computing accounting group tag.
usetempo2: bool
Set this flag to use Tempo2 (if installed) for calculating the signal
phase evolution rather than the default LALSuite functions.
Returns
-------
dag:
An object containing a pycondor :class:`pycondor.Dagman` object.
"""
if "config" in kwargs:
configfile = kwargs.pop("config")
else: # pragma: no cover
parser = ArgumentParser(
description=(
"A script to create a HTCondor DAG to process GW strain data "
"by heterodyning it based on the expected phase evolution for "
"a selection of pulsars."
)
)
parser.add_argument(
"config",
nargs="?",
help=("The configuration file for the analysis"),
default=None,
)
optional = parser.add_argument_group(
"Quick setup arguments (this assumes CVMFS open data access)."
)
optional.add_argument(
"--run",
help=(
"Set an observing run name for which to heterodyne the data. "
"This can be one of {} for which open data exists".format(
list(RUNTIMES.keys())
)
),
)
optional.add_argument(
"--detector",
action="append",
help=(
"The detector for which the data will be heterodyned. This can "
"be used multiple times to specify multiple detectors. If not "
"set then all detectors available for a given run will be "
"used."
),
)
optional.add_argument(
"--hwinj",
action="store_true",
help=(
"Set this flag to analyse the continuous hardware injections "
"for a given run. No '--pulsar' arguments are required in "
"this case."
),
)
optional.add_argument(
"--samplerate",
help=(
"Select the sample rate of the data to use. This can either "
"be 4k or 16k for data sampled at 4096 or 16384 Hz, "
"respectively. The default is 4k, except if running on "
"hardware injections for O1 or later, for which 16k will be "
"used due to being requred for the highest frequency source. "
"For the S5 and S6 runs only 4k data is avaialble from GWOSC, "
"so if 16k is chosen it will be ignored."
),
default="4k",
)
optional.add_argument(
"--pulsar",
action="append",
help=(
"The path to a Tempo(2)-style pulsar parameter file, or "
"directory containing multiple parameter files, to "
"heterodyne. This can be used multiple times to specify "
"multiple pulsar inputs. If a pulsar name is given instead "
"of a parameter file then an attempt will be made to find the "
"pulsar's ephemeris from the ATNF pulsar catalogue, which "
"will then be used."
),
)
optional.add_argument(
"--osg",
action="store_true",
help=(
"Set this flag to run on the Open Science Grid rather than a "
"local computer cluster."
),
)
optional.add_argument(
"--output",
help=(
"The location for outputting the heterodyned data. By default "
"the current directory will be used. Within this directory, "
"subdirectories for each detector will be created."
),
default=os.getcwd(),
)
optional.add_argument(
"--joblength",
type=int,
help=(
"The length of data (in seconds) into which to split the "
"individual analysis jobs. By default this is set to 86400, "
"i.e., one day. If this is set to 0, then the whole dataset "
"is treated as a single job."
),
)
optional.add_argument(
"--accounting-group-tag",
dest="accgroup",
help=("For LVK users this sets the computing accounting group tag"),
)
optional.add_argument(
"--usetempo2",
action="store_true",
help=(
"Set this flag to use Tempo2 (if installed) for calculating "
"the signal phase evolution rather than the default LALSuite "
"functions."
),
)
args = parser.parse_args()
if args.config is not None:
configfile = args.config
else:
# use the "Quick setup" arguments
configfile = configparser.ConfigParser()
run = kwargs.get("run", args.run)
if run not in RUNTIMES:
raise ValueError(f"Requested run '{run}' is not available")
pulsars = []
if kwargs.get("hwinj", args.hwinj):
# use hardware injections for the run
runtimes = HW_INJ_RUNTIMES
segments = HW_INJ_SEGMENTS
pulsars.extend(HW_INJ[run]["hw_inj_files"])
# set sample rate to 16k, expect for S runs
srate = "16k" if run[0] == "O" else "4k"
else:
# use pulsars provided
runtimes = RUNTIMES
segments = ANALYSIS_SEGMENTS
pulsar = kwargs.get("pulsar", args.pulsar)
if pulsar is None:
raise ValueError("No pulsar parameter files have be provided")
pulsars.extend(pulsar if isinstance(list) else [pulsar])
# get sample rate
srate = (
"16k" if (args.samplerate[0:2] == "16" and run[0] == "O") else "4k"
)
detector = kwargs.get("detector", args.detector)
if args.detector is None:
detectors = list(runtimes[run].keys())
else:
detector = detector if isinstance(detector, list) else [detector]
detectors = [det for det in detector if det in runtimes[run]]
if len(detectors) == 0:
raise ValueError(
f"Provided detectors '{detector}' are not valid for the given run"
)
# create required settings
configfile["run"] = {}
configfile["run"]["basedir"] = kwargs.get("output", args.output)
configfile["heterodyne_dag"] = {}
configfile["heterodyne_dag"]["submitdag"] = "True"
if kwargs.get("osg", args.osg):
configfile["heterodyne_dag"]["osg"] = "True"
configfile["heterodyne_job"] = {}
configfile["heterodyne_job"]["getenv"] = "True"
if args.accgroup is not None:
configfile["heterodyne_job"]["accounting_group"] = kwargs.get(
"accounting_group_tag", args.accgroup
)
# add pulsars/pulsar ephemerides
configfile["ephemerides"] = {}
configfile["ephemerides"]["pulsarfiles"] = str(pulsars)
# add heterodyne settings
configfile["heterodyne"] = {}
configfile["heterodyne"]["detectors"] = str(detectors)
configfile["heterodyne"]["starttimes"] = str(
{det: runtimes[run][det][0] for det in detectors}
)
configfile["heterodyne"]["endtimes"] = str(
{det: runtimes[run][det][1] for det in detectors}
)
configfile["heterodyne"]["frametypes"] = str(
{det: CVMFS_GWOSC_DATA_TYPES[run][srate][det] for det in detectors}
)
configfile["heterodyne"]["channels"] = str(
{det: CVMFS_GWOSC_FRAME_CHANNELS[run][srate][det] for det in detectors}
)
configfile["heterodyne"]["host"] = CVMFS_GWOSC_DATA_SERVER
if args.hwinj:
configfile["heterodyne"]["includeflags"] = str(
{det: segments[run][det]["includesegments"] for det in detectors}
)
configfile["heterodyne"]["excludeflags"] = str(
{det: segments[run][det]["excludesegments"] for det in detectors}
)
else:
configfile["heterodyne"]["includeflags"] = str(
{det: segments[run][det] for det in detectors}
)
configfile["heterodyne"]["outputdir"] = str(
{
det: os.path.join(kwargs.get("output", args.output), det)
for det in detectors
}
)
configfile["heterodyne"]["overwrite"] = "False"
# set whether to use Tempo2 for phase evolution
if kwargs.get("usetempo2", args.usetempo2):
configfile["heterodyne"]["usetempo2"] = "True"
# split the analysis into on average day long chunks
if kwargs.get("joblength", args.joblength) is None:
configfile["heterodyne"]["joblength"] = "86400"
else:
configfile["heterodyne"]["joblength"] = str(
kwargs.get("joblength", args.joblength)
)
# merge the resulting files and remove individual files
configfile["merge"] = {}
configfile["merge"]["merge"] = "True"
configfile["merge"]["remove"] = "True"
configfile["merge"]["overwrite"] = "True"
if isinstance(configfile, configparser.ConfigParser):
config = configfile
else:
config = configparser.ConfigParser()
try:
config.read_file(open(configfile, "r"))
except Exception as e:
raise IOError(f"Problem reading configuration file '{configfile}'\n: {e}")
return HeterodyneDAGRunner(config, **kwargs)
def heterodyne_dag_cli(**kwargs): # pragma: no cover
"""
Entry point to the cwinpy_heterodyne_dag script. This just calls
:func:`cwinpy.heterodyne.heterodyne_dag`, but does not return any objects.
"""
_ = heterodyne_dag(**kwargs)
| 40.746815 | 116 | 0.524809 |
97f1c05811bbe3176ddd3d2e0d9d3415c269f3fe | 5,787 | py | Python | timpani/webserver/webhelpers.py | ollien/Timpani | 0d1aac467e0bcbe2d1dadb4e6c025315d6be45cb | [
"MIT"
] | 3 | 2015-10-16T11:26:53.000Z | 2016-08-28T19:28:52.000Z | timpani/webserver/webhelpers.py | ollien/timpani | 0d1aac467e0bcbe2d1dadb4e6c025315d6be45cb | [
"MIT"
] | 22 | 2015-09-14T23:00:07.000Z | 2016-07-22T08:39:39.000Z | timpani/webserver/webhelpers.py | ollien/timpani | 0d1aac467e0bcbe2d1dadb4e6c025315d6be45cb | [
"MIT"
] | null | null | null | import flask
import functools
import bs4
import urllib.parse
from .. import auth
from .. import themes
from .. import settings
INVALID_PERMISSIONS_FLASH_MESSAGE = "Sorry, you don't have permission to view that page."
#Decorator which checks if a user logged in and capable of using the specified permissions.
#If redirectPage is equal to none,
#the target funciton MUST have the arguments authed and authMessage defined.
#Will return all information that is needed to render a post.
#Prevents fragmentation in various post display methods
#Renders the theme's template if the theme contains one
#Otherwise, it renders the default template
| 43.511278 | 140 | 0.644375 |
97f1ce8901d8660f5836035727b480380b3d1fc2 | 1,542 | py | Python | bot/plugins/keyboard/__init__.py | grahamtito/TelegramFiletoCloud | 63ac4a173102ee73615aa5bcf996e545746a1c27 | [
"Unlicense"
] | null | null | null | bot/plugins/keyboard/__init__.py | grahamtito/TelegramFiletoCloud | 63ac4a173102ee73615aa5bcf996e545746a1c27 | [
"Unlicense"
] | null | null | null | bot/plugins/keyboard/__init__.py | grahamtito/TelegramFiletoCloud | 63ac4a173102ee73615aa5bcf996e545746a1c27 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# This is bot coded by Abhijith N T and used for educational purposes only
# https://github.com/AbhijithNT
# Copyright ABHIJITH N T
# Thank you https://github.com/pyrogram/pyrogram
from pyrogram.types import (
InlineKeyboardMarkup,
InlineKeyboardButton
)
| 23.723077 | 74 | 0.485084 |
97f1fff136972b7db73eca847e9e3cb4870be823 | 4,022 | py | Python | django_storymarket/forms.py | jacobian/django-storymarket | ec43318ddb9964e67220f6fa9675389b637422ce | [
"BSD-3-Clause"
] | 1 | 2019-01-12T10:05:59.000Z | 2019-01-12T10:05:59.000Z | django_storymarket/forms.py | jacobian/django-storymarket | ec43318ddb9964e67220f6fa9675389b637422ce | [
"BSD-3-Clause"
] | null | null | null | django_storymarket/forms.py | jacobian/django-storymarket | ec43318ddb9964e67220f6fa9675389b637422ce | [
"BSD-3-Clause"
] | null | null | null | import logging
import operator
import storymarket
from django import forms
from django.core.cache import cache
from django.conf import settings
from .models import SyncedObject
# Timeout for choices cached from Storymarket. 5 minutes.
CHOICE_CACHE_TIMEOUT = 600
log = logging.getLogger('django_storymarket') | 43.717391 | 96 | 0.544008 |
97f201e4bc64fac90fde4b3a05b02b6bc4e482f8 | 5,773 | py | Python | revisum/snippet.py | medariox/revisum | e92afa047ec66ef80bf3f27e6be81b1505f7151e | [
"MIT"
] | null | null | null | revisum/snippet.py | medariox/revisum | e92afa047ec66ef80bf3f27e6be81b1505f7151e | [
"MIT"
] | null | null | null | revisum/snippet.py | medariox/revisum | e92afa047ec66ef80bf3f27e6be81b1505f7151e | [
"MIT"
] | null | null | null | import pickle
from collections import OrderedDict
from datetime import datetime
from .chunk import Chunk
from .review import Review
from .tokenizer import LineTokenizer
from .utils import norm_path
from .database.snippet import maybe_init, Snippet as DataSnippet
def _serialize_ids(self):
return pickle.dumps(self.chunk_ids, pickle.HIGHEST_PROTOCOL)
def exists(self):
repo_id = self.repo_id(self.snippet_id)
maybe_init(repo_id)
snippet = DataSnippet.get_or_none(snippet_id=self.snippet_id)
return bool(snippet)
def save(self):
repo_id = self.repo_id(self.snippet_id)
maybe_init(repo_id)
snippet = DataSnippet.get_or_none(snippet_id=self.snippet_id)
if snippet:
(DataSnippet
.update(snippet_id=self.snippet_id,
merged=self.merged,
last_mod=datetime.now(),
start=self.start,
length=self.length,
source=self.source_file,
target=self.target_file,
chunk_ids=self._serialize_ids())
.where(DataSnippet.snippet_id == self.snippet_id)
.execute())
else:
(DataSnippet
.create(snippet_id=self.snippet_id,
merged=self.merged,
last_mod=datetime.now(),
start=self.start,
length=self.length,
source=self.source_file,
target=self.target_file,
chunk_ids=self._serialize_ids()))
| 29.01005 | 77 | 0.580634 |
97f20ba0590c9d144a0c17683ec4a0a88ea21ea6 | 46 | py | Python | ainnovation_dcim/workflow/__init__.py | ltxwanzl/ainnovation_dcim | b065489e2aa69729c0fd5142cf75d8caa7788b31 | [
"Apache-2.0"
] | null | null | null | ainnovation_dcim/workflow/__init__.py | ltxwanzl/ainnovation_dcim | b065489e2aa69729c0fd5142cf75d8caa7788b31 | [
"Apache-2.0"
] | null | null | null | ainnovation_dcim/workflow/__init__.py | ltxwanzl/ainnovation_dcim | b065489e2aa69729c0fd5142cf75d8caa7788b31 | [
"Apache-2.0"
] | null | null | null | # default_app_config = '.apps.WorkflowConfig'
| 23 | 45 | 0.782609 |
97f2191d807924b9920f7ca4379d337e4f2f9d92 | 6,361 | py | Python | examples/api-samples/inc_samples/sample33.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | examples/api-samples/inc_samples/sample33.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | examples/api-samples/inc_samples/sample33.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | ####<i>This sample will show how to convert several HTML documents to PDF and merge them to one document</i>
#Import of classes from libraries
import base64
import os
import shutil
import random
import time
from pyramid.renderers import render_to_response
from groupdocs.StorageApi import StorageApi
from groupdocs.AsyncApi import AsyncApi
from groupdocs.ApiClient import ApiClient
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
from groupdocs.models.JobInfo import JobInfo
# Checking value on null
####Set variables and get POST data
| 43.868966 | 111 | 0.562962 |
97f2ebb10db5b5ba4727a38411b745fbfd41201b | 2,503 | py | Python | silver/api/pagination.py | DocTocToc/silver | f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5 | [
"Apache-2.0"
] | 222 | 2017-01-15T10:30:57.000Z | 2022-03-08T20:34:46.000Z | silver/api/pagination.py | DocTocToc/silver | f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5 | [
"Apache-2.0"
] | 141 | 2017-01-11T10:56:49.000Z | 2021-10-12T11:51:00.000Z | silver/api/pagination.py | DocTocToc/silver | f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5 | [
"Apache-2.0"
] | 76 | 2017-01-10T13:50:27.000Z | 2022-03-25T21:37:00.000Z | # Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils.urls import replace_query_param, remove_query_param
| 37.924242 | 79 | 0.691171 |
97f305739c9556bc7a629078425a1949c86c0361 | 3,117 | py | Python | process_filing_headers.py | jsfenfen/fec2file | 541a7dc40eb4ebf51d1c610ee19fdefc030fc7e3 | [
"MIT"
] | 1 | 2019-04-24T16:45:07.000Z | 2019-04-24T16:45:07.000Z | process_filing_headers.py | jsfenfen/fec2file | 541a7dc40eb4ebf51d1c610ee19fdefc030fc7e3 | [
"MIT"
] | null | null | null | process_filing_headers.py | jsfenfen/fec2file | 541a7dc40eb4ebf51d1c610ee19fdefc030fc7e3 | [
"MIT"
] | null | null | null | import os
import fecfile
import json
import csv
import sys
from settings import RAW_ELECTRONIC_DIR, MASTER_HEADER_ROW, HEADER_DUMP_FILE
START_YEAR = 2019
ERROR_HEADERS = ['path', 'error', ]
if __name__ == '__main__':
outfile = open(HEADER_DUMP_FILE, 'w')
dw = csv.DictWriter(outfile, fieldnames=MASTER_HEADER_ROW, extrasaction='ignore')
dw.writeheader()
print("Writing output to %s" % HEADER_DUMP_FILE)
errorfile = open("header_read_errors.csv", 'w')
error_writer = csv.DictWriter(errorfile, fieldnames=ERROR_HEADERS, extrasaction='ignore')
error_writer.writeheader()
for dirName, subdirList, fileList in os.walk(RAW_ELECTRONIC_DIR, topdown=False):
try:
directory_year = int(dirName.split("/")[-1][0:4])
if directory_year < START_YEAR:
print("Ignoring directory %s" % dirName)
continue
except ValueError:
continue
for fname in fileList:
if fname.endswith(".fec"):
full_path = os.path.join(dirName, fname)
#readfile(full_path, dw)
#print("Found file %s" % full_path)
try:
readfile(full_path, dw)
except Exception as e:
print("error reading %s: %s" % (full_path, e))
error_writer.writerow({
'path':full_path,
'error':e
})
| 33.159574 | 111 | 0.62881 |
97f379ae1f9f041646342228c2bcfc62e5962980 | 331 | py | Python | src/python/collector/urls.py | swqqn/django-collector | 014e5974c8c6dda38682a7ae7eb1d4f0295679b8 | [
"MIT"
] | 3 | 2015-11-05T13:42:15.000Z | 2020-01-15T08:00:58.000Z | src/python/collector/urls.py | rentalita/django-collector | 8646e514d26820e317b2b59828dc0e506a19c780 | [
"MIT"
] | null | null | null | src/python/collector/urls.py | rentalita/django-collector | 8646e514d26820e317b2b59828dc0e506a19c780 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('collector.views',
url(r'^blob404/$', 'blob404'),
url(r'^deleted/$', 'deleted'),
url(r'^$', 'create'),
url(r'^(?P<uid>\w+)/$', 'delete'),
)
# Local Variables:
# indent-tabs-mode: nil
# End:
# vim: ai et sw=4 ts=4
| 20.6875 | 51 | 0.586103 |
97f5869664190ff99134b09c60ba7139b7a21527 | 7,658 | py | Python | cdisp/core.py | felippebarbosa/cdisp | d9a612c252495ab017bffccdd7e82bbb555e07dd | [
"BSL-1.0"
] | null | null | null | cdisp/core.py | felippebarbosa/cdisp | d9a612c252495ab017bffccdd7e82bbb555e07dd | [
"BSL-1.0"
] | null | null | null | cdisp/core.py | felippebarbosa/cdisp | d9a612c252495ab017bffccdd7e82bbb555e07dd | [
"BSL-1.0"
] | null | null | null | #-*- coding: utf-8 -*-
"""
Dispersion calculation functions
"""
import numpy # module for array manipulation
import pandas # module for general data analysis
import os # module for general OS manipulation
import scipy # module for scientific manipulation and analysis
##
def set_transverse_mode(data_frame, order_tag, neff_tag = 'neff', complex_neff = False):
""" Function for classification of transverse modes
For this function to work, the frequency and polarization must the the same.
Also the input have to be a Pandas data frame;
"""
if type(x) <> 'pandas.core.frame.DataFrame': raise(ValueError("The object MUST be a Pandas data frame"))
####
No = len(data_frame) # number of modes
order_list = np.array(['%1d' % x for x in np.arange(1, No + 1)][::-1]) # list with the transversal order
neffs = np.array(data_frame[neff_tag]) # neffs of the modes
if complex_neff:
neffs = np.abs(np.array([complex(s.replace('i' , 'j ')) for s in neffs])) # for complex neff
inds = neffs.argsort(kind = 'mergesort') # neff sorting
inds2 = np.array(inds).argsort(kind = 'mergesort') # index resorting (reverse sorting)
order_list_sorted = order_list[inds2] # list with the right (sorted) transversal order
data_frame[order_tag] = order_list_sorted
return data_frame
#######
def data_classification(data_frame, wavelength_tag = 'wlength', frequency_tag = 'freq',
input_tags = ['eig', 'Ptm', 'Ppml', 'Pcore', 'Pbus'],
class_tags = ['polarization', 'ring_bus', 'transverse_mode']):
""" Function for filtering quality factor, losses and classification of polarization and transverse modes
The input have to be a Pandas data frame;
"""
## limits setting
pml_thre = 0.5 # threshold for power in the PMLs
bus_thre = 1.0 # threshold for power in the bus waveguide relative to the ring
tm_thre = 1.0 # threshold for power in the TM mode
## tags for classification
[eigenval_tag, TM_tag, pml_tag, ring_tag, bus_tag] = input_tags
[pol_tag, ring_bus_tag, order_tag] = class_tags
## list of columns
list_col = list(data_frame.columns) # columns names
Neig = list_col.index(eigenval_tag) # index before
list_par = list_col[:Neig] # list of parameters
## create wavelength or frequency colunm
if frequency_tag not in list_col: data_frame[frequency_tag] = scipy.constants.c/data_frame[wavelength_tag]
if wavelength_tag not in list_col: data_frame[wavelength_tag] = scipy.constants.c/data_frame[frequency_tag]
## setting frequency column as the standard for internal use
if frequency_tag not in list_par:
list_par.remove(wavelength_tag)
list_par.append(frequency_tag)
## PML filtering
data_frame = data_frame[data_frame[pml_tag] < pml_thre] # Filter the light that goes to the Pml
## TE and TM modes separation
data_frame[pol_tag] = np.array(pandas.cut(np.array(data_frame[TM_tag]), [0, tm_thre, data_frame[TM_tag].max()], labels = ['TE', 'TM']))
list_tag = [pol_tag]
## waveguide and bus separation
if bus_tag in list_col:
data_frame[ring_bus_tag] = np.array(pandas.cut((np.array(data_frame[bus_tag])/np.array(data_frame[ring_tag]))**(1./4), [0, bus_thre, 1000000], labels = ['ring', 'bus']))
# data_frame[ring_bus_tag] = np.array(pandas.cut(np.array(data_frame[ring_tag]), [0, ring_thre, 100000], labels = ['','ring']))
list_tag = list_tag + [ring_bus_tag]
## transverse mode separation
list_group = list_par + list_tag # list to filter the first time
data_frame = data_frame.groupby(list_group, as_index = False).apply(set_transverse_mode, order_tag) # transverse order
return data_frame, list_group + [order_tag]
####
def find_idx_nearest_val(array, value):
'''function to find the nearest index matching to the value given'''
idx_sorted = np.argsort(array)
sorted_array = np.array(array[idx_sorted])
idx = np.searchsorted(sorted_array, value, side="left")
if idx >= len(array):
idx_nearest = idx_sorted[len(array)-1]
elif idx == 0:
idx_nearest = idx_sorted[0]
else:
if abs(value - sorted_array[idx-1]) < abs(value - sorted_array[idx]):
idx_nearest = idx_sorted[idx-1]
else:
idx_nearest = idx_sorted[idx]
return idx_nearest
###
def dispersion_calculation(data_frame, frequency_tag = 'freq', wavelength_tag = 'wlength',
neff_tag = 'neff', wlength0 = None):
""" functions for dispersion calculation """
## initial definitions
wlength = np.array(data_frame[wavelength_tag]) # wavelength
omega = 2*np.pi*np.array(data_frame[frequency_tag]) # angular frequency
beta = np.array(data_frame[neff_tag])*omega/scipy.constants.c # propagation constant
## dialing with circular waveguides
if 'r0' in data_frame.columns:
rad0 = np.array(data_frame['r0'])
beta = beta/rad0
else: rad0 = 1.0
## dispersion calculations
beta1 = Df(beta*rad0, omega)/rad0 # beta 1
beta2 = Df(beta1*rad0, omega)/rad0 # beta 2
beta3 = Df(beta2*rad0, omega)/rad0 # beta 3
beta4 = Df(beta3*rad0, omega)/rad0 # beta 4
D = -2*np.pi*scipy.constants.c/wlength*beta2 # D parameter
## set up the wlength for phase matching
wlength0 = 0.9e-6
if wlength0 == None: wlength0 = wlength[int(wlength.shape[0]/2)]
elif wlength0 < min(wlength): wlength0 = min(wlength)
elif wlength0 > max(wlength): wlength0 = max(wlength)
omega0 = 2*np.pi*scipy.constants.c/wlength0
## phase matching calculation
idx0 = find_idx_nearest_val(omega, omega0)
Dbeta = calculate_Dbeta(beta, idx0) # propagation constant in
Dbeta2 = beta2[idx0]*(omega - omega[idx0])**2 + beta4[idx0]/12*(omega - omega[idx0])**4
norm_gain = calculate_gain(Dbeta, 1.0e4)
## outputs
n_clad, n_core = 1.0, 3.5
output_tags = ['beta', 'beta1', 'beta2', 'beta3', 'beta4', 'D', 'Dbeta', 'Dbeta_approx', 'beta_norm', 'beta_clad', 'beta_core',
'n_clad', 'n_core', 'gain', 'ng', 'fsr']
outputs = [beta, beta1, beta2, beta3, beta4, D, Dbeta, Dbeta2, beta/scipy.constants.c, n_clad*omega/scipy.constants.c, n_core*omega/scipy.constants.c,
n_clad, n_core, norm_gain, beta1*scipy.constants.c, 1/(2*np.pi*rad0*beta1)]
for m, output in enumerate(outputs):
data_frame[output_tags[m]] = output
return data_frame
###
##
def calculate_Dbeta(x, idx0):
'''calculate Dbeta for a set of date with equally spaced frequencies'''
d = x.shape[0] # array dimension
Dx = np.full(d, np.nan)
idxm = max(-idx0, idx0 - d + 1) # minimum index
idxp = min(idx0 + 1, d - idx0) # maximum index
for idx in range(idxm, idxp):
xm, xp = np.roll(x, idx), np.roll(x, -idx)
Dx[idx0 + idx] = xm[idx0] + xp[idx0] - 2*x[idx0]
return Dx
##
def calculate_gain(Dbeta, Pn):
    '''Parametric gain of the four-wave-mixing process.

    ``Pn`` is the normalized pump power, i.e. Pn = gamma * P0, and ``Dbeta``
    the linear phase mismatch.  Returns sqrt(Pn**2 - (Dbeta/2 + Pn)**2);
    outside the gain bandwidth the radicand is negative and numpy yields NaN.
    '''
    kappa_half = Dbeta / 2 + Pn  # half of the total phase mismatch kappa
    return np.sqrt(Pn ** 2 - kappa_half ** 2)
| 48.77707 | 177 | 0.657482 |
97f8adb75c2bfb4df0070282016a4be3b8f42059 | 1,280 | py | Python | appname/predict.py | Lambda-ds-31/build_week_spotify | ba5c77b457f8180f80883c61a5011eb3b38ffc95 | [
"MIT"
] | null | null | null | appname/predict.py | Lambda-ds-31/build_week_spotify | ba5c77b457f8180f80883c61a5011eb3b38ffc95 | [
"MIT"
] | 1 | 2021-10-20T20:50:04.000Z | 2021-10-20T20:50:04.000Z | appname/predict.py | Lambda-ds-31/build_week_spotify | ba5c77b457f8180f80883c61a5011eb3b38ffc95 | [
"MIT"
] | 1 | 2022-02-18T13:51:29.000Z | 2022-02-18T13:51:29.000Z | import numpy as np
from data_prep import data
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from os import getenv
# Spotify API credentials are read from the environment, never hard-coded.
client_id = getenv('CLIENT_ID')
client_id_secret = getenv('CLIENT_ID_SECRET')
# Client-credentials OAuth flow: app-level access, no user login required.
manager = SpotifyClientCredentials(
    client_id = client_id,
    client_secret= client_id_secret)
# Module-level Spotify client shared by the functions below (e.g. find_knn).
sp = spotipy.Spotify(client_credentials_manager=manager)
def find_knn(track_id, df, k=6):
    """
    Takes in the user input song's track_id, and the prep-ed dataframe.
    Outputs a list of k-1 nearest neighbors based on audio features
    """
    # Fetch the seed track's audio features from the Spotify API.
    features = sp.audio_features(track_id)[0]
    # NOTE(review): this overwrites the ``df`` argument with a fresh
    # data() frame, so the dataframe passed by the caller is never used —
    # confirm whether the parameter or this call should be removed.
    df = data()
    # Feature vector of the seed track, in a fixed column order.
    user_track = np.array(
        [
            features['acousticness'],
            features['danceability'],
            features['duration_ms'],
            features['energy'],
            features['instrumentalness'],
            features['liveness'],
            features['loudness'],
            features['speechiness'],
            features['tempo'],
            features['valence']
        ]
    )
    # Euclidean distance of every row to the seed vector (assumes df's
    # columns are in the same order as user_track above — TODO confirm
    # against data_prep.data()).
    df['distances'] = np.linalg.norm(df - user_track, axis=1)
    # k closest rows; the index presumably holds track ids — verify.
    nn_ids = df.sort_values(by='distances').index.to_list()[:k]
    # Drop the seed track itself when it comes back as the nearest hit,
    # otherwise trim the tail so the result has k-1 entries either way.
    if nn_ids[0] == track_id:
        nn_ids = nn_ids[1:]
    else:
        nn_ids = nn_ids[:-1]
    return nn_ids
97f988da234108443107eea262cb4a176c0459c9 | 176 | py | Python | tests/cpydiff/modules_array_deletion.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 692 | 2016-12-19T23:25:35.000Z | 2022-03-31T14:20:48.000Z | tests/cpydiff/modules_array_deletion.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 509 | 2017-03-28T19:37:18.000Z | 2022-03-31T20:31:43.000Z | tests/cpydiff/modules_array_deletion.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 228 | 2016-12-19T05:03:30.000Z | 2022-03-22T18:13:00.000Z | """
categories: Modules,array
description: Array deletion not implemented
cause: Unknown
workaround: Unknown
"""
import array
a = array.array('b', (1, 2, 3))
del a[1]
print(a)
| 16 | 43 | 0.715909 |
97fa4f4535ac67853dbadcc3ffdf0124a1fb7efd | 10,001 | py | Python | jaysblog/models.py | cRiii/jaysblog | f96ecd82f17750a47147ae3c5e72cf1320be21e5 | [
"MIT"
] | 5 | 2019-10-14T01:51:02.000Z | 2019-11-07T15:01:14.000Z | jaysblog/models.py | cRiii/jaysblog | f96ecd82f17750a47147ae3c5e72cf1320be21e5 | [
"MIT"
] | 1 | 2019-11-07T06:58:26.000Z | 2019-11-07T06:58:26.000Z | jaysblog/models.py | cRiii/jaysblog | f96ecd82f17750a47147ae3c5e72cf1320be21e5 | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Time : 2019/9/17 15:07
@Author : Jay Chen
@FileName: models.py
@GitHub : https://github.com/cRiii
"""
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from jaysblog.extensions import db
from flask_login import UserMixin
def to_dict(self):
    """Serialize the user's public profile fields into a plain dict."""
    field_names = (
        "id", "nick_name", "email", "desc",
        "avatar_url", "gender", "is_admin",
    )
    # Same keys, same insertion order as the original literal dict.
    return {name: getattr(self, name) for name in field_names}
class Post(BaseModel, db.Model):
    """Blog post model, stored in table ``b_posts``."""
    __tablename__ = 'b_posts'
    id = db.Column(db.Integer, primary_key=True)  # primary key
    post_title = db.Column(db.String(256), nullable=False)  # post title (required)
    post_user_id = db.Column(db.Integer, nullable=False)  # authoring user's id (no FK constraint declared)
    post_digest = db.Column(db.String(512), nullable=True)  # optional short summary
    post_content = db.Column(db.Text, nullable=False)  # full post body
    post_clicks = db.Column(db.Integer, default=0)  # click counter
    post_like_num = db.Column(db.Integer, default=0)  # like counter
    post_index_image_url = db.Column(db.String(256))  # cover/index image URL
    post_status = db.Column(db.Integer, default=1)  # status flag (original comment lost; default 1 presumably means published — TODO confirm)
    post_can_comment = db.Column(db.Integer, default=1)  # 1 when commenting is allowed (int used as boolean)
    post_comments = db.relationship('Comment', backref='comment_post')  # comments attached to this post
    post_category = db.relationship('Category', back_populates='cg_posts')  # owning category object
    post_category_id = db.Column(db.Integer, db.ForeignKey('b_category.id'), nullable=False)  # FK to b_category.id
class Category(BaseModel, db.Model):
    """Post category, stored in table ``b_category``."""
    __tablename__ = 'b_category'
    id = db.Column(db.Integer, primary_key=True)  # primary key
    cg_name = db.Column(db.String(64), nullable=False, unique=True)  # unique category name
    cg_posts = db.relationship('Post', back_populates='post_category')  # posts belonging to this category
class Comment(BaseModel, db.Model):
    """Comment on a post, stored in table ``b_comments``."""
    __tablename__ = 'b_comments'
    id = db.Column(db.Integer, primary_key=True)  # primary key
    comment_user_id = db.Column(db.Integer, nullable=False)  # commenting user's id (no FK constraint declared)
    comment_content = db.Column(db.Text, nullable=False)  # comment body
    comment_from_admin = db.Column(db.Integer, default=0)  # int used as boolean; presumably 1 when written by an admin — TODO confirm
    comment_status = db.Column(db.Integer, default=0)  # status code -1/0/1 per the stripped original comment (presumably rejected/pending/approved — TODO confirm)
    comment_post_id = db.Column(db.Integer, db.ForeignKey('b_posts.id'), nullable=False)  # FK to the commented post
    comment_reply = db.relationship('Reply', backref='reply_comment')  # replies under this comment
class Reply(BaseModel, db.Model):
    """Reply to a comment, stored in table ``b_reply``."""
    __tablename__ = 'b_reply'
    id = db.Column(db.Integer, primary_key=True)  # primary key
    reply_from_user = db.Column(db.String(32), nullable=False)  # replying user (String(32) — presumably a nickname; confirm)
    reply_to_user = db.Column(db.String(32), nullable=False)  # user being replied to (same caveat as above)
    reply_content = db.Column(db.Text, nullable=False)  # reply body
    reply_status = db.Column(db.Integer, default=0)  # status code -1/0/1 (original comment lost — TODO confirm meanings)
    reply_comment_id = db.Column(db.Integer, db.ForeignKey('b_comments.id'), nullable=False)  # FK to the parent comment
class Journey(BaseModel, db.Model):
    """Timeline/journey entry, stored in table ``b_journey``."""
    __tablename__ = 'b_journey'
    id = db.Column(db.Integer, primary_key=True)  # primary key
    journey_title = db.Column(db.String(32), nullable=False)  # entry title
    journey_desc = db.Column(db.Text, nullable=False)  # entry description
    journey_time = db.Column(db.DateTime, default=datetime.utcnow)  # UTC timestamp; callable default, evaluated per insert
class MessageBoard(BaseModel, db.Model):
    """Guestbook / message-board entry, stored in table ``b_board``."""
    __tablename__ = 'b_board'
    id = db.Column(db.Integer, primary_key=True)  # primary key
    board_user = db.Column(db.String(32), nullable=False)  # name of the person leaving the message
    board_desc = db.Column(db.Text, nullable=False)  # message body
    board_status = db.Column(db.Integer, nullable=False, default=0)  # status code -1/0/1 (original comment lost — TODO confirm)
    board_email = db.Column(db.String(50), nullable=False)  # poster's contact e-mail
class UsersLikePosts(BaseModel, db.Model):
    """Association record: which user liked which post (``b_users_like_posts``)."""
    __tablename__ = 'b_users_like_posts'
    id = db.Column(db.Integer, primary_key=True)  # primary key
    user_id = db.Column(db.Integer, nullable=False)  # liking user's id (no FK constraint declared)
    user_like_post_id = db.Column(db.Integer, nullable=False)  # liked post's id (no FK constraint declared)
| 35.97482 | 107 | 0.633137 |
97fa5c7d0604d6e2fc363a4c15650e9b99bf74f3 | 602 | py | Python | 112_Path Sum.py | Alvin1994/leetcode-python3- | ba2bde873c925554cc39f2bd13be81967713477d | [
"Apache-2.0"
] | null | null | null | 112_Path Sum.py | Alvin1994/leetcode-python3- | ba2bde873c925554cc39f2bd13be81967713477d | [
"Apache-2.0"
] | null | null | null | 112_Path Sum.py | Alvin1994/leetcode-python3- | ba2bde873c925554cc39f2bd13be81967713477d | [
"Apache-2.0"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 28.666667 | 68 | 0.521595 |
97faabe77e17c6e2ce8553519c92f2c578ef3f08 | 1,509 | py | Python | telemanom/_globals.py | tonyzeng2019/telemanom | ee1c9252c6ffc9581995aaf479f0d79cf0a2e914 | [
"Apache-2.0"
] | null | null | null | telemanom/_globals.py | tonyzeng2019/telemanom | ee1c9252c6ffc9581995aaf479f0d79cf0a2e914 | [
"Apache-2.0"
] | null | null | null | telemanom/_globals.py | tonyzeng2019/telemanom | ee1c9252c6ffc9581995aaf479f0d79cf0a2e914 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import yaml
import json
import sys
import os
sys.path.append('../venv/lib/python3.5/site-packages')
from elasticsearch import Elasticsearch
sys.path.append('../telemanom') | 27.944444 | 82 | 0.561299 |
97fbc7c518483b22e3bd3fb0a4313e038f0a4e05 | 508 | py | Python | nanome/_internal/_network/_commands/_serialization/_open_url.py | rramji/nanome-lib | 2806598af31cfb4bb6e16366f0b300d2ddcc9c13 | [
"MIT"
] | null | null | null | nanome/_internal/_network/_commands/_serialization/_open_url.py | rramji/nanome-lib | 2806598af31cfb4bb6e16366f0b300d2ddcc9c13 | [
"MIT"
] | null | null | null | nanome/_internal/_network/_commands/_serialization/_open_url.py | rramji/nanome-lib | 2806598af31cfb4bb6e16366f0b300d2ddcc9c13 | [
"MIT"
] | null | null | null | from nanome._internal._util._serializers import _StringSerializer
from nanome._internal._util._serializers import _TypeSerializer
| 25.4 | 65 | 0.720472 |
97fd1501d115786d6770847e5c0def668bf7ecbe | 196 | py | Python | questoes/questao1.py | raulbarcelos/Lista-de-Exercicios-PO | 70933896108b5f9fbdbf541c389ab9354d6ceaf2 | [
"MIT"
] | null | null | null | questoes/questao1.py | raulbarcelos/Lista-de-Exercicios-PO | 70933896108b5f9fbdbf541c389ab9354d6ceaf2 | [
"MIT"
] | null | null | null | questoes/questao1.py | raulbarcelos/Lista-de-Exercicios-PO | 70933896108b5f9fbdbf541c389ab9354d6ceaf2 | [
"MIT"
] | null | null | null | print("********************************")
print("********** QUESTO 01 **********")
print("********************************")
print("******** RAUL BARCELOS *********")
print()
print("Ol mundo")
| 24.5 | 41 | 0.30102 |
97fdbd42de4debdf4f69ae07026eb489c9f50129 | 2,772 | py | Python | CorpusToolkit/ply_parser/test.py | howl-anderson/tools_for_corpus_of_people_daily | 8178d9a62c356f83723d42ced60f8269eed84861 | [
"Apache-2.0"
] | 243 | 2018-09-12T01:05:03.000Z | 2022-03-30T11:25:59.000Z | CorpusToolkit/ply_parser/test.py | nkkkyyy/tools_for_corpus_of_people_daily | 8178d9a62c356f83723d42ced60f8269eed84861 | [
"Apache-2.0"
] | 3 | 2018-10-18T10:13:07.000Z | 2020-09-10T06:34:40.000Z | CorpusToolkit/ply_parser/test.py | nkkkyyy/tools_for_corpus_of_people_daily | 8178d9a62c356f83723d42ced60f8269eed84861 | [
"Apache-2.0"
] | 56 | 2018-09-11T12:56:20.000Z | 2021-11-09T04:02:00.000Z | import logging
from CorpusToolkit.ply_parser import make_parser, lexer
logging.basicConfig(
level=logging.DEBUG,
filename="parselog.txt",
filemode="w",
format="%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()
test_data = (
"19980101-01-001-002/m /nt /n /wu /n /n /nrf /nrg",
"19980101-01-001-006/m /p /t /vi /f /wd /rr /dc /a /ui /p [/n /n /vn /n]nt /wu [/ns /n /vn /n]nt /c [/n /n]nt /wd /p /n /rz /n /wd /p [/ns /a /n]ns /n /wu /ns /c /ns /n /wu /s /n /wd /p /n /rz /ud /n /k /wd /vt /a /ud /vn /c /a /ud /vn /wt",
"19980131-04-013-024/m {na4}/rz /n /vt /a /ud /n /wd ",
"19980103-04-003-007/m /n /vt /a /ud /ns /n /wd /p /n /Vg /vt /n /c /n /ud /d /vt /wj /n /rz /vl /vt /a /ud /n /c /n /ud /n /wd /c /ns /n /p /d /vt /d /p /p /n /ud /ad /vt /vl /n /wj /rr /vt /wd /nrf /nrg /n /m /n /p [/jn /n /n]nt /ud /n /wkz /n /a /n /wky /f /vt /ud /wyz /m /qc /n /wyy /wd /p /n {shang5}/f {wei4}/p /n /ud /vn /vn /vt /ul /n /wj /wyz /m /qc /n /wyy /vt /n /p /n /ud /d /vt /wd /vl /vt /uz /n /wu /n /ud /vn /wj /d /p /ns /vt /n /vn /ud /n /wd /d /p /vt /c /vt /n /vl /Ng /n /wj /dc /a /vt /d /ad /vt /ud /n /ns /wd /rz /p /vt /vl /vt /rz /ud /n /wj /d /vt [/ns /n /wkz /f /vl [/ns /n /n]nt /wky /vn /n]nt /ud /nrf /nrg /n /wd /t /p /wkz /ryw /vl /n /wky /Ng /f /vt /wd /n /p /vt /n /vn /vx /vn /ud /b /n /df /vt /wd /vt /ud /n /d /vl /n /ud /vn /wd /c /rz /d /p /vt /n /n /ud /in /vl /n /wd /vt {che1}/n /u /m /Ng /wd /n /u /n /wj /p /vt /ns /vt /a /ud /n /c /a /ud /n /n /ud /n /u /wd /ns /n /ud /r /n /vl /a /ud /wj"
)
s = test_data[3]
| 72.947368 | 1,579 | 0.533911 |
97fdbe6160aa3872cb3be14af73e7667fe00624c | 978 | py | Python | homeassistant/components/hue/v2/helpers.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/hue/v2/helpers.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/hue/v2/helpers.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Helper functions for Philips Hue v2."""
from __future__ import annotations
def normalize_hue_brightness(brightness: float | None) -> float | None:
    """Scale a 0..255 brightness value onto Hue's 0..100 range.

    ``None`` passes through unchanged.
    """
    if brightness is None:
        return None
    # Hue controls brightness with a percentage in [0, 100].
    return float(brightness / 255 * 100)
def normalize_hue_transition(transition: float | None) -> float | None:
    """Convert a transition in seconds to Hue's milliseconds, in 100 ms steps.

    ``None`` passes through unchanged.
    """
    if transition is None:
        return None
    # Round to one decimal of a second first, then express in milliseconds.
    return int(round(transition, 1) * 1000)
def normalize_hue_colortemp(colortemp: int | None) -> int | None:
    """Clamp a color temperature into Hue's accepted 153..500 range.

    ``None`` passes through unchanged.
    """
    if colortemp is None:
        return None
    # Hue only accepts values between 153 and 500.
    return max(153, min(colortemp, 500))
| 32.6 | 76 | 0.682004 |
97fe866f84f325af30eccf7ed7f76920a2b9b84a | 186 | py | Python | incapsula/__init__.py | zanachka/incapsula-cracker-py3 | be1738d0e649e91de75583b694372bc04547fa85 | [
"Unlicense"
] | null | null | null | incapsula/__init__.py | zanachka/incapsula-cracker-py3 | be1738d0e649e91de75583b694372bc04547fa85 | [
"Unlicense"
] | null | null | null | incapsula/__init__.py | zanachka/incapsula-cracker-py3 | be1738d0e649e91de75583b694372bc04547fa85 | [
"Unlicense"
] | null | null | null | from .errors import IncapBlocked, MaxRetriesExceeded, RecaptchaBlocked
from .parsers import ResourceParser, WebsiteResourceParser, IframeResourceParser
from .session import IncapSession
| 46.5 | 80 | 0.876344 |
97feddd1f63ca0959b0312d053d59692a6f28e9d | 3,646 | py | Python | sdk/python/pulumi_civo/get_network.py | dirien/pulumi-civo | f75eb1482bade0d21fb25c9e20e6838791518226 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-08-04T12:27:02.000Z | 2022-03-14T13:16:43.000Z | sdk/python/pulumi_civo/get_network.py | dirien/pulumi-civo | f75eb1482bade0d21fb25c9e20e6838791518226 | [
"ECL-2.0",
"Apache-2.0"
] | 85 | 2020-08-17T19:03:57.000Z | 2022-03-25T19:17:57.000Z | sdk/python/pulumi_civo/get_network.py | dirien/pulumi-civo | f75eb1482bade0d21fb25c9e20e6838791518226 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2020-08-04T12:27:03.000Z | 2022-03-24T00:56:24.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetNetworkResult',
'AwaitableGetNetworkResult',
'get_network',
]
def get_network(id: Optional[str] = None,
                label: Optional[str] = None,
                region: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult:
    """
    Use this data source to access information about an existing resource.

    :param str id: The unique identifier of an existing Network.
    :param str label: The label of an existing Network.
    :param str region: The region of an existing Network.
    :param opts: Options controlling how the invoke is performed.
    :return: An awaitable wrapper around the looked-up network's attributes.

    NOTE: auto-generated by the Pulumi Terraform Bridge (see file header);
    keep edits to documentation only.
    """
    # Pack the lookup arguments for the provider invoke (None values are
    # passed through as-is).
    __args__ = dict()
    __args__['id'] = id
    __args__['label'] = label
    __args__['region'] = region
    # Fall back to default invoke options, and pin the provider version
    # when the caller did not set one.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider call; .value is the raw result dict-like object.
    __ret__ = pulumi.runtime.invoke('civo:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value
    return AwaitableGetNetworkResult(
        default=__ret__.default,
        id=__ret__.id,
        label=__ret__.label,
        name=__ret__.name,
        region=__ret__.region)
| 31.162393 | 120 | 0.620954 |
97ff07ce80697d0e69e6e48e82606287cb5eb7ee | 744 | py | Python | Hard/longest_valid_parentheses.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | Hard/longest_valid_parentheses.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | Hard/longest_valid_parentheses.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | from collections import deque | 32.347826 | 57 | 0.424731 |
97ff3603368750b9661b92eb04ae9042db5bd4fc | 2,358 | py | Python | IMFlask/config.py | iml1111/IMFlask | 96af28460365c305e92ca2720fe6b015713c578f | [
"MIT"
] | 2 | 2020-09-07T11:33:41.000Z | 2020-09-08T14:47:40.000Z | IMFlask/config.py | iml1111/IMFlask | 96af28460365c305e92ca2720fe6b015713c578f | [
"MIT"
] | 1 | 2020-09-07T11:29:00.000Z | 2022-03-31T10:01:06.000Z | IMFlask/config.py | iml1111/IMFlask | 96af28460365c305e92ca2720fe6b015713c578f | [
"MIT"
] | 2 | 2020-10-06T18:25:46.000Z | 2021-09-09T16:00:07.000Z | '''
Flask Application Config
'''
import os
from logging.config import dictConfig
BASEDIR = os.path.abspath(os.path.dirname(__file__))
config = {
'development':DevelopmentConfig,
'production':ProductionConfig,
'testing':TestingConfig,
'default':DevelopmentConfig,
}
| 26.2 | 87 | 0.54665 |
97ff714eac7c0cc920b3005424b8958af7aec6ce | 1,066 | py | Python | cnn/conv_average_pooling.py | nforesperance/Tensorflow-Keras | 12fa74e01c7081b2f5ef899ee9123498ef541483 | [
"MIT"
] | 1 | 2021-01-07T11:05:07.000Z | 2021-01-07T11:05:07.000Z | cnn/conv_average_pooling.py | nforesperance/Tensorflow-Keras | 12fa74e01c7081b2f5ef899ee9123498ef541483 | [
"MIT"
] | null | null | null | cnn/conv_average_pooling.py | nforesperance/Tensorflow-Keras | 12fa74e01c7081b2f5ef899ee9123498ef541483 | [
"MIT"
] | null | null | null | # example of average pooling
from numpy import asarray
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import AveragePooling2D
# define input data
data = [[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0]]
data = asarray(data)
data = data.reshape(1, 8, 8, 1)
# create model
model = Sequential()
model.add(Conv2D(1, (3,3), activation='relu', input_shape=(8, 8, 1)))
model.add(AveragePooling2D())
# summarize model
model.summary()
# define a vertical line detector
detector = [[[[0]],[[1]],[[0]]],
[[[0]],[[1]],[[0]]],
[[[0]],[[1]],[[0]]]]
weights = [asarray(detector), asarray([0.0])]
# store the weights in the model
model.set_weights(weights)
# apply filter to input data
yhat = model.predict(data)
# enumerate rows
for r in range(yhat.shape[1]):
# print each column in the row
print([yhat[0,r,c,0] for c in range(yhat.shape[2])]) | 30.457143 | 69 | 0.594747 |
3f000581137f7e8d12b07f946dab58d61d19c246 | 13,127 | py | Python | acquisitions/models.py | 18F/acqstackdb | 7d939e7deb1cb8749f16fe6b6bc092f5db5c4469 | [
"CC0-1.0"
] | 2 | 2016-06-03T16:33:34.000Z | 2016-07-22T12:10:31.000Z | acquisitions/models.py | 18F/acqstackdb | 7d939e7deb1cb8749f16fe6b6bc092f5db5c4469 | [
"CC0-1.0"
] | 26 | 2016-06-02T11:21:15.000Z | 2016-07-18T14:10:03.000Z | acquisitions/models.py | 18F/acqstackdb | 7d939e7deb1cb8749f16fe6b6bc092f5db5c4469 | [
"CC0-1.0"
] | 2 | 2017-07-14T08:33:32.000Z | 2021-02-15T10:16:18.000Z | from django.db import models
from django.core.validators import RegexValidator, ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from smart_selects.db_fields import ChainedForeignKey, ChainedManyToManyField
from ordered_model.models import OrderedModel
# Create your models here.
# Is the acquisition internal or external?
| 37.505714 | 80 | 0.63198 |
3f0006363bb84a90ae81c6bd90ba3b9c73aecdc7 | 714 | py | Python | app/kobo/forms.py | wri/django_kobo | 505d52fc0d49d875af068e58ad959b95d1464dd5 | [
"MIT"
] | 1 | 2018-12-20T07:59:55.000Z | 2018-12-20T07:59:55.000Z | app/kobo/forms.py | wri/django_kobo | 505d52fc0d49d875af068e58ad959b95d1464dd5 | [
"MIT"
] | 9 | 2018-11-06T01:51:28.000Z | 2018-12-21T22:19:42.000Z | app/kobo/forms.py | wri/django_kobo | 505d52fc0d49d875af068e58ad959b95d1464dd5 | [
"MIT"
] | 2 | 2018-11-21T15:13:32.000Z | 2020-02-19T08:39:37.000Z | from django import forms
from .models import Connection, KoboUser, KoboData
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.db.models import Q
| 31.043478 | 165 | 0.644258 |
3f01198a019097c1976dc940001aed540d4f3634 | 713 | py | Python | old/dea/aws/__init__.py | robbibt/odc-tools | e2df2c9ef65dbd5652d97cd88617989b4b724814 | [
"Apache-2.0"
] | null | null | null | old/dea/aws/__init__.py | robbibt/odc-tools | e2df2c9ef65dbd5652d97cd88617989b4b724814 | [
"Apache-2.0"
] | null | null | null | old/dea/aws/__init__.py | robbibt/odc-tools | e2df2c9ef65dbd5652d97cd88617989b4b724814 | [
"Apache-2.0"
] | null | null | null | from odc.aws import (
ec2_metadata,
ec2_current_region,
botocore_default_region,
auto_find_region,
make_s3_client,
s3_url_parse,
s3_fmt_range,
s3_ls,
s3_ls_dir,
s3_find,
get_boto_session,
get_creds_with_retry,
s3_fetch,
)
from odc.aws._find import (
s3_file_info,
norm_predicate,
parse_query,
)
__all__ = (
"ec2_metadata",
"ec2_current_region",
"botocore_default_region",
"auto_find_region",
"make_s3_client",
"s3_url_parse",
"s3_fmt_range",
"s3_ls",
"s3_ls_dir",
"s3_find",
"get_boto_session",
"get_creds_with_retry",
"s3_fetch",
"s3_file_info",
"norm_predicate",
"parse_query",
)
| 16.97619 | 30 | 0.647966 |
3f0241d966136442d63f54ae450fa5bbf000c236 | 883 | py | Python | systems/stage.py | will-nickson/starter_system | bce669250fc58c3966c71e84020e078871a79e4f | [
"MIT"
] | null | null | null | systems/stage.py | will-nickson/starter_system | bce669250fc58c3966c71e84020e078871a79e4f | [
"MIT"
] | null | null | null | systems/stage.py | will-nickson/starter_system | bce669250fc58c3966c71e84020e078871a79e4f | [
"MIT"
] | null | null | null | from log.logger import logger
| 22.641026 | 71 | 0.571914 |
3f02d35a7926f58cae17ffac0f474623fde43a2e | 37,840 | py | Python | pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import hops
| 66.737213 | 754 | 0.742072 |