hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4915e964647e8f80103fac2c1f5e742bca3ba265 | 809 | py | Python | data_io/export_camera_poses.py | tumcms/aerial2pdsm | 1dc4f9c49d3f682dac4a665d081d547baaa0c877 | [
"BSD-3-Clause"
] | null | null | null | data_io/export_camera_poses.py | tumcms/aerial2pdsm | 1dc4f9c49d3f682dac4a665d081d547baaa0c877 | [
"BSD-3-Clause"
] | null | null | null | data_io/export_camera_poses.py | tumcms/aerial2pdsm | 1dc4f9c49d3f682dac4a665d081d547baaa0c877 | [
"BSD-3-Clause"
] | 1 | 2021-11-30T13:02:26.000Z | 2021-11-30T13:02:26.000Z | import plyfile
from config import SparseModel, project_path
from colmap_scripts.read_model import Image
import numpy as np
# Load the sparse COLMAP-style reconstruction and export every image's camera
# center as one PLY vertex (x, y, z plus the image number).
project = SparseModel("/home/felix/pointclouds/_working/2019_11_07_Muenchen_26_10_2018",
                      model_path="/home/felix/pointclouds/_working/2019_11_07_Muenchen_26_10_2018/sparse/1")
images = project.images
cameras = project.cameras

# One PLY record per camera: position plus the owning image number.
record_type = np.dtype([('x', np.float64), ('y', np.float64), ('z', np.float64), ('img', np.int32)])


def _camera_center(image):
    # Camera center in world coordinates: C = -R^T * t, with R taken from the
    # stored quaternion and t the stored translation vector.
    rotation = image.qvec2rotmat()
    return np.dot(-rotation.T, image.tvec)


locations = []
for image_nr, image in images.items():
    center = _camera_center(image)
    locations.append((center[0], center[1], center[2], image_nr))

vertex = np.array(locations, dtype=record_type)
element = plyfile.PlyElement.describe(vertex, "vertices")
plyfile.PlyData([element], text=True).write(project.model_path + "/camera_locations.ply")
| 32.36 | 108 | 0.697157 |
491612be631c7616e895777f18a1f1f4ec09ba72 | 15,815 | py | Python | src/goesrecv/support/generate_interpolator_taps.py | codient/goestools | 862600960681a1a1f3942f18f40b1f17dcdffc40 | [
"BSD-2-Clause"
] | 244 | 2017-11-07T12:12:23.000Z | 2022-03-24T07:24:53.000Z | src/goesrecv/support/generate_interpolator_taps.py | codient/goestools | 862600960681a1a1f3942f18f40b1f17dcdffc40 | [
"BSD-2-Clause"
] | 116 | 2018-03-07T04:02:26.000Z | 2022-03-27T12:08:01.000Z | src/goesrecv/support/generate_interpolator_taps.py | codient/goestools | 862600960681a1a1f3942f18f40b1f17dcdffc40 | [
"BSD-2-Clause"
] | 65 | 2018-05-28T02:44:21.000Z | 2022-03-18T12:27:58.000Z | #!/usr/bin/env python
#
# Create filter taps to use for interpolation filter in
# clock recovery algorithm. These taps are copied from
# GNU Radio at gnuradio/filter/interpolator_taps.h.
#
# This file includes them in natural order and I want
# them stored in reversed order such that they can be
# used directly.
#
# Tap table copied verbatim from GNU Radio (gnuradio/filter/interpolator_taps.h):
# 129 rows (NUM_STEPS + 1, matching the printing code below) of NUM_TAPS = 8
# coefficients each.  Rows are stored here in natural order; the emitter below
# reverses each row so the taps can be applied directly.
filters = [
[ 0.00000e+00, 0.00000e+00, 0.00000e+00, 0.00000e+00, 1.00000e+00, 0.00000e+00, 0.00000e+00, 0.00000e+00 ],
[ -1.54700e-04, 8.53777e-04, -2.76968e-03, 7.89295e-03, 9.98534e-01, -5.41054e-03, 1.24642e-03, -1.98993e-04 ],
[ -3.09412e-04, 1.70888e-03, -5.55134e-03, 1.58840e-02, 9.96891e-01, -1.07209e-02, 2.47942e-03, -3.96391e-04 ],
[ -4.64053e-04, 2.56486e-03, -8.34364e-03, 2.39714e-02, 9.95074e-01, -1.59305e-02, 3.69852e-03, -5.92100e-04 ],
[ -6.18544e-04, 3.42130e-03, -1.11453e-02, 3.21531e-02, 9.93082e-01, -2.10389e-02, 4.90322e-03, -7.86031e-04 ],
[ -7.72802e-04, 4.27773e-03, -1.39548e-02, 4.04274e-02, 9.90917e-01, -2.60456e-02, 6.09305e-03, -9.78093e-04 ],
[ -9.26747e-04, 5.13372e-03, -1.67710e-02, 4.87921e-02, 9.88580e-01, -3.09503e-02, 7.26755e-03, -1.16820e-03 ],
[ -1.08030e-03, 5.98883e-03, -1.95925e-02, 5.72454e-02, 9.86071e-01, -3.57525e-02, 8.42626e-03, -1.35627e-03 ],
[ -1.23337e-03, 6.84261e-03, -2.24178e-02, 6.57852e-02, 9.83392e-01, -4.04519e-02, 9.56876e-03, -1.54221e-03 ],
[ -1.38589e-03, 7.69462e-03, -2.52457e-02, 7.44095e-02, 9.80543e-01, -4.50483e-02, 1.06946e-02, -1.72594e-03 ],
[ -1.53777e-03, 8.54441e-03, -2.80746e-02, 8.31162e-02, 9.77526e-01, -4.95412e-02, 1.18034e-02, -1.90738e-03 ],
[ -1.68894e-03, 9.39154e-03, -3.09033e-02, 9.19033e-02, 9.74342e-01, -5.39305e-02, 1.28947e-02, -2.08645e-03 ],
[ -1.83931e-03, 1.02356e-02, -3.37303e-02, 1.00769e-01, 9.70992e-01, -5.82159e-02, 1.39681e-02, -2.26307e-03 ],
[ -1.98880e-03, 1.10760e-02, -3.65541e-02, 1.09710e-01, 9.67477e-01, -6.23972e-02, 1.50233e-02, -2.43718e-03 ],
[ -2.13733e-03, 1.19125e-02, -3.93735e-02, 1.18725e-01, 9.63798e-01, -6.64743e-02, 1.60599e-02, -2.60868e-03 ],
[ -2.28483e-03, 1.27445e-02, -4.21869e-02, 1.27812e-01, 9.59958e-01, -7.04471e-02, 1.70776e-02, -2.77751e-03 ],
[ -2.43121e-03, 1.35716e-02, -4.49929e-02, 1.36968e-01, 9.55956e-01, -7.43154e-02, 1.80759e-02, -2.94361e-03 ],
[ -2.57640e-03, 1.43934e-02, -4.77900e-02, 1.46192e-01, 9.51795e-01, -7.80792e-02, 1.90545e-02, -3.10689e-03 ],
[ -2.72032e-03, 1.52095e-02, -5.05770e-02, 1.55480e-01, 9.47477e-01, -8.17385e-02, 2.00132e-02, -3.26730e-03 ],
[ -2.86289e-03, 1.60193e-02, -5.33522e-02, 1.64831e-01, 9.43001e-01, -8.52933e-02, 2.09516e-02, -3.42477e-03 ],
[ -3.00403e-03, 1.68225e-02, -5.61142e-02, 1.74242e-01, 9.38371e-01, -8.87435e-02, 2.18695e-02, -3.57923e-03 ],
[ -3.14367e-03, 1.76185e-02, -5.88617e-02, 1.83711e-01, 9.33586e-01, -9.20893e-02, 2.27664e-02, -3.73062e-03 ],
[ -3.28174e-03, 1.84071e-02, -6.15931e-02, 1.93236e-01, 9.28650e-01, -9.53307e-02, 2.36423e-02, -3.87888e-03 ],
[ -3.41815e-03, 1.91877e-02, -6.43069e-02, 2.02814e-01, 9.23564e-01, -9.84679e-02, 2.44967e-02, -4.02397e-03 ],
[ -3.55283e-03, 1.99599e-02, -6.70018e-02, 2.12443e-01, 9.18329e-01, -1.01501e-01, 2.53295e-02, -4.16581e-03 ],
[ -3.68570e-03, 2.07233e-02, -6.96762e-02, 2.22120e-01, 9.12947e-01, -1.04430e-01, 2.61404e-02, -4.30435e-03 ],
[ -3.81671e-03, 2.14774e-02, -7.23286e-02, 2.31843e-01, 9.07420e-01, -1.07256e-01, 2.69293e-02, -4.43955e-03 ],
[ -3.94576e-03, 2.22218e-02, -7.49577e-02, 2.41609e-01, 9.01749e-01, -1.09978e-01, 2.76957e-02, -4.57135e-03 ],
[ -4.07279e-03, 2.29562e-02, -7.75620e-02, 2.51417e-01, 8.95936e-01, -1.12597e-01, 2.84397e-02, -4.69970e-03 ],
[ -4.19774e-03, 2.36801e-02, -8.01399e-02, 2.61263e-01, 8.89984e-01, -1.15113e-01, 2.91609e-02, -4.82456e-03 ],
[ -4.32052e-03, 2.43930e-02, -8.26900e-02, 2.71144e-01, 8.83893e-01, -1.17526e-01, 2.98593e-02, -4.94589e-03 ],
[ -4.44107e-03, 2.50946e-02, -8.52109e-02, 2.81060e-01, 8.77666e-01, -1.19837e-01, 3.05345e-02, -5.06363e-03 ],
[ -4.55932e-03, 2.57844e-02, -8.77011e-02, 2.91006e-01, 8.71305e-01, -1.22047e-01, 3.11866e-02, -5.17776e-03 ],
[ -4.67520e-03, 2.64621e-02, -9.01591e-02, 3.00980e-01, 8.64812e-01, -1.24154e-01, 3.18153e-02, -5.28823e-03 ],
[ -4.78866e-03, 2.71272e-02, -9.25834e-02, 3.10980e-01, 8.58189e-01, -1.26161e-01, 3.24205e-02, -5.39500e-03 ],
[ -4.89961e-03, 2.77794e-02, -9.49727e-02, 3.21004e-01, 8.51437e-01, -1.28068e-01, 3.30021e-02, -5.49804e-03 ],
[ -5.00800e-03, 2.84182e-02, -9.73254e-02, 3.31048e-01, 8.44559e-01, -1.29874e-01, 3.35600e-02, -5.59731e-03 ],
[ -5.11376e-03, 2.90433e-02, -9.96402e-02, 3.41109e-01, 8.37557e-01, -1.31581e-01, 3.40940e-02, -5.69280e-03 ],
[ -5.21683e-03, 2.96543e-02, -1.01915e-01, 3.51186e-01, 8.30432e-01, -1.33189e-01, 3.46042e-02, -5.78446e-03 ],
[ -5.31716e-03, 3.02507e-02, -1.04150e-01, 3.61276e-01, 8.23188e-01, -1.34699e-01, 3.50903e-02, -5.87227e-03 ],
[ -5.41467e-03, 3.08323e-02, -1.06342e-01, 3.71376e-01, 8.15826e-01, -1.36111e-01, 3.55525e-02, -5.95620e-03 ],
[ -5.50931e-03, 3.13987e-02, -1.08490e-01, 3.81484e-01, 8.08348e-01, -1.37426e-01, 3.59905e-02, -6.03624e-03 ],
[ -5.60103e-03, 3.19495e-02, -1.10593e-01, 3.91596e-01, 8.00757e-01, -1.38644e-01, 3.64044e-02, -6.11236e-03 ],
[ -5.68976e-03, 3.24843e-02, -1.12650e-01, 4.01710e-01, 7.93055e-01, -1.39767e-01, 3.67941e-02, -6.18454e-03 ],
[ -5.77544e-03, 3.30027e-02, -1.14659e-01, 4.11823e-01, 7.85244e-01, -1.40794e-01, 3.71596e-02, -6.25277e-03 ],
[ -5.85804e-03, 3.35046e-02, -1.16618e-01, 4.21934e-01, 7.77327e-01, -1.41727e-01, 3.75010e-02, -6.31703e-03 ],
[ -5.93749e-03, 3.39894e-02, -1.18526e-01, 4.32038e-01, 7.69305e-01, -1.42566e-01, 3.78182e-02, -6.37730e-03 ],
[ -6.01374e-03, 3.44568e-02, -1.20382e-01, 4.42134e-01, 7.61181e-01, -1.43313e-01, 3.81111e-02, -6.43358e-03 ],
[ -6.08674e-03, 3.49066e-02, -1.22185e-01, 4.52218e-01, 7.52958e-01, -1.43968e-01, 3.83800e-02, -6.48585e-03 ],
[ -6.15644e-03, 3.53384e-02, -1.23933e-01, 4.62289e-01, 7.44637e-01, -1.44531e-01, 3.86247e-02, -6.53412e-03 ],
[ -6.22280e-03, 3.57519e-02, -1.25624e-01, 4.72342e-01, 7.36222e-01, -1.45004e-01, 3.88454e-02, -6.57836e-03 ],
[ -6.28577e-03, 3.61468e-02, -1.27258e-01, 4.82377e-01, 7.27714e-01, -1.45387e-01, 3.90420e-02, -6.61859e-03 ],
[ -6.34530e-03, 3.65227e-02, -1.28832e-01, 4.92389e-01, 7.19116e-01, -1.45682e-01, 3.92147e-02, -6.65479e-03 ],
[ -6.40135e-03, 3.68795e-02, -1.30347e-01, 5.02377e-01, 7.10431e-01, -1.45889e-01, 3.93636e-02, -6.68698e-03 ],
[ -6.45388e-03, 3.72167e-02, -1.31800e-01, 5.12337e-01, 7.01661e-01, -1.46009e-01, 3.94886e-02, -6.71514e-03 ],
[ -6.50285e-03, 3.75341e-02, -1.33190e-01, 5.22267e-01, 6.92808e-01, -1.46043e-01, 3.95900e-02, -6.73929e-03 ],
[ -6.54823e-03, 3.78315e-02, -1.34515e-01, 5.32164e-01, 6.83875e-01, -1.45993e-01, 3.96678e-02, -6.75943e-03 ],
[ -6.58996e-03, 3.81085e-02, -1.35775e-01, 5.42025e-01, 6.74865e-01, -1.45859e-01, 3.97222e-02, -6.77557e-03 ],
[ -6.62802e-03, 3.83650e-02, -1.36969e-01, 5.51849e-01, 6.65779e-01, -1.45641e-01, 3.97532e-02, -6.78771e-03 ],
[ -6.66238e-03, 3.86006e-02, -1.38094e-01, 5.61631e-01, 6.56621e-01, -1.45343e-01, 3.97610e-02, -6.79588e-03 ],
[ -6.69300e-03, 3.88151e-02, -1.39150e-01, 5.71370e-01, 6.47394e-01, -1.44963e-01, 3.97458e-02, -6.80007e-03 ],
[ -6.71985e-03, 3.90083e-02, -1.40136e-01, 5.81063e-01, 6.38099e-01, -1.44503e-01, 3.97077e-02, -6.80032e-03 ],
[ -6.74291e-03, 3.91800e-02, -1.41050e-01, 5.90706e-01, 6.28739e-01, -1.43965e-01, 3.96469e-02, -6.79662e-03 ],
[ -6.76214e-03, 3.93299e-02, -1.41891e-01, 6.00298e-01, 6.19318e-01, -1.43350e-01, 3.95635e-02, -6.78902e-03 ],
[ -6.77751e-03, 3.94578e-02, -1.42658e-01, 6.09836e-01, 6.09836e-01, -1.42658e-01, 3.94578e-02, -6.77751e-03 ],
[ -6.78902e-03, 3.95635e-02, -1.43350e-01, 6.19318e-01, 6.00298e-01, -1.41891e-01, 3.93299e-02, -6.76214e-03 ],
[ -6.79662e-03, 3.96469e-02, -1.43965e-01, 6.28739e-01, 5.90706e-01, -1.41050e-01, 3.91800e-02, -6.74291e-03 ],
[ -6.80032e-03, 3.97077e-02, -1.44503e-01, 6.38099e-01, 5.81063e-01, -1.40136e-01, 3.90083e-02, -6.71985e-03 ],
[ -6.80007e-03, 3.97458e-02, -1.44963e-01, 6.47394e-01, 5.71370e-01, -1.39150e-01, 3.88151e-02, -6.69300e-03 ],
[ -6.79588e-03, 3.97610e-02, -1.45343e-01, 6.56621e-01, 5.61631e-01, -1.38094e-01, 3.86006e-02, -6.66238e-03 ],
[ -6.78771e-03, 3.97532e-02, -1.45641e-01, 6.65779e-01, 5.51849e-01, -1.36969e-01, 3.83650e-02, -6.62802e-03 ],
[ -6.77557e-03, 3.97222e-02, -1.45859e-01, 6.74865e-01, 5.42025e-01, -1.35775e-01, 3.81085e-02, -6.58996e-03 ],
[ -6.75943e-03, 3.96678e-02, -1.45993e-01, 6.83875e-01, 5.32164e-01, -1.34515e-01, 3.78315e-02, -6.54823e-03 ],
[ -6.73929e-03, 3.95900e-02, -1.46043e-01, 6.92808e-01, 5.22267e-01, -1.33190e-01, 3.75341e-02, -6.50285e-03 ],
[ -6.71514e-03, 3.94886e-02, -1.46009e-01, 7.01661e-01, 5.12337e-01, -1.31800e-01, 3.72167e-02, -6.45388e-03 ],
[ -6.68698e-03, 3.93636e-02, -1.45889e-01, 7.10431e-01, 5.02377e-01, -1.30347e-01, 3.68795e-02, -6.40135e-03 ],
[ -6.65479e-03, 3.92147e-02, -1.45682e-01, 7.19116e-01, 4.92389e-01, -1.28832e-01, 3.65227e-02, -6.34530e-03 ],
[ -6.61859e-03, 3.90420e-02, -1.45387e-01, 7.27714e-01, 4.82377e-01, -1.27258e-01, 3.61468e-02, -6.28577e-03 ],
[ -6.57836e-03, 3.88454e-02, -1.45004e-01, 7.36222e-01, 4.72342e-01, -1.25624e-01, 3.57519e-02, -6.22280e-03 ],
[ -6.53412e-03, 3.86247e-02, -1.44531e-01, 7.44637e-01, 4.62289e-01, -1.23933e-01, 3.53384e-02, -6.15644e-03 ],
[ -6.48585e-03, 3.83800e-02, -1.43968e-01, 7.52958e-01, 4.52218e-01, -1.22185e-01, 3.49066e-02, -6.08674e-03 ],
[ -6.43358e-03, 3.81111e-02, -1.43313e-01, 7.61181e-01, 4.42134e-01, -1.20382e-01, 3.44568e-02, -6.01374e-03 ],
[ -6.37730e-03, 3.78182e-02, -1.42566e-01, 7.69305e-01, 4.32038e-01, -1.18526e-01, 3.39894e-02, -5.93749e-03 ],
[ -6.31703e-03, 3.75010e-02, -1.41727e-01, 7.77327e-01, 4.21934e-01, -1.16618e-01, 3.35046e-02, -5.85804e-03 ],
[ -6.25277e-03, 3.71596e-02, -1.40794e-01, 7.85244e-01, 4.11823e-01, -1.14659e-01, 3.30027e-02, -5.77544e-03 ],
[ -6.18454e-03, 3.67941e-02, -1.39767e-01, 7.93055e-01, 4.01710e-01, -1.12650e-01, 3.24843e-02, -5.68976e-03 ],
[ -6.11236e-03, 3.64044e-02, -1.38644e-01, 8.00757e-01, 3.91596e-01, -1.10593e-01, 3.19495e-02, -5.60103e-03 ],
[ -6.03624e-03, 3.59905e-02, -1.37426e-01, 8.08348e-01, 3.81484e-01, -1.08490e-01, 3.13987e-02, -5.50931e-03 ],
[ -5.95620e-03, 3.55525e-02, -1.36111e-01, 8.15826e-01, 3.71376e-01, -1.06342e-01, 3.08323e-02, -5.41467e-03 ],
[ -5.87227e-03, 3.50903e-02, -1.34699e-01, 8.23188e-01, 3.61276e-01, -1.04150e-01, 3.02507e-02, -5.31716e-03 ],
[ -5.78446e-03, 3.46042e-02, -1.33189e-01, 8.30432e-01, 3.51186e-01, -1.01915e-01, 2.96543e-02, -5.21683e-03 ],
[ -5.69280e-03, 3.40940e-02, -1.31581e-01, 8.37557e-01, 3.41109e-01, -9.96402e-02, 2.90433e-02, -5.11376e-03 ],
[ -5.59731e-03, 3.35600e-02, -1.29874e-01, 8.44559e-01, 3.31048e-01, -9.73254e-02, 2.84182e-02, -5.00800e-03 ],
[ -5.49804e-03, 3.30021e-02, -1.28068e-01, 8.51437e-01, 3.21004e-01, -9.49727e-02, 2.77794e-02, -4.89961e-03 ],
[ -5.39500e-03, 3.24205e-02, -1.26161e-01, 8.58189e-01, 3.10980e-01, -9.25834e-02, 2.71272e-02, -4.78866e-03 ],
[ -5.28823e-03, 3.18153e-02, -1.24154e-01, 8.64812e-01, 3.00980e-01, -9.01591e-02, 2.64621e-02, -4.67520e-03 ],
[ -5.17776e-03, 3.11866e-02, -1.22047e-01, 8.71305e-01, 2.91006e-01, -8.77011e-02, 2.57844e-02, -4.55932e-03 ],
[ -5.06363e-03, 3.05345e-02, -1.19837e-01, 8.77666e-01, 2.81060e-01, -8.52109e-02, 2.50946e-02, -4.44107e-03 ],
[ -4.94589e-03, 2.98593e-02, -1.17526e-01, 8.83893e-01, 2.71144e-01, -8.26900e-02, 2.43930e-02, -4.32052e-03 ],
[ -4.82456e-03, 2.91609e-02, -1.15113e-01, 8.89984e-01, 2.61263e-01, -8.01399e-02, 2.36801e-02, -4.19774e-03 ],
[ -4.69970e-03, 2.84397e-02, -1.12597e-01, 8.95936e-01, 2.51417e-01, -7.75620e-02, 2.29562e-02, -4.07279e-03 ],
[ -4.57135e-03, 2.76957e-02, -1.09978e-01, 9.01749e-01, 2.41609e-01, -7.49577e-02, 2.22218e-02, -3.94576e-03 ],
[ -4.43955e-03, 2.69293e-02, -1.07256e-01, 9.07420e-01, 2.31843e-01, -7.23286e-02, 2.14774e-02, -3.81671e-03 ],
[ -4.30435e-03, 2.61404e-02, -1.04430e-01, 9.12947e-01, 2.22120e-01, -6.96762e-02, 2.07233e-02, -3.68570e-03 ],
[ -4.16581e-03, 2.53295e-02, -1.01501e-01, 9.18329e-01, 2.12443e-01, -6.70018e-02, 1.99599e-02, -3.55283e-03 ],
[ -4.02397e-03, 2.44967e-02, -9.84679e-02, 9.23564e-01, 2.02814e-01, -6.43069e-02, 1.91877e-02, -3.41815e-03 ],
[ -3.87888e-03, 2.36423e-02, -9.53307e-02, 9.28650e-01, 1.93236e-01, -6.15931e-02, 1.84071e-02, -3.28174e-03 ],
[ -3.73062e-03, 2.27664e-02, -9.20893e-02, 9.33586e-01, 1.83711e-01, -5.88617e-02, 1.76185e-02, -3.14367e-03 ],
[ -3.57923e-03, 2.18695e-02, -8.87435e-02, 9.38371e-01, 1.74242e-01, -5.61142e-02, 1.68225e-02, -3.00403e-03 ],
[ -3.42477e-03, 2.09516e-02, -8.52933e-02, 9.43001e-01, 1.64831e-01, -5.33522e-02, 1.60193e-02, -2.86289e-03 ],
[ -3.26730e-03, 2.00132e-02, -8.17385e-02, 9.47477e-01, 1.55480e-01, -5.05770e-02, 1.52095e-02, -2.72032e-03 ],
[ -3.10689e-03, 1.90545e-02, -7.80792e-02, 9.51795e-01, 1.46192e-01, -4.77900e-02, 1.43934e-02, -2.57640e-03 ],
[ -2.94361e-03, 1.80759e-02, -7.43154e-02, 9.55956e-01, 1.36968e-01, -4.49929e-02, 1.35716e-02, -2.43121e-03 ],
[ -2.77751e-03, 1.70776e-02, -7.04471e-02, 9.59958e-01, 1.27812e-01, -4.21869e-02, 1.27445e-02, -2.28483e-03 ],
[ -2.60868e-03, 1.60599e-02, -6.64743e-02, 9.63798e-01, 1.18725e-01, -3.93735e-02, 1.19125e-02, -2.13733e-03 ],
[ -2.43718e-03, 1.50233e-02, -6.23972e-02, 9.67477e-01, 1.09710e-01, -3.65541e-02, 1.10760e-02, -1.98880e-03 ],
[ -2.26307e-03, 1.39681e-02, -5.82159e-02, 9.70992e-01, 1.00769e-01, -3.37303e-02, 1.02356e-02, -1.83931e-03 ],
[ -2.08645e-03, 1.28947e-02, -5.39305e-02, 9.74342e-01, 9.19033e-02, -3.09033e-02, 9.39154e-03, -1.68894e-03 ],
[ -1.90738e-03, 1.18034e-02, -4.95412e-02, 9.77526e-01, 8.31162e-02, -2.80746e-02, 8.54441e-03, -1.53777e-03 ],
[ -1.72594e-03, 1.06946e-02, -4.50483e-02, 9.80543e-01, 7.44095e-02, -2.52457e-02, 7.69462e-03, -1.38589e-03 ],
[ -1.54221e-03, 9.56876e-03, -4.04519e-02, 9.83392e-01, 6.57852e-02, -2.24178e-02, 6.84261e-03, -1.23337e-03 ],
[ -1.35627e-03, 8.42626e-03, -3.57525e-02, 9.86071e-01, 5.72454e-02, -1.95925e-02, 5.98883e-03, -1.08030e-03 ],
[ -1.16820e-03, 7.26755e-03, -3.09503e-02, 9.88580e-01, 4.87921e-02, -1.67710e-02, 5.13372e-03, -9.26747e-04 ],
[ -9.78093e-04, 6.09305e-03, -2.60456e-02, 9.90917e-01, 4.04274e-02, -1.39548e-02, 4.27773e-03, -7.72802e-04 ],
[ -7.86031e-04, 4.90322e-03, -2.10389e-02, 9.93082e-01, 3.21531e-02, -1.11453e-02, 3.42130e-03, -6.18544e-04 ],
[ -5.92100e-04, 3.69852e-03, -1.59305e-02, 9.95074e-01, 2.39714e-02, -8.34364e-03, 2.56486e-03, -4.64053e-04 ],
[ -3.96391e-04, 2.47942e-03, -1.07209e-02, 9.96891e-01, 1.58840e-02, -5.55134e-03, 1.70888e-03, -3.09412e-04 ],
[ -1.98993e-04, 1.24642e-03, -5.41054e-03, 9.98534e-01, 7.89295e-03, -2.76968e-03, 8.53777e-04, -1.54700e-04 ],
[ 0.00000e+00, 0.00000e+00, 0.00000e+00, 1.00000e+00, 0.00000e+00, 0.00000e+00, 0.00000e+00, 0.00000e+00 ]
]
# Emit the tap table as a C header snippet on stdout, reversing each row so
# the taps can be applied directly (see the header comment at the top).
print("static const int NUM_TAPS = 8;")
print("static const int NUM_STEPS = 128;")
# Fix: the original emitted "static const mmseTaps[...]" with no element
# type, which is not a valid C/C++ declaration; the taps are floating-point
# values printed with %.5e, so declare the array as float.
print("static const float mmseTaps[NUM_STEPS+1][NUM_TAPS] = {")
for taps in filters:
    body = ", ".join("%.5e" % t for t in reversed(taps))
    print("{ " + body + " },")
print("};")
| 104.735099 | 117 | 0.613152 |
49172233318b11d598754579aedad36e5f01b8f1 | 1,688 | py | Python | openpype/hosts/blender/plugins/publish/extract_blend_animation.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | 1 | 2022-02-08T15:40:41.000Z | 2022-02-08T15:40:41.000Z | openpype/hosts/blender/plugins/publish/extract_blend_animation.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | 2 | 2022-03-18T01:46:03.000Z | 2022-03-18T01:46:16.000Z | openpype/hosts/blender/plugins/publish/extract_blend_animation.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | null | null | null | import os
import bpy
import openpype.api
| 30.690909 | 79 | 0.566943 |
4917259ce7d453d0c463913b457ccefb5c69c0f6 | 2,772 | py | Python | src/TamaTou.py | hirmiura/starsector-mod-Font_Replacement_for_Orbitron | ad7b5e3f4d8afd1a2aa97a420a2ec9a3aaf9b3d7 | [
"MIT"
] | 1 | 2022-01-17T02:58:46.000Z | 2022-01-17T02:58:46.000Z | src/TamaTou.py | hirmiura/starsector-mod-Font_Replacement_for_Orbitron | ad7b5e3f4d8afd1a2aa97a420a2ec9a3aaf9b3d7 | [
"MIT"
] | null | null | null | src/TamaTou.py | hirmiura/starsector-mod-Font_Replacement_for_Orbitron | ad7b5e3f4d8afd1a2aa97a420a2ec9a3aaf9b3d7 | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: MIT
# Copyright 2022 hirmiura (https://github.com/hirmiura)
#
# TamaTou
#
#
# 1. fontforge(fontforge-console.bat)
# 2.
# 3. ffpython TamaTou.py
# 4.
#
# Orbitron Tama
# Noto No Toufu Tou
# TamaTou
#
import fontforge
# Build TamaTou-{Regular,Bold}.otf: re-encode Orbitron as the Latin base,
# flatten the CID-keyed Noto Sans JP, rewrite the name table and merge.
for weight in ['Regular', 'Bold']:
    # Stage 1: re-encode the Orbitron base font and park it as tmp1.
    print('')
    latin = fontforge.open(f'Orbitron-{weight}.ttf')
    latin.encoding = 'UnicodeFull'
    latin.save(f'tmp1-{weight}.sfd')
    latin.close()

    # Stage 2: flatten the CID-keyed Noto Sans JP so it can be merged; park as tmp2.
    print(f' {weight}')
    cjk = fontforge.open(f'NotoSansJP-{weight}.otf')
    cjk.encoding = 'UnicodeFull'
    cjk.cidFlatten()
    cjk.save(f'tmp2-{weight}.sfd')
    cjk.close()

    # Stage 3: rename the base font, rewrite its SFNT name table, merge in the
    # flattened CJK font and generate the final OTF.
    print('')
    family = 'TamaTou'
    notice = 'Copyright (c) 2022, Hiroshi Miura (https://github.com/hirmiura) with Reserved Font Name TamaTou.'
    release = '1.0.0'
    license_text = 'Open Font License'
    merged = fontforge.open(f'tmp1-{weight}.sfd')
    merged.fontname = family
    merged.familyname = family
    merged.fullname = family
    merged.weight = weight
    merged.version = release
    merged.sfntRevision = None
    merged.copyright = notice
    # SFNT name-table strings keyed by name ID; IDs 0..17 not present in this
    # map are written as empty strings.  Entries are written first for
    # Japanese (0x411), then for US English (0x409), matching the TTF spec IDs.
    sfnt_strings = {
        0: notice,                  # copyright notice
        1: family,                  # font family
        4: family,                  # full font name
        5: release,                 # version string
        6: family + '-' + weight,   # PostScript name
        13: license_text,           # license description
        16: family,                 # preferred family
    }
    for lang_id in (0x411, 0x409):
        for string_id in range(18):
            merged.appendSFNTName(lang_id, string_id, sfnt_strings.get(string_id, ''))
    merged.mergeFonts(f'tmp2-{weight}.sfd')
    merged.save(f'tmp3-{weight}.sfd')
    merged.generate(f'TamaTou-{weight}.otf')
    merged.close()
| 30.461538 | 114 | 0.636724 |
4918381c344c6f579cf53ea1bf560dc12227d2bf | 2,623 | py | Python | bambu/bambu.py | westurner/pandasrdf | c194b1eb9928488bc19b82d3cab409158cd413a3 | [
"BSD-3-Clause"
] | 2 | 2016-07-01T10:48:04.000Z | 2017-01-24T16:53:44.000Z | bambu/bambu.py | westurner/pandasrdf | c194b1eb9928488bc19b82d3cab409158cd413a3 | [
"BSD-3-Clause"
] | 1 | 2016-06-20T10:54:53.000Z | 2017-02-07T05:47:38.000Z | bambu/bambu.py | westurner/pandasrdf | c194b1eb9928488bc19b82d3cab409158cd413a3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
bambu
------
pandas RDF functionality
Installation
--------------
::
# pip install pandas
pip install rdflib
"""
import sys
import pandas as pd
import rdflib
def bambu():
    """Main function ("mainfunc") for the module.

    Stub: not yet implemented; currently does nothing and returns None.
    """
    pass
def to_rdf(df):
    """Serialize a pandas DataFrame to RDF.

    Stub: not yet implemented -- the body is empty, so it returns None.
    (The original docstring also described a ``kwargs`` argument, but the
    signature takes none.)

    Args:
        df (DataFrame): pandas DataFrame to serialize to RDF

    Returns:
        rdflib.Graph: a serializable RDFLib Graph (once implemented)
    """
def read_rdf(path, **kwargs):
    """Load an RDF source into a pandas DataFrame.

    Stub: not yet implemented -- the body is empty, so it returns None.

    Args:
        path (str): path to an RDF source
        kwargs (dict): reader options (TODO: define)

    Returns:
        DataFrame: pandas DataFrame (once implemented)
    """
def to_rdfa(df, **kwargs):
    """Serialize a pandas DataFrame to an RDFa table.

    Stub: not yet implemented -- the body is empty, so it returns None.

    Args:
        df (DataFrame): pandas DataFrame to serialize to RDF
        kwargs (dict): writer options (TODO: define)

    Returns:
        (list, StringIO): namespaces, RDFa table (once implemented)
    """
def read_rdfa(path, **kwargs):
    """Load an RDFa source into a pandas DataFrame.

    Stub: not yet implemented -- the body is empty, so it returns None.

    Args:
        path (str): path to an RDF source
        kwargs (dict): reader options (TODO: define)

    Returns:
        DataFrame: pandas DataFrame (once implemented)
    """
def to_jsonld(df, **kwargs):
    """Serialize a pandas DataFrame to JSON-LD.

    Stub: not yet implemented -- the body is empty, so it returns None.

    Args:
        df (DataFrame): pandas DataFrame to serialize to RDF
        kwargs (dict): writer options (TODO: define)

    Returns:
        (context, StringIO): JSONLD context, JSONLD data (once implemented)
    """
def read_jsonld(path, **kwargs):
    """Load a JSON-LD source into a pandas DataFrame.

    Stub: not yet implemented -- the body is empty, so it returns None.

    Args:
        path (str): path to a JSONLD source
        kwargs (dict): reader options (TODO: define)

    Returns:
        DataFrame: pandas DataFrame (once implemented)
    """
import unittest
if __name__ == "__main__":
    # Fix: the original called main(), which is never defined anywhere in this
    # module, so running the script raised NameError.  bambu() is the module's
    # documented main function; it currently returns None, i.e. exit status 0.
    sys.exit(bambu())
| 18.602837 | 63 | 0.552421 |
491871e30f2b60781d5b69aef6ac73571b60d676 | 19,637 | py | Python | homework1/problem3/local/mort_icu.py | criticaldata/hst953-2021 | b18c8235a6c878a4a7d3d330a9b69421f0217273 | [
"MIT"
] | 1 | 2022-03-15T15:52:45.000Z | 2022-03-15T15:52:45.000Z | homework1/problem3/local/mort_icu.py | MDenu/HST-homework | fff0f277ee18735acbe84dfe8c428e92991b28fa | [
"MIT"
] | null | null | null | homework1/problem3/local/mort_icu.py | MDenu/HST-homework | fff0f277ee18735acbe84dfe8c428e92991b28fa | [
"MIT"
] | 3 | 2021-09-10T19:14:54.000Z | 2021-09-26T22:23:05.000Z | # Generates the following data files from MIMIC:
# adult_icu.gz: data from adult ICUs
# n_icu.gz: data from neonatal ICUs
# adult_notes.gz: clinical notes from adult ICUs
# Import libraries
import numpy as np
import pandas as pd
import psycopg2
from scipy.stats import ks_2samp
import os
import random
# Output directory for the generated .gz data files
mimicdir = os.path.expanduser("./mimic_data/")
# Fix the RNG seed so any random sampling below is reproducible.
random.seed(42)
# create a database connection
# NOTE(review): the password here is a hard-coded placeholder ('PASSWORD');
# never commit real credentials -- prefer ~/.pgpass or environment variables.
sqluser = 'mimicuser'
dbname = 'mimic'
schema_name = 'mimiciii'
# Connect to local postgres version of mimic
con = psycopg2.connect(dbname=dbname, user=sqluser, host='127.0.0.1', password='PASSWORD')
cur = con.cursor()
# Default to the MIMIC-III schema so tables can be referenced unqualified.
cur.execute('SET search_path to ' + schema_name)
#========helper function for imputing missing values
def replace(group):
    """Impute missing values in a pandas group with the group mean.

    The passed-in group/Series is modified in place: every null entry is
    overwritten with the mean of the non-null entries of that same group.
    The mutated group is returned so this can serve as a
    groupby().transform()/apply() helper.
    """
    present = group.notnull()
    group[~present] = group[present].mean()
    return group
#========get the icu details
# this query extracts the following:
# Unique ids for the admission, patient and icu stay
# Patient gender
# admission & discharge times
# length of stay
# age
# ethnicity
# admission type
# in hospital death?
# in icu death?
# one year from admission death?
# first hospital stay
# icu intime, icu outime
# los in icu
# first icu stay?
# SQL kept verbatim; it returns one row per ICU stay (see the field list in
# the comment block above).
denquery = \
"""
-- This query extracts useful demographic/administrative information for patient ICU stays
--DROP MATERIALIZED VIEW IF EXISTS icustay_detail CASCADE;
--CREATE MATERIALIZED VIEW icustay_detail as
--ie is the icustays table
--adm is the admissions table
SELECT ie.subject_id, ie.hadm_id, ie.icustay_id
, pat.gender
, adm.admittime, adm.dischtime, adm.diagnosis
, ROUND( (CAST(adm.dischtime AS DATE) - CAST(adm.admittime AS DATE)) , 4) AS los_hospital
, ROUND( (CAST(adm.admittime AS DATE) - CAST(pat.dob AS DATE)) / 365, 4) AS age
, adm.ethnicity, adm.ADMISSION_TYPE
--, adm.hospital_expire_flag
, CASE when adm.deathtime between ie.intime and ie.outtime THEN 1 ELSE 0 END AS mort_icu
, DENSE_RANK() OVER (PARTITION BY adm.subject_id ORDER BY adm.admittime) AS hospstay_seq
, CASE
WHEN DENSE_RANK() OVER (PARTITION BY adm.subject_id ORDER BY adm.admittime) = 1 THEN 1
ELSE 0 END AS first_hosp_stay
-- icu level factors
, ie.intime, ie.outtime
, ie.FIRST_CAREUNIT
, ROUND( (CAST(ie.outtime AS DATE) - CAST(ie.intime AS DATE)) , 4) AS los_icu
, DENSE_RANK() OVER (PARTITION BY ie.hadm_id ORDER BY ie.intime) AS icustay_seq
-- first ICU stay *for the current hospitalization*
, CASE
WHEN DENSE_RANK() OVER (PARTITION BY ie.hadm_id ORDER BY ie.intime) = 1 THEN 1
ELSE 0 END AS first_icu_stay
FROM icustays ie
INNER JOIN admissions adm
ON ie.hadm_id = adm.hadm_id
INNER JOIN patients pat
ON ie.subject_id = pat.subject_id
WHERE adm.has_chartevents_data = 1
ORDER BY ie.subject_id, adm.admittime, ie.intime;
"""
# Materialize the demographics query into a DataFrame (one row per ICU stay).
den = pd.read_sql_query(denquery,con)
#----drop patients with less than 48 hour stays, and implausible ages
# Length of stay in whole hours, only needed for the filter below.
den['los_icu_hr'] = (den.outtime - den.intime).astype('timedelta64[h]')
# Keep stays of at least 48h with plausible ages (MIMIC stores ages > 89 as
# ~300; they are excluded here, as in the original).  .copy() makes the
# filtered frame independent so the in-place edits below do not operate on a
# view of the pre-filter data (avoids SettingWithCopyWarning).
den = den[(den.los_icu_hr >= 48) & (den.age < 300)].copy()
den.drop('los_icu_hr', axis=1, inplace=True)
#----clean up
# micu --> medical
# csru --> cardiac surgery recovery unit
# sicu --> surgical icu
# tsicu --> Trauma Surgical Intensive Care Unit
# NICU --> Neonatal
# Flag adult ICUs (everything except pediatric/neonatal units) and binarise gender.
den['adult_icu'] = np.where(den['first_careunit'].isin(['PICU', 'NICU']), 0, 1)
den['gender'] = np.where(den['gender'] == "M", 1, 0)
# no need to yell
den['ethnicity'] = den['ethnicity'].str.lower()
# Collapse free-text ethnicity into five buckets, applied in order so later
# rules only see rows the earlier ones left untouched.  Fix: use frame-level
# .loc assignments instead of the original chained indexing
# (den.ethnicity.loc[...] = ...), which pandas does not guarantee to write
# back to the frame.
den.loc[den['ethnicity'].str.contains('^white'), 'ethnicity'] = 'white'
den.loc[den['ethnicity'].str.contains('^black'), 'ethnicity'] = 'black'
den.loc[den['ethnicity'].str.contains('^hisp') | den['ethnicity'].str.contains('^latin'), 'ethnicity'] = 'hispanic'
den.loc[den['ethnicity'].str.contains('^asia'), 'ethnicity'] = 'asian'
den.loc[~den['ethnicity'].str.contains('|'.join(['white', 'black', 'hispanic', 'asian'])), 'ethnicity'] = 'other'
# One-hot encode ethnicity and admission type, then drop the raw/unused columns.
den = pd.concat([den, pd.get_dummies(den['ethnicity'], prefix='eth')], axis=1)
den = pd.concat([den, pd.get_dummies(den['admission_type'], prefix='admType')], axis=1)
den.drop(['diagnosis', 'hospstay_seq', 'los_icu', 'icustay_seq', 'admittime', 'dischtime', 'los_hospital', 'intime', 'outtime', 'ethnicity', 'admission_type', 'first_careunit'], axis=1, inplace=True)
#========= 48 hour vitals query
# these are the normal ranges. useful to clean
# up the data
vitquery = \
"""
-- This query pivots the vital signs for the first 48 hours of a patient's stay
-- Vital signs include heart rate, blood pressure, respiration rate, and temperature
-- DROP MATERIALIZED VIEW IF EXISTS vitalsfirstday CASCADE;
-- create materialized view vitalsfirstday as
SELECT pvt.subject_id, pvt.hadm_id, pvt.icustay_id
-- Easier names
, min(case when VitalID = 1 then valuenum else null end) as HeartRate_Min
, max(case when VitalID = 1 then valuenum else null end) as HeartRate_Max
, avg(case when VitalID = 1 then valuenum else null end) as HeartRate_Mean
, min(case when VitalID = 2 then valuenum else null end) as SysBP_Min
, max(case when VitalID = 2 then valuenum else null end) as SysBP_Max
, avg(case when VitalID = 2 then valuenum else null end) as SysBP_Mean
, min(case when VitalID = 3 then valuenum else null end) as DiasBP_Min
, max(case when VitalID = 3 then valuenum else null end) as DiasBP_Max
, avg(case when VitalID = 3 then valuenum else null end) as DiasBP_Mean
, min(case when VitalID = 4 then valuenum else null end) as MeanBP_Min
, max(case when VitalID = 4 then valuenum else null end) as MeanBP_Max
, avg(case when VitalID = 4 then valuenum else null end) as MeanBP_Mean
, min(case when VitalID = 5 then valuenum else null end) as RespRate_Min
, max(case when VitalID = 5 then valuenum else null end) as RespRate_Max
, avg(case when VitalID = 5 then valuenum else null end) as RespRate_Mean
, min(case when VitalID = 6 then valuenum else null end) as TempC_Min
, max(case when VitalID = 6 then valuenum else null end) as TempC_Max
, avg(case when VitalID = 6 then valuenum else null end) as TempC_Mean
, min(case when VitalID = 7 then valuenum else null end) as SpO2_Min
, max(case when VitalID = 7 then valuenum else null end) as SpO2_Max
, avg(case when VitalID = 7 then valuenum else null end) as SpO2_Mean
, min(case when VitalID = 8 then valuenum else null end) as Glucose_Min
, max(case when VitalID = 8 then valuenum else null end) as Glucose_Max
, avg(case when VitalID = 8 then valuenum else null end) as Glucose_Mean
FROM (
select ie.subject_id, ie.hadm_id, ie.icustay_id
, case
when itemid in (211,220045) and valuenum > 0 and valuenum < 300 then 1 -- HeartRate
when itemid in (51,442,455,6701,220179,220050) and valuenum > 0 and valuenum < 400 then 2 -- SysBP
when itemid in (8368,8440,8441,8555,220180,220051) and valuenum > 0 and valuenum < 300 then 3 -- DiasBP
when itemid in (456,52,6702,443,220052,220181,225312) and valuenum > 0 and valuenum < 300 then 4 -- MeanBP
when itemid in (615,618,220210,224690) and valuenum > 0 and valuenum < 70 then 5 -- RespRate
when itemid in (223761,678) and valuenum > 70 and valuenum < 120 then 6 -- TempF, converted to degC in valuenum call
when itemid in (223762,676) and valuenum > 10 and valuenum < 50 then 6 -- TempC
when itemid in (646,220277) and valuenum > 0 and valuenum <= 100 then 7 -- SpO2
when itemid in (807,811,1529,3745,3744,225664,220621,226537) and valuenum > 0 then 8 -- Glucose
else null end as VitalID
-- convert F to C
, case when itemid in (223761,678) then (valuenum-32)/1.8 else valuenum end as valuenum
from icustays ie
left join chartevents ce
on ie.subject_id = ce.subject_id and ie.hadm_id = ce.hadm_id and ie.icustay_id = ce.icustay_id
and ce.charttime between ie.intime and ie.intime + interval '48' hour
-- exclude rows marked as error
and ce.error IS DISTINCT FROM 1
where ce.itemid in
(
-- HEART RATE
211, --"Heart Rate"
220045, --"Heart Rate"
-- Systolic/diastolic
51, -- Arterial BP [Systolic]
442, -- Manual BP [Systolic]
455, -- NBP [Systolic]
6701, -- Arterial BP #2 [Systolic]
220179, -- Non Invasive Blood Pressure systolic
220050, -- Arterial Blood Pressure systolic
8368, -- Arterial BP [Diastolic]
8440, -- Manual BP [Diastolic]
8441, -- NBP [Diastolic]
8555, -- Arterial BP #2 [Diastolic]
220180, -- Non Invasive Blood Pressure diastolic
220051, -- Arterial Blood Pressure diastolic
-- MEAN ARTERIAL PRESSURE
456, --"NBP Mean"
52, --"Arterial BP Mean"
6702, -- Arterial BP Mean #2
443, -- Manual BP Mean(calc)
220052, --"Arterial Blood Pressure mean"
220181, --"Non Invasive Blood Pressure mean"
225312, --"ART BP mean"
-- RESPIRATORY RATE
618,-- Respiratory Rate
615,-- Resp Rate (Total)
220210,-- Respiratory Rate
224690, -- Respiratory Rate (Total)
-- SPO2, peripheral
646, 220277,
-- GLUCOSE, both lab and fingerstick
807,-- Fingerstick Glucose
811,-- Glucose (70-105)
1529,-- Glucose
3745,-- BloodGlucose
3744,-- Blood Glucose
225664,-- Glucose finger stick
220621,-- Glucose (serum)
226537,-- Glucose (whole blood)
-- TEMPERATURE
223762, -- "Temperature Celsius"
676, -- "Temperature C"
223761, -- "Temperature Fahrenheit"
678 -- "Temperature F"
)
) pvt
group by pvt.subject_id, pvt.hadm_id, pvt.icustay_id
order by pvt.subject_id, pvt.hadm_id, pvt.icustay_id;
"""
# Execute the 48-hour vitals query assembled above.  `con` is the database
# connection opened earlier in this script (not visible in this fragment) --
# presumably a connection to the MIMIC-III Postgres database; confirm.
vit48 = pd.read_sql_query(vitquery,con)
# NOTE(review): the result of this expression is discarded -- it only shows
# per-column null counts when run interactively (e.g. in a notebook).
vit48.isnull().sum()
#===============48 hour labs query
# The query below:
#   - extracts lab events charted from 6 hours before ICU admission up to
#     48 hours after it,
#   - maps ITEMIDs to human-readable labels (fusing duplicate ITEMIDs),
#   - null-outs physiologically impossible values,
#   - keeps only the first (earliest) value of each lab per ICU stay.
labquery = \
"""
WITH pvt AS (
--- ie is the icu stay
--- ad is the admissions table
--- le is the lab events table
SELECT ie.subject_id, ie.hadm_id, ie.icustay_id, le.charttime
-- here we assign labels to ITEMIDs
-- this also fuses together multiple ITEMIDs containing the same data
, CASE
when le.itemid = 50868 then 'ANION GAP'
when le.itemid = 50862 then 'ALBUMIN'
when le.itemid = 50882 then 'BICARBONATE'
when le.itemid = 50885 then 'BILIRUBIN'
when le.itemid = 50912 then 'CREATININE'
when le.itemid = 50806 then 'CHLORIDE'
when le.itemid = 50902 then 'CHLORIDE'
when le.itemid = 50809 then 'GLUCOSE'
when le.itemid = 50931 then 'GLUCOSE'
when le.itemid = 50810 then 'HEMATOCRIT'
when le.itemid = 51221 then 'HEMATOCRIT'
when le.itemid = 50811 then 'HEMOGLOBIN'
when le.itemid = 51222 then 'HEMOGLOBIN'
when le.itemid = 50813 then 'LACTATE'
when le.itemid = 50960 then 'MAGNESIUM'
when le.itemid = 50970 then 'PHOSPHATE'
when le.itemid = 51265 then 'PLATELET'
when le.itemid = 50822 then 'POTASSIUM'
when le.itemid = 50971 then 'POTASSIUM'
when le.itemid = 51275 then 'PTT'
when le.itemid = 51237 then 'INR'
when le.itemid = 51274 then 'PT'
when le.itemid = 50824 then 'SODIUM'
when le.itemid = 50983 then 'SODIUM'
when le.itemid = 51006 then 'BUN'
when le.itemid = 51300 then 'WBC'
when le.itemid = 51301 then 'WBC'
ELSE null
END AS label
, -- add in some sanity checks on the values
-- the where clause below requires all valuenum to be > 0,
-- so these are only upper limit checks
CASE
when le.itemid = 50862 and le.valuenum > 10 then null -- g/dL 'ALBUMIN'
when le.itemid = 50868 and le.valuenum > 10000 then null -- mEq/L 'ANION GAP'
when le.itemid = 50882 and le.valuenum > 10000 then null -- mEq/L 'BICARBONATE'
when le.itemid = 50885 and le.valuenum > 150 then null -- mg/dL 'BILIRUBIN'
when le.itemid = 50806 and le.valuenum > 10000 then null -- mEq/L 'CHLORIDE'
when le.itemid = 50902 and le.valuenum > 10000 then null -- mEq/L 'CHLORIDE'
when le.itemid = 50912 and le.valuenum > 150 then null -- mg/dL 'CREATININE'
when le.itemid = 50809 and le.valuenum > 10000 then null -- mg/dL 'GLUCOSE'
when le.itemid = 50931 and le.valuenum > 10000 then null -- mg/dL 'GLUCOSE'
when le.itemid = 50810 and le.valuenum > 100 then null -- % 'HEMATOCRIT'
when le.itemid = 51221 and le.valuenum > 100 then null -- % 'HEMATOCRIT'
when le.itemid = 50811 and le.valuenum > 50 then null -- g/dL 'HEMOGLOBIN'
when le.itemid = 51222 and le.valuenum > 50 then null -- g/dL 'HEMOGLOBIN'
when le.itemid = 50813 and le.valuenum > 50 then null -- mmol/L 'LACTATE'
when le.itemid = 50960 and le.valuenum > 60 then null -- mmol/L 'MAGNESIUM'
when le.itemid = 50970 and le.valuenum > 60 then null -- mg/dL 'PHOSPHATE'
when le.itemid = 51265 and le.valuenum > 10000 then null -- K/uL 'PLATELET'
when le.itemid = 50822 and le.valuenum > 30 then null -- mEq/L 'POTASSIUM'
when le.itemid = 50971 and le.valuenum > 30 then null -- mEq/L 'POTASSIUM'
when le.itemid = 51275 and le.valuenum > 150 then null -- sec 'PTT'
when le.itemid = 51237 and le.valuenum > 50 then null -- 'INR'
when le.itemid = 51274 and le.valuenum > 150 then null -- sec 'PT'
when le.itemid = 50824 and le.valuenum > 200 then null -- mEq/L == mmol/L 'SODIUM'
when le.itemid = 50983 and le.valuenum > 200 then null -- mEq/L == mmol/L 'SODIUM'
when le.itemid = 51006 and le.valuenum > 300 then null -- 'BUN'
when le.itemid = 51300 and le.valuenum > 1000 then null -- 'WBC'
when le.itemid = 51301 and le.valuenum > 1000 then null -- 'WBC'
ELSE le.valuenum
END AS valuenum
FROM icustays ie
LEFT JOIN labevents le
ON le.subject_id = ie.subject_id
AND le.hadm_id = ie.hadm_id
-- TODO: they are using lab times 6 hours before the start of the
-- ICU stay.
AND le.charttime between (ie.intime - interval '6' hour)
AND (ie.intime + interval '48' hour)
AND le.itemid IN
(
-- comment is: LABEL | CATEGORY | FLUID | NUMBER OF ROWS IN LABEVENTS
50868, -- ANION GAP | CHEMISTRY | BLOOD | 769895
50862, -- ALBUMIN | CHEMISTRY | BLOOD | 146697
50882, -- BICARBONATE | CHEMISTRY | BLOOD | 780733
50885, -- BILIRUBIN, TOTAL | CHEMISTRY | BLOOD | 238277
50912, -- CREATININE | CHEMISTRY | BLOOD | 797476
50902, -- CHLORIDE | CHEMISTRY | BLOOD | 795568
50806, -- CHLORIDE, WHOLE BLOOD | BLOOD GAS | BLOOD | 48187
50931, -- GLUCOSE | CHEMISTRY | BLOOD | 748981
50809, -- GLUCOSE | BLOOD GAS | BLOOD | 196734
51221, -- HEMATOCRIT | HEMATOLOGY | BLOOD | 881846
50810, -- HEMATOCRIT, CALCULATED | BLOOD GAS | BLOOD | 89715
51222, -- HEMOGLOBIN | HEMATOLOGY | BLOOD | 752523
50811, -- HEMOGLOBIN | BLOOD GAS | BLOOD | 89712
50813, -- LACTATE | BLOOD GAS | BLOOD | 187124
50960, -- MAGNESIUM | CHEMISTRY | BLOOD | 664191
50970, -- PHOSPHATE | CHEMISTRY | BLOOD | 590524
51265, -- PLATELET COUNT | HEMATOLOGY | BLOOD | 778444
50971, -- POTASSIUM | CHEMISTRY | BLOOD | 845825
50822, -- POTASSIUM, WHOLE BLOOD | BLOOD GAS | BLOOD | 192946
51275, -- PTT | HEMATOLOGY | BLOOD | 474937
51237, -- INR(PT) | HEMATOLOGY | BLOOD | 471183
51274, -- PT | HEMATOLOGY | BLOOD | 469090
50983, -- SODIUM | CHEMISTRY | BLOOD | 808489
50824, -- SODIUM, WHOLE BLOOD | BLOOD GAS | BLOOD | 71503
51006, -- UREA NITROGEN | CHEMISTRY | BLOOD | 791925
51301, -- WHITE BLOOD CELLS | HEMATOLOGY | BLOOD | 753301
51300 -- WBC COUNT | HEMATOLOGY | BLOOD | 2371
)
AND le.valuenum IS NOT null
AND le.valuenum > 0 -- lab values cannot be 0 and cannot be negative
LEFT JOIN admissions ad
ON ie.subject_id = ad.subject_id
AND ie.hadm_id = ad.hadm_id
),
ranked AS (
SELECT pvt.*, DENSE_RANK() OVER (PARTITION BY
pvt.subject_id, pvt.hadm_id,pvt.icustay_id,pvt.label ORDER BY pvt.charttime) as drank
FROM pvt
)
SELECT r.subject_id, r.hadm_id, r.icustay_id
, max(case when label = 'ANION GAP' then valuenum else null end) as ANIONGAP
, max(case when label = 'ALBUMIN' then valuenum else null end) as ALBUMIN
, max(case when label = 'BICARBONATE' then valuenum else null end) as BICARBONATE
, max(case when label = 'BILIRUBIN' then valuenum else null end) as BILIRUBIN
, max(case when label = 'CREATININE' then valuenum else null end) as CREATININE
, max(case when label = 'CHLORIDE' then valuenum else null end) as CHLORIDE
, max(case when label = 'GLUCOSE' then valuenum else null end) as GLUCOSE
, max(case when label = 'HEMATOCRIT' then valuenum else null end) as HEMATOCRIT
, max(case when label = 'HEMOGLOBIN' then valuenum else null end) as HEMOGLOBIN
, max(case when label = 'LACTATE' then valuenum else null end) as LACTATE
, max(case when label = 'MAGNESIUM' then valuenum else null end) as MAGNESIUM
, max(case when label = 'PHOSPHATE' then valuenum else null end) as PHOSPHATE
, max(case when label = 'PLATELET' then valuenum else null end) as PLATELET
, max(case when label = 'POTASSIUM' then valuenum else null end) as POTASSIUM
, max(case when label = 'PTT' then valuenum else null end) as PTT
, max(case when label = 'INR' then valuenum else null end) as INR
, max(case when label = 'PT' then valuenum else null end) as PT
, max(case when label = 'SODIUM' then valuenum else null end) as SODIUM
, max(case when label = 'BUN' then valuenum else null end) as BUN
, max(case when label = 'WBC' then valuenum else null end) as WBC
FROM ranked r
WHERE r.drank = 1
GROUP BY r.subject_id, r.hadm_id, r.icustay_id, r.drank
ORDER BY r.subject_id, r.hadm_id, r.icustay_id, r.drank;
"""
# Run the 48-hour labs query; one row per ICU stay with the earliest value
# of each lab during the window.
lab48 = pd.read_sql_query(labquery,con)

#=========notes
# Clinical notes: concatenate all note texts charted during the first
# 48 hours of each ICU stay into a single string per stay.
notesquery = \
"""
SELECT fin.subject_id, fin.hadm_id, fin.icustay_id, string_agg(fin.text, ' ') as chartext
FROM (
select ie.subject_id, ie.hadm_id, ie.icustay_id, ne.text
from icustays ie
left join noteevents ne
on ie.subject_id = ne.subject_id and ie.hadm_id = ne.hadm_id
and ne.charttime between ie.intime and ie.intime + interval '48' hour
--and ne.iserror != '1'
) fin
group by fin.subject_id, fin.hadm_id, fin.icustay_id
order by fin.subject_id, fin.hadm_id, fin.icustay_id;
"""
notes48 = pd.read_sql_query(notesquery,con)

#=====combine all variables
# `den` is built earlier in this script -- presumably the denominator /
# demographics table (one row per ICU stay); confirm against the full file.
mort_ds = den.merge(vit48,how = 'left', on = ['subject_id', 'hadm_id', 'icustay_id'])
mort_ds = mort_ds.merge(lab48,how = 'left', on = ['subject_id', 'hadm_id', 'icustay_id'])

#======missing values (following joydeep ghosh's paper)
# create means by age group and gender
# Bucket age so that missing values can be imputed within
# (age_group, gender) cells.
mort_ds['age_group'] = pd.cut(mort_ds['age'], [-1,5,10,15,20, 25, 40,60, 80, 200],
    labels = ['l5','5_10', '10_15', '15_20', '20_25', '25_40', '40_60', '60_80', '80p'])
# `replace` is defined earlier in the script -- presumably it fills NaNs
# with the group statistic; TODO confirm.  After transform() the group keys
# are consumed and mort_ds is a plain DataFrame again.
mort_ds = mort_ds.groupby(['age_group', 'gender'])
mort_ds = mort_ds.transform(replace)
#mort_ds.drop('age_group', 1, inplace =True )
# one missing variable
adult_icu = mort_ds[(mort_ds.adult_icu==1)].dropna()

# create training and testing labels
# Random 70/30 train/test split flag (1 = train).
msk = np.random.rand(len(adult_icu)) < 0.7
adult_icu['train'] = np.where(msk, 1, 0)
adult_icu.to_csv(os.path.join(mimicdir, 'adult_icu.gz'), compression='gzip', index = False)

# notes
# Attach the split flag and mortality label to the notes table; 'right'
# merge keeps only stays that survived the dropna() above.
adult_notes = notes48.merge(adult_icu[['train', 'subject_id', 'hadm_id', 'icustay_id', 'mort_icu']], how = 'right', on = ['subject_id', 'hadm_id', 'icustay_id'])
adult_notes.to_csv(os.path.join(mimicdir, 'adult_notes.gz'), compression='gzip', index = False)
| 41.254202 | 202 | 0.685135 |
4918d6d679945db227bfcba68023e986105933df | 14,098 | py | Python | release/alert.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | [
"Apache-2.0"
] | 21,382 | 2016-09-26T23:12:52.000Z | 2022-03-31T21:47:45.000Z | release/alert.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | [
"Apache-2.0"
] | 19,689 | 2016-09-17T08:21:25.000Z | 2022-03-31T23:59:30.000Z | release/alert.py | gramhagen/ray | c18caa4db36d466718bdbcb2229aa0b2dc03da1f | [
"Apache-2.0"
] | 4,114 | 2016-09-23T18:54:01.000Z | 2022-03-31T15:07:32.000Z | import argparse
from collections import defaultdict, Counter
from typing import Any, List, Tuple, Mapping, Optional
import datetime
import hashlib
import json
import logging
import os
import requests
import sys
import boto3
from e2e import GLOBAL_CONFIG
from alerts.default import handle_result as default_handle_result
from alerts.rllib_tests import handle_result as rllib_tests_handle_result
from alerts.long_running_tests import handle_result as \
long_running_tests_handle_result
from alerts.tune_tests import handle_result as tune_tests_handle_result
from alerts.xgboost_tests import handle_result as xgboost_tests_handle_result
# Map from test-suite name to its specialised result handler; suites not
# listed here fall back to `default_handle_result`.
SUITE_TO_FN = {
    "long_running_tests": long_running_tests_handle_result,
    "rllib_tests": rllib_tests_handle_result,
    "tune_tests": tune_tests_handle_result,
    "xgboost_tests": xgboost_tests_handle_result,
}

# Table that tracks which results have already been alerted on.
GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"] = "alert_state"
GLOBAL_CONFIG["SLACK_WEBHOOK"] = os.environ.get("SLACK_WEBHOOK", "")
GLOBAL_CONFIG["SLACK_CHANNEL"] = os.environ.get("SLACK_CHANNEL",
                                                "#oss-test-cop")

# Maximum number of results to consider per fetch.
RESULTS_LIMIT = 120

# Module logger: INFO and above to stdout with a file/line prefix.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
                              "%(filename)s: %(lineno)d "
                              "%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def mark_as_handled(rds_data_client, update: bool, category: str,
                    test_suite: str, test_name: str, result_hash: str,
                    last_notification_dt: datetime.datetime):
    """Record in the alert-state table that this result was processed.

    Inserts a new row for the (category, test_suite, test_name) key when
    ``update`` is False, otherwise updates the existing row, storing the
    hash of the handled result and the notification timestamp.
    """
    schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]

    if update:
        sql = (f"""
            UPDATE {schema}
            SET last_result_hash=:last_result_hash,
                last_notification_dt=:last_notification_dt
            WHERE category=:category AND test_suite=:test_suite
            AND test_name=:test_name
            """)
    else:
        sql = (f"""
            INSERT INTO {schema}
            (category, test_suite, test_name,
             last_result_hash, last_notification_dt)
            VALUES (:category, :test_suite, :test_name,
                    :last_result_hash, :last_notification_dt)
            """)

    def _param(name, value, type_hint=None):
        # rds-data passes every value as a string; typeHint tells the
        # service how to coerce it server-side.
        entry = {"name": name, "value": {"stringValue": value}}
        if type_hint:
            entry["typeHint"] = type_hint
        return entry

    parameters = [
        _param("category", category),
        _param("test_suite", test_suite or ""),
        _param("test_name", test_name),
        _param("last_result_hash", result_hash),
        _param("last_notification_dt",
               last_notification_dt.strftime("%Y-%m-%d %H:%M:%S"),
               type_hint="TIMESTAMP"),
    ]

    rds_data_client.execute_statement(
        database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
        parameters=parameters,
        secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
        resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
        schema=schema,
        sql=sql,
    )
def post_alerts_to_slack(channel: str, alerts: List[Tuple[str, str, str, str]],
                         non_alerts: Mapping[str, int]):
    """Post one Slack message listing every new release-test failure.

    ``alerts`` holds (category, test_suite, test_name, message) tuples;
    ``non_alerts`` maps category -> number of passing tests.  Does nothing
    when there are no alerts.
    """
    if not alerts:
        logger.info("No alerts to post to slack.")
        return

    # Group the per-test failure lines by branch/category.
    per_category = defaultdict(list)
    for category, test_suite, test_name, alert in alerts:
        per_category[category].append(
            f"  *{test_suite}/{test_name}* failed: {alert}")

    lines = [f"* {len(alerts)} new release test failures found!*", ""]
    for category, failure_lines in per_category.items():
        lines.append(f"Branch: *{category}*")
        lines.extend(failure_lines)
        lines.append("")

    passed_total = sum(non_alerts.values())
    passed_detail = ", ".join(f"{n} on {c}" for c, n in non_alerts.items())
    lines.append(f"Additionally, {passed_total} tests passed successfully "
                 f"({passed_detail}).")

    resp = requests.post(
        GLOBAL_CONFIG["SLACK_WEBHOOK"],
        json={
            "text": "\n".join(lines),
            "channel": channel,
            "username": "Fail Bot",
            "icon_emoji": ":red_circle:",
        },
    )
    print(resp.status_code)
    print(resp.text)
def post_statistics_to_slack(channel: str,
                             alerts: List[Tuple[str, str, str, str]],
                             non_alerts: Mapping[str, int]):
    """Post the periodic (last-24h) pass/fail summary report to Slack."""
    # Group failing test identifiers by branch/category.
    failing_by_category = defaultdict(list)
    for category, test_suite, test_name, alert in alerts:
        failing_by_category[category].append(f"`{test_suite}/{test_name}`")

    total_alerts = len(alerts)
    total_non_alerts = sum(non_alerts.values())

    lines = [
        "*Periodic release test report*", "", f"In the past 24 hours, "
        f"*{total_non_alerts}* release tests finished successfully, and "
        f"*{total_alerts}* release tests failed."
    ]
    lines.append("")

    if total_alerts:
        alert_detail = [
            f"{len(a)} on {c}" for c, a in failing_by_category.items()
        ]
        lines.append(f"*Failing:* {', '.join(alert_detail)}")
        for c, a in failing_by_category.items():
            lines.append(f"  *{c}*: {', '.join(sorted(a))}")
    else:
        lines.append("*Failing:* None")

    lines.append("")

    if total_non_alerts:
        non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]
        lines.append(f"*Passing:* {', '.join(non_alert_detail)}")
    else:
        lines.append("*Passing:* None")

    resp = requests.post(
        GLOBAL_CONFIG["SLACK_WEBHOOK"],
        json={
            "text": "\n".join(lines),
            "channel": channel,
            "username": "Fail Bot",
            "icon_emoji": ":red_circle:",
        },
    )
    print(resp.status_code)
    print(resp.text)
def handle_results_and_get_alerts(
        rds_data_client,
        fetch_since: Optional[datetime.datetime] = None,
        always_try_alert: bool = False,
        no_status_update: bool = False):
    """Run suite handlers over the latest results and collect alerts.

    Returns ``(alerts, non_alerts)`` where ``alerts`` is a list of
    (category, test_suite, test_name, message) tuples and ``non_alerts``
    counts passing tests per category.  ``fetch_latest_alerts`` and
    ``fetch_latest_results`` are defined elsewhere in this module (not
    visible in this fragment).
    """
    # First build a map of last notifications:
    # (category, suite, test) -> (hash of last handled result, when).
    last_notifications_map = {}
    for category, test_suite, test_name, last_result_hash, \
            last_notification_dt in fetch_latest_alerts(rds_data_client):
        last_notifications_map[(category, test_suite,
                                test_name)] = (last_result_hash,
                                               last_notification_dt)

    alerts = []
    non_alerts = Counter()

    # Then fetch latest results
    for result_hash, created_on, category, test_suite, test_name, status, \
            results, artifacts, last_logs in fetch_latest_results(
                rds_data_client, fetch_since=fetch_since):
        key = (category, test_suite, test_name)

        try_alert = always_try_alert
        if key in last_notifications_map:
            # If we have an alert for this key, fetch info
            last_result_hash, last_notification_dt = last_notifications_map[
                key]
            if last_result_hash != result_hash:
                # If we got a new result, handle new result
                try_alert = True
            # Todo: maybe alert again after some time?
        else:
            # Never seen this test before -- always handle it.
            try_alert = True

        if try_alert:
            # Dispatch to the suite-specific handler, falling back to the
            # default handler for unknown suites.
            handle_fn = SUITE_TO_FN.get(test_suite, None)
            if not handle_fn:
                logger.warning(f"No handle for suite {test_suite}")
                alert = default_handle_result(created_on, category, test_suite,
                                              test_name, status, results,
                                              artifacts, last_logs)
            else:
                alert = handle_fn(created_on, category, test_suite, test_name,
                                  status, results, artifacts, last_logs)

            if alert:
                logger.warning(
                    f"Alert raised for test {test_suite}/{test_name} "
                    f"({category}): {alert}")

                alerts.append((category, test_suite, test_name, alert))
            else:
                logger.debug(
                    f"No alert raised for test {test_suite}/{test_name} "
                    f"({category})")
                non_alerts[category] += 1

            if not no_status_update:
                # Persist that this result hash has been handled so it is
                # not alerted on again.
                mark_as_handled(rds_data_client, key in last_notifications_map,
                                category, test_suite, test_name, result_hash,
                                datetime.datetime.now())

    return alerts, non_alerts
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): this help text looks copy-pasted from another script;
    # --stats actually switches to the periodic 24h statistics report.
    parser.add_argument(
        "--stats",
        action="store_true",
        default=False,
        help="Finish quickly for training.")
    args = parser.parse_args()

    # Defined elsewhere in this module -- presumably populates
    # GLOBAL_CONFIG["SLACK_WEBHOOK"] when unset; confirm.
    maybe_fetch_slack_webhook()

    rds_data_client = boto3.client("rds-data", region_name="us-west-2")

    if args.stats:
        # Only update last 24 hour stats
        fetch_since = datetime.datetime.now() - datetime.timedelta(days=1)
        alerts, non_alerts = handle_results_and_get_alerts(
            rds_data_client,
            fetch_since=fetch_since,
            always_try_alert=True,
            no_status_update=True)
        post_statistics_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
                                 non_alerts)
    else:
        # Normal mode: alert on new failures and mark them as handled.
        alerts, non_alerts = handle_results_and_get_alerts(rds_data_client)
        post_alerts_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
                             non_alerts)
| 34.724138 | 79 | 0.589658 |
4918f13347223ad7457de28e1dde690394ca0299 | 2,176 | py | Python | bios-token.py | emahear/openusm | 96bb62b91f4b5520e14d86ae86e1b320404134e6 | [
"MIT"
] | 4 | 2019-08-04T05:50:46.000Z | 2020-04-16T19:24:11.000Z | bios-token.py | emahear/openusm | 96bb62b91f4b5520e14d86ae86e1b320404134e6 | [
"MIT"
] | null | null | null | bios-token.py | emahear/openusm | 96bb62b91f4b5520e14d86ae86e1b320404134e6 | [
"MIT"
] | 6 | 2019-08-03T12:57:47.000Z | 2020-06-08T01:50:43.000Z | import os
import argparse
if __name__ == '__main__':
    # Script entry point.  NOTE(review): main() is not defined in this
    # fragment -- the function bodies appear to have been stripped from
    # this copy; restore them from the full source before running.
    main()
| 32.477612 | 306 | 0.552849 |
491929694fdf621f13fc8f553f27ac207d5a59c5 | 333 | py | Python | Examples/user_data/sharptrack.py | FedeClaudi/brainrender | b1d8adcef52615fcd86a083be4dc48c68a8b0bb9 | [
"MIT"
] | null | null | null | Examples/user_data/sharptrack.py | FedeClaudi/brainrender | b1d8adcef52615fcd86a083be4dc48c68a8b0bb9 | [
"MIT"
] | null | null | null | Examples/user_data/sharptrack.py | FedeClaudi/brainrender | b1d8adcef52615fcd86a083be4dc48c68a8b0bb9 | [
"MIT"
] | null | null | null | import brainrender
# Render with flat "cartoon" shading (must be set before creating a Scene).
brainrender.SHADER_STYLE = 'cartoon'
from brainrender.scene import Scene

# Probe-points file exported from the SHARP-Track electrode-tracking tool.
sharptrack_file = 'Examples/example_files/sharptrack_probe_points.mat'

scene = Scene(use_default_key_bindings=True)
# Thalamus as a mostly-transparent wireframe so the probe stays visible.
scene.add_brain_regions('TH', alpha=.2, wireframe=True)
scene.add_probe_from_sharptrack(sharptrack_file)
scene.render() | 23.785714 | 70 | 0.831832 |
4919e36bd5888cd73891195c85651a0ab2da447b | 7,910 | py | Python | homeassistant/components/metoffice/sensor.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 7 | 2019-08-15T13:36:58.000Z | 2020-03-18T10:46:29.000Z | homeassistant/components/metoffice/sensor.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 87 | 2020-07-06T22:22:54.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/metoffice/sensor.py | winning1120xx/home-assistant | 53d4c0ce2d374b5e97bbdc37742656c27adf8eea | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """Support for UK Met Office weather service."""
from __future__ import annotations
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import (
ATTR_ATTRIBUTION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
LENGTH_KILOMETERS,
PERCENTAGE,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
UV_INDEX,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTRIBUTION,
CONDITION_CLASSES,
DOMAIN,
METOFFICE_COORDINATES,
METOFFICE_DAILY_COORDINATOR,
METOFFICE_HOURLY_COORDINATOR,
METOFFICE_NAME,
MODE_3HOURLY_LABEL,
MODE_DAILY,
MODE_DAILY_LABEL,
VISIBILITY_CLASSES,
VISIBILITY_DISTANCE_CLASSES,
)
# Extra state-attribute keys exposed on each sensor entity.
ATTR_LAST_UPDATE = "last_update"
ATTR_SENSOR_ID = "sensor_id"
ATTR_SITE_ID = "site_id"
ATTR_SITE_NAME = "site_name"

# Descriptions of every sensor this integration can expose.  Each entry
# fixes the entity key, display name, device class, native unit, icon and
# whether the entity is enabled by default in the entity registry.
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
    SensorEntityDescription(
        key="name",
        name="Station Name",
        device_class=None,
        native_unit_of_measurement=None,
        icon="mdi:label-outline",
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="weather",
        name="Weather",
        device_class=None,
        native_unit_of_measurement=None,
        icon="mdi:weather-sunny",  # but will adapt to current conditions
        entity_registry_enabled_default=True,
    ),
    SensorEntityDescription(
        key="temperature",
        name="Temperature",
        device_class=DEVICE_CLASS_TEMPERATURE,
        native_unit_of_measurement=TEMP_CELSIUS,
        icon=None,
        entity_registry_enabled_default=True,
    ),
    SensorEntityDescription(
        key="feels_like_temperature",
        name="Feels Like Temperature",
        device_class=DEVICE_CLASS_TEMPERATURE,
        native_unit_of_measurement=TEMP_CELSIUS,
        icon=None,
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="wind_speed",
        name="Wind Speed",
        device_class=None,
        native_unit_of_measurement=SPEED_MILES_PER_HOUR,
        icon="mdi:weather-windy",
        entity_registry_enabled_default=True,
    ),
    SensorEntityDescription(
        key="wind_direction",
        name="Wind Direction",
        device_class=None,
        native_unit_of_measurement=None,
        icon="mdi:compass-outline",
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="wind_gust",
        name="Wind Gust",
        device_class=None,
        native_unit_of_measurement=SPEED_MILES_PER_HOUR,
        icon="mdi:weather-windy",
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="visibility",
        name="Visibility",
        device_class=None,
        native_unit_of_measurement=None,
        icon="mdi:eye",
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="visibility_distance",
        name="Visibility Distance",
        device_class=None,
        native_unit_of_measurement=LENGTH_KILOMETERS,
        icon="mdi:eye",
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="uv",
        name="UV Index",
        device_class=None,
        native_unit_of_measurement=UV_INDEX,
        icon="mdi:weather-sunny-alert",
        entity_registry_enabled_default=True,
    ),
    SensorEntityDescription(
        key="precipitation",
        name="Probability of Precipitation",
        device_class=None,
        native_unit_of_measurement=PERCENTAGE,
        icon="mdi:weather-rainy",
        entity_registry_enabled_default=True,
    ),
    SensorEntityDescription(
        key="humidity",
        name="Humidity",
        device_class=DEVICE_CLASS_HUMIDITY,
        native_unit_of_measurement=PERCENTAGE,
        icon=None,
        entity_registry_enabled_default=False,
    ),
)
| 30.898438 | 93 | 0.643236 |
491bbb7a18db6baa7c684edc4d966b84daa2ba53 | 3,513 | py | Python | examples/advanced.py | ajrichardson/formlayout | 4b267ad29cbbdab9baf1bef3fcc48f23c699eebc | [
"MIT"
] | null | null | null | examples/advanced.py | ajrichardson/formlayout | 4b267ad29cbbdab9baf1bef3fcc48f23c699eebc | [
"MIT"
] | null | null | null | examples/advanced.py | ajrichardson/formlayout | 4b267ad29cbbdab9baf1bef3fcc48f23c699eebc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see formlayout.py for details)
"""
Simple formlayout example
Please take a look at formlayout.py for more examples
(at the end of the script, after the 'if __name__ == "__main__":' line)
"""
import datetime
# for normal usage
from formlayout import fedit
# for programming usage
from formlayout import QLineEdit
#--------- datalist example
# NOTE(review): create_datalist_example / create_datagroup_example /
# apply_function are not defined in this fragment -- in the upstream
# formlayout example they are defined earlier in the same file.
datalist = create_datalist_example()
# Edit a flat list of fields with customised button labels; returns the
# values as a dict.
print("result:", fedit(datalist, title="Example",
                       comment="This is just an <b>example</b>.",
                       apply=('Custom &Apply button', apply_function),
                       ok='Custom &OK button',
                       cancel='Custom &Cancel button',
                       result='dict',
                       type='questions',
                       scrollbar=True))

#--------- datagroup example
# A datagroup renders each datalist in its own group; result as JSON.
datagroup = create_datagroup_example()
print("result:", fedit(datagroup, "Global title", result='JSON'))

#--------- datagroup inside a datagroup example
# Nested groups become tabs; result serialised as XML.
datalist = create_datalist_example()
datagroup = create_datagroup_example()
print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
                        (datalist, "Title 2", "Tab 2 comment"),
                        (datalist, "Title 3", "Tab 3 comment")),
                       "Global title", result='XML'))
| 36.216495 | 79 | 0.542841 |
491c2d65ca77e28affc08be455a8dcb0f85ffc8e | 1,527 | py | Python | PyBank/main.py | jackaloppy/python-challenge | 71690ca4059fee9c31334347275866431f5d9155 | [
"RSA-MD"
] | null | null | null | PyBank/main.py | jackaloppy/python-challenge | 71690ca4059fee9c31334347275866431f5d9155 | [
"RSA-MD"
] | null | null | null | PyBank/main.py | jackaloppy/python-challenge | 71690ca4059fee9c31334347275866431f5d9155 | [
"RSA-MD"
] | null | null | null | # Import Modules
import os
import csv


def summarize(rows):
    """Compute the PyBank summary from budget rows.

    Parameters
    ----------
    rows : iterable of (date, profit) pairs; profit must parse as int.

    Returns a dict with keys: months, total, avg_change, max_month,
    max_change, min_month, min_change.  Per the PyBank spec, avg_change is
    the mean of the month-over-month *changes* (0 when fewer than two
    months), and the greatest increase/decrease are the extreme changes
    between consecutive months -- the original code mislabelled the
    average monthly profit and the extreme raw monthly values instead.
    """
    months = 0
    total = 0
    prev = None                 # previous month's profit/loss
    changes = []                # month-over-month deltas
    max_month, max_change = "", 0
    min_month, min_change = "", 0

    for date, value in rows:
        value = int(value)
        months += 1
        total += value
        if prev is not None:
            change = value - prev
            changes.append(change)
            if change > max_change:
                max_month, max_change = date, change
            if change < min_change:
                min_month, min_change = date, change
        prev = value

    avg_change = sum(changes) / len(changes) if changes else 0
    return {
        "months": months,
        "total": total,
        "avg_change": avg_change,
        "max_month": max_month,
        "max_change": max_change,
        "min_month": min_month,
        "min_change": min_change,
    }


def format_report(summary):
    """Render the analysis dict as the list of report lines."""
    return [
        "Financial Analysis",
        "----------------------------",
        "Total Months: " + str(summary["months"]),
        "Total: $" + str(summary["total"]),
        "Average Change: $" + str(summary["avg_change"]),
        "Greatest Increase in Profits: " + summary["max_month"]
        + " ($" + str(summary["max_change"]) + ")",
        "Greatest Decrease in Profits: " + summary["min_month"]
        + " ($" + str(summary["min_change"]) + ")",
    ]


def main():
    """Read the budget CSV, write the report, and echo it to the terminal."""
    filepath = os.path.join("Resources", "budget_data.csv")

    with open(filepath) as csvfile:
        csvreader = csv.reader(csvfile, delimiter=",")
        next(csvreader)  # skip the header row
        summary = summarize(csvreader)

    report = format_report(summary)

    # "w" (not "a") so reruns do not append duplicate reports.
    with open(os.path.join("analysis", "output.txt"), "w") as f:
        for line in report:
            print(line, file=f)
    for line in report:
        print(line)


if __name__ == "__main__":
    main()
491cf38094ed0cb56e1412d6daa74c8867a4538f | 4,103 | py | Python | odoo-13.0/addons/web/models/ir_http.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 12 | 2021-03-26T08:39:40.000Z | 2022-03-16T02:20:10.000Z | odoo-13.0/addons/web/models/ir_http.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 13 | 2020-12-20T16:00:21.000Z | 2022-03-14T14:55:30.000Z | odoo-13.0/addons/web/models/ir_http.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 17 | 2020-08-31T11:18:49.000Z | 2022-02-09T05:57:31.000Z | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import hashlib
import json
from odoo import api, models
from odoo.http import request
from odoo.tools import ustr
from odoo.addons.web.controllers.main import module_boot, HomeStaticTemplateHelpers
import odoo
| 48.845238 | 172 | 0.6427 |
491db1b8d8dc21a65a486fcf67ecab8e646adeff | 1,500 | py | Python | anonymize-attributes.py | thormeier-fhnw-repos/sna-holaspirit-to-gephi | e83d44e887608d4d584ded825be9cd950d87e590 | [
"MIT"
] | 2 | 2018-11-12T22:10:54.000Z | 2021-08-30T10:13:53.000Z | anonymize-attributes.py | thormeier-fhnw-repos/sna-holaspirit-to-gephi | e83d44e887608d4d584ded825be9cd950d87e590 | [
"MIT"
] | null | null | null | anonymize-attributes.py | thormeier-fhnw-repos/sna-holaspirit-to-gephi | e83d44e887608d4d584ded825be9cd950d87e590 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import argparse
sys.path.append('./')
from src.utils.list_to_dict import list_to_dict
from src.utils.read_csv import read_csv
from src.utils.map_to_list_csv import map_to_list_csv
from src.gephi.write_csv import write_csv
print("")
print("-----------------------------")
print("Anonymize attributes")
print("-----------------------------")
print("")
parser = argparse.ArgumentParser(description="Anonymizes a given attributes CSV")
required_parser = parser.add_argument_group('required named arguments')
required_parser.add_argument("--attributes-file", dest="attrs", help="Attributes, a given file with attributes for Gephi", required=True)
required_parser.add_argument("--person-file", dest="persons", help="Personss, a list of persons and their anonymized tokens", required=True)
args = parser.parse_args()
attributes_file = args.attrs
persons_file = args.persons
print("Reading attributes file...")
attributes_raw = read_csv(attributes_file)
attributes = list_to_dict(attributes_raw[1:])
print("Reading persons file...")
persons = list_to_dict(read_csv(persons_file)[1:])
print("Anonymizing...")
anonymized_attributes = list()
for key, value in attributes.items():
name = persons[key][0]
row = value
row.insert(0, name)
anonymized_attributes.append(row)
print("Write anonymized attributes to attributes file again")
anonymized_attributes.insert(0, attributes_raw[0])
write_csv(anonymized_attributes, attributes_file)
print("All done!")
| 28.846154 | 140 | 0.744 |
491f6d6d11d857e14ccaa0306230176130993c29 | 5,487 | py | Python | custom_laboratory/custom_laboratory/doctype/vaccination/vaccination.py | panhavad/custom_laboratory | a86d24bd955dc078ded044e714955cdf0c257176 | [
"MIT"
] | null | null | null | custom_laboratory/custom_laboratory/doctype/vaccination/vaccination.py | panhavad/custom_laboratory | a86d24bd955dc078ded044e714955cdf0c257176 | [
"MIT"
] | 1 | 2021-01-12T08:27:54.000Z | 2021-01-12T08:27:54.000Z | custom_laboratory/custom_laboratory/doctype/vaccination/vaccination.py | panhavad/custom_laboratory | a86d24bd955dc078ded044e714955cdf0c257176 | [
"MIT"
] | 1 | 2021-01-12T08:34:12.000Z | 2021-01-12T08:34:12.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Duk Panhavad and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import getdate, cstr
from datetime import timedelta
def create_vaccination_from_invoice(invoice_name):
vaccination_names = list()
vaccinations_created = False
invoice = frappe.get_doc("Sales Invoice", invoice_name)
if invoice.patient:
patient = frappe.get_doc("Patient", invoice.patient)
for item in invoice.items:
vaccination_created = 0
print('-------', item.item_group)
print('=======', item.reference_dt)
if item.reference_dt == "Vaccination Ordered": #check if the invoice created already
vaccination_created = 1
vaccinations_created = "Already create before!! Cannot"
if vaccination_created != 1:
if item.item_group == "Vaccination":
template = get_vaccination_template(item.item_code)
if template:
if template.vaccination_dosage_items:
dosage_durations = [0]#today
for num_day in template.vaccination_dosage_items.split('-'):
dosage_durations.append(int(num_day))
dosage_dates = [getdate() + timedelta(days=each_duration) for each_duration in dosage_durations]
for dosage_number, each_dosage_date in enumerate(dosage_dates):
vaccination = create_vaccination_doc(True, patient, template, invoice.company, each_dosage_date, dosage_number+1)
vaccination.save(ignore_permissions = True)
vaccinations_created = True
vaccination_names.append(vaccination.name)
if not vaccinations_created:
vaccinations_created = vaccination.name
else:
vaccinations_created = ", ".join(vaccination_names)
else:
vaccination = create_vaccination_doc(True, patient, template, invoice.company, getdate())
vaccination.save(ignore_permissions = True)
vaccinations_created = vaccination.name
if item.reference_dt != "Vaccination Ordered":
frappe.db.set_value("Sales Invoice Item", item.name, "reference_dt", "Vaccination Ordered")
frappe.db.set_value("Sales Invoice Item", item.name, "reference_dn", vaccination.name)
return vaccinations_created
def get_vaccination_template(item):
template_id = check_template_exists(item)
if template_id:
return frappe.get_doc("Vaccination Template", template_id)
return False
def check_template_exists(item):
template_exists = frappe.db.exists(
"Vaccination Template",
{
'item': item
}
)
if template_exists:
return template_exists
return False
def create_vaccination_doc(invoiced, patient, template, company, each_dosage_date, dosage_number):
vaccination = frappe.new_doc("Vaccination")
vaccination.invoiced = invoiced
vaccination.patient = patient.name
vaccination.patient_age = patient.get_age()
vaccination.patient_sex = patient.sex
vaccination.email = patient.email
vaccination.mobile = patient.mobile
vaccination.report_preference = patient.report_preference
vaccination.vaccination_template = template.name
vaccination.vaccination_name = template.name
vaccination.dosage_date = each_dosage_date
vaccination.dosage_number = dosage_number
vaccination.company = company
return vaccination
def insert_vaccination_to_medical_record(doc):
if doc.vaccination_name:
vac_name = frappe.bold(_("Vaccination Conducted: ")) + cstr(doc.vaccination_name)
else:
vac_name = ""
if doc.dosage_number:
dos_number = frappe.bold(_("Dosage Number: ")) + cstr(doc.dosage_number)
else:
dos_number = ""
if doc.dosage_date:
planed_date = frappe.bold(_("Planed Dosage Date: ")) + cstr(doc.dosage_date)
else:
planed_date = ""
if doc.vaccination_comment:
comment = frappe.bold(_("Comment: ")) + cstr(doc.vaccination_comment)
else:
comment = ""
actual_date = frappe.bold(_("Actual Dosage Date: ")) + cstr(getdate())
subject = vac_name + "<br>" + dos_number + "<br>" + planed_date + "<br>" + actual_date + "<br>" + comment
medical_record = frappe.new_doc("Patient Medical Record")
medical_record.patient = doc.patient
medical_record.subject = subject
medical_record.status = "Open"
medical_record.communication_date = getdate()
medical_record.reference_doctype = "Vaccination"
medical_record.reference_name = doc.name
medical_record.reference_owner = doc.owner
medical_record.save(ignore_permissions=True)
def delete_vaccination_from_medical_record(self):
medical_record_id = frappe.db.sql("select name from `tabPatient Medical Record` where reference_name=%s",(self.name))
if medical_record_id and medical_record_id[0][0]:
frappe.delete_doc("Patient Medical Record", medical_record_id[0][0]) | 38.104167 | 121 | 0.755786 |
491fd8107e4c9d56c5674e827165e680a0067c06 | 4,445 | py | Python | FinalRound_ImprovedAccuracy_Functionality/training/utils/detectingCARColor.py | tejasmagia/DetectCarParkingSlot_Contest | e9e38f12347aa3cc40234efb000959df20d28f21 | [
"MIT"
] | 9 | 2019-10-20T15:15:06.000Z | 2020-09-07T09:44:37.000Z | FinalRound_ImprovedAccuracy_Functionality/training/utils/detectingCARColor.py | tejasmagia/DetectCarParkingSlot_Contest | e9e38f12347aa3cc40234efb000959df20d28f21 | [
"MIT"
] | null | null | null | FinalRound_ImprovedAccuracy_Functionality/training/utils/detectingCARColor.py | tejasmagia/DetectCarParkingSlot_Contest | e9e38f12347aa3cc40234efb000959df20d28f21 | [
"MIT"
] | null | null | null | from sklearn.cluster import KMeans
import cv2
import PIL
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import image as img1
import pandas as pd
from scipy.cluster.vq import whiten
import os
if __name__ == '__main__':
_main_()
| 28.49359 | 99 | 0.566479 |
49206ede4930182521d4ce9c6b49dda8aef894c6 | 97 | py | Python | cash/settings/product.py | anshengme/cash | 7b24338ea2f3f92fe82f668335bb2eb6e6479f9e | [
"MIT"
] | 18 | 2019-01-04T01:58:03.000Z | 2021-06-25T09:03:58.000Z | cash/settings/product.py | anshengme/cash | 7b24338ea2f3f92fe82f668335bb2eb6e6479f9e | [
"MIT"
] | 3 | 2019-02-26T16:24:40.000Z | 2020-04-04T10:41:38.000Z | cash/settings/product.py | anshengme/cash | 7b24338ea2f3f92fe82f668335bb2eb6e6479f9e | [
"MIT"
] | 5 | 2019-02-26T15:32:52.000Z | 2019-04-22T09:35:27.000Z | from .base import *
DEBUG = False
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
| 16.166667 | 60 | 0.752577 |
492186225f16d44bf92c164978829eca7c8d9540 | 2,807 | py | Python | seqpos/lib/python2.7/site-packages/mercurial/lsprofcalltree.py | guanjue/seqpos | ab9308ad128547ca968a1d944490710e583703bc | [
"MIT"
] | null | null | null | seqpos/lib/python2.7/site-packages/mercurial/lsprofcalltree.py | guanjue/seqpos | ab9308ad128547ca968a1d944490710e583703bc | [
"MIT"
] | null | null | null | seqpos/lib/python2.7/site-packages/mercurial/lsprofcalltree.py | guanjue/seqpos | ab9308ad128547ca968a1d944490710e583703bc | [
"MIT"
] | null | null | null | """
lsprofcalltree.py - lsprof output which is readable by kcachegrind
Authors:
* David Allouche <david <at> allouche.net>
* Jp Calderone & Itamar Shtull-Trauring
* Johan Dahlin
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
"""
from __future__ import absolute_import
from . import (
pycompat,
)
| 29.861702 | 77 | 0.586035 |
4921f463d2a5ff012c886046b237e9741fc7a1a8 | 2,328 | py | Python | nats/aio/errors.py | sr34/asyncio-nats | 347a8e4b3eab275085858a6c8016feb3457905a3 | [
"Apache-2.0"
] | null | null | null | nats/aio/errors.py | sr34/asyncio-nats | 347a8e4b3eab275085858a6c8016feb3457905a3 | [
"Apache-2.0"
] | null | null | null | nats/aio/errors.py | sr34/asyncio-nats | 347a8e4b3eab275085858a6c8016feb3457905a3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2018 The NATS Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
STALE_CONNECTION = b"'Stale Connection'"
AUTHORIZATION_VIOLATION = b"'Authorization Violation'"
| 24.25 | 74 | 0.717354 |
492207457366b7b786ff7beb4e298c9226ecc040 | 1,912 | py | Python | sqlite/app/elephant_queries.py | CurdtMillion/DS-Unit-3-Sprint-2-SQL-and-Databases | ba300b2da1e5fff153d8db76fdf1f67e82cefb9b | [
"MIT"
] | null | null | null | sqlite/app/elephant_queries.py | CurdtMillion/DS-Unit-3-Sprint-2-SQL-and-Databases | ba300b2da1e5fff153d8db76fdf1f67e82cefb9b | [
"MIT"
] | null | null | null | sqlite/app/elephant_queries.py | CurdtMillion/DS-Unit-3-Sprint-2-SQL-and-Databases | ba300b2da1e5fff153d8db76fdf1f67e82cefb9b | [
"MIT"
] | null | null | null | import os
import psycopg2
import sqlite3
from psycopg2.extras import DictCursor
from dotenv import load_dotenv
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "rpg.db")
connection = sqlite3.connect(DB_FILEPATH)
load_dotenv()
DB_NAME = os.getenv("DB_NAME", default="OOPS")
DB_USER = os.getenv("DB_USER", default="OOPS")
DB_PASSWORD = os.getenv("DB_PASSWORD", default="OOPS")
DB_HOST = os.getenv("DB_HOST", default="OOPS")
connection_pg = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
print("CONNECTION_PG: ", type(connection_pg))
print("CONNECTION: ", type(connection))
cursor = connection.cursor()
cursor_pg = connection_pg.cursor()
print("CURSOR: ", type(cursor))
print("CURSOR_PG: ", type(cursor_pg))
## Connecting to SQLite3 DB for RPG data ##
characters = cursor.execute('SELECT * FROM charactercreator_character').fetchall()
print(characters)
## Create Character Table in Postgres ##
create_character_table_query = '''
CREATE TABLE IF NOT EXISTS rpg_characters (
character_id SERIAL PRIMARY KEY,
name VARCHAR(30),
level INT,
exp INT,
hp INT,
strength INT,
intelligence INT,
dexterity INT,
wisdom INT
)
'''
add_data_query = '''
INSERT INTO rpg_characters (name, level, exp, hp, strength, intelligence, dexterity, wisdom) VALUES
(
'Mr. Wizard', 45, 55, 76, 100, 1000, 50, 1000
),
(
'Honey-Boo-Boo', 15, 2, 3, 5, 1, 1, 1
),
(
'Igor', 10, 43, 54, 123, 345, 66, 100
)
'''
cursor_pg.execute(create_character_table_query)
cursor_pg.execute(add_data_query)
for character in characters:
insert_query = f''' INSERT INTO rpg_characters
(character_id, name, level, exp, hp, strength, intelligence, dexterity, wisdom) VALUES
{character}
'''
cursor_pg.execute(insert_query)
connection.commit()
connection.close()
connection_pg.commit()
connection_pg.close()
| 25.157895 | 99 | 0.710251 |
49227b0882dd3d460d38e78e915ad62615d5837a | 12,852 | py | Python | models/EmbracementLayer.py | gcunhase/EmbraceBERT | 01f04f8e2362c5425359c5758f22794937708095 | [
"MIT"
] | 7 | 2020-10-30T06:36:23.000Z | 2022-01-07T11:08:49.000Z | models/EmbracementLayer.py | gcunhase/EmbraceBERT | 01f04f8e2362c5425359c5758f22794937708095 | [
"MIT"
] | null | null | null | models/EmbracementLayer.py | gcunhase/EmbraceBERT | 01f04f8e2362c5425359c5758f22794937708095 | [
"MIT"
] | null | null | null | import torch
from torch import nn
import numpy as np
from models.AttentionLayer import AttentionLayer
from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch,\
BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP,\
BertMultiAttentionScoresP, BertAttentionClsQuery
from pytorch_transformers.modeling_bert import BertAttention, BertSelfAttention
from utils import visualize_attention
__author__ = "Gwena Cunha"
| 69.847826 | 151 | 0.662621 |
4922ee0740ccdfdc837d0e1e02e2f0bf4fe6c81f | 8,227 | py | Python | tests/test_csv2bufr.py | tomkralidis/CSV2BUFR | ba7ce4ed2bb41e42fcb9d03f10049ffc6a2073f8 | [
"Apache-2.0"
] | null | null | null | tests/test_csv2bufr.py | tomkralidis/CSV2BUFR | ba7ce4ed2bb41e42fcb9d03f10049ffc6a2073f8 | [
"Apache-2.0"
] | 8 | 2021-11-04T12:44:46.000Z | 2021-11-23T02:23:05.000Z | tests/test_csv2bufr.py | tomkralidis/CSV2BUFR | ba7ce4ed2bb41e42fcb9d03f10049ffc6a2073f8 | [
"Apache-2.0"
] | 2 | 2021-11-10T14:43:08.000Z | 2021-11-23T01:54:46.000Z | ###############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
import csv
from io import StringIO
import logging
from eccodes import (codes_bufr_new_from_samples, codes_release)
import pytest
from csv2bufr import (validate_mapping, apply_scaling, validate_value,
transform, SUCCESS)
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel("DEBUG")
# test data
# test to check whether eccodes is installed
def test_eccodes():
# call to eccodes library to test if accessible
bufr_msg = codes_bufr_new_from_samples('BUFR4')
# call to release the BUFR message
codes_release(bufr_msg)
assert True
# test to check validate_mapping is not broken
# test to check validate_mapping fails when we expect it to
# test to make sure apply_scaling works as expected
# test to check that valid_value works
# test to check that valid_value fails when we expect it to
# test to check that valid_value returns null value when we expect it to
# check that test transform works
| 34.422594 | 138 | 0.579312 |
49233a1586edace305f6bd1e93ae01acb6ace60f | 4,381 | py | Python | map_methods_to_mach_ports/parseXcodeLogs.py | malus-security/kobold | 2ae23b75ec503ef3702ad297ddbb7824ac4da53c | [
"BSD-3-Clause"
] | 3 | 2020-06-26T19:44:46.000Z | 2021-03-25T07:00:04.000Z | map_methods_to_mach_ports/parseXcodeLogs.py | malus-security/kobold | 2ae23b75ec503ef3702ad297ddbb7824ac4da53c | [
"BSD-3-Clause"
] | null | null | null | map_methods_to_mach_ports/parseXcodeLogs.py | malus-security/kobold | 2ae23b75ec503ef3702ad297ddbb7824ac4da53c | [
"BSD-3-Clause"
] | 1 | 2020-09-14T23:46:31.000Z | 2020-09-14T23:46:31.000Z | #Input
#Xcode logs output by autogenerated method invocations
#Requirements
#Infer which invocation numbers failed/succeeded
#Infer apparent entitlement requirements based on error message from completion handlers
#Detect which invocation numbers should have had completion handlers
#Map new information to mach-port and method declaration
import pickle
import re
#TODO: this code needs to get a lot smarter as we collect more entitlement samples
raw_xcode_logs = open("input_data/xcode_results.txt","rb").read().strip().split("\n")
sanitized_logs = []
for log in raw_xcode_logs:
if re.match('^[0-9][0-9][0-9][0-9]\-.*$', log):
sanitized_logs.append(log)
else:
sanitized_logs[-1] += " " + log
#TODO I need to know how many invocations to search for.
# Perhaps it would be easier to deal with this if I could import a pickle file with data from map_potential_methods_to_mach-ports.py
with open('./input_data/invocationDictionary.pk', 'rb') as handle:
invocationDictionary = pickle.load(handle)
#print invocationDictionary
knownEntKeyList = open('./input_data/ent_key_corpus.txt', 'rb').read().strip().split('\n')
for id in invocationDictionary:
thisInvocation = invocationDictionary[id]
id = str(id)
thisProtocol = thisInvocation["protocol"]
thisMethod = thisInvocation["method"]
thisMachPort = thisInvocation["mach-port"]
thisInvocation["valid"] = False
thisInvocation["entitlementRequirement"] = set()
thisInvocation["logLines"] = []
#The invocation is the correct combination of accessible mach-port and remote method
#Note that a valid invocation could still be inaccessible due to decentralized checks
#run various tests to see if the connection should be considered valid or not
parseRelevantLogs(id, thisInvocation, sanitized_logs)
#block based tests
hasBlockString = "Invocation has a completion handler"
if hasBlockString in thisInvocation["logLines"]:
checkForCompletionStatus(id, thisInvocation)
lookForEntReqs(id, thisInvocation, knownEntKeyList)
#if thisInvocation["valid"] and len(thisInvocation["entitlementRequirement"]) == 0:
if thisInvocation["valid"]:
print id
#print " "+thisProtocol
print " "+thisMethod
print " "+thisMachPort
#print " "+str(thisInvocation["valid"])
print " "+str(thisInvocation["entitlementRequirement"])
for line in thisInvocation["logLines"]:
print "****"+line
#for line in thisInvocation["logLines"]:
# print " "+line
| 40.564815 | 132 | 0.716047 |
49245578c9299525db939b3fe79e00703d3dcb25 | 10,298 | py | Python | inferelator_ng/tests/test_design_response.py | asistradition/inferelator_ng | 56ef2ce3b1ace35b9b2b2821a0e78746563c309a | [
"BSD-2-Clause"
] | 1 | 2019-01-10T17:04:43.000Z | 2019-01-10T17:04:43.000Z | inferelator_ng/tests/test_design_response.py | asistradition/inferelator_ng | 56ef2ce3b1ace35b9b2b2821a0e78746563c309a | [
"BSD-2-Clause"
] | 1 | 2019-01-21T21:05:19.000Z | 2019-01-21T21:05:19.000Z | inferelator_ng/tests/test_design_response.py | asistradition/inferelator_ng | 56ef2ce3b1ace35b9b2b2821a0e78746563c309a | [
"BSD-2-Clause"
] | null | null | null | import unittest, os
import pandas as pd
import numpy as np
import pdb
from .. import design_response_translation
from .. import utils
my_dir = os.path.dirname(__file__)
class TestDRAboveDeltMax(TestDR):
class TestDRMicro(TestDR):
class TestDRBelowDeltMin(TestDR):
| 46.809091 | 150 | 0.604098 |
4924946192a3e01f6cc5df5f86d6a37b39e0f8e7 | 94 | py | Python | flights/urls.py | olubiyiontheweb/travelworld | ca9d2206108bd59fd222e384bcaab7efd6832e24 | [
"MIT"
] | null | null | null | flights/urls.py | olubiyiontheweb/travelworld | ca9d2206108bd59fd222e384bcaab7efd6832e24 | [
"MIT"
] | null | null | null | flights/urls.py | olubiyiontheweb/travelworld | ca9d2206108bd59fd222e384bcaab7efd6832e24 | [
"MIT"
] | null | null | null | from django.urls import path
from flights import views
urlpatterns = [path("", views.index)]
| 18.8 | 37 | 0.755319 |
4924d0f3858273f23eb72e262ac3af691158f5e6 | 835 | py | Python | invenio_app_ils/ill/loaders/jsonschemas/borrowing_request.py | equadon/invenio-app-ils | 42ba282968d0aa28fb1bfc71d0709685165aaec4 | [
"MIT"
] | null | null | null | invenio_app_ils/ill/loaders/jsonschemas/borrowing_request.py | equadon/invenio-app-ils | 42ba282968d0aa28fb1bfc71d0709685165aaec4 | [
"MIT"
] | null | null | null | invenio_app_ils/ill/loaders/jsonschemas/borrowing_request.py | equadon/invenio-app-ils | 42ba282968d0aa28fb1bfc71d0709685165aaec4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""BorrowingRequest schema for marshmallow loader."""
from invenio_records_rest.schemas import RecordMetadataSchemaJSONV1
from marshmallow import EXCLUDE, fields
| 29.821429 | 76 | 0.718563 |
4925282781ec37ab8cc1089001ebc7822dc2c473 | 366 | py | Python | systemtest/users/apps.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
] | 1 | 2022-03-09T18:07:11.000Z | 2022-03-09T18:07:11.000Z | systemtest/users/apps.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
] | null | null | null | systemtest/users/apps.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
] | null | null | null | """
Users app config
References:
https://docs.djangoproject.com/en/3.1/ref/applications/
"""
from django.apps import AppConfig
| 19.263158 | 63 | 0.628415 |
49252e8b63616c3d05ff08bd21ef0e85c4a7e7b9 | 1,675 | py | Python | setup.py | grigi/configy | 86f6bdd3164f39e83e82e3527f5863032c0ed1e7 | [
"MIT"
] | 3 | 2015-09-18T13:06:04.000Z | 2021-08-10T16:37:21.000Z | setup.py | grigi/configy | 86f6bdd3164f39e83e82e3527f5863032c0ed1e7 | [
"MIT"
] | null | null | null | setup.py | grigi/configy | 86f6bdd3164f39e83e82e3527f5863032c0ed1e7 | [
"MIT"
] | null | null | null | import sys
from setuptools import setup, find_packages
setup(
name='configy',
version=get_version('configy/__init__.py'),
description='Simple Configuration manager, plays well with testing',
long_description=open('README.rst').read(),
author='Nickolas Grigoriadis',
author_email='nagrigoriadis@gmail.com',
url='https://github.com/grigi/configy',
zip_safe=False,
test_suite='configy.test_suite',
# Dependencies
install_requires=[
'PyYAML',
],
tests_require=get_test_requirements(),
# Packages
packages=find_packages(),
include_package_data=True,
# Scripts
scripts=[],
# Classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| 28.389831 | 77 | 0.62209 |
4925bdb182506624c0e0646cabaacc310b61faa3 | 2,755 | py | Python | opentelemetry-sdk/tests/trace/export/test_in_memory_span_exporter.py | vmihailenco/opentelemetry-python | 0a9eba3bb62f4ddf686b55b68286979a5ec84de5 | [
"Apache-2.0"
] | 2 | 2020-08-13T21:10:48.000Z | 2020-09-30T00:55:05.000Z | opentelemetry-sdk/tests/trace/export/test_in_memory_span_exporter.py | vmihailenco/opentelemetry-python | 0a9eba3bb62f4ddf686b55b68286979a5ec84de5 | [
"Apache-2.0"
] | 1 | 2021-02-24T01:32:32.000Z | 2021-02-24T01:32:32.000Z | opentelemetry-sdk/tests/trace/export/test_in_memory_span_exporter.py | vmihailenco/opentelemetry-python | 0a9eba3bb62f4ddf686b55b68286979a5ec84de5 | [
"Apache-2.0"
] | 2 | 2020-06-01T12:38:23.000Z | 2021-01-07T10:55:47.000Z | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from opentelemetry import trace as trace_api
from opentelemetry.sdk import trace
from opentelemetry.sdk.trace import export
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
InMemorySpanExporter,
)
| 36.25 | 79 | 0.717604 |
49289bb9e651ac9eb5d083a6d7aaea07feb28a79 | 2,417 | py | Python | lib/acli/commands/ec2.py | jonhadfield/acli | d14abf3ad67bb8cb5ecac93c380544a16eddc7fb | [
"MIT"
] | 9 | 2015-10-06T01:33:39.000Z | 2017-08-23T22:32:50.000Z | lib/acli/commands/ec2.py | jonhadfield/acli | d14abf3ad67bb8cb5ecac93c380544a16eddc7fb | [
"MIT"
] | 6 | 2016-05-06T07:30:01.000Z | 2020-06-22T08:11:40.000Z | lib/acli/commands/ec2.py | jonhadfield/acli | d14abf3ad67bb8cb5ecac93c380544a16eddc7fb | [
"MIT"
] | 1 | 2020-06-01T10:44:23.000Z | 2020-06-01T10:44:23.000Z | # -*- coding: utf-8 -*-
"""Usage:
acli ec2 (ls | list | summary) [options] [--region=<region>]
acli ec2 (start | stop | reboot | terminate | info | cpu | vols | net) <instance_id> [options]
-f, --filter=<term> filter results by term
-s, --start=<start_date> metrics start-date
-e, --end=<end_date> metrics end-date
-p, --period=<period> metrics period
-i, --intervals=<intervals> metrics intervals
-h, --help
"""
from __future__ import (absolute_import, print_function, unicode_literals)
from docopt import docopt
from acli.services import (ec2, cloudwatch)
if __name__ == '__main__':
print(docopt(__doc__))
| 45.603774 | 98 | 0.595366 |
4929eddd19df20bbf2ed2c78fc54f5d2edf96f51 | 7,630 | py | Python | pharedox/gui/gui.py | omarvaneer/pharynx_redox | ffcd5733fd0823244f50590951e9af0bc9ae2518 | [
"MIT"
] | 2 | 2018-06-08T12:45:03.000Z | 2018-07-13T04:17:01.000Z | pharedox/gui/gui.py | omarvaneer/pharynx_redox | ffcd5733fd0823244f50590951e9af0bc9ae2518 | [
"MIT"
] | 17 | 2020-03-18T11:43:39.000Z | 2020-07-21T18:04:25.000Z | pharedox/gui/gui.py | half-adder/pharynx_redox | a5b99f6afb4a36a021d0439bb15d2c826de605b1 | [
"MIT"
] | 3 | 2021-07-21T16:14:28.000Z | 2021-07-27T15:38:39.000Z | """
A Basic GUI based on napari
"""
import logging
import multiprocessing as mp
import sys
from multiprocessing import Process
from pathlib import Path
from typing import Optional
import matplotlib
import napari
import numpy as np
import xarray as xr
from napari.qt.threading import thread_worker
from PyQt5.QtCore import pyqtSignal
from qtpy.QtWidgets import QMessageBox, QWidget
from skimage import morphology
from skimage.measure import label
from pharedox import experiment, plots, utils
from pharedox.gui.qt_py_files.pipeline_buttons import Ui_Form
# set matplotlib to use headless backend, otherwise it crashes the app when it tries to save
matplotlib.use("agg")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s %(levelname)s:%(message)s",
level=logging.INFO,
datefmt="%I:%M:%S",
)
exp_dir = sys.argv[1]
exp = experiment.Experiment(Path(exp_dir))
app = App(exp_=exp)
app.run()
| 31.399177 | 96 | 0.636697 |
4929f7cf615e61de5c4f61ef44c5340e9ac4492a | 3,290 | py | Python | python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py | QingshuChen/Paddle | 25a92be3e123ed21fd98c7be6bd7e3a6320756a3 | [
"Apache-2.0"
] | null | null | null | python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py | QingshuChen/Paddle | 25a92be3e123ed21fd98c7be6bd7e3a6320756a3 | [
"Apache-2.0"
] | 9 | 2017-09-13T07:39:31.000Z | 2017-10-18T05:58:23.000Z | python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py | QingshuChen/Paddle | 25a92be3e123ed21fd98c7be6bd7e3a6320756a3 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.core as core
import paddle.v2.fluid.evaluator as evaluator
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.optimizer import AdamOptimizer
if __name__ == '__main__':
main()
| 32.254902 | 73 | 0.620669 |
492b44bce7d8334fd49f7ee0f9eda198e717c6cb | 6,847 | py | Python | sem.py | sree314/simple-abstract-interpreter | 6445db9ea2c8418ece4ec1436e552fb427d7ae2f | [
"CC0-1.0"
] | 3 | 2020-05-04T20:09:30.000Z | 2022-01-29T20:34:03.000Z | sem.py | sree314/simple-abstract-interpreter | 6445db9ea2c8418ece4ec1436e552fb427d7ae2f | [
"CC0-1.0"
] | null | null | null | sem.py | sree314/simple-abstract-interpreter | 6445db9ea2c8418ece4ec1436e552fb427d7ae2f | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
#
# sem.py
#
# An implementation of the concrete semantics, including an
# interpreter
#
# Author: Sreepathi Pai
#
# Written for CSC2/455 Spring 2020
#
# To the extent possible under law, Sreepathi Pai has waived all
# copyright and related or neighboring rights to sem.py. This work
# is published from: United States.
from typing import Dict, List
from tinyast import *
import random
import logging
logger = logging.getLogger(__name__)
# map of variables (here str, instead of Var) -> values
#TODO: we could use var if we defined hash to be on the name of Var?
Memory = Dict[str, int]
# M is a set of memory states, it belongs to Powerset(Memory)
# We're using List, because set would choke on Dict and we don't have a frozendict type...
if __name__ == "__main__":
logging.basicConfig(level = logging.DEBUG)
test_evaluate_Expr()
test_evaluate_BoolExpr()
test_evaluate_Cmd()
test_While()
| 30.162996 | 97 | 0.590916 |
492e98f0563190adbf1aba90431f35623fa73162 | 4,925 | py | Python | python_script/neutron.py | namptit307/openstack_upgrade_test | a6a4ce57a931ce6959f85b7bafa95e10a0d0ed52 | [
"MIT"
] | 1 | 2018-06-26T03:37:17.000Z | 2018-06-26T03:37:17.000Z | python_script/neutron.py | namptit307/openstack_upgrade_test | a6a4ce57a931ce6959f85b7bafa95e10a0d0ed52 | [
"MIT"
] | null | null | null | python_script/neutron.py | namptit307/openstack_upgrade_test | a6a4ce57a931ce6959f85b7bafa95e10a0d0ed52 | [
"MIT"
] | null | null | null | import json
from requests import ConnectionError
from config import *
from utils import *
from get_auth import TOKEN
# Create network
create_network_url = "http://{}:9696/v2.0/networks".format(IP)
token_headers = {
'X-Auth-Token': TOKEN,
'Content-Type': 'application/json'
}
# Create router
create_router_url = "http://{}:9696/v2.0/routers".format(IP)
# Get network for DELETE
get_network_list_url = create_network_url
future = send_request(get_network_list_url, 'GET', headers=token_headers)
result = future.result().content
result = json.loads(result)
list_networks = result.get("networks")
list_networks = [network for network in list_networks if "testing" in network.get('name')]
# Get routers for DELETE
get_router_list_url = create_router_url
future = send_request(get_router_list_url, 'GET', headers=token_headers)
result = future.result().content
result = json.loads(result)
list_routers = result.get("routers")
list_routers = [router for router in list_routers if "testing" in router.get('name')]
# Update network
# We should have separate network for updating --> ensure have network for update, that is.
NETWORK_ID = "f6e3556e-29ab-4ee7-ba64-7fab0c423e26"
# Update router
# We should have separate router for updating --> ensure have router for update, that is.
ROUTER_ID = "b0e19990-d9ba-4981-9da7-5aeec2957c77"
if __name__ == '__main__':
i = 1
while continue_test:
time.sleep(0.3)
try:
# Create network
create_network_data = {
"network": {
"name": "new_network_{}".format(i)
}
}
i += 1
future = send_request(create_network_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_network_data))
try:
result = future.result().content
result = json.loads(result)
network = result.get('network')
if type(network) is dict:
network_id = result['network']['id']
create_subnet_data = {
"subnet": {
"network_id": network_id,
"ip_version": 4,
"cidr": "192.168.199.0/24"
}
}
create_subnet_url = "http://{}:9696/v2.0/subnets".format(IP)
send_request(create_subnet_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_subnet_data))
except:
pass
# Get and delete network
if not (len(list_networks) == 0):
delete_network = list_networks.pop()
delete_network_id = delete_network.get("id")
get_network_url = "http://{}:9696/v2.0/networks/{}".format(IP, delete_network_id)
send_request(get_network_url, 'GET', headers=token_headers)
send_request(get_network_url, 'DELETE', headers=token_headers)
# Update network name
update_network_data = {
"network": {
"name": "new_name_{}".format(i)
}
}
update_network_url = "http://{}:9696/v2.0/networks/{}".format(IP, NETWORK_ID)
send_request(update_network_url, 'PUT',
headers=token_headers,
data=json.JSONEncoder().encode(update_network_data))
# Create router
create_router_data = {
"router": {
"name": "new_router_{}".format(i)
}
}
future = send_request(create_router_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_router_data))
# Get and delete network
if not (len(list_routers) == 0):
delete_router = list_routers.pop()
delete_router_id = delete_router.get("id")
get_router_url = "http://{}:9696/v2.0/routers/{}".format(IP, delete_router_id)
send_request(get_router_url, 'GET', headers=token_headers)
send_request(get_router_url, 'DELETE', headers=token_headers)
# Update router name
update_router_data = {
"router": {
"name": "new_name_{}".format(i)
}
}
update_router_url = "http://{}:9696/v2.0/routers/{}".format(IP, ROUTER_ID)
send_request(update_router_url, 'PUT',
headers=token_headers,
data=json.JSONEncoder().encode(update_router_data))
except ConnectionError:
pass
| 39.087302 | 97 | 0.552081 |
492f40da8141b4f392fd82f2242b755c0bb7c8b7 | 1,406 | py | Python | app/view/forms.py | weizy1981/WatsonRobot | bb718a589a8f2d394fbc86582bff29c1015e79fc | [
"Apache-2.0"
] | null | null | null | app/view/forms.py | weizy1981/WatsonRobot | bb718a589a8f2d394fbc86582bff29c1015e79fc | [
"Apache-2.0"
] | null | null | null | app/view/forms.py | weizy1981/WatsonRobot | bb718a589a8f2d394fbc86582bff29c1015e79fc | [
"Apache-2.0"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, FileField
from wtforms.validators import DataRequired
from app.view import errormessage
| 43.9375 | 109 | 0.753201 |
492f4214ee6f18acfc6f5dd9c2a40c4761fa8d61 | 2,519 | py | Python | setup.py | alexweav/nisystemlink-clients-python | f19a30907a7fef536043ecbddc5a755e5fedf846 | [
"MIT"
] | null | null | null | setup.py | alexweav/nisystemlink-clients-python | f19a30907a7fef536043ecbddc5a755e5fedf846 | [
"MIT"
] | null | null | null | setup.py | alexweav/nisystemlink-clients-python | f19a30907a7fef536043ecbddc5a755e5fedf846 | [
"MIT"
] | null | null | null | from setuptools import find_namespace_packages, find_packages, setup # type: ignore
from setuptools.command.test import test as TestCommand # type: ignore
# Name under which the distribution is published on PyPI.
pypi_name = "nisystemlink-clients"

# Ship the ``systemlink.*`` namespace packages plus any regular packages,
# excluding the bare namespace root and the non-shipping directories.
packages = find_namespace_packages(include=["systemlink.*"]) + find_packages(
    exclude=["systemlink", "examples", "tests"]
)

# NOTE(review): _get_version, _read_contents and PyTest are defined elsewhere
# in this file (outside this excerpt).
setup(
    name=pypi_name,
    version=_get_version(pypi_name),
    description="NI-SystemLink Python API",
    long_description=_read_contents("README.rst"),
    author="National Instruments",
    maintainer="Paul Spangler, Alex Weaver",
    maintainer_email="paul.spangler@ni.com, alex.weaver@ni.com",
    keywords=["nisystemlink", "systemlink"],
    license="MIT",
    packages=packages,
    # Environment markers select the HTTP stack per Python version.
    install_requires=[
        'aenum;python_version<"3.6"',
        "events",
        'httpx;python_version>="3.6"',
        'requests;python_version<"3.6"',
        "typing-extensions",
    ],
    tests_require=["pytest", "pytest-asyncio", "mypy"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Manufacturing",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: System :: Hardware :: Hardware Drivers",
    ],
    # `python setup.py test` runs the pytest-based test command.
    cmdclass={"test": PyTest},
    package_data={"": ["VERSION", "*.pyi", "py.typed"]},
)
| 31.4875 | 84 | 0.639143 |
492f9041b41552075a47cd3267bbbce97f7550d5 | 13,534 | py | Python | shell_ui/__init__.py | conducto/conducto | b480780905f5a25e8c803b60ca7cdf6976ce5ef6 | [
"Apache-2.0"
] | 25 | 2020-05-07T22:51:11.000Z | 2021-11-17T16:14:42.000Z | shell_ui/__init__.py | conducto/conducto | b480780905f5a25e8c803b60ca7cdf6976ce5ef6 | [
"Apache-2.0"
] | 3 | 2020-04-21T06:38:02.000Z | 2020-05-31T01:57:19.000Z | shell_ui/__init__.py | conducto/conducto | b480780905f5a25e8c803b60ca7cdf6976ce5ef6 | [
"Apache-2.0"
] | 2 | 2020-05-14T01:47:32.000Z | 2020-06-03T21:58:12.000Z | import os
import sys
import signal
import asyncio
import json
import time
import traceback
import typing
import socket
import re
import select
import websockets
# Terminal control is platform specific: POSIX uses termios/tty, Windows
# uses msvcrt plus win32api (needed for the console ctrl handler).
if sys.platform != "win32":
    import termios
    import tty
else:
    import msvcrt
    import win32api

from .. import api
from ..shared import constants, log, types as t
from ..shared.constants import State
import conducto.internal.host_detection as hostdet

if sys.version_info < (3, 7):
    # asyncio.create_task entered the stdlib in 3.7; on 3.6 alias it to the
    # equivalent ensure_future so the rest of this module can use one name.
    asyncio.create_task = asyncio.ensure_future

# Display colour for each pipeline-node state (consumers are elsewhere in
# this module).
STATE_TO_COLOR = {
    State.PENDING: log.Color.TRUEWHITE,
    State.QUEUED: log.Color.GRAY,
    State.RUNNING: log.Color.BLUE,
    State.DONE: log.Color.GREEN,
    State.ERROR: log.Color.RED,
    State.WORKER_ERROR: log.Color.PURPLE,
}
def connect(token: t.Token, pipeline_id: t.PipelineId, starthelp: str):
    """Attach a ShellUI to the given pipeline and run it to completion.

    On Windows, a console ctrl handler is installed so Ctrl-C events reach
    the UI. Any exception raised by the UI is printed after the terminal
    has been restored, rather than propagated to the caller.
    """
    pipeline = api.Pipeline().get(pipeline_id, token=token)
    shell = ShellUI(token, pipeline, starthelp)
    if sys.platform == "win32":
        win32api.SetConsoleCtrlHandler(shell.ctrl_c, True)
    try:
        asyncio.get_event_loop().run_until_complete(shell.run())
    except Exception:
        shell.reset_stdin()  # put the terminal back into its normal mode first
        traceback.print_exc()
class ShellUI(object):
async def view_loop(self):
    """
    Every 0.25 seconds render the pipeline
    """
    log.info("[view] starting")
    # Runs forever; stops only when the surrounding task is cancelled
    # (see quit()).
    while True:
        await asyncio.sleep(0.25)
        for view in self.listeners:
            view.render()
def get_ns_url(self):
    """Return the notification-service websocket URL.

    Built from the configured API base URL by rewriting the scheme prefix
    (http -> ws, https -> wss) and appending the "/ns/" path.
    """
    base = api.Config().get_url()
    return re.sub("^http", "ws", base) + "/ns/"
def disconnect(self):
    """Treat a dropped connection exactly like an explicit quit()."""
    self.quit()
def quit(self):
    """
    Make all event loops quit
    """
    # Restore stdin first so the terminal is left usable even if the
    # cancellation below raises (reset_stdin is defined outside this excerpt).
    self.reset_stdin()
    # NOTE(review): presumably polled by the event loops as a stop flag;
    # its readers are outside this excerpt.
    self.quitting = True
    self.gather_handle.cancel()
| 33.417284 | 88 | 0.547362 |
492f97e1b2f70e9fa3789d450a9a566094a9d2fe | 8,772 | py | Python | processing_components/simulation/ionospheric_screen.py | cnwangfeng/algorithm-reference-library | 9605eb01652fbfcb9ff003cc12b44c84093b7fb1 | [
"Apache-2.0"
] | null | null | null | processing_components/simulation/ionospheric_screen.py | cnwangfeng/algorithm-reference-library | 9605eb01652fbfcb9ff003cc12b44c84093b7fb1 | [
"Apache-2.0"
] | null | null | null | processing_components/simulation/ionospheric_screen.py | cnwangfeng/algorithm-reference-library | 9605eb01652fbfcb9ff003cc12b44c84093b7fb1 | [
"Apache-2.0"
] | null | null | null | """ Functions for ionospheric modelling: see SDP memo 97
"""
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from data_models.memory_data_models import BlockVisibility
from processing_components.calibration.operations import create_gaintable_from_blockvisibility, \
create_gaintable_from_rows
from processing_components.calibration.iterators import gaintable_timeslice_iter
from processing_components.image.operations import copy_image, create_empty_image_like
from processing_components.visibility.base import create_visibility_from_rows
from processing_components.visibility.iterators import vis_timeslice_iter
from processing_library.util.coordinate_support import xyz_to_uvw, skycoord_to_lmn
import logging
log = logging.getLogger(__name__)
def find_pierce_points(station_locations, ha, dec, phasecentre, height):
    """Find the pierce points for a flat screen at specified height

    :param station_locations: All station locations [:3]
    :param ha: Hour angle
    :param dec: Declination
    :param phasecentre: Phase centre
    :param height: Height of screen
    :return: Array of pierce-point positions, one row per station
    """
    direction = SkyCoord(ra=ha, dec=dec, frame='icrs', equinox='J2000')
    # Station positions in the local (u, v, w) frame, re-centred on the
    # array centroid.
    antenna_uvw = xyz_to_uvw(station_locations, ha, dec)
    antenna_uvw = antenna_uvw - numpy.average(antenna_uvw, axis=0)
    # skycoord_to_lmn returns (l, m, n - 1); add 1 to recover the true
    # w-direction cosine.
    cosines = numpy.array(skycoord_to_lmn(direction, phasecentre))
    cosines[2] += 1.0
    # Project each station along the source direction up to the screen.
    return antenna_uvw + height * numpy.array(cosines)
def create_gaintable_from_screen(vis, sc, screen, height=3e5, vis_slices=None, scale=1.0, **kwargs):
""" Create gaintables from a screen calculated using ARatmospy
:param vis: BlockVisibility providing configuration, times and phase centre
:param sc: Sky components for which pierce points are needed
:param screen: Phase-screen image with a 4-axis WCS (x, y, time, frequency)
:param height: Height (in m) of screen above telescope e.g. 3e5
:param vis_slices: Number of visibility time slices to iterate over
:param scale: Multiply the screen by this factor
:return: List of gaintables, one per sky component
"""
assert isinstance(vis, BlockVisibility)
station_locations = vis.configuration.xyz
nant = station_locations.shape[0]
# Seconds of hour angle to radians (43200 s = pi rad).
t2r = numpy.pi / 43200.0
gaintables = [create_gaintable_from_blockvisibility(vis, **kwargs) for i in sc]
# The time in the Visibility is hour angle in seconds!
for iha, rows in enumerate(vis_timeslice_iter(vis, vis_slices=vis_slices)):
v = create_visibility_from_rows(vis, rows)
ha = numpy.average(v.time)
number_bad = 0
number_good = 0
for icomp, comp in enumerate(sc):
# Where each station's line of sight to this component crosses the screen.
pp = find_pierce_points(station_locations, (comp.direction.ra.rad + t2r * ha) * u.rad, comp.direction.dec,
height=height, phasecentre=vis.phasecentre)
scr = numpy.zeros([nant])
for ant in range(nant):
pp0 = pp[ant][0:2]
# World coordinates (x, y, hour angle, frequency); 1e8 Hz looks like a
# fixed reference frequency - confirm against the screen's WCS.
worldloc = [pp0[0], pp0[1], ha, 1e8]
try:
pixloc = screen.wcs.wcs_world2pix([worldloc], 0)[0].astype('int')
scr[ant] = scale * screen.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]]
number_good += 1
except:
# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
# catching IndexError/ValueError would be safer here.
number_bad += 1
scr[ant] = 0.0
# Phase-only gain per antenna, broadcast over time/frequency/polarisation.
gaintables[icomp].gain[iha, :, :, :] = numpy.exp(1j * scr[:, numpy.newaxis, numpy.newaxis, numpy.newaxis])
gaintables[icomp].phasecentre = comp.direction
# NOTE(review): indentation was lost in this copy, so the intended nesting
# of this summary (per time slice vs once at the end) is ambiguous.
if number_bad > 0:
log.warning("create_gaintable_from_screen: %d pierce points are inside the screen image" % (number_good))
log.warning("create_gaintable_from_screen: %d pierce points are outside the screen image" % (number_bad))
return gaintables
def grid_gaintable_to_screen(vis, gaintables, screen, height=3e5, gaintable_slices=None, scale=1.0, **kwargs):
""" Grid a gaintable to a screen image
The phases are just average per grid cell, no phase unwrapping is performed.
:param vis: BlockVisibility providing the station locations and phase centre
:param gaintables: Gaintables whose phases are gridded onto the screen
:param screen: Template screen image defining the output shape and WCS
:param height: Height (in m) of screen above telescope e.g. 3e5
:param gaintable_slices: Number of gaintable time slices to iterate over
:param scale: Multiply the screen by this factor
:return: gridded screen image, weights image
"""
assert isinstance(vis, BlockVisibility)
station_locations = vis.configuration.xyz
nant = station_locations.shape[0]
# Seconds of hour angle to radians (43200 s = pi rad).
t2r = numpy.pi / 43200.0
newscreen = create_empty_image_like(screen)
weights = create_empty_image_like(screen)
nchan, ntimes, ny, nx = screen.shape
# The time in the Visibility is hour angle in seconds!
number_no_weight = 0
for gaintable in gaintables:
for iha, rows in enumerate(gaintable_timeslice_iter(gaintable, gaintable_slices=gaintable_slices)):
gt = create_gaintable_from_rows(gaintable, rows)
ha = numpy.average(gt.time)
pp = find_pierce_points(station_locations,
(gt.phasecentre.ra.rad + t2r * ha) * u.rad,
gt.phasecentre.dec,
height=height,
phasecentre=vis.phasecentre)
# Per-antenna phase and weight, taken from the first time/channel/pol.
scr = numpy.angle(gt.gain[0, :, 0, 0, 0])
wt = gt.weight[0, :, 0, 0, 0]
for ant in range(nant):
pp0 = pp[ant][0:2]
worldloc = [pp0[0], pp0[1], ha, 1e8]
pixloc = newscreen.wcs.wcs_world2pix([worldloc], 0)[0].astype('int')
# Pierce points must land inside the image. NOTE(review): assert is
# used for input validation here and is stripped under python -O.
assert pixloc[0] >= 0
assert pixloc[0] < nx
assert pixloc[1] >= 0
assert pixloc[1] < ny
# Weighted accumulation; normalised by the weights image at the end.
newscreen.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]] += wt[ant] * scr[ant]
weights.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]] += wt[ant]
if wt[ant] == 0.0:
number_no_weight += 1
if number_no_weight > 0:
# NOTE(review): "are have no weight" is a typo in the runtime message.
print("grid_gaintable_to_screen: %d pierce points are have no weight" % (number_no_weight))
log.warning("grid_gaintable_to_screen: %d pierce points are have no weight" % (number_no_weight))
# Convert the weighted sums into weighted means where any weight was seen.
newscreen.data[weights.data > 0.0] = newscreen.data[weights.data > 0.0] / weights.data[weights.data > 0.0]
return newscreen, weights
def calculate_sf_from_screen(screen):
""" Calculate structure function image from screen
Screen axes are ['XX', 'YY', 'TIME', 'FREQ']
:param screen: Screen image
:return: Structure-function image (central quarter of the correlation)
"""
from scipy.signal import fftconvolve
nchan, ntimes, ny, nx = screen.data.shape
# Full 2-D autocorrelation support is (2*ny - 1, 2*nx - 1).
sf = numpy.zeros([nchan, 1, 2 * ny - 1, 2 * nx - 1])
for chan in range(nchan):
# Autocorrelation via convolution with the spatially reversed screen.
sf[chan, 0, ...] = fftconvolve(screen.data[chan, 0, ...], screen.data[chan, 0, ::-1, ::-1])
# NOTE(review): the loop below re-adds the itime=0 term just assigned, and
# `sf +=` accumulates into every channel of sf, not only `chan`; with the
# indentation lost in this copy the intended scope of the two
# normalisation lines is also ambiguous - confirm against the original.
for itime in range(ntimes):
sf += fftconvolve(screen.data[chan, itime, ...], screen.data[chan, itime, ::-1, ::-1])
sf[chan, 0, ...] /= numpy.max(sf[chan, 0, ...])
sf[chan, 0, ...] = 1.0 - sf[chan, 0, ...]
sf_image = copy_image(screen)
# Keep only the central region around the zero-lag peak.
sf_image.data = sf[:, :, (ny - ny // 4):(ny + ny // 4), (nx - nx // 4):(nx + nx // 4)]
sf_image.wcs.wcs.crpix[0] = ny // 4 + 1
sf_image.wcs.wcs.crpix[1] = ny // 4 + 1
sf_image.wcs.wcs.crpix[2] = 1
return sf_image
def plot_gaintable_on_screen(vis, gaintables, height=3e5, gaintable_slices=None, plotfile=None):
""" Plot a gaintable on an ionospheric screen
:param vis: BlockVisibility providing station locations and phase centre
:param gaintables: Gaintables whose phases are plotted at the pierce points
:param height: Height (in m) of screen above telescope e.g. 3e5
:param gaintable_slices: Number of gaintable time slices to iterate over
:param plotfile: Optional filename the figure is saved to
:return: None
"""
import matplotlib.pyplot as plt
assert isinstance(vis, BlockVisibility)
station_locations = vis.configuration.xyz
# Seconds of hour angle to radians (43200 s = pi rad).
t2r = numpy.pi / 43200.0
# The time in the Visibility is hour angle in seconds!
plt.clf()
for gaintable in gaintables:
for iha, rows in enumerate(gaintable_timeslice_iter(gaintable, gaintable_slices=gaintable_slices)):
gt = create_gaintable_from_rows(gaintable, rows)
ha = numpy.average(gt.time)
pp = find_pierce_points(station_locations,
(gt.phasecentre.ra.rad + t2r * ha) * u.rad,
gt.phasecentre.dec,
height=height,
phasecentre=vis.phasecentre)
phases = numpy.angle(gt.gain[0, :, 0, 0, 0])
# One point per antenna, coloured by phase (cyclic 'hsv' colourmap).
plt.scatter(pp[:,0],pp[:,1], c=phases, cmap='hsv', alpha=0.75, s=0.1)
plt.title('Pierce point phases')
plt.xlabel('X (m)')
plt.ylabel('Y (m)')
if plotfile is not None:
plt.savefig(plotfile)
plt.show() | 40.611111 | 118 | 0.621181 |
4930f596737a00ba148f8145ee070ebfb4b9133d | 7,311 | py | Python | build.py | Slaals/narval | 3b811fb3854760a34a3875b35bd6088d4299ce8f | [
"CNRI-Python"
] | null | null | null | build.py | Slaals/narval | 3b811fb3854760a34a3875b35bd6088d4299ce8f | [
"CNRI-Python"
] | null | null | null | build.py | Slaals/narval | 3b811fb3854760a34a3875b35bd6088d4299ce8f | [
"CNRI-Python"
] | null | null | null | '''
This file is part of Narval :
an opensource and free rights static blog generator.
'''
#!/usr/bin/python3
#-*- coding: utf-8 -*-
import os, filecmp, random, webbrowser
from shutil import copyfile
params = __import__('config')
helpers = __import__('helpers')
tmps = __import__('template')

# Render the site twice: once normally and once with the extra flag set.
# NOTE(review): build() is neither defined nor imported in this excerpt -
# confirm it is provided elsewhere before running this script.
build(params, helpers, tmps)
build(params, helpers, tmps, True)

# Fun facts / congratulations shown after a successful build. The accented
# characters had been lost to an encoding error and are restored here.
exprs = [
    'Nom d\'une corne !',
    'Bien joué !', 'Bravo !', 'Hihihi !', 'Félicitations !',
    'La corne du narval est une dent !',
    'Les femelles narval, sauf exceptions, n\'ont pas de corne.',
    'Une corne de narval peut mesurer 3 mètres !',
    'Une corne de narval peut peser jusqu\'à 10 kg !',
    'Le narval vivrait en moyenne une cinquantaine d\'années.',
    'Le narval est un cétacé à dents.',
    'Outre l\'humain, le narval a 2 prédateurs : l\'orque et l\'ours polaire.',
    'Le narval raffole des flétans, des raies et des morues.',
    'Le narval peut descendre à 1500 mètres de profondeur.',
    'Le narval peut rester en apnée près d\'une demi heure.'
]

# Green ">>>" banner followed by a random expression.
print('\033[92m>>> ' + random.choice(exprs) + '\033[0m')

# Offer to open the generated site in the default browser (default: yes).
resp = input('Le blog est consultable hors ligne dans "' + params.folder + '".\nVoir dans un navigateur ? (O/n)').lower()
if resp != 'n':
    webbrowser.open(params.folder + '/index.html', new=2)
| 39.518919 | 148 | 0.646286 |
493212d8687f50b52ca98a00b02e9f83e3d17403 | 247 | py | Python | examples/simple/regression/sample_skipped.py | jonwesneski/end2 | 708c7b96c1086959565e2889a0818451e6e2c931 | [
"MIT"
] | null | null | null | examples/simple/regression/sample_skipped.py | jonwesneski/end2 | 708c7b96c1086959565e2889a0818451e6e2c931 | [
"MIT"
] | 1 | 2022-03-12T19:43:00.000Z | 2022-03-12T19:43:00.000Z | examples/simple/regression/sample_skipped.py | jonwesneski/end2 | 708c7b96c1086959565e2889a0818451e6e2c931 | [
"MIT"
] | null | null | null | from src import (
RunMode,
setup
)
__run_mode__ = RunMode.PARALLEL
| 14.529412 | 65 | 0.712551 |
49328cd4da5fd3b0c69d9699b45e853d2628cbd7 | 9,297 | py | Python | reference_parsing/scripts/reference_script.py | ScholarIndex/LinkedBooks | 0cae008427ed1eb34a882e9d85f24b42b3ee3a28 | [
"MIT"
] | null | null | null | reference_parsing/scripts/reference_script.py | ScholarIndex/LinkedBooks | 0cae008427ed1eb34a882e9d85f24b42b3ee3a28 | [
"MIT"
] | 6 | 2020-03-20T18:10:01.000Z | 2021-09-29T17:31:17.000Z | reference_parsing/scripts/reference_script.py | ScholarIndex/LinkedBooks | 0cae008427ed1eb34a882e9d85f24b42b3ee3a28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
USED una tantum to refactor the journal_references collection.
Note that the old collection references (monograph reference lists) are discarded: monographs are going to be parsed again.
this script:
1- copies the journal_references collection to another collection: sand, test and production databases
2- uniforms the data model in so doing
3- updated Processing
4- validates everything using the mongoengine
"""
__author__ = """Giovanni Colavizza"""
from collections import OrderedDict
import logging
logging.basicConfig(filename="logs/xml_parser.log", level=logging.INFO)
logger = logging.getLogger(__name__)
from configparser import ConfigParser
from datetime import datetime
# Mongo
from pymongo import MongoClient, TEXT, ASCENDING
from mongoengine import connect as engineconnect
# Test models
from commons.dbmodels import *
# Establish Mongo connections
# Read DB credentials and endpoints from the local configuration file.
config = ConfigParser(allow_no_value=False)
config.read("config.conf")
logger.info('Read configuration file.')
# NOTE(review): the four stanzas below are near-identical; a small helper
# (config section -> authenticated database handle) would remove the
# duplication. Only comments are added here.
# SANDBOX the playground
db = "mongo_sand"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_sandbox.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_sand = con.linkedbooks_sandbox
# SOURCE the collection where journal_references is
db = "mongo_source"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_dev.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_source = con.linkedbooks_dev
# DEV the development DB
db = "mongo_dev"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_refactored.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_dev = con.linkedbooks_refactored
# PROD the production DB, only connect if explicitly called
db = "mongo_prod"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
# connect=False defers the prod connection until it is actually used.
con = MongoClient(mongo_host,port=int(mongo_port), connect=False, **{"socketKeepAlive":True})
con.linkedbooks_refactored.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_prod = con.linkedbooks_refactored
logger.info('Loaded Mongo dbs configs.')
def transfer_collection(destination_db,db):
"""
Transfer the journal_references collection to other databases, after refactoring
:param destination_db: Mongo connector to the right destination database
:param db: config.conf name of the destination database
:return: Nothing.
"""
# NOTE(review): indentation was lost in this copy of the file; the original
# nesting must be restored before this function can run. Every code line
# below is unchanged - only comments are added.
# IMPORT journal_references collection from SOURCE to new database
references = list()
# page_id string -> {"id": page ObjectId, "issue": normalised issue name}
pages_dict = dict()
# index of items from metadata which are valid
valid_documents = list()
# Collect every (bid, issue foldername) pair not marked as removed;
# monographs are skipped because only journals are handled here.
for m in destination_db.metadata.find():
if m["marked_as_removed"]:
continue
if m["type_document"] == "monograph":
continue # we only have journals here
else:
for d in m["issues"]:
if d["marked_as_removed"]:
continue
else:
valid_documents.append((m["bid"], d["foldername"]))
# Stream the old journal_references and rebuild each one in the new,
# uniform data model.
for reference in db_source.journal_references.find(no_cursor_timeout=True):
# Contents tokens are keyed by stringified position; sort numerically.
contents = OrderedDict(sorted(reference["contents"].items(),key=lambda x:int(x[0])))
pages = set([x["page_id"] for x in contents.values()])
for p in pages:
if p not in pages_dict.keys():
# A page_id looks like "<bid>-<issue parts...>-<?>-<image number>".
try:
items = p.split("-")
bid = items[0]
image = items[-1]
issue = "-".join(items[1:-2])
image = int(image)
except:
# NOTE(review): bare except, and the unparseable id is only
# print()ed rather than logged.
print(p)
continue
if (bid,issue) in valid_documents:
document = destination_db.documents.find_one({"bid":bid,"number":issue})
else:
# Retry with the last "_"-separated token turned into a "." to
# cover the alternative issue-naming convention.
split_issue = issue.split("_")
issue = "_".join(split_issue[:-1])
issue = issue + "." + split_issue[-1]
if (bid, issue) in valid_documents:
document = destination_db.documents.find_one({"bid": bid, "number": issue})
else:
logger.info("MISSING DOCUMENT: %s, %s, %s" % (bid, issue, p))
continue
# NOTE(review): this log line sits directly after a continue; in the
# flattened copy it looks unreachable - confirm its intended nesting.
logger.info("Found a mark as removed: %s, %s" % (bid, issue))
#logger.warning("MISSING DOCUMENT: %s, %s, %s"%(bid,issue,p))
#continue
try:
page = destination_db.pages.find_one({"single_page_file_number":image,"_id":{"$in":document["pages"]}})
except:
logger.warning("MISSING PAGE: %s, %s, %s" % (bid, issue, p))
continue
pages_dict[p] = {"id":page["_id"],"issue":issue}
issue = reference["issue"]
# Attach the Mongo page id to every token; fall back to "" when unknown.
for c in contents.values():
try:
c["page_mongo_id"] = pages_dict[c["page_id"]]["id"]
issue = pages_dict[c["page_id"]]["issue"]
except:
logger.warning("MISSING PAGE IN DICT: %s" % c["page_id"])
c["page_mongo_id"] = ""
r = {"ref_type":reference["ref_type"],
"reference_string":" ".join([x["surface"] for x in contents.values()]),
"in_golden":reference["in_golden"],
"order_in_page":reference["order_in_page"],
"continuation_candidate_in":reference["continuation_candidate_in"],
"continuation_candidate_out":reference["continuation_candidate_out"],
"continuation":reference["continuation"],
"bid":reference["bid"],
"issue":issue,
"contents":contents,
"updated_at":datetime.now()
}
references.append(r)
# Replace the destination collection wholesale and rebuild its indexes.
destination_db.drop_collection("references")
destination_db.references.insert_many(references)
destination_db.references.create_index([('reference_string', TEXT),('bid', TEXT),('issue', TEXT)], default_language='none')
destination_db.references.create_index([('contents.1.single_page_file_number',ASCENDING)],unique=False)
logger.info('Created journal_references collection into database %s'%db)
def updates_checks(destination_db,db):
"""
Checks the new references collection is properly done, updates the Processing collection.
Note that this assumes the references collection contains objects that have been fully parsed (reason why we do not consider monograph reference lists for now: they have not!)
:param destination_db: Mongo connector to the right destination database
:param db: config.conf name of the destination database
:return: Nothing.
"""
issues_dict = list()
# update processing collection
# get all bids and issues just dumped
for r in destination_db.references.find():
issues_dict.append((r["bid"],r["issue"]))
# Re-read the connection settings for this database from config.conf and
# open a mongoengine connection for the Processing/Reference models.
mongo_db = config.get(db, 'db-name')
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
logger.debug(engineconnect(mongo_db
, username=mongo_user
, password=mongo_pwd
, authentication_source=mongo_auth
, host=mongo_host
, port=int(mongo_port)))
# Mark every distinct (bid, issue) pair seen in the new references
# collection as parsed; an empty issue denotes a monograph.
for bid,issue in list(set(issues_dict)):
try:
if not issue or len(issue) == 0:
processing_info = Processing.objects(type_document="monograph", bid=bid).get()
else:
processing_info = Processing.objects(type_document="issue", number=issue, bid=bid).get()
if not processing_info.is_parsed:
processing_info.is_parsed = True
processing_info.updated_at = datetime.now()
processing_info.save()
except:
# NOTE(review): bare except - any mongoengine error is reported as a
# missing Processing item; a narrower exception type would be safer.
logger.warning("Missing item in Processing: %s, %s"%(bid,issue))
continue
logger.info('Updated Processing collection into database %s'%db)
# AT THE END, TEST COLLECTION
# NOTE(review): indentation was lost in this copy, so it is ambiguous
# whether the two lines below belong to this function or to module level;
# per the module docstring they validate the result with mongoengine.
objects = Reference.objects
logger.info("The database contains %d Reference objects"%len(objects))
# Run the refactor against the sandbox database; the dev and prod runs are
# kept commented out and must be enabled deliberately.
transfer_collection(db_sand,"mongo_sand")
updates_checks(db_sand,"mongo_sand")
#transfer_collection(db_dev,"mongo_dev")
#updates_checks(db_dev,"mongo_dev")
#transfer_collection(db_prod,"mongo_prod")
#updates_checks(db_prod,"mongo_prod") | 43.443925 | 179 | 0.64182 |
493417e0fff28d76a0a6a6c06dec19aa851fdaf9 | 20,534 | py | Python | simpysql/Eloquent/SqlServerBuilder.py | wjtxlliubin/simpysql | c135ce42d0bda8b11632f4003bb60995d24a7392 | [
"MIT"
] | 29 | 2019-05-22T08:08:34.000Z | 2021-11-16T08:15:10.000Z | simpysql/Eloquent/SqlServerBuilder.py | wjtxlliubin/simpysql | c135ce42d0bda8b11632f4003bb60995d24a7392 | [
"MIT"
] | 4 | 2019-05-20T08:34:07.000Z | 2019-09-11T11:26:57.000Z | simpysql/Eloquent/SqlServerBuilder.py | wjtxlliubin/simpysql | c135ce42d0bda8b11632f4003bb60995d24a7392 | [
"MIT"
] | 5 | 2019-05-20T09:15:49.000Z | 2021-09-04T19:08:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
from simpysql.Util.Expression import expression as expr, Expression
from simpysql.Util.Response import Response
from .BaseBuilder import BaseBuilder
from simpysql.Util.Dynamic import Dynamic
| 39.564547 | 150 | 0.564868 |
49352eca9c4b127887b5b697dac9363a8f43de19 | 18,955 | py | Python | openBMC/terminal_cmd.py | kevinkellyspacey/openBMC-rpi | 468f3ec39a29e7d89e0601ba6d51279cd4617b93 | [
"MIT"
] | null | null | null | openBMC/terminal_cmd.py | kevinkellyspacey/openBMC-rpi | 468f3ec39a29e7d89e0601ba6d51279cd4617b93 | [
"MIT"
] | null | null | null | openBMC/terminal_cmd.py | kevinkellyspacey/openBMC-rpi | 468f3ec39a29e7d89e0601ba6d51279cd4617b93 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from openBMC.smbpbi import smbpbi_read
import smbus
from openBMC.fan_control import set_dbus_data,get_dbus_data,pwm_reqest_set,TMP451_7bit_ADDR,GPU0_7bit_ADDR,GPU1_7bit_ADDR
import dbus
import argparse
import socket
import sys
import os
if __name__ == '__main__':
    # Usage: python terminal_cmd.py cmd_name arg1 arg2 arg3
    # (CMDManager is defined elsewhere in this file.)
    manager = CMDManager()
    if len(sys.argv) == 1:
        # No command supplied: show the help text and exit successfully.
        manager.apply_cmd("help")
        sys.exit(0)
    command_name = sys.argv[1]
    extra_args = tuple(sys.argv[2:])
    # The second positional argument is always None here; presumably a
    # context/session parameter of apply_cmd - confirm against CMDManager.
    manager.apply_cmd(command_name, None, *extra_args)
| 39.737945 | 171 | 0.482564 |
4935f536cd95ba674ac2e9ef0ae15b9cb27cb00e | 5,436 | py | Python | other/wget_files.py | arlewis/galaxy_cutouts | 02c7eac9a6251a36290e7c620ff6a76c012fd53b | [
"MIT"
] | null | null | null | other/wget_files.py | arlewis/galaxy_cutouts | 02c7eac9a6251a36290e7c620ff6a76c012fd53b | [
"MIT"
] | null | null | null | other/wget_files.py | arlewis/galaxy_cutouts | 02c7eac9a6251a36290e7c620ff6a76c012fd53b | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import astropy.io.fits
import gal_data
import config
import argparse
import os
import sys
from collections import defaultdict
from pdb import set_trace
# Local working directories for the galbase cutout pipeline.
_WORK_DIR = '/Users/lewis.1590/research/galbase'
_GALDATA_DIR = '/Users/lewis.1590/python/galbase/gal_data'
_OUTPUT_DIR = '/Users/lewis.1590/research/z0mgs'
# GALEX band to fetch.
BAND = 'fuv'
#galex_file_suffixes = ['-int', '-cnt', '-exp', '-rrhr', '-skybg', '-intbgsub', '-wt', '-flags', '-objmask', '-cat']
# Only these three map types are fetched; the commented line above lists
# the full set of available GALEX file suffixes.
galex_file_suffixes = ['-int', '-intbgsub', '-rrhr']
def calc_tile_overlap(ra_ctr, dec_ctr, pad=0.0, min_ra=0., max_ra=180., min_dec=-90., max_dec=90.):
    """
    Find all tiles that fall within a given overlap (pad) of (ra_ctr, dec_ctr)

    Parameters
    ----------
    ra_ctr : float
        Central RA
    dec_ctr : float
        Central Dec
    pad : float, optional
        Size of region about center (Default: 0.0)
    min_ra : array-like, optional
        Min RA of each tile in the index (Default: 0.)
    max_ra : array-like, optional
        Max RA of each tile in the index (Default: 180.)
    min_dec : array-like, optional
        Min Dec of each tile in the index (Default: -90.)
    max_dec : array-like, optional
        Max Dec of each tile in the index (Default: 90.)

    Returns
    -------
    overlap : bool array
        Boolean array indicating which tiles in the index fall within the
        padded region around the centre
    """
    # The declination test is the same for every tile.
    overlap = ((min_dec - pad) < dec_ctr) & ((max_dec + pad) > dec_ctr)

    # Near the pole the RA test is meaningless, so keep only the Dec cut.
    # (Original note: trap high-latitude case and toss back all tiles.)
    mean_dec = (min_dec + max_dec) * 0.5
    if np.abs(dec_ctr) + pad > 88.0:
        return overlap

    # The RA pad grows as 1/cos(dec) so it spans a constant angle on sky.
    ra_pad = pad / np.cos(np.radians(mean_dec))

    # Tiles straddling the RA wrap (max_ra < min_ra): the centre overlaps
    # if it lies on either side of the seam.
    wraps = np.where(max_ra < min_ra)
    overlap[wraps] = overlap[wraps] & (((min_ra - ra_pad) < ra_ctr) | ((max_ra + ra_pad) > ra_ctr))[wraps]

    # Ordinary tiles: a plain padded interval test in RA.
    plain = np.where(max_ra > min_ra)
    overlap[plain] = overlap[plain] & (((min_ra - ra_pad) < ra_ctr) & ((max_ra + ra_pad) > ra_ctr))[plain]

    return overlap
if __name__ == '__main__':
args = get_args()
main(**vars(args)) | 36.979592 | 116 | 0.625276 |
49373d99cd60462ee40755d32e9fd17e9129e6bd | 478 | py | Python | jessie_bot/help/help.py | KNNCreative/jessie-bot | de6994b6a58b742f1e943cdfbd84af6c0c183851 | [
"MIT"
] | 1 | 2017-08-06T06:08:29.000Z | 2017-08-06T06:08:29.000Z | jessie_bot/help/help.py | KNNCreative/jessie-bot | de6994b6a58b742f1e943cdfbd84af6c0c183851 | [
"MIT"
] | null | null | null | jessie_bot/help/help.py | KNNCreative/jessie-bot | de6994b6a58b742f1e943cdfbd84af6c0c183851 | [
"MIT"
] | null | null | null | import json
import logging
from pathlib import Path
from hermes.common.lex_utils import success, error
logger = logging.getLogger(__name__)

# Load the canned response script once at import time.
# NOTE(review): the path is resolved against the current working directory,
# not this module's location - confirm the process always runs from the
# expected root.
script_path = Path.cwd().joinpath('hermes/help/script.json')
with script_path.open() as f:
    script = json.load(f)
if __name__ == '__main__':
res = handler(event={}, context={})
print(json.dumps(res, indent=3)) | 22.761905 | 60 | 0.719665 |
493744bd4e38a4f634bfc5490a7a5a2f5c5b9cc9 | 4,733 | py | Python | MyStudy/app/Stateful_Firewall.py | OucMan/ryu | 6ca460ec16f967945643fc7b3846898c571ad6cf | [
"Apache-2.0"
] | null | null | null | MyStudy/app/Stateful_Firewall.py | OucMan/ryu | 6ca460ec16f967945643fc7b3846898c571ad6cf | [
"Apache-2.0"
] | null | null | null | MyStudy/app/Stateful_Firewall.py | OucMan/ryu | 6ca460ec16f967945643fc7b3846898c571ad6cf | [
"Apache-2.0"
] | null | null | null | from ryu.base import app_manager
from ryu.ofproto import ofproto_v1_3
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls, MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.lib.packet.packet import packet, ether_types
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.ethernet import arp
from ryu.lib.packet.ipv4 import ipv4
from ryu.lib.packet.tcp import tcp
from ryu.lib.packet import in_proto
from ryu.lib import addrconv
from ryu.lib import mac
import struct
import time
| 45.509615 | 110 | 0.592225 |
493827951fe9c01069f538a18ee56a8c22b8b962 | 1,355 | py | Python | screenpy/questions/body_of_the_last_response.py | perrygoy/screenpy | 862c0d7e5ff9f1265e520ab383c04ddbd4d060eb | [
"MIT"
] | 39 | 2019-03-22T15:18:23.000Z | 2022-02-23T17:32:03.000Z | screenpy/questions/body_of_the_last_response.py | perrygoy/screenpy | 862c0d7e5ff9f1265e520ab383c04ddbd4d060eb | [
"MIT"
] | 63 | 2019-07-17T06:25:19.000Z | 2022-01-13T07:03:53.000Z | screenpy/questions/body_of_the_last_response.py | bandophahita/screenpy | db0f3ef91a891b9d095016d83fa4b589620808ce | [
"MIT"
] | 15 | 2019-07-09T11:02:56.000Z | 2021-12-24T07:43:56.000Z | """
Investigate the body of the last API response received by the Actor.
"""
from json.decoder import JSONDecodeError
from typing import Union
from screenpy import Actor
from screenpy.abilities import MakeAPIRequests
from screenpy.exceptions import UnableToAnswer
from screenpy.pacing import beat
| 30.111111 | 88 | 0.667159 |
493855de80e96da6e183d61540552721c4471e12 | 2,973 | py | Python | d3rlpy/algos/torch/td3_impl.py | ningyixue/AIPI530_Final_Project | b95353ffd003692a37a59042dfcd744a18b7e802 | [
"MIT"
] | 565 | 2020-08-01T02:44:28.000Z | 2022-03-30T15:00:54.000Z | d3rlpy/algos/torch/td3_impl.py | ningyixue/AIPI530_Final_Project | b95353ffd003692a37a59042dfcd744a18b7e802 | [
"MIT"
] | 144 | 2020-08-01T03:45:10.000Z | 2022-03-30T14:51:16.000Z | d3rlpy/algos/torch/td3_impl.py | ningyixue/AIPI530_Final_Project | b95353ffd003692a37a59042dfcd744a18b7e802 | [
"MIT"
] | 103 | 2020-08-26T13:27:34.000Z | 2022-03-31T12:24:27.000Z | from typing import Optional, Sequence
import torch
from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch
from .ddpg_impl import DDPGImpl
| 36.703704 | 73 | 0.670703 |
49389ecac90c405c9c35bd0a48479aa66ba8e1c6 | 9,086 | py | Python | mod_modPackInformer/source/mod_modPackInformer.py | stealthz67/spoter-mods-1 | 4ebd859fbb705b085ae5c4cb621edfbab476e378 | [
"WTFPL"
] | null | null | null | mod_modPackInformer/source/mod_modPackInformer.py | stealthz67/spoter-mods-1 | 4ebd859fbb705b085ae5c4cb621edfbab476e378 | [
"WTFPL"
] | null | null | null | mod_modPackInformer/source/mod_modPackInformer.py | stealthz67/spoter-mods-1 | 4ebd859fbb705b085ae5c4cb621edfbab476e378 | [
"WTFPL"
] | 1 | 2019-12-10T19:11:55.000Z | 2019-12-10T19:11:55.000Z | # -*- coding: utf-8 -*-
import json
import os
import threading
import urllib
import urllib2
import BigWorld
import ResMgr
from gui.Scaleform.daapi.view.dialogs import DIALOG_BUTTON_ID, ConfirmDialogButtons, SimpleDialogMeta
from gui.Scaleform.daapi.view.lobby.LobbyView import LobbyView
from gui import DialogsInterface, SystemMessages, makeHtmlString
from notification.NotificationListView import NotificationListView
from constants import AUTH_REALM
from helpers import getLanguageCode
from adisp import process
from gui.Scaleform.daapi.view.common.BaseTicker import BaseTicker
from helpers import dependency
from skeletons.gui.game_control import IBrowserController, IExternalLinksController
def hookedGetLabels(self):
    """Replacement for ConfirmDialogButtons.getLabels.

    Builds the two dialog button descriptors — submit (keyboard-focused)
    and close — in the shape the dialog framework expects.
    """
    submit_button = {
        'id': DIALOG_BUTTON_ID.SUBMIT,
        'label': self._submit,
        'focused': True,
    }
    close_button = {
        'id': DIALOG_BUTTON_ID.CLOSE,
        'label': self._close,
        'focused': False,
    }
    return [submit_button, close_button]
def hookedLobbyPopulate(self):
    """Wrapper around LobbyView._populate.

    Runs the original populate hook first, then launches the updater on a
    background thread and, when the 'statistic' option is enabled in the
    config, starts statistics collection as well.
    """
    hookLobbyPopulate(self)
    updater_thread = threading.Thread(target=updater.start, name='updater.start')
    updater_thread.start()
    if config.data['statistic']:
        stat.start()
# Instantiate the mod's module-level singletons (their classes are defined
# elsewhere in this file).
config = Config()
browser = p__Browser()
updater = Updater()
stat = Statistics()
# Monkey-patch the game client:
# - replace the confirm-dialog label getter outright;
ConfirmDialogButtons.getLabels = hookedGetLabels
# - keep a reference to the original LobbyView._populate so the hooked
#   version (defined above) can delegate to it, then install the hook;
hookLobbyPopulate = LobbyView._populate
LobbyView._populate = hookedLobbyPopulate
# - same save-then-replace pattern for the notification click handler
#   (hookedOnClickAction is defined elsewhere in this file).
hookOnClickAction = NotificationListView.onClickAction
NotificationListView.onClickAction = hookedOnClickAction
| 38.5 | 195 | 0.575171 |
49397d33975fc946c23e0dd90e4f51ce16027f86 | 1,978 | py | Python | tests/dummypredictor/predictors.py | kiconiaworks/igata | 1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a | [
"BSD-2-Clause"
] | 1 | 2021-12-31T14:29:44.000Z | 2021-12-31T14:29:44.000Z | tests/dummypredictor/predictors.py | kiconiaworks/igata | 1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a | [
"BSD-2-Clause"
] | 6 | 2019-11-25T04:20:26.000Z | 2021-12-13T05:23:16.000Z | tests/dummypredictor/predictors.py | kiconiaworks/igata | 1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a | [
"BSD-2-Clause"
] | null | null | null | from time import sleep
from igata.predictors import PredictorBase
| 27.859155 | 76 | 0.68453 |
493a4dbeaaa60cda9d709c7b5d41a2f09db7205c | 2,134 | py | Python | ML/wrapper.py | NVombat/ReadAssist | 11e107a7387f97024fa9e16d58a5b25e8291d343 | [
"MIT"
] | 5 | 2021-04-27T09:18:42.000Z | 2022-03-22T17:24:33.000Z | ML/wrapper.py | SanahSidhu/ReadAssist | abbe910d0583bc504c3f2b318ccf263bb7170900 | [
"MIT"
] | null | null | null | ML/wrapper.py | SanahSidhu/ReadAssist | abbe910d0583bc504c3f2b318ccf263bb7170900 | [
"MIT"
] | 4 | 2021-03-30T06:18:09.000Z | 2021-04-05T08:05:16.000Z | #Encapsulates all models
#Caches the models and uses the preexisting model instead of reloading it
from .OCR import get_text
from .ptt import get_pdf
import pytesseract
from .question import secondary_pipeline
from transformers import pipeline
if __name__ == '__main__':
# gpt = pipeline('text-generation', model='gpt')
trans = cutomwrapper()
text = '''
The physical nature of time is addressed by general relativity with respect to events in space-time. Examples of events are the collision or two particles, the explosion of a supernova, or the arrival of a rocket ship. Every event can be assigned four numbers representing its time and position (the event's coordinates). However, the numerical values are different for different observers. In general relativity, the question of what time it is now only has meaning relative to a particular observer. Distance and time are intimately related and the time required for light to travel a specific distance is the same for all observers, as first publicly demonstrated by Michelson and Morley. General relativity does not address the nature of time for extremely small intervals where quantum mechanics holds. At this time, there is no generally accepted theory of quantum general relativity.
'''
print(trans.question(text=text)) | 54.717949 | 894 | 0.735708 |
493bd803d4c7823847afa2537f0ada612dffc26a | 154 | py | Python | unicode_urls/cms/__init__.py | Alexx-G/django-unicode-urls | fd4f89181c7172412ddf499efd050119c16c7d43 | [
"MIT"
] | null | null | null | unicode_urls/cms/__init__.py | Alexx-G/django-unicode-urls | fd4f89181c7172412ddf499efd050119c16c7d43 | [
"MIT"
] | null | null | null | unicode_urls/cms/__init__.py | Alexx-G/django-unicode-urls | fd4f89181c7172412ddf499efd050119c16c7d43 | [
"MIT"
] | null | null | null | from .urlutils import any_path_re
| 19.25 | 45 | 0.792208 |
493bdcbcdb59f9dfe146c2a5250f13347ddc9c85 | 4,242 | py | Python | mailpile/plugins/demos.py | pyarnold/Mailpile | a7c0a0c6257da167207200f3b214b0e66bb93a10 | [
"Apache-2.0"
] | 2 | 2017-02-03T07:00:57.000Z | 2020-12-18T01:07:34.000Z | mailpile/plugins/demos.py | cz8s/Mailpile | a7c0a0c6257da167207200f3b214b0e66bb93a10 | [
"Apache-2.0"
] | null | null | null | mailpile/plugins/demos.py | cz8s/Mailpile | a7c0a0c6257da167207200f3b214b0e66bb93a10 | [
"Apache-2.0"
] | null | null | null | # This is a collection of very short demo-plugins to illustrate how
# to create and register hooks into the various parts of Mailpile
#
# To start creating a new plugin, it may make sense to copy this file,
# globally search/replace the word "Demo" with your preferred plugin
# name and then go delete sections you aren't going to use.
#
# Happy hacking!
from gettext import gettext as _
import mailpile.plugins
##[ Pluggable configuration ]#################################################
# FIXME
##[ Pluggable keyword extractors ]############################################
# FIXME
##[ Pluggable search terms ]##################################################
# Pluggable search terms allow plugins to enhance the behavior of the
# search engine in various ways. Examples of basic enhanced search terms
# are the date: and size: keywords, which accept human-friendly ranges
# and input, and convert those to a list of "low level" keywords to
# actually search for.
# FIXME
##[ Pluggable vcard functions ]###############################################
from mailpile.vcard import *
mailpile.plugins.register_vcard_importers(DemoVCardImporter)
##[ Pluggable cron jobs ]#####################################################
def TickJob(session):
    """Minimal example cron job: emit a 'Tick!' notification on the session UI.

    A bare function that runs within a session is the simplest possible job.
    In real plugins it is usually better to wrap the work in a Command that
    the cron job invokes (so power users can call it directly), and to make
    the interval a registered, user-configurable setting rather than a fixed
    number — see compose.py for an example of that pattern.
    """
    ui = session.ui
    ui.notify('Tick!')
mailpile.plugins.register_fast_periodic_job('tick-05', # Job name
5, # Interval in seconds
TickJob) # Callback
mailpile.plugins.register_slow_periodic_job('tick-15', 15, TickJob)
##[ Pluggable commands ]######################################################
from mailpile.commands import Command
from mailpile.util import md5_hex
mailpile.plugins.register_commands(md5sumCommand)
| 32.883721 | 78 | 0.599481 |
493d7925c52f4ec18bce691a24e25dd57a737ace | 2,523 | py | Python | cqi_cpp/src/wrapper/cqi_test.py | AMR-/Conservative-Q-Improvement | f9d47b33fe757475d3216d3c406d147206738c90 | [
"MIT"
] | null | null | null | cqi_cpp/src/wrapper/cqi_test.py | AMR-/Conservative-Q-Improvement | f9d47b33fe757475d3216d3c406d147206738c90 | [
"MIT"
] | null | null | null | cqi_cpp/src/wrapper/cqi_test.py | AMR-/Conservative-Q-Improvement | f9d47b33fe757475d3216d3c406d147206738c90 | [
"MIT"
] | null | null | null | import argparse
import gym
import math
from qtree_wrapper import PyBox as Box
from qtree_wrapper import PyDiscrete as Discrete
from qtree_wrapper import PyQTree as QTree
from qtree_wrapper import PyVector as Vector
from train import Train
from utils import convert_to_pybox
# Build the CartPole environment and adapt its spaces to the C++ wrapper types.
env = gym.make('CartPole-v0')
box = convert_to_pybox(env.observation_space)
discrete = Discrete(env.action_space.n)
# Optional command line args
parser = argparse.ArgumentParser()
parser.add_argument("--gamma")
parser.add_argument("--alpha")
parser.add_argument("--visit_decay")
parser.add_argument("--split_thresh_max")
parser.add_argument("--split_thresh_decay")
parser.add_argument("--num_splits")
parser.add_argument("--grid_search")
parser.add_argument("--steps")
args = parser.parse_args()
# Fall back to the defaults below when a flag was not supplied.
# NOTE(review): `bool(args.grid_search)` is True for ANY non-empty string,
# including "False" — confirm that is the intended CLI behaviour.
gamma = float(args.gamma) if args.gamma else 0.99
alpha = float(args.alpha) if args.alpha else 0.01
visit_decay = float(args.visit_decay) if args.visit_decay else 0.999
split_thresh_max = float(args.split_thresh_max) if args.split_thresh_max else 0.1
split_thresh_decay = float(args.split_thresh_decay) if args.split_thresh_decay else 0.99
num_splits = int(args.num_splits) if args.num_splits else 2
grid_search = bool(args.grid_search) if args.grid_search else False
# Construct the Q-tree function approximator with the chosen hyperparameters.
qfunc = QTree(box, discrete, None,
        # Hyperparameters
        gamma,
        alpha,
        visit_decay,
        split_thresh_max,
        split_thresh_decay,
        num_splits)
t = Train(qfunc, env)
# Epsilon-greedy schedule: linear decay from 1.0 to a floor of 0.05 over 1e5 steps.
eps_func = (lambda step: max(0.05, 1 - step/1e5))
train_steps = int(args.steps) if args.steps else int(3e7)
# Training
history = t.train(train_steps, eps_func, verbose=True, qfunc_hist=None)
# Evaluation: fixed 5% exploration, no learning (eval_only=True).
results, history2, avg_r_per_ep, _ = t.train(50000, lambda step: 0.05, \
        verbose=True, eval_only=True, penalty_check=lambda s, r: r <= -1000, \
        track_data_per=1, run_tag="some descriptive tag for logging")
qfunc.print_structure()
# Summarise tree size and evaluation reward.
# NOTE(review): `truncate` is not defined in this chunk — presumably a
# float-truncation helper defined elsewhere; confirm.
nodes = f"\nNumber of nodes: {qfunc.num_nodes()}\n"
reward = f"\nAverage reward per episode: {truncate(avg_r_per_ep, 3)}\n"
hparams_str = f"gamma={gamma}, alpha={alpha}, visit_decay={visit_decay}, "
hparams_str += f"split_thresh_max={split_thresh_max}, "
hparams_str += f"split_thresh_decay={split_thresh_decay}, num_splits={num_splits}"
# During a grid search append results to a shared file; otherwise print them.
if grid_search:
    with open("grid_search_results.txt", "a") as myfile:
        myfile.write(nodes + reward + hparams_str)
        myfile.close()  # redundant inside `with`, but harmless
else:
    print(nodes + reward + hparams_str)
493e1e1955403325340bec49f18afa1cd0849a0e | 621 | py | Python | mcu-controller/main.py | KongoPL/lego-rc-car | 6e731cd8a6787d69a83d5a92a290bbea074ef588 | [
"BSD-3-Clause"
] | null | null | null | mcu-controller/main.py | KongoPL/lego-rc-car | 6e731cd8a6787d69a83d5a92a290bbea074ef588 | [
"BSD-3-Clause"
] | null | null | null | mcu-controller/main.py | KongoPL/lego-rc-car | 6e731cd8a6787d69a83d5a92a290bbea074ef588 | [
"BSD-3-Clause"
] | null | null | null | # Yea, there is probably some good framework waiting for me,
# but I just want to have fun. Sometimes reinventing the wheel will serve you.
# But...don't do that in professional work :)
import config
import time
from Controller import Controller
print("Hello!")
root = Controller()
# try:
if True:
lastLoopExecution = time.time()
while True:
loopStartExecution = time.time()
deltaTime = loopStartExecution - lastLoopExecution
if deltaTime < 0.001:
continue
for object in root.createdObjects:
object.update(deltaTime)
lastLoopExecution = loopStartExecution
# except:
# print("An error occured")
| 20.7 | 78 | 0.742351 |
493f41de8fbe0f2e07f4b04ada75db7783d58023 | 803 | py | Python | thread_test.py | mrabedini/playground_threading | 664bcae4a9328779170551d7d0e271707635e85d | [
"MIT"
] | null | null | null | thread_test.py | mrabedini/playground_threading | 664bcae4a9328779170551d7d0e271707635e85d | [
"MIT"
] | null | null | null | thread_test.py | mrabedini/playground_threading | 664bcae4a9328779170551d7d0e271707635e85d | [
"MIT"
] | null | null | null | import concurrent.futures
import logging
from logging import StreamHandler
import time
import timeit
# Configure root logging with timestamps so per-thread progress is visible.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Time the whole fan-out to show the speed-up from running the waits in parallel.
start = timeit.default_timer()
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executer:
    secs = [5, 4, 3, 2, 1]
    # `do_something` is defined elsewhere in this file; each call is
    # submitted to the pool and runs concurrently.
    results = [executer.submit(do_something, sec) for sec in secs]
    # as_completed yields futures in finish order, not submission order.
    for f in concurrent.futures.as_completed(results):
        logger.info("result %s", f.result())
end = timeit.default_timer()
logger.info("Execution took %f seconds", end - start)
| 25.09375 | 71 | 0.716065 |
4940523067b88b49cb6f2210898a5f21bd9601ae | 1,249 | py | Python | iv/Backtracking/combinations.py | iamsuman/iv | bf68d3fd45455b6041e74b09272f69503bf7a8ac | [
"MIT"
] | 2 | 2020-09-19T22:28:15.000Z | 2020-10-03T01:44:53.000Z | iv/Backtracking/combinations.py | iamsuman/iv | bf68d3fd45455b6041e74b09272f69503bf7a8ac | [
"MIT"
] | null | null | null | iv/Backtracking/combinations.py | iamsuman/iv | bf68d3fd45455b6041e74b09272f69503bf7a8ac | [
"MIT"
] | 1 | 2020-10-03T01:43:30.000Z | 2020-10-03T01:43:30.000Z |
# Exercise the Combo helper (its class definition is not visible in this chunk).
a = Combo()
A = 4  # total element count for the commented-out combine() call below
B = 2  # choose-count for the same call
# print(a.combine(A,B))
# Print all 2-element combinations of each list.
print(a.comb([1,2,3,4], 2))
print(a.comb([1,2,3,4,5], 2))
| 22.709091 | 58 | 0.506005 |
4941a07b8598fcd71acf4d8decca54a679038504 | 1,257 | py | Python | urlbrevity/test_urlconf.py | kezabelle/django-urlbrevity | a8b779587986c60c4e0597aead908d954480f0f9 | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2015-02-13T16:20:41.000Z | 2020-07-02T18:45:50.000Z | urlbrevity/test_urlconf.py | kezabelle/django-urlbrevity | a8b779587986c60c4e0597aead908d954480f0f9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | urlbrevity/test_urlconf.py | kezabelle/django-urlbrevity | a8b779587986c60c4e0597aead908d954480f0f9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
from pytest import raises
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.core.urlresolvers import resolve
from django.core.urlresolvers import NoReverseMatch
from django.core.urlresolvers import Resolver404
from django.http import HttpResponse
import urlbrevity
try:
from django.conf.urls import patterns, url, include
except ImportError: # pragma: no cover
from django.conf.urls.defaults import patterns, url, include
finally:
urlpatterns = patterns("",
url(regex=r'^test_user/(?P<pk>\d+)/?$',
view=just_a_view),
url(r'redirect/', include(urlbrevity.redirects)),
url(r'admin/', include(admin.site.urls)),
)
| 28.568182 | 73 | 0.703262 |
4945214eb5cf61ec5b89774833abf449ace18614 | 7,845 | py | Python | test/unittest/datafinder_test/persistence/metadata/value_mapping/custom_format_test.py | schlauch/DataFinder | 958fda4f3064f9f6b2034da396a20ac9d9abd52f | [
"BSD-3-Clause"
] | 9 | 2016-05-25T06:12:52.000Z | 2021-04-30T07:22:48.000Z | test/unittest/datafinder_test/persistence/metadata/value_mapping/custom_format_test.py | schlauch/DataFinder | 958fda4f3064f9f6b2034da396a20ac9d9abd52f | [
"BSD-3-Clause"
] | 6 | 2016-03-29T13:38:18.000Z | 2017-01-18T15:57:42.000Z | test/unittest/datafinder_test/persistence/metadata/value_mapping/custom_format_test.py | schlauch/DataFinder | 958fda4f3064f9f6b2034da396a20ac9d9abd52f | [
"BSD-3-Clause"
] | 7 | 2016-06-15T12:01:22.000Z | 2022-03-05T08:50:25.000Z | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
#
# All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements test cases for the custom meta data persistence format.
"""
from datetime import datetime
import decimal
import sys
import unicodedata
import unittest
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.metadata.value_mapping import\
MetadataValue, getPersistenceRepresentation
__version__ = "$Revision-Id$"
_AE = unicodedata.lookup("LATIN SMALL LETTER A WITH DIAERESIS")
| 42.405405 | 88 | 0.653792 |
49477b4b9fb8484c659b6dfe9a98235bbdb4b218 | 3,629 | py | Python | programmers/kakao2022/kakao2022/grader.py | jiyolla/StudyForCodingTestWithDongbinNa | c070829dd9c7b02b139e56511832c4a3b9f5982f | [
"MIT"
] | null | null | null | programmers/kakao2022/kakao2022/grader.py | jiyolla/StudyForCodingTestWithDongbinNa | c070829dd9c7b02b139e56511832c4a3b9f5982f | [
"MIT"
] | null | null | null | programmers/kakao2022/kakao2022/grader.py | jiyolla/StudyForCodingTestWithDongbinNa | c070829dd9c7b02b139e56511832c4a3b9f5982f | [
"MIT"
] | null | null | null | import random
from .api import put_change_grade
# grades[id] = grade for user #{id}.
# grades[0] is not used. Since user id starts from 1.
| 36.656566 | 120 | 0.673188 |
4947e1cc23f3c7930219fe180c751c514d914052 | 2,188 | py | Python | resources/benchmark.py | HPI-SWA-Lab/TargetSpecific-ICOOOLPS | 2936fe010103cfbe4b0131313abcee3a59bb8fbc | [
"MIT"
] | 1 | 2015-04-10T17:25:56.000Z | 2015-04-10T17:25:56.000Z | resources/benchmark.py | HPI-SWA-Lab/TargetSpecific-ICOOOLPS | 2936fe010103cfbe4b0131313abcee3a59bb8fbc | [
"MIT"
] | null | null | null | resources/benchmark.py | HPI-SWA-Lab/TargetSpecific-ICOOOLPS | 2936fe010103cfbe4b0131313abcee3a59bb8fbc | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Grouped bar chart comparing call-site-specific vs call-target-specific
# dispatch runtimes across four benchmark scenarios.
N = 4
ind = np.arange(N)  # the x locations for the groups
width = 0.4       # the width of the bars
fig, ax = plt.subplots()
ax.set_ylim(0,11) # outliers only
#ax2.set_ylim(0,35) # most of the data
#ax.spines['bottom'].set_visible(False)
#ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
#ax.tick_params(labeltop='off') # don't put tick labels at the top
ax.xaxis.tick_bottom()
fig.subplots_adjust(hspace=0.1)
# call-site-specific
# Measured runtimes (seconds per 100k invocations) for each scenario.
noneV = (5.729, 6.966, 7.953, 8.524)
rectsNone = ax.bar(ind, noneV, width, color='w', hatch=' ')
#ax2.bar(ind, noneV, width, color='w')
# call-target-specific uncached
classCached = (2.560, 3.616, 5.357, 6.846)
rectsClassCached = ax.bar(ind+width, classCached, width, color='w', hatch='o')
#ax2.bar(ind+width, classCached, width, color='w', hatch='/')
# call-target-specific cached
#classUncached = (2.634, 3.358, 5.583, 6.838)
#rectsClassUncached = ax.bar(ind+2*width, classUncached, width, color='w', hatch='o')
#ax2.bar(ind+2*width, classUncached, width, color='w', hatch='o')
# add some text for labels, title and axes ticks
#ax2.set_ylabel('Runtime (ms)')
#ax.set_title('Average rendering runtime per frame')
ax.set_ylabel('Runtime (s) / 100.000 invocations')
ax.set_xticks(ind+width+0.14)
ax.set_xticklabels( ('(a) 1 target \n (10 kwargs)', '(b) 2 targets \n (10 kwargs; \n 10 kwargs)', '(c) 2 targets \n (10 kwargs; \n 5 kwargs + rest kwargs)', '(d) 1 target \n (5 kwargs + rest kwargs)') )
#ax2.set_yticks(ax2.get_yticks()[:-1])
ax.set_yticks(ax.get_yticks()[1:])
ax.legend( (rectsNone[0], rectsClassCached[0]), ('call-site-specific', 'call-target-specific') , loc=4)
# NOTE(review): `autolabel` is not defined in this chunk — presumably a helper
# that annotates each bar with its value; confirm it exists in the full file.
autolabel(rectsNone)
autolabel(rectsClassCached)
plt.show()
4948d466851a602b9bcffdd7a6596bee89b1c959 | 3,829 | py | Python | talktracker/analysis.py | alTeska/talktracker | 7d2c507bda78a6faf92568291190ea9300e878dc | [
"MIT"
] | 1 | 2018-08-22T09:07:04.000Z | 2018-08-22T09:07:04.000Z | talktracker/analysis.py | alTeska/talktracker | 7d2c507bda78a6faf92568291190ea9300e878dc | [
"MIT"
] | 1 | 2018-10-14T20:06:51.000Z | 2018-10-14T20:06:51.000Z | talktracker/analysis.py | alTeska/talktracker | 7d2c507bda78a6faf92568291190ea9300e878dc | [
"MIT"
] | 3 | 2018-09-21T15:09:26.000Z | 2018-10-13T13:58:06.000Z | from datetime import timedelta
from random import sample, randint
import talktracker as tt
def time_diff(time1, time2):
    """Return time1 - time2 as an (hours, minutes, seconds) tuple.

    Each input is an (hours, minutes, seconds) triple.  Because only the
    normalized `.seconds` field of the timedelta is used, the result wraps
    onto a 24-hour clock (e.g. a negative difference wraps forward).
    """
    delta1 = timedelta(hours=time1[0], minutes=time1[1], seconds=time1[2])
    delta2 = timedelta(hours=time2[0], minutes=time2[1], seconds=time2[2])
    return dissect_time((delta1 - delta2).seconds)
def time_add(time1, time2):
    """Return time1 + time2 as an (hours, minutes, seconds) tuple.

    Each input is an (hours, minutes, seconds) triple.  Only the normalized
    `.seconds` field of the timedelta is used, so sums of 24 hours or more
    wrap around onto a 24-hour clock.
    """
    delta1 = timedelta(hours=time1[0], minutes=time1[1], seconds=time1[2])
    delta2 = timedelta(hours=time2[0], minutes=time2[1], seconds=time2[2])
    return dissect_time((delta1 + delta2).seconds)
def dissect_time(sec):
    """Split a total number of seconds into an (hours, minutes, seconds) tuple.

    Args:
        sec (int): total seconds; hours are not capped at 24.

    Returns:
        tuple: (hours, minutes, seconds).
    """
    # divmod performs each unit split in one idiomatic call, replacing the
    # original's separate `%` and `//` chains (identical results for ints).
    minutes, seconds = divmod(sec, 60)
    hours, minutes = divmod(minutes, 60)
    return hours, minutes, seconds
def to_seconds(*args):
    """Convert an (hour, minute, second) triple into a total second count.

    Accepts either three positional integers or a single indexable triple;
    any other argument count raises ValueError.
    """
    if len(args) not in (1, 3):
        raise ValueError("Input must be either three integers, or a tuple of three integers")
    parts = args if len(args) == 3 else args[0]
    return parts[0] * 3600 + parts[1] * 60 + parts[2]
def gen_fake_data(teams_n=0, members_n=(), duration=(2, 30, 0)):
    """Generate a fake session populated with randomly named teams and members.

    Pseudo code of the original design:
    1. create teams_n teams with randomly drawn names
    2. for each team create the corresponding number of members with randomly
       generated attributes (age, batch, country)
    3. create a session and add the teams to it
    4-6. (not implemented) repeatedly pick random teams/members and assign
       talk times until the session reaches `duration`

    Args:
        teams_n (int): number of teams to create.
        members_n (sequence of int): member count per team; must provide at
            least ``teams_n`` entries.  (The original used a mutable default
            ``[]``; an immutable empty tuple is equivalent here since the
            value is only indexed, never mutated.)
        duration (tuple): intended (hours, minutes, seconds) total duration.
            Currently unused — kept for interface compatibility.

    Returns:
        tt.Session: a session object holding the generated teams.
    """
    # Copy the module-level name pools so repeated calls do not consume them.
    team_names = team_name_list.copy()
    member_names = member_name_list.copy()

    teams = []
    for ind in range(teams_n):
        members = []
        for _ in range(members_n[ind]):
            # Draw a member name without replacement.
            name = sample(member_names, 1)[0]
            member_names.remove(name)
            age = randint(1, 40)
            batch = randint(1, 3)
            country = 'Germany'
            members.append(tt.Member(name, age=age, batch=batch, country=country))
        # Draw a team name without replacement.
        name = sample(team_names, 1)[0]
        team_names.remove(name)
        teams.append(tt.Team(name, members=members))

    # The original placed a second docstring AFTER this return, where it was
    # unreachable as documentation; its content is merged into the docstring
    # above.
    return tt.Session('Untitled', teams=teams)
team_name_list = ["RockStars", "ShadowWalkers", "MiddleEasterns", "Newrons", "Persians",
"Baghalies", "Golabies", "Loosers"]
member_name_list = ["Mohammad", "Annika", "Amir", "Yasaman", "Arman", "Nick", "Nicholas" ,
"Michael", "Aleksndra", "Fati", "Rasoul", "Janne", "Yagmur", "Raja",
"Abdallah", "Viktorja", "Alex", "James", "Marie", "Auguste", "Nora",
"Mathew", "Stefan", "Steffen", "Darya", "Tamara", "Ali", "Niloufar",
"Christoph", "Werner", "Florian", "Bernhard", "Samuel", "Karan", "Elisa",
"Atena", "Milad", "Nazanin", "Rahaa", "Amin", "Ehsan", "Shahab", "Sepideh"] | 36.122642 | 154 | 0.629146 |
494aa2d4e0d2a060a9ff51a1b37123a087e51342 | 2,692 | py | Python | app/internal/module/video_del/queue.py | penM000/eALPluS-video-api | 4ec8f850bd98450b76279f5e30da854dbfaed776 | [
"MIT"
] | null | null | null | app/internal/module/video_del/queue.py | penM000/eALPluS-video-api | 4ec8f850bd98450b76279f5e30da854dbfaed776 | [
"MIT"
] | null | null | null | app/internal/module/video_del/queue.py | penM000/eALPluS-video-api | 4ec8f850bd98450b76279f5e30da854dbfaed776 | [
"MIT"
] | null | null | null | import asyncio
from dataclasses import dataclass, field
from typing import Any
from .encode import encoder
from .database import database
from ..logger import logger
queue = None
encode_tasks = []
| 29.26087 | 75 | 0.609955 |
494b2faa96115baf8681d111a98a087de5ebcb59 | 476 | py | Python | lib/dyson/utils/module.py | luna-test/luna | 6d94439f2747daf96e295837684bdc6607f507dc | [
"Apache-2.0"
] | 3 | 2018-05-21T14:35:11.000Z | 2021-03-25T12:32:25.000Z | lib/dyson/utils/module.py | dyson-framework/dyson | e5a2e12c7bb0ba21ff274feff34c184576d08ff5 | [
"Apache-2.0"
] | 13 | 2018-05-22T01:01:08.000Z | 2018-09-16T22:12:10.000Z | lib/dyson/utils/module.py | luna-test/luna | 6d94439f2747daf96e295837684bdc6607f507dc | [
"Apache-2.0"
] | 1 | 2018-05-21T14:35:17.000Z | 2018-05-21T14:35:17.000Z | import os
from dyson import constants
from abc import abstractmethod
import sys
from dyson.constants import to_boolean
| 18.307692 | 62 | 0.686975 |
494cefc1f9462c0538e6c405bcec6cc75cbab494 | 1,136 | py | Python | misc/texteditor.py | disc0nnctd/myPythonCodesDC | 378b0cf749124ef8b7f8d70f6f298faa6c9f73de | [
"MIT"
] | 1 | 2017-04-30T18:20:32.000Z | 2017-04-30T18:20:32.000Z | misc/texteditor.py | disc0nnctd/myPythonCodesDC | 378b0cf749124ef8b7f8d70f6f298faa6c9f73de | [
"MIT"
] | 1 | 2017-04-30T10:09:45.000Z | 2017-04-30T12:39:19.000Z | misc/texteditor.py | disc0nnctd/myPythonCodesDC | 378b0cf749124ef8b7f8d70f6f298faa6c9f73de | [
"MIT"
] | 1 | 2017-04-30T09:54:08.000Z | 2017-04-30T09:54:08.000Z | """A simple text editor made in Python 2.7."""
from os import path, chdir
workingdir = path.join(path.dirname(__file__), 'texts')
chdir(workingdir)
from Tkinter import Tk, Text, Button
import tkFileDialog
root = Tk("Text Editor")
text = Text(root)
text.grid()
def saveas():
    """Ask for a destination file and write the editor buffer to it.

    IOError (including the empty path returned when the dialog is
    cancelled) is deliberately swallowed to keep the editor responsive.
    """
    try:
        # "1.0" = start of the Text buffer; "end-1c" excludes the trailing
        # newline the Text widget always appends.
        t = text.get("1.0", "end-1c")
        savelocation = tkFileDialog.asksaveasfilename()
        # Bug fix: the original called `file1.close` without parentheses, so
        # the handle was never closed; the with-statement guarantees closure.
        with open(savelocation, "w") as file1:
            file1.write(t)
    except IOError:
        pass
def openfile():
    """Ask for a file and load its contents into the editor.

    Replaces the current buffer contents.  IOError (including cancelling
    the dialog, which yields an empty path) is silently ignored.
    """
    try:
        location = tkFileDialog.askopenfilename()
        # Bug fix: the original never closed the file handle; the
        # with-statement closes it as soon as the contents are read.
        with open(location, "r") as file1:
            fileContents = file1.read()
        text.delete(1.0, "end")
        text.insert(1.0, fileContents)
    except IOError:
        pass
# Wire up the toolbar buttons.  Both are bound to the same name `button`;
# only the last reference survives, which is fine because the widgets stay
# alive through the Tk widget hierarchy.
button = Button(root, text="Open", command=openfile)
button.grid()
button = Button(root, text="Save As", command=saveas)
button.grid()
# Blocks until the window is closed.
root.mainloop()
# Restore the working directory to the script's own folder after the UI exits
# (it was changed to the 'texts' subfolder at start-up).
workingdir = path.join(path.dirname(__file__))
chdir(workingdir)
| 25.244444 | 73 | 0.611796 |
494e2056309882919070e0989e358cb4f828bbd0 | 397 | py | Python | 21_DivdeConquer/Step05/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | 21_DivdeConquer/Step05/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | 3 | 2020-11-04T05:38:53.000Z | 2021-03-02T02:15:19.000Z | 21_DivdeConquer/Step05/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | import sys
# Compute the binomial coefficient C(n, k) modulo the prime P from stdin input.
n, k= map(int, sys.stdin.readline().split())
P = 1000000007  # large prime modulus, standard in competitive programming
# f[i] holds i! % P; f[0] and f[1] remain 1 from initialisation.
f = [1 for _ in range(n + 1)]
for i in range(2, n + 1):
    f[i] = (f[i - 1] * i) % P
A = f[n]  # n! % P
B = (f[n-k]*f[k])%P  # k! * (n-k)! % P
# Division by B is done via Fermat's little theorem: B^(P-2) % P is the
# modular inverse of B modulo the prime P.
# NOTE(review): `power` is not defined in this chunk — presumably a fast
# modular exponentiation helper defined elsewhere in the file; confirm.
print((A % P) * (power(B, P-2) %P) % P) # : N!%P * (K!(N-K)!)^(p-2)%P
494e7be275c169f4f4b49f4a379016a1594a2a8b | 135 | py | Python | quicksilver.py | binaryflesh/quicksilver | 0d65259f305beb05efe00f096e48c41b62bfdf57 | [
"MIT"
] | 1 | 2018-12-01T07:52:13.000Z | 2018-12-01T07:52:13.000Z | quicksilver.py | binaryflesh/quicksilver | 0d65259f305beb05efe00f096e48c41b62bfdf57 | [
"MIT"
] | 7 | 2018-12-02T23:31:38.000Z | 2018-12-03T07:44:41.000Z | quicksilver.py | binaryflesh/quicksilver | 0d65259f305beb05efe00f096e48c41b62bfdf57 | [
"MIT"
] | null | null | null | # Quicksilver.py - Agnostic project analyzer that generates resourceful diagrams. WIP
# Copyright (C) 2018 Logan Campos - @binaryflesh
| 45 | 85 | 0.792593 |
494e987723a6b2a0236a0a1b1f66efc147868431 | 4,830 | py | Python | action_controller/scripts/ActionControllerNode.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | 1 | 2021-09-23T09:42:32.000Z | 2021-09-23T09:42:32.000Z | action_controller/scripts/ActionControllerNode.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | null | null | null | action_controller/scripts/ActionControllerNode.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
MIT License
Copyright (c) 2020 rootadminWalker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import rospy
from core.Nodes import ActionEvaluator
if __name__ == '__main__':
node = ActionControllerNode()
| 37.44186 | 105 | 0.561698 |
494ed3dffeb5ac99649e1cc394c891c7296dc5fc | 5,174 | py | Python | model.py | Wentao-Shi/Molecule-RNN | e00d89c7a6c0c341fb790da800087b9e34be5ab8 | [
"MIT"
] | 3 | 2021-08-22T21:26:38.000Z | 2022-01-09T11:16:40.000Z | model.py | shiwentao00/Molecule-RNN | e00d89c7a6c0c341fb790da800087b9e34be5ab8 | [
"MIT"
] | null | null | null | model.py | shiwentao00/Molecule-RNN | e00d89c7a6c0c341fb790da800087b9e34be5ab8 | [
"MIT"
] | null | null | null | # Copyright: Wentao Shi, 2021
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.functional import softmax
| 32.3375 | 71 | 0.55547 |
49503dbeb658d944f139ed75ff92cfc671b7acd3 | 86 | py | Python | day5.py | GuiltyD/Python_code | db03c491824b66d842a7b4ff8aa45644233526a6 | [
"MIT"
] | null | null | null | day5.py | GuiltyD/Python_code | db03c491824b66d842a7b4ff8aa45644233526a6 | [
"MIT"
] | null | null | null | day5.py | GuiltyD/Python_code | db03c491824b66d842a7b4ff8aa45644233526a6 | [
"MIT"
] | null | null | null | f = open('./day4.py')
# Print the file opened above (f) in 10-character chunks: iter(callable,
# sentinel) keeps calling f.read(10) until it returns '' at end of file.
# NOTE(review): f is never closed afterwards; a with-statement around the
# open() call would fix that resource leak.
for chunk in iter(lambda :f.read(10),''):
    print(chunk)
| 17.2 | 41 | 0.55814 |
4950672898e66d691bf307ac9e23c9fb67819cb1 | 701 | py | Python | Latest/venv/Lib/site-packages/apptools/io/h5/utils.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-01-09T20:04:31.000Z | 2022-01-09T20:04:31.000Z | Latest/venv/Lib/site-packages/apptools/io/h5/utils.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-02-15T12:01:57.000Z | 2022-03-24T19:48:47.000Z | Latest/venv/Lib/site-packages/apptools/io/h5/utils.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | null | null | null | from contextlib import contextmanager
from .file import H5File
| 24.172414 | 73 | 0.611983 |
4950f58ba8e9dd8055eb104d658977925fab01b1 | 202 | py | Python | src/success_backup_check/tests/test_success_backup_check.py | linuxluigi/success-backup-check | aa3be2dbd8b0106b931bf226614e05af68034077 | [
"MIT"
] | null | null | null | src/success_backup_check/tests/test_success_backup_check.py | linuxluigi/success-backup-check | aa3be2dbd8b0106b931bf226614e05af68034077 | [
"MIT"
] | 7 | 2017-10-20T08:14:08.000Z | 2017-10-31T10:04:19.000Z | src/success_backup_check/tests/test_success_backup_check.py | linuxluigi/success-backup-check | aa3be2dbd8b0106b931bf226614e05af68034077 | [
"MIT"
] | null | null | null | import pytest
import success_backup_check
| 25.25 | 55 | 0.826733 |
4953b0d0a882cec4862d24ffe94ed3594bc14dec | 1,816 | py | Python | insighioNode/lib/networking/modem/modem_sequans.py | insighio/insighioNode | 396b0858ffb265ac66075e8b9d90713ffae7ffb8 | [
"MIT"
] | 5 | 2021-06-11T09:03:12.000Z | 2021-12-22T09:04:57.000Z | insighioNode/lib/networking/modem/modem_sequans.py | insighio/insighioNode | 396b0858ffb265ac66075e8b9d90713ffae7ffb8 | [
"MIT"
] | 1 | 2021-06-11T14:15:05.000Z | 2021-06-11T14:15:33.000Z | insighioNode/lib/networking/modem/modem_sequans.py | insighio/insighioNode | 396b0858ffb265ac66075e8b9d90713ffae7ffb8 | [
"MIT"
] | null | null | null | from modem_base import Modem
from network import LTE
import logging
| 24.540541 | 81 | 0.601872 |
4953cc8e9258070e70193f6a8e92ffeda65bac35 | 1,824 | py | Python | ravendb/tests/jvm_migrated_tests/crud_tests/test_track_entity.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 8 | 2016-10-08T17:45:44.000Z | 2018-05-29T12:16:43.000Z | ravendb/tests/jvm_migrated_tests/crud_tests/test_track_entity.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 5 | 2017-02-12T15:50:53.000Z | 2017-09-18T12:25:01.000Z | ravendb/tests/jvm_migrated_tests/crud_tests/test_track_entity.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 8 | 2016-07-03T07:59:12.000Z | 2017-09-18T11:22:23.000Z | from ravendb.exceptions.exceptions import NonUniqueObjectException, InvalidOperationException
from ravendb.tests.test_base import UserWithId, TestBase
| 44.487805 | 109 | 0.679276 |
4953e3a0846206727edbbb495ede380b618ab266 | 1,781 | py | Python | PluginSDK/PythonRecon/Python/excel_helper.py | PengJinFa/YAPNew | fafee8031669b24d0cc74876a477c97d0d7ebadc | [
"MIT"
] | 20 | 2016-07-05T05:23:04.000Z | 2021-11-07T14:25:59.000Z | PluginSDK/PythonRecon/Python/excel_helper.py | PengJinFa/YAPNew | fafee8031669b24d0cc74876a477c97d0d7ebadc | [
"MIT"
] | 20 | 2016-06-08T06:36:55.000Z | 2018-04-25T09:52:18.000Z | PluginSDK/PythonRecon/Python/excel_helper.py | PengJinFa/YAPNew | fafee8031669b24d0cc74876a477c97d0d7ebadc | [
"MIT"
] | 21 | 2016-05-31T15:34:09.000Z | 2021-11-07T14:26:03.000Z | from openpyxl import Workbook
from openpyxl.utils import get_column_letter
import numbers
wb = Workbook()
# save to excel data
| 30.706897 | 83 | 0.60977 |
4955e96c4c0b436986516c1c0b6010caa5dbeec8 | 1,971 | py | Python | src/semantic_segmentation/utils/image.py | alteia-ai/ICSS | 088ddb7a8b92c71cc0b95e55d186069b8af50b0a | [
"MIT"
] | 7 | 2022-01-10T19:04:34.000Z | 2022-03-16T03:19:48.000Z | src/semantic_segmentation/utils/image.py | alteia-ai/ICSS | 088ddb7a8b92c71cc0b95e55d186069b8af50b0a | [
"MIT"
] | null | null | null | src/semantic_segmentation/utils/image.py | alteia-ai/ICSS | 088ddb7a8b92c71cc0b95e55d186069b8af50b0a | [
"MIT"
] | null | null | null | import colorsys
import itertools
import numpy as np
import torch
def random_colors(n, bright=True):
    """Return ``n`` visually distinct random RGB colors.

    Colors are sampled at evenly spaced hues in HSV space (which keeps
    them visually distinct), converted to RGB, and shuffled so adjacent
    indices do not get adjacent hues.

    Args:
        n: number of colors to generate.
        bright: full brightness (1.0) when True, a dimmer 0.7 otherwise.

    Returns:
        List of ``n`` ``(r, g, b)`` tuples with components in [0, 1].
    """
    value = 1.0 if bright else 0.7
    colors = [colorsys.hsv_to_rgb(hue / n, 1, value) for hue in range(n)]
    np.random.shuffle(colors)
    return colors
def apply_mask(image, mask, color, i=1, alpha=0.5):
    """Alpha-blend ``color`` onto ``image`` wherever ``mask`` equals ``i``.

    Args:
        image: H x W x 3 array; left unmodified (a blended copy is returned).
        mask: H x W array treated as binary via the ``mask == i`` test.
        color: per-channel color values, indexable as ``color[0..2]``.
        i: mask value that selects the pixels to tint.
        alpha: blend weight of ``color`` (0 keeps the image, 1 is full color).

    Returns:
        A copy of ``image`` with the selected pixels blended toward ``color``.
    """
    blended = image.copy()
    selected = mask == i
    for channel in range(3):
        original = image[:, :, channel]
        mixed = original * (1 - alpha) + alpha * color[channel]
        blended[:, :, channel] = np.where(selected, mixed, original)
    return blended
def from_coord_to_patch(img, coords, device):
    """Extract image patches at the given coordinates and stack them.

    Args:
        img: (C, H, W) image as a ``torch.Tensor`` or ``np.ndarray``
            (arrays are converted to tensors, sharing memory when possible).
        coords: iterable of ``(x, y, w, h)`` tuples, typically an output of
            ``grouper(n, sliding_window(...))``. All windows must share the
            same ``(w, h)`` so the patches can be stacked.
        device: device the stacked patches are moved to.

    Returns:
        Tensor of shape ``(len(coords), C, w, h)`` on ``device``.
    """
    if isinstance(img, np.ndarray):
        img = torch.from_numpy(img)
    patches = [img[:, x:x + w, y:y + h] for x, y, w, h in coords]
    return torch.stack(patches).to(device)
4958f00172a7990bcba76c17970e13446ea6dcfc | 8,498 | py | Python | backend/src/contaxy/operations/deployment.py | ml-tooling/contaxy | 3317a866c2ef641667a2d318885c8b0f5096b56a | [
"MIT"
] | 3 | 2021-10-17T23:25:05.000Z | 2022-02-03T21:40:59.000Z | backend/src/contaxy/operations/deployment.py | ml-tooling/contaxy | 3317a866c2ef641667a2d318885c8b0f5096b56a | [
"MIT"
] | 14 | 2021-11-09T15:24:29.000Z | 2022-03-11T13:26:04.000Z | backend/src/contaxy/operations/deployment.py | ml-tooling/contaxy | 3317a866c2ef641667a2d318885c8b0f5096b56a | [
"MIT"
] | 3 | 2022-01-27T08:31:57.000Z | 2022-02-11T13:38:00.000Z | from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, List, Literal, Optional
from contaxy.schema import Job, JobInput, ResourceAction, Service, ServiceInput
from contaxy.schema.deployment import DeploymentType
# TODO: update_service functionality
| 29.922535 | 166 | 0.619087 |
4959390b2ca88e67ed8b8674132fbee54a9cccd4 | 15,604 | py | Python | perfkitbenchmarker/providers/openstack/os_virtual_machine.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 3 | 2018-04-28T13:06:14.000Z | 2020-06-09T02:39:44.000Z | perfkitbenchmarker/providers/openstack/os_virtual_machine.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 1 | 2018-03-15T21:01:27.000Z | 2018-03-15T21:01:27.000Z | perfkitbenchmarker/providers/openstack/os_virtual_machine.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 6 | 2019-06-11T18:59:57.000Z | 2021-03-02T19:14:42.000Z | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an OpenStack Virtual Machine.
Regions:
User defined
Machine types, or flavors:
run 'openstack flavor list'
Images:
run 'openstack image list'
"""
import json
import logging
import threading
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine, linux_virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.openstack import os_disk
from perfkitbenchmarker.providers.openstack import os_network
from perfkitbenchmarker.providers.openstack import utils as os_utils
from six.moves import range
NONE = 'None'
VALIDATION_ERROR_MESSAGE = '{0} {1} could not be found.'
FLAGS = flags.FLAGS
| 36.543326 | 80 | 0.692771 |
49594197e0bf3a8d4220d5dcfdcb6644bf95fa9a | 5,933 | py | Python | model.py | kevincho840430/CarND-Behavioral-Cloning-P3-master-1 | 042707a17c6dffb19717737b3f78169428bf31f6 | [
"MIT"
] | null | null | null | model.py | kevincho840430/CarND-Behavioral-Cloning-P3-master-1 | 042707a17c6dffb19717737b3f78169428bf31f6 | [
"MIT"
] | null | null | null | model.py | kevincho840430/CarND-Behavioral-Cloning-P3-master-1 | 042707a17c6dffb19717737b3f78169428bf31f6 | [
"MIT"
] | null | null | null | ## Self-driven car project based on nvidia's CNN model
## Package
#torch = 0.4.1.post2
#torchvision = 0.2.1
#numpy = 1.15.2
#opencv-python =3.4.3
# -*- coding: utf-8 -*-
# Import the Stuff
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import cv2
import numpy as np
import csv
# Step1: Read from the log file
samples = []
with open('../data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader, None)
for line in reader:
samples.append(line)
# Step2: Divide the data into training set and validation set
train_len = int(0.8*len(samples))
valid_len = len(samples) - train_len
train_samples, validation_samples = data.random_split(samples, lengths=[train_len, valid_len])
# Step3a: Define the augmentation, transformation processes, parameters and dataset for dataloader
# Step3b: Creating generator using the dataloader to parallasize the process
transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 255.0) - 0.5)])
params = {'batch_size': 32,
'shuffle': True,
'num_workers': 4}
training_set = Dataset(train_samples, transformations)
training_generator = data.DataLoader(training_set, **params)
validation_set = Dataset(validation_samples, transformations)
validation_generator = data.DataLoader(validation_set, **params)
# Step4: Define the network
# Step5: Define optimizer
model = NetworkLight()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.MSELoss()
# Step6: Check the device and define function to move tensors to that device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device is: ', device)
# Step7: Train and validate network based on maximum epochs defined
max_epochs = 22
for epoch in range(max_epochs):
model.to(device)
# Training
train_loss = 0
model.train()
for local_batch, (centers, lefts, rights) in enumerate(training_generator):
# Transfer to GPU
centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)
# Model computations
optimizer.zero_grad()
datas = [centers, lefts, rights]
for data in datas:
imgs, angles = data
# print("training image: ", imgs.shape)
outputs = model(imgs)
loss = criterion(outputs, angles.unsqueeze(1))
loss.backward()
optimizer.step()
train_loss += loss.data[0].item()
if local_batch % 100 == 0:
print('Loss: %.3f '
% (train_loss/(local_batch+1)))
# Validation
model.eval()
valid_loss = 0
with torch.set_grad_enabled(False):
for local_batch, (centers, lefts, rights) in enumerate(validation_generator):
# Transfer to GPU
centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)
# Model computations
optimizer.zero_grad()
datas = [centers, lefts, rights]
for data in datas:
imgs, angles = data
# print("Validation image: ", imgs.shape)
outputs = model(imgs)
loss = criterion(outputs, angles.unsqueeze(1))
valid_loss += loss.data[0].item()
if local_batch % 100 == 0:
print('Valid Loss: %.3f '
% (valid_loss/(local_batch+1)))
# Step8: Define state and save the model wrt to state
state = {
'model': model.module if device == 'cuda' else model,
}
torch.save(state, 'model.h5')
| 31.062827 | 118 | 0.632901 |
4959b0c95664f6b5e44804e1d10f0d164fcb9038 | 2,335 | py | Python | setup.py | sankethvedula/flowtorch | 44a0f0eff842dd33ca17b01f4e02d8cdda005aa8 | [
"MIT"
] | 29 | 2020-12-19T00:29:42.000Z | 2021-08-12T19:11:47.000Z | setup.py | sankethvedula/flowtorch | 44a0f0eff842dd33ca17b01f4e02d8cdda005aa8 | [
"MIT"
] | 30 | 2020-12-29T04:42:38.000Z | 2021-02-19T22:29:38.000Z | setup.py | sankethvedula/flowtorch | 44a0f0eff842dd33ca17b01f4e02d8cdda005aa8 | [
"MIT"
] | 1 | 2021-05-06T21:25:45.000Z | 2021-05-06T21:25:45.000Z | # Copyright (c) Meta Platforms, Inc
import os
import sys
from setuptools import find_packages, setup
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 7
TEST_REQUIRES = ["numpy", "pytest", "pytest-cov", "scipy"]
DEV_REQUIRES = TEST_REQUIRES + [
"black",
"flake8",
"flake8-bugbear",
"mypy",
"toml",
"usort",
]
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR):
error = (
"Your version of python ({major}.{minor}) is too old. You need "
"python >= {required_major}.{required_minor}."
).format(
major=sys.version_info.major,
minor=sys.version_info.minor,
required_minor=REQUIRED_MINOR,
required_major=REQUIRED_MAJOR,
)
sys.exit(error)
# read in README.md as the long description
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="flowtorch",
description="Normalizing Flows for PyTorch",
author="FlowTorch Development Team",
author_email="info@stefanwebb.me",
license="MIT",
url="https://flowtorch.ai/users",
project_urls={
"Documentation": "https://flowtorch.ai/users",
"Source": "https://www.github.com/facebookincubator/flowtorch",
},
keywords=[
"Deep Learning",
"Bayesian Inference",
"Statistical Modeling",
"Variational Inference",
"PyTorch",
],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
],
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">={}.{}".format(REQUIRED_MAJOR, REQUIRED_MINOR),
install_requires=[
"torch>=1.8.1",
],
setup_requires=["setuptools_scm"],
use_scm_version={
"root": ".",
"relative_to": __file__,
"write_to": os.path.join("flowtorch", "version.py"),
},
packages=find_packages(
include=["flowtorch", "flowtorch.*"],
exclude=["debug", "tests", "website", "examples", "scripts"],
),
extras_require={
"dev": DEV_REQUIRES,
"test": TEST_REQUIRES,
},
)
| 26.534091 | 72 | 0.616702 |
495c1efa0fbbd4eda9cf54a8e5e4784dac2375f0 | 12,923 | py | Python | nn/train.py | julian-carpenter/airynet | 7ad87bbc717107f72d69547e4243373c05dadf70 | [
"MIT"
] | 8 | 2019-03-17T10:45:19.000Z | 2022-01-13T17:36:54.000Z | nn/train.py | julian-carpenter/airynet | 7ad87bbc717107f72d69547e4243373c05dadf70 | [
"MIT"
] | null | null | null | nn/train.py | julian-carpenter/airynet | 7ad87bbc717107f72d69547e4243373c05dadf70 | [
"MIT"
] | 1 | 2019-07-24T05:59:38.000Z | 2019-07-24T05:59:38.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import nn
import tensorflow as tf
from tensorflow.python.client import device_lib
from numpy import min, max
def model_fn(features, labels, mode, params):
    """Model function for airynet network.

    Builds the in-graph multi-tower (one replica per visible GPU) airynet
    model for use with ``tf.estimator.Estimator``.

    Args:
        features: batch of input images; in PREDICT mode a dict with keys
            ``"imgs"`` (images) and ``"lbl"`` (ids).
        labels: ground-truth labels (one-hot for cifar10, multi-label
            otherwise).
        mode: a ``tf.estimator.ModeKeys`` value.
        params: dict carrying the run configuration under ``"config"``.

    Returns:
        A ``tf.estimator.EstimatorSpec`` for the requested mode.
    """
    # In PREDICT mode the input_fn packs images and ids into a single dict.
    if mode == tf.estimator.ModeKeys.PREDICT:
        labels = features["lbl"]
        features = features["imgs"]
    # Named identity ops so these tensors can be fetched/logged by name.
    labels = tf.identity(labels, name="bids")
    features = tf.identity(features, name="images")
    feat_converted = tf.map_fn(
        lambda x: tf.image.convert_image_dtype(x, tf.float32), features)
    tf.summary.image("images", feat_converted, max_outputs=3)
    cfg = params["config"]
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        # Multiply the learning rate by 0.1 at 30, 60, 120 and 150 epochs.
        batches_per_epoch = cfg.dataset_size / cfg.batch_size
        boundaries = [
            int(batches_per_epoch * epoch) for epoch in [30, 60, 120, 150]
        ]
        # Scale the learning rate linearly with the batch size. When the
        # batch size is 128, the learning rate should be 0.1.
        lr = cfg.lr * cfg.batch_size / 128
        values = [lr * decay for decay in [1, 0.1, 0.01, 1e-3, 1e-4]]
        learning_rate = tf.train.piecewise_constant(
            tf.cast(global_step, tf.int32), boundaries, values)
        # Create a tensor named learning_rate for logging purposes
        tf.identity(learning_rate, name="learning_rate")
        tf.summary.scalar("learning_rate", learning_rate)
        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                               momentum=cfg.gamma,
                                               use_nesterov=True)
    # One tower is built per GPU; per-tower gradients are collected in
    # tower_grads and averaged below before a single weight update.
    avail_gpus = get_available_gpus()
    tower_grads = []
    reuse = False
    with tf.variable_scope(tf.get_variable_scope()):
        print(cfg.resnet_size, cfg.num_classes, cfg.data_format,
              cfg.relu_leakiness)
        network = select_architecture(cfg.airynet_type)
        for dev in avail_gpus:
            print("Building inference on: {}".format(dev))
            # NOTE(review): assumes device names end in their index (e.g.
            # "/device:GPU:1"), so every tower after GPU:0 reuses the
            # variables created by the first tower.
            if int(dev[-1]) != 0:
                # set scope to reuse if more than one gpu are available
                tf.get_variable_scope().reuse_variables()
                reuse = True
            with tf.device(dev), tf.name_scope(
                    dev.replace(":", "_").replace("/", "")):
                logits = network(features, mode == tf.estimator.ModeKeys.TRAIN,
                                 reuse)
                if mode == tf.estimator.ModeKeys.TRAIN:
                    # cifar10 is single-label (softmax); other datasets are
                    # treated as multi-label (per-class sigmoid).
                    if cfg.dataset == "cifar10":
                        cross_entropy = tf.losses.softmax_cross_entropy(
                            logits=logits, onehot_labels=labels)
                    else:
                        cross_entropy = tf.losses.sigmoid_cross_entropy(
                            logits=logits, multi_class_labels=labels)
                    # get l1_regularizer loss
                    reg_penalty = tf.reduce_mean(
                        tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
                    # get trainable variables
                    trainable_variables = tf.get_collection(
                        tf.GraphKeys.TRAINABLE_VARIABLES)
                    # get weight decay_loss
                    if cfg.use_weight_decay:
                        loss_weight_decay = tf.reduce_sum(
                            tf.stack([
                                tf.nn.l2_loss(i) for i in trainable_variables
                            ])) * cfg.weight_decay
                    else:
                        loss_weight_decay = 0.
                    # define loss, consider to add the weight_decay
                    loss = cross_entropy + reg_penalty + loss_weight_decay
                    comp_grad_op = optimizer.compute_gradients(
                        loss, trainable_variables)
                    tower_grads.append(comp_grad_op)
    if mode == tf.estimator.ModeKeys.TRAIN:
        grads = average_gradients(tower_grads, tf.get_default_graph())
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Batch norm requires update ops to be
        # added as a dependency to the train_op
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.apply_gradients(grads, global_step)
    else:
        # EVAL/PREDICT: no optimization step; loss is a placeholder value.
        train_op = None
        loss = tf.constant(0.)
    # Output activation matches the loss used above: softmax for the
    # single-label cifar10 case, sigmoid for multi-label datasets.
    if cfg.dataset == "cifar10":
        fc_out_activation_fun = tf.nn.softmax
        fc_out_activation_name = "softmax_output"
    else:
        fc_out_activation_fun = tf.nn.sigmoid
        fc_out_activation_name = "sigmoid_output"
    # "classes" are the rounded activations, i.e. hard 0/1 decisions.
    predictions = {
        "classes":
        tf.round(
            fc_out_activation_fun(logits,
                                  name=fc_out_activation_name + "_classes")),
        "probabilities":
        fc_out_activation_fun(logits, name=fc_out_activation_name),
        "bunchID":
        labels
    }
    print(logits.get_shape())
    if mode == tf.estimator.ModeKeys.PREDICT:
        # We calculate the gradients between the output and tha last
        # convolutional layer for the GradCam
        graph = tf.get_default_graph()
        conv_ = graph.get_operation_by_name(
            "device_GPU_0/fourth_block/last_block_before_fc").outputs[0]
        out_ = graph.get_operation_by_name(
            "device_GPU_0/nn_out/final_dense").outputs[0]
        out_ = tf.nn.sigmoid(out_)
        # Zero out the activations of classes predicted negative.
        out_ *= tf.round(out_)
        heat_loss_ = []
        grads = []
        norm = []
        normed_grads = []
        # Build one normalized gradient map per output class.
        for class_idx in range(out_.get_shape()[-1]):
            print("Building GradCam for class: {}".format(class_idx))
            heat_loss_.append(
                tf.reduce_mean(out_[:, class_idx],
                               name="class_loss_{}".format(class_idx)))
            # NOTE(review): the whole ``heat_loss_`` list is passed here, so
            # the gradient is taken w.r.t. the sum of all losses accumulated
            # so far — confirm this is intended rather than
            # ``heat_loss_[class_idx]``.
            curr_grad = tf.gradients(
                heat_loss_, conv_, name="class_grads_{}".format(class_idx))[0]
            grads.append(curr_grad)
            norm.append(
                tf.sqrt(tf.reduce_mean(tf.square(curr_grad)),
                        name="class_norm_{}".format(class_idx)))
            # Small epsilon guards against division by a zero norm.
            normed_grads.append(
                tf.divide(tf.convert_to_tensor(grads[class_idx]),
                          tf.convert_to_tensor(norm[class_idx]) +
                          tf.constant(1e-5),
                          name="normalized_grads_{}".format(class_idx)))
        tf.identity(tf.convert_to_tensor(normed_grads),
                    name="normalized_grads")
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Create some metrics for logging purposes
    lbl = tf.to_float(labels)
    prediction = predictions["classes"]
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Create a tensor named cross_entropy for logging purposes.
        tf.identity(cross_entropy, name="cross_entropy")
        tf.summary.scalar("metrics/cross_entropy", cross_entropy)
        tf.summary.scalar("metrics/reg_penalty", reg_penalty)
        # tf.summary.scalar("metrics/weight_decay_loss", weight_decay_loss)
        # Calculate the confusion matrix
        confusion_matr = tf.to_float(
            tf.confusion_matrix(tf.reshape(lbl, [-1]),
                                tf.reshape(prediction, [-1]),
                                num_classes=2))
        tf.identity(confusion_matr, name="confusion_matr")
        # Matthews Correlation Coefficient
        TP = confusion_matr[1][1]
        TN = confusion_matr[0][0]
        FP = confusion_matr[0][1]
        FN = confusion_matr[1][0]
        MCC = (TP * TN - FP * FN) / (tf.sqrt(
            (TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)))
        tf.identity(MCC, name="mcc")
        tf.summary.scalar("metrics/mcc", MCC)
        # Stack lbl and predictions as image for the summary
        lbl_vs_prediction = tf.multiply(tf.ones_like(lbl), 255)
        lbl_vs_prediction = tf.expand_dims(tf.stack([
            tf.multiply(lbl_vs_prediction, lbl),
            tf.multiply(lbl_vs_prediction, prediction),
            tf.zeros_like(lbl)
        ],
            axis=-1),
            axis=0)
        tf.identity(lbl_vs_prediction, name="lbl_vs_prediction")
        tf.summary.image("metrics/lbl_vs_prediction", lbl_vs_prediction)
        lbl_image = tf.expand_dims(tf.expand_dims(lbl, axis=-1), axis=0)
        tf.identity(lbl_image, name="lbl_image")
        tf.summary.image("metrics/lbl_image", lbl_image)
        prediction_image = tf.expand_dims(tf.expand_dims(prediction, axis=-1),
                                          axis=0)
        tf.identity(prediction_image, name="prediction_image")
        tf.summary.image("metrics/prediction_image", prediction_image)
    accuracy = tf.metrics.accuracy(lbl, prediction)
    tf.identity(accuracy[1], name="train_accuracy")
    tf.summary.scalar("metrics/train_accuracy", accuracy[1])
    eval_tp = tf.metrics.true_positives(lbl, prediction)
    eval_fp = tf.metrics.false_positives(lbl, prediction)
    eval_fn = tf.metrics.false_negatives(lbl, prediction)
    eval_precision = tf.metrics.precision(lbl, prediction)
    eval_mean_per_class = tf.metrics.mean_per_class_accuracy(
        lbl, prediction, cfg.num_classes)
    metrics = {
        "accuracy": accuracy,
        "mean_per_class_accuracy": eval_mean_per_class,
        "precision": eval_precision,
        "true_positives": eval_tp,
        "false_positives": eval_fp,
        "false_negatives": eval_fn
    }
    return tf.estimator.EstimatorSpec(mode=mode,
                                      predictions=predictions,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=metrics)
def get_available_gpus():
    """List the names of all GPU devices visible to TensorFlow.

    Returns:
        List of device name strings (e.g. ``"/device:GPU:0"``).
    """
    return [proto.name
            for proto in device_lib.list_local_devices()
            if proto.device_type == "GPU"]
def average_gradients(tower_grads, graph):
    """
    Calculate the average gradient for each shared variable across all towers.
    Note that this function provides a synchronization point across all towers.
    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer
        list is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
        List of pairs of (gradient, variable) where the gradient has been
        averaged across all towers.
    """
    # NOTE(review): assumes every tower produced a non-None gradient for
    # every variable; a None entry would make tf.expand_dims fail.
    with graph.name_scope("averaging_gradients"):
        average_grads = []
        # zip(*tower_grads) regroups per variable: the i-th tuple holds
        # variable i's (grad, var) pair from every tower.
        for grad_and_vars in zip(*tower_grads):
            # Note that each grad_and_vars looks like the following:
            # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
            grads = []
            for g_idx, _ in grad_and_vars:
                # Add 0 dimension to the gradients to represent the tower.
                expanded_g = tf.expand_dims(g_idx, 0)
                # Append on a "tower" dimension which we will
                # average over below.
                grads.append(expanded_g)
            # Average over the "tower" dimension.
            grad = tf.concat(grads, 0)
            grad = tf.reduce_mean(grad, 0)
            # Keep in mind that the Variables are redundant because they are
            # shared across towers. So .. we will just return the first
            # tower"s pointer to the Variable.
            vals = grad_and_vars[0][1]
            grad_and_var = (grad, vals)
            average_grads.append(grad_and_var)
        return average_grads
495c925b5444da2267f89a8db3c5a875669b6a75 | 310 | py | Python | retrobiocat_web/app/db_analysis/__init__.py | ihayhurst/RetroBioCat | d674897459c0ab65faad5ed3017c55cf51bcc020 | [
"MIT"
] | 9 | 2020-12-01T16:33:02.000Z | 2022-01-19T20:02:42.000Z | retrobiocat_web/app/db_analysis/__init__.py | ihayhurst/RetroBioCat | d674897459c0ab65faad5ed3017c55cf51bcc020 | [
"MIT"
] | 4 | 2020-10-02T14:38:32.000Z | 2021-08-02T09:23:58.000Z | retrobiocat_web/app/db_analysis/__init__.py | ihayhurst/RetroBioCat | d674897459c0ab65faad5ed3017c55cf51bcc020 | [
"MIT"
] | 6 | 2021-01-14T07:48:36.000Z | 2022-03-20T17:34:27.000Z | from flask import Blueprint
bp = Blueprint('db_analysis',
__name__,
template_folder='templates',
static_folder='static',
static_url_path='/db_analysis/static'
)
from retrobiocat_web.app.db_analysis.routes import bioinformatics, ssn
| 25.833333 | 70 | 0.616129 |
495d8b60f3e72589eb60ba4876bee12c223e7de1 | 10,249 | py | Python | tasks.py | pycopia/devtest | 9ec93045ba4bab5b20ce99dc61cebd5b5a234d01 | [
"Apache-2.0"
] | null | null | null | tasks.py | pycopia/devtest | 9ec93045ba4bab5b20ce99dc61cebd5b5a234d01 | [
"Apache-2.0"
] | null | null | null | tasks.py | pycopia/devtest | 9ec93045ba4bab5b20ce99dc61cebd5b5a234d01 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3.9
"""Tasks file used by the *invoke* command.
This simplifies some common development tasks.
Run these tasks with the `invoke` tool.
"""
from __future__ import annotations
import sys
import os
import shutil
import getpass
from glob import glob
from pathlib import Path
import keyring
import semver
from setuptools_scm import get_version
from invoke import task, run, Exit
SIGNERS = ["keith"]
PYTHONBIN = os.environ.get("PYTHONBIN", sys.executable)
# Put the path in quotes in case there is a space in it.
PYTHONBIN = f'"{PYTHONBIN}"'
GPG = "gpg2"
CURRENT_USER = getpass.getuser()
# Putting pypi info here eliminates the need for user-private ~/.pypirc file.
PYPI_HOST = "upload.pypi.org"
PYPI_URL = f"https://{PYPI_HOST}/legacy/"
PYPI_USER = "__token__"
PYPI_INDEX = f"{PYPI_URL}simple"
# Helper functions follow.
def get_virtualenv():
    """Return the active virtualenv path from ``$VIRTUAL_ENV``, or None.

    The value is returned only when it points at an existing directory;
    when the variable is unset, empty, or stale, the result is None.
    """
    candidate = os.environ.get("VIRTUAL_ENV")
    if not candidate:
        return None
    return candidate if os.path.isdir(candidate) else None
def get_tags():
    """Return this repo's semver release tags (``v*``), sorted ascending.

    Tags whose text after the leading ``v`` is not valid semver are
    silently skipped.
    """
    result = run('git tag -l "v*"', hide="out")
    versions = []
    for tag in result.stdout.split():
        try:
            versions.append(semver.parse_version_info(tag[1:]))
        except ValueError:
            continue
    return sorted(versions)
def get_pypi_token():
    """Fetch the PyPI upload token from the system keyring.

    Raises:
        Exit: when no credential is stored for PYPI_HOST/PYPI_USER.
    """
    credential = keyring.get_credential(PYPI_HOST, PYPI_USER)
    if credential:
        return credential.password
    raise Exit("You must set the pypi token with the set-pypi-token target.", 1)
def get_suffix():
    """Return PYTHONBIN's extension-module filename suffix.

    Example: ``.cpython-39-x86_64-linux-gnu.so``.
    """
    cmd = f'{PYTHONBIN} -c \'import sysconfig; print(sysconfig.get_config_vars()["EXT_SUFFIX"])\''  # noqa
    result = run(cmd, hide=True)
    return result.stdout.strip()
def resolve_path(base, p):
    """Join path-like ``p`` onto ``base`` and return the result as a string."""
    return str(base / Path(p))
def find_git_base():
    """Find the base directory of this git repo.

    Walks upward from the current working directory until a ``.git``
    entry is found. The git status output is always relative to this
    directory.

    Returns:
        Absolute ``Path`` of the repository root.

    Raises:
        Exit: when no enclosing directory contains ``.git``.
    """
    start = Path.cwd().resolve()
    while True:
        if (start / ".git").exists():
            return start
        if start.parent == start:
            # Reached the filesystem root without finding a repo. The old
            # ``while start`` condition could never exit here: a Path is
            # always truthy and the root is its own parent, so the loop
            # spun forever instead of raising.
            break
        start = start.parent
    raise Exit("Not able to find git repo base.")
def get_modified_files(untracked):
    """Find the list of modified and, optionally, untracked Python files.
    If `untracked` is True, also include untracked Python files.
    Paths are returned joined onto the repo base, because ``git status``
    reports them relative to the repository root, not the cwd.
    """
    filelist = []
    gitbase = find_git_base()
    # -z delimits entries with NUL so filenames containing spaces parse
    # reliably; --porcelain=1 gives the stable two-letter status codes.
    gitout = run('git status --porcelain=1 -z', hide=True)
    for line in gitout.stdout.split("\0"):
        if line:
            # Each entry is "XY <path>"; only Python files are of interest.
            if not line.endswith(".py"):
                continue
            # " M": modified in the working tree (not staged).
            if line[0:2] == " M":
                filelist.append(resolve_path(gitbase, line[3:]))
            # "??": untracked, included only on request.
            if untracked and line[0:2] == "??":
                filelist.append(resolve_path(gitbase, line[3:]))
    return filelist
495dad09c3d51139d0567841ccbcb4d16adb5840 | 3,846 | py | Python | sandbox/lib/jumpscale/Jumpscale/servers/gedis/tests/3_threebot_redis_registration.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | sandbox/lib/jumpscale/Jumpscale/servers/gedis/tests/3_threebot_redis_registration.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | sandbox/lib/jumpscale/Jumpscale/servers/gedis/tests/3_threebot_redis_registration.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | from Jumpscale import j
from io import BytesIO
import binascii
def main(self):
"""
kosmos -p 'j.servers.gedis.test("threebot_redis_registration")'
"""
####THREEBOT REGISTRATION
phonebook = j.threebot.package.phonebook.client_get()
if j.sal.nettools.tcpPortConnectionTest("www.google.com", 443):
phonebook.actors.phonebook.wallet_create("test")
j.data.nacl.configure(name="client_test", generate=True, interactive=False)
client_nacl = j.data.nacl.get(name="client_test")
cl = j.clients.redis.get(port=8901)
threebot_info = register_threebot_redis()
threebot_info2 = query_threebot_redis(threebot_info["id"])
assert threebot_info == threebot_info2
print("**DONE**")
| 32.871795 | 118 | 0.607904 |
495f237d0412eaba51c51ff1ebcf25e4b6ae6465 | 2,803 | py | Python | font/gen.py | smaji-org/cjkv_info_sample | 12440b938a58b2384a3c9d11c0897dd4101d6fe6 | [
"MIT"
] | null | null | null | font/gen.py | smaji-org/cjkv_info_sample | 12440b938a58b2384a3c9d11c0897dd4101d6fe6 | [
"MIT"
] | null | null | null | font/gen.py | smaji-org/cjkv_info_sample | 12440b938a58b2384a3c9d11c0897dd4101d6fe6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding:utf-8 -*-
import os
import argparse
import glob
from functools import partial
import fontforge
import psMat
import source
opt_parser= argparse.ArgumentParser()
opt_parser.add_argument("--cjkv_info", type= str,
help= u"the path of cjkv_info")
opt_parser.add_argument("--region", type= str,
help= u"the region from where to select characters")
opt_parser.add_argument("--start", type= partial(int, base=16),
help= u"the start point of unicode")
opt_parser.add_argument("--end", type= partial(int, base=16),
help= u"the end point of unicode")
opt_parser.add_argument("--name", type= str,
help= u"the name of the new font")
opt_parser.add_argument("--adjust", type= int,
help= u"adjust the position of the outline")
opt_parser.add_argument("--output", type= str,
help= u"the path and filename of the new font")
opts= setup_opt()
src_files= glob.glob(os.path.join(opts.cjkv_info, "data", "*", "*.src"))
src_files= filter(filter_src, src_files)
newfont= fontforge.font()
newfont.em= 1024
newfont.fontname= opts.name
for src_file in src_files:
code= get_code(src_file);
glyph= newfont.createChar(code)
(root, ext)= os.path.splitext(src_file)
glyph.importOutlines(root + ".svg")
glyph.transform(psMat.translate(0, opts.adjust))
newfont.generate(os.path.join(opts.output[0], opts.output[1]))
| 26.951923 | 72 | 0.662861 |
496001f2e20c60b98e9d4d0701aee95ac8df87b1 | 3,692 | py | Python | alarm_control_panel.py | rs1932/homeassistant-ring_alarm_component | b65b8ee1bc7e7408c3bc1adb6fd4e3f4ebf330d6 | [
"Apache-2.0"
] | 4 | 2019-09-07T23:15:54.000Z | 2020-04-20T22:47:37.000Z | alarm_control_panel.py | rs1932/homeassistant-ring_alarm_component | b65b8ee1bc7e7408c3bc1adb6fd4e3f4ebf330d6 | [
"Apache-2.0"
] | 3 | 2019-09-10T00:03:24.000Z | 2020-10-02T13:26:08.000Z | alarm_control_panel.py | rs1932/homeassistant-ring_alarm_component | b65b8ee1bc7e7408c3bc1adb6fd4e3f4ebf330d6 | [
"Apache-2.0"
] | 3 | 2019-11-19T11:03:01.000Z | 2021-05-12T20:11:16.000Z | import logging
import pandas as pd
from homeassistant.components.alarm_control_panel import (
AlarmControlPanel
)
from homeassistant.core import callback
from homeassistant.util import convert
from .ringalarmdevice import RingAlarmDevice
from .constants import *
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED
)
_LOGGER = logging.getLogger(__name__)
| 31.288136 | 93 | 0.54117 |
4960a6159309ea3987628a9491068ccbc097d8ac | 4,224 | py | Python | bot/game.py | thequeenofspades/AlphaGOLADZero | f057f249bcda21ef570a5d8d8753544bf743aaec | [
"Apache-2.0"
] | 1 | 2018-11-01T01:56:26.000Z | 2018-11-01T01:56:26.000Z | bot/game.py | thequeenofspades/AlphaGOLADZero | f057f249bcda21ef570a5d8d8753544bf743aaec | [
"Apache-2.0"
] | 7 | 2018-02-17T00:35:26.000Z | 2018-06-06T23:55:22.000Z | bot/game.py | thequeenofspades/AlphaGOLADZero | f057f249bcda21ef570a5d8d8753544bf743aaec | [
"Apache-2.0"
] | null | null | null | from sys import stdin, stdout, stderr
import traceback
import time
from player import Player
from field.field import Field
| 33 | 98 | 0.529356 |
4960c8757588886d5cee0b290cbf124fc76beb18 | 552 | py | Python | config/ConfigSices.py | atosborges00/sereno_bot | 06bedb02847eff050adeadb6bcc5440bcd2283c3 | [
"FSFAP"
] | null | null | null | config/ConfigSices.py | atosborges00/sereno_bot | 06bedb02847eff050adeadb6bcc5440bcd2283c3 | [
"FSFAP"
] | null | null | null | config/ConfigSices.py | atosborges00/sereno_bot | 06bedb02847eff050adeadb6bcc5440bcd2283c3 | [
"FSFAP"
] | null | null | null | from os import path
from config.ConfigPaths import ConfigPaths
| 26.285714 | 83 | 0.697464 |
4961b43029a0917649b5f3a648e7d599051b3b4f | 757 | py | Python | main.py | leonli/codename-gen | 53a66124184e9691d22bfd7db6274f1d44fe0a75 | [
"MIT"
] | null | null | null | main.py | leonli/codename-gen | 53a66124184e9691d22bfd7db6274f1d44fe0a75 | [
"MIT"
] | null | null | null | main.py | leonli/codename-gen | 53a66124184e9691d22bfd7db6274f1d44fe0a75 | [
"MIT"
] | null | null | null | import click
import random
from pyfiglet import Figlet
from termcolor import colored, cprint
import imagenet
# Script entry point: generate a codename only when executed directly,
# not on import.  NOTE(review): codename_gen is not defined in the
# visible portion of this file -- presumably defined above; confirm.
if __name__ == '__main__':
    codename_gen()
| 32.913043 | 95 | 0.611625 |
4961cb44515f6694bce4182b84680ae488d272d1 | 14,882 | py | Python | binder/plugins/views/userview.py | asma-oueslati/django-binder | 0a16a928664b4be2b2b8e3f5f65c29301f0096fe | [
"MIT"
] | null | null | null | binder/plugins/views/userview.py | asma-oueslati/django-binder | 0a16a928664b4be2b2b8e3f5f65c29301f0096fe | [
"MIT"
] | null | null | null | binder/plugins/views/userview.py | asma-oueslati/django-binder | 0a16a928664b4be2b2b8e3f5f65c29301f0096fe | [
"MIT"
] | null | null | null | import logging
import json
from abc import ABCMeta, abstractmethod
from django.contrib import auth
from django.contrib.auth import update_session_auth_hash, password_validation
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import ValidationError, PermissionDenied
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from binder.permissions.views import no_scoping_required
from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, \
BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, \
BinderNotFound
from binder.router import list_route, detail_route
from binder.json import JsonResponse
from binder.views import annotate
logger = logging.getLogger(__name__)
class UserViewMixIn(UserBaseMixin):
	"""Binder view mix-in adding user-management behaviour.

	Layers superuser protection, group/username store hooks, soft
	deletion via ``is_active`` and password-reset handling on top of
	``UserBaseMixin``.
	"""
	__metaclass__ = ABCMeta
	# NOTE(review): presumably disabled because request bodies to this
	# view can contain plaintext passwords -- confirm.
	log_request_body = False
	# Generator used to create and validate password-reset tokens.
	token_generator = default_token_generator
	default_authentication_backend = None
	def _require_model_perm(self, perm_type, request, pk=None):
		"""
		Overwrite the _require_model_perm, to make sure that you can not modify a superuser as non superuser
		We need to be very careful about permission assumptions after this point
		"""
		# If the user is trying to change a superuser and is not a superuser, disallow
		if pk and self.model.objects.get(pk=int(pk)).is_superuser and not request.user.is_superuser:
			# Maybe BinderRequestError?
			raise BinderForbidden('modify superuser', request.user)
		# Everything normal
		return super()._require_model_perm(perm_type, request, pk)
	def _store__groups(self, obj, field, value, request, pk=None):
		"""
		Store the groups of the user.
		If we get here, the user might not actually have admin permissions;
		If the user does not have user change perms, disallow setting groups.
		"""
		try:
			self._require_model_perm('changegroups', request)
			return self._store_field(obj, field, value, request, pk=pk)
		except BinderForbidden: # convert to read-only error, so the field is ignored
			raise BinderReadOnlyFieldError(self.model.__name__, field)
	def get_users(self, request, username):
		"""
		Given a username, return matching user(s) who should receive a reset.
		This allows subclasses to more easily customize the default policies
		that prevent inactive users and users with unusable passwords from
		resetting their password.
		Copied from django.contrib.auth.forms.PasswordResetForm
		"""
		# Case-insensitive username match; only active users qualify.
		active_users = self.model._default_manager.filter(**{
			self.model.USERNAME_FIELD + '__iexact': username,
			'is_active': True,
		})
		# Generator: also drop users whose password was marked unusable.
		return (u for u in active_users if u.has_usable_password())
	def _store__username(self, user, field, value, request, pk=None):
		"""
		Makes sure the username is always stored as a lowercase
		"""
		if not isinstance(value, str):
			raise BinderFieldTypeError(self.model.__name__, field)
		return self._store_field(user, field, value.lower(), request, pk=pk)
	def filter_deleted(self, queryset, pk, deleted, request=None):
		"""
		Can be used to filter deleted users, or unfilter them.
		"""
		# Detail requests (pk given) and deleted=true see everything.
		if pk or deleted == 'true':
			return queryset
		# Default: hide soft-deleted (inactive) users.
		if deleted is None:
			return queryset.filter(is_active=True)
		if deleted == 'only':
			return queryset.filter(is_active=False)
		raise BinderRequestError(_('Invalid value: deleted=%s.') % request.GET.get('deleted'))
	def soft_delete(self, user, undelete=False, request=None):
		"""
		Allows the user to be soft deleted, and undeleted. What actually needs to be done on soft deletion
		can be implemented in
		_after_soft_delete
		"""
		# Soft deletion is modelled as is_active=False; raise when the
		# requested transition is a no-op (already (un)deleted).
		try:
			if not user.is_active and not undelete:
				raise BinderIsDeleted()
			if not not user.is_active and undelete:
				raise BinderIsNotDeleted()
		except AttributeError:
			raise BinderMethodNotAllowed()
		user.is_active = undelete
		user.save()
		self._after_soft_delete(request, user, undelete)
	def _reset_pass_for_user(self, request, user_id, token, password):
		"""
		Helper function that actually resets the password for an user
		"""
		try:
			user = self.model._default_manager.get(pk=user_id)
		except (TypeError, ValueError, OverflowError, self.model.DoesNotExist):
			user = None
		# Invalid user id or stale/forged token: behave as "not found".
		if user is None or not self.token_generator.check_token(user, token):
			raise BinderNotFound()
		logger.info('login for {}/{} via successful password reset'.format(user.id, user))
		# Enforce the configured password validators before accepting.
		try:
			password_validation.validate_password(password, user)
		except ValidationError as ve:
			raise BinderValidationError({'password': ve.messages})
		user.set_password(password)
		user.save()
		# Log the user in immediately with their freshly set password.
		self.auth_login(request, user)
		return self.respond_with_user(request, user.id)
| 27.009074 | 118 | 0.73216 |
4961f5ae237c556abd210443e12861307b7068fa | 362 | py | Python | libra_client/crypto/x25519.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | null | null | null | libra_client/crypto/x25519.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | null | null | null | libra_client/crypto/x25519.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | 1 | 2022-01-05T06:49:42.000Z | 2022-01-05T06:49:42.000Z | from libra_client.canoser import DelegateT, BytesT
# Fixed sizes, in bytes, used by the X25519 Diffie-Hellman scheme
# (all three are 32 bytes per RFC 7748).
# Size of an X25519 private key
PRIVATE_KEY_SIZE = 32
# Size of an X25519 public key
PUBLIC_KEY_SIZE = 32
# Size of an X25519 shared secret
SHARED_SECRET_SIZE = 32 | 22.625 | 50 | 0.787293 |
49633e3a1fe78865b9e181ac00df94f57d6194b3 | 3,303 | py | Python | plenum/test/plugin/demo_plugin/main.py | SchwiftyRick/indy-plenum | d23b99423eb805971e50446d7e89ada892aa6811 | [
"Apache-2.0"
] | null | null | null | plenum/test/plugin/demo_plugin/main.py | SchwiftyRick/indy-plenum | d23b99423eb805971e50446d7e89ada892aa6811 | [
"Apache-2.0"
] | 1 | 2021-07-14T17:10:04.000Z | 2021-07-14T17:10:04.000Z | plenum/test/plugin/demo_plugin/main.py | SchwiftyRick/indy-plenum | d23b99423eb805971e50446d7e89ada892aa6811 | [
"Apache-2.0"
] | 2 | 2021-02-19T15:36:50.000Z | 2021-07-20T11:37:54.000Z | from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.server.client_authn import CoreAuthNr
from plenum.test.plugin.demo_plugin import AUCTION_LEDGER_ID
from plenum.test.plugin.demo_plugin.batch_handlers.auction_batch_handler import AuctionBatchHandler
from plenum.test.plugin.demo_plugin.config import get_config
from plenum.test.plugin.demo_plugin.request_handlers.auction_end_handler import AuctionEndHandler
from plenum.test.plugin.demo_plugin.request_handlers.auction_start_handler import AuctionStartHandler
from plenum.test.plugin.demo_plugin.request_handlers.get_bal_handler import GetBalHandler
from plenum.test.plugin.demo_plugin.request_handlers.place_bid_handler import PlaceBidHandler
from plenum.test.plugin.demo_plugin.storage import get_auction_hash_store, \
get_auction_ledger, get_auction_state
| 58.982143 | 101 | 0.693612 |
496648c5898f258ebf19c8b06ad31502f0290680 | 5,213 | py | Python | biobakery_workflows/document_templates/quality_control_paired_dna_rna.template.py | shbrief/biobakery_workflows | 2037f45caa8e4af9a40b5c1d2886cde15bc00381 | [
"MIT"
] | 1 | 2020-11-16T20:04:15.000Z | 2020-11-16T20:04:15.000Z | biobakery_workflows/document_templates/quality_control_paired_dna_rna.template.py | mlwright97/biobakery_workflows | b3e74f25253d7354bebd02936ac25986281e85d6 | [
"MIT"
] | null | null | null | biobakery_workflows/document_templates/quality_control_paired_dna_rna.template.py | mlwright97/biobakery_workflows | b3e74f25253d7354bebd02936ac25986281e85d6 | [
"MIT"
] | null | null | null |
#+ echo=False
import numpy
from biobakery_workflows import utilities, visualizations, files
from anadama2 import PweaveDocument
document=PweaveDocument()
# get the variables for this document generation task
vars = document.get_vars()
# determine the document format
pdf_format = True if vars["format"] == "pdf" else False
# read in the DNA samples
(dna_paired_columns, dna_orphan_columns), dna_samples, (dna_paired_data, dna_orphan_data) = visualizations.qc_read_counts(document, vars["dna_read_counts"])
# read in the RNA samples
(rna_paired_columns, rna_orphan_columns), rna_samples, (rna_paired_data, rna_orphan_data) = visualizations.qc_read_counts(document, vars["rna_read_counts"])
#' # Quality Control
#' <% visualizations.ShotGun.print_qc_intro_caption("{} DNA and {} RNA ".format(len(dna_samples),len(rna_samples)), rna_paired_columns[2:], paired=True) %>
#+ echo=False
#' ## DNA Samples Quality Control
#' ### DNA Samples Tables of Filtered Reads
#+ echo=False
# Table of paired-end read counts per DNA sample at each filtering step.
document.write_table(["# Sample"]+dna_paired_columns, dna_samples, dna_paired_data,
    files.ShotGunVis.path("qc_counts_paired",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_paired_data, dna_samples,
    dna_paired_columns, "DNA Paired end reads", files.ShotGunVis.path("qc_counts_paired"),
    format_data_comma=True)
#' <%= table_message %>
#+ echo=False
# Same table for the orphan (unpaired) reads.
document.write_table(["# Sample"]+dna_orphan_columns, dna_samples, dna_orphan_data,
    files.ShotGunVis.path("qc_counts_orphan",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_orphan_data, dna_samples,
    dna_orphan_columns, "DNA Orphan reads", files.ShotGunVis.path("qc_counts_orphan"),
    format_data_comma=True)
#' <%= table_message %>
#' <% if pdf_format: print("\clearpage") %>
#+ echo=False
# plot the microbial reads ratios
dna_microbial_reads, dna_microbial_labels = utilities.microbial_read_proportion_multiple_databases(
    dna_paired_data, dna_paired_columns, dna_orphan_data)
document.write_table(["# Sample"]+dna_microbial_labels, dna_samples,
    dna_microbial_reads, files.ShotGunVis.path("microbial_counts",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_microbial_reads, dna_samples,
    dna_microbial_labels, "DNA microbial read proportion",
    files.ShotGunVis.path("microbial_counts"))
#' <%= visualizations.ShotGun.captions["microbial_ratios"] %>
#' <%= table_message %>
#' ### DNA Samples Plots of Filtered Reads
#+ echo=False
# Grouped bar chart of the paired-end counts (y axis in millions).
document.plot_grouped_barchart(numpy.transpose(dna_paired_data), row_labels=dna_paired_columns,
    column_labels=dna_samples, title="DNA Paired end reads", ylabel="Read count (in millions)",
    legend_title="Filter", yaxis_in_millions=True)
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(dna_orphan_data), row_labels=dna_orphan_columns,
    column_labels=dna_samples, title="DNA Orphan reads", ylabel="Read count (in millions)",
    legend_title="Filter", yaxis_in_millions=True)
#' ## RNA Samples Quality Control
#' ### RNA Samples Tables of Filtered Reads
#+ echo=False
# RNA half of the report: mirrors the DNA tables/plots above, using the
# rna_* count data read in at the top of the template.
document.write_table(["# Sample"]+rna_paired_columns, rna_samples, rna_paired_data,
    files.ShotGunVis.path("rna_qc_counts_paired",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_paired_data, rna_samples,
    rna_paired_columns, "RNA Paired end reads", files.ShotGunVis.path("rna_qc_counts_paired"),
    format_data_comma=True)
#' <%= table_message %>
#+ echo=False
document.write_table(["# Sample"]+rna_orphan_columns, rna_samples, rna_orphan_data,
    files.ShotGunVis.path("rna_qc_counts_orphan",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_orphan_data, rna_samples,
    rna_orphan_columns, "RNA Orphan reads", files.ShotGunVis.path("rna_qc_counts_orphan"),
    format_data_comma=True)
#' <%= table_message %>
#' <% if pdf_format: print("\clearpage") %>
#+ echo=False
# write and plot the microbial reads ratios
rna_microbial_reads, rna_microbial_labels = utilities.microbial_read_proportion_multiple_databases(
    rna_paired_data, rna_paired_columns, rna_orphan_data)
document.write_table(["# Sample"]+rna_microbial_labels, rna_samples,
    rna_microbial_reads, files.ShotGunVis.path("rna_microbial_counts",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_microbial_reads, rna_samples,
    rna_microbial_labels, "RNA microbial read proportion",
    files.ShotGunVis.path("rna_microbial_counts"))
#' <%= visualizations.ShotGun.captions["microbial_ratios"] %>
#' <%= table_message %>
#' ### RNA Samples Plots of Filtered Reads
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(rna_paired_data), row_labels=rna_paired_columns,
    column_labels=rna_samples, title="RNA Paired end reads", ylabel="Read count (in millions)",
    legend_title="Filter", yaxis_in_millions=True)
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(rna_orphan_data), row_labels=rna_orphan_columns,
    column_labels=rna_samples, title="RNA Orphan reads", ylabel="Read count (in millions)",
    legend_title="Filter", yaxis_in_millions=True)
| 38.330882 | 156 | 0.782851 |
49665a0b0e4dd98e4c598bd1960650361ca30dc7 | 1,604 | py | Python | Other_notebooks/Shapefile_Demo.py | gamedaygeorge/datacube-applications-library | 1b6314ee3465f9f17930391a4c241e981a9e200e | [
"Apache-2.0"
] | null | null | null | Other_notebooks/Shapefile_Demo.py | gamedaygeorge/datacube-applications-library | 1b6314ee3465f9f17930391a4c241e981a9e200e | [
"Apache-2.0"
] | null | null | null | Other_notebooks/Shapefile_Demo.py | gamedaygeorge/datacube-applications-library | 1b6314ee3465f9f17930391a4c241e981a9e200e | [
"Apache-2.0"
] | 1 | 2021-02-25T14:19:05.000Z | 2021-02-25T14:19:05.000Z | # Code behind module for Shapefile_Demo.ipynb
################################
##
## Import Statments
##
################################
# Import standard Python modules
import sys
import datacube
import numpy as np
import fiona
import xarray as xr
from rasterio.features import geometry_mask
import shapely
from shapely.ops import transform
from shapely.geometry import shape
from functools import partial
import pyproj
################################
##
## Function Definitions
##
################################
def shapefile_mask(dataset: xr.Dataset, shapefile) -> np.array:
    """Rasterize the geometries of a shapefile onto a dataset's grid.

    Every feature geometry is re-projected from the shapefile's native
    CRS into WGS84 (epsg:4326) and burned into a boolean array shaped
    like the dataset's geobox.

    Args:
        dataset (xarray.Dataset): The dataset with the latitude and longitude extents.
        shapefile (string): The shapefile to be used for extraction.

    Returns:
        A boolean mask array (True for pixels inside or touching a shape).
    """
    with fiona.open(shapefile, 'r') as source:
        # The projection is identical for every feature, so build it once.
        reproject = partial(
            pyproj.transform,
            pyproj.Proj(init=source.crs['init']),
            pyproj.Proj(init='epsg:4326'))
        shapes = [transform(reproject, shape(feature['geometry']))
                  for feature in source]
    geobox = dataset.geobox
    return geometry_mask(
        shapes,
        out_shape=geobox.shape,
        transform=geobox.affine,
        all_touched=True,
        invert=True)
496664e291d159374bf05460e543caa52023fc6f | 272 | py | Python | Day 02/Day 02.1.py | Mraedis/AoC2021 | 4d198f8b227ce8f8f2f3fd2fed9396d7898e9d2a | [
"Apache-2.0"
] | 1 | 2021-11-30T22:41:09.000Z | 2021-11-30T22:41:09.000Z | Day 02/Day 02.1.py | Mraedis/AoC2021 | 4d198f8b227ce8f8f2f3fd2fed9396d7898e9d2a | [
"Apache-2.0"
] | null | null | null | Day 02/Day 02.1.py | Mraedis/AoC2021 | 4d198f8b227ce8f8f2f3fd2fed9396d7898e9d2a | [
"Apache-2.0"
] | null | null | null | linelist = [line for line in open('Day 02.input').readlines()]
# Tally the submarine commands parsed from `linelist` (read above):
# 'forward' advances horizontally, 'up'/'down' adjust the depth.
hor = 0
dep = 0
for command in linelist:
    direction, amount = command.split(' ')
    step = int(amount)
    if direction == 'forward':
        hor += step
    elif direction == 'up':
        dep -= step
    else:
        dep += step
# Puzzle answer: product of final horizontal position and depth.
print(hor * dep)
| 20.923077 | 62 | 0.555147 |
49670dd8b225f1420aba3e75006477f4bfe63dc1 | 1,879 | py | Python | examples/simple1d/driver.py | michael-a-hansen/jalapeno | 2f0d47467a78395e42854e11abebcf1b7721e0be | [
"MIT"
] | 1 | 2019-11-09T15:13:38.000Z | 2019-11-09T15:13:38.000Z | examples/simple1d/driver.py | michael-a-hansen/jalapeno | 2f0d47467a78395e42854e11abebcf1b7721e0be | [
"MIT"
] | 3 | 2016-10-05T22:57:46.000Z | 2016-10-06T06:26:22.000Z | examples/simple1d/driver.py | michael-a-hansen/jalapeno | 2f0d47467a78395e42854e11abebcf1b7721e0be | [
"MIT"
] | null | null | null | '''
This example provides three examples of a simple plot of 1-D data.
1. a publication-ready single column figure, which is printed to png (600 dpi), pdf, and svg
2. a presentation-ready figure on a black background
Four steps are involved in each figure:
- load/generate the data
- construct a 1d plot (figure, axis, line series) for the spectrum
- size the figure and font
- print the figure to a pdf
'''
import jalapeno.colors.svgcolors as jc
import jalapeno.plots.plots as jpp
import jalapeno.plots.colorscheme as jpc
import numpy as np
# Build the sample curve y = |cos(2x)| on x in [0, 2*pi].
theta = np.linspace(0, 2*np.pi, 600)
magnitude = np.abs(np.cos(2*theta))
scaled = theta/np.pi
# Figure 1: publication quality on the default colour scheme with a
# dark-orange line.
figure, axis, series = jpp.make_1d_plot(linecolor=jc.darkorange,
                                        maxx=max(scaled),
                                        maxy=1.01,
                                        xname='x/pi',
                                        yname='cos(2x)')
series.set_data(scaled, magnitude)
# Single-column sizing, then emit png (600 dpi), pdf and svg.
jpp.SquareFigure().set_size(figure)
jpp.print_fig(figure, 'xy-for-publication', ['pdf', 'png', 'svg'], dpi=600)
# Figure 2: presentation quality on a black background with a thicker
# coral line and the grid turned off.
figure, axis, series = jpp.make_1d_plot(colorscheme=jpc.FigColors.scheme('black'),
                                        linecolor=jc.coral,
                                        linewidth=4,
                                        showgrid='off',
                                        maxx=max(scaled),
                                        maxy=1.01,
                                        xname='x/pi',
                                        yname='cos(2x)')
series.set_data(scaled, magnitude)
# Larger font for slides; two equivalent ways to write the pdf.
jpp.SquareFigure(width=4, fontsize=12).set_size(figure)
jpp.print_fig(figure, 'xy-for-presentation', exts=['pdf']) # way 1, use print_fig and provide exts=['pdf']
jpp.print_fig_to_pdf(figure, 'xy-for-presentation') # way 2, use print_fig_to_pdf
4968c48330c18edd322258fb79f85333f213b40b | 2,306 | py | Python | process/1_embed_keep_ge.py | omarmaddouri/GCNCC_1 | ec858bbe8246e4af15f7b870ca0ccafdea93d627 | [
"MIT"
] | 4 | 2020-12-03T11:57:15.000Z | 2021-12-09T05:20:44.000Z | process/1_embed_keep_ge.py | alkaidone/GCNCC | 3270b4c2d48e0090a18a0ab1df3b9fd81627029d | [
"MIT"
] | 5 | 2020-01-28T23:14:40.000Z | 2021-08-25T15:55:23.000Z | process/1_embed_keep_ge.py | alkaidone/GCNCC | 3270b4c2d48e0090a18a0ab1df3b9fd81627029d | [
"MIT"
] | 3 | 2021-11-23T05:13:27.000Z | 2021-12-30T08:12:48.000Z | from __future__ import division
from __future__ import print_function
from pathlib import Path
import sys
project_path = Path(__file__).resolve().parents[1]
sys.path.append(str(project_path))
from keras.layers import Dense, Activation, Dropout
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras.optimizers import Adam
import keras.backend as K
import numpy as np
import time
import tensorflow as tf
import os
from core.utils import *
from core.layers.graph_cnn_layer import GraphCNN
from sklearn.preprocessing import normalize
# Set random seed
# Seed both the numpy and tensorflow RNGs so runs are reproducible.
seed = 123
np.random.seed(seed)
tf.random.set_seed(seed)
# Settings
# Command-line flags (tf.compat.v1 flags API): the dataset name and the
# embedding-method label, both used to build the output directory paths.
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'brc_microarray_usa', 'Dataset string.')
flags.DEFINE_string('embedding_method', 'ge', 'Name of the embedding method.')
#Check dataset availability
if not os.path.isdir("{}/data/parsed_input/{}".format(project_path, FLAGS.dataset)):
    sys.exit("{} dataset is not available under data/parsed_input/".format(FLAGS.dataset))
# Create the output directory for this dataset/embedding combination.
if not os.path.isdir("{}/data/output/{}/embedding/{}".format(project_path, FLAGS.dataset, FLAGS.embedding_method)):
    os.makedirs("{}/data/output/{}/embedding/{}".format(project_path, FLAGS.dataset, FLAGS.embedding_method))
# Echo the run configuration to stdout.
print("--------------------------------------------")
print("--------------------------------------------")
print("Hyper-parameters:")
print("Dataset: {}".format(FLAGS.dataset))
print("Embedding method: {}".format(FLAGS.embedding_method))
print("--------------------------------------------")
print("--------------------------------------------")
# Prepare Data
# X: per-node gene-expression features, A: graph data, Y: labels
# (load_training_data/get_splits_for_learning come from core.utils,
# star-imported above).
X, A, Y = load_training_data(dataset=FLAGS.dataset)
Y_train, Y_val, Y_test, train_idx, val_idx, test_idx, train_mask = get_splits_for_learning(Y, dataset=FLAGS.dataset)
# Normalize gene expression
X = normalize(X, norm='l1') #for positive non-zero entries, it's equivalent to: X /= X.sum(1).reshape(-1, 1)
# Save the node embeddings; for this 'ge' baseline the embedding is the
# L1-normalized expression matrix itself, written as a TSV.
np.savetxt("{}/data/output/{}/embedding/{}/embeddings.txt".format(project_path, FLAGS.dataset, FLAGS.embedding_method), X, delimiter="\t")
print("Embeddings saved in /data/output/{}/embedding/{}/embeddings.txt".format(FLAGS.dataset, FLAGS.embedding_method)) | 38.433333 | 139 | 0.685603 |
496956f2c24db98208ca44b218ea029a5dcff3f8 | 832 | py | Python | docs/modelserving/detect/aif/germancredit/simulate_predicts.py | chinhuang007/website | c5324e9ee3e7f202c226836de0aca9ebd33b61b2 | [
"Apache-2.0"
] | 1,146 | 2019-03-27T21:14:34.000Z | 2021-09-22T08:36:46.000Z | docs/modelserving/detect/aif/germancredit/simulate_predicts.py | chinhuang007/website | c5324e9ee3e7f202c226836de0aca9ebd33b61b2 | [
"Apache-2.0"
] | 1,803 | 2019-03-27T22:16:02.000Z | 2021-09-22T15:27:44.000Z | docs/modelserving/detect/aif/germancredit/simulate_predicts.py | chinhuang007/website | c5324e9ee3e7f202c226836de0aca9ebd33b61b2 | [
"Apache-2.0"
] | 573 | 2019-03-27T21:14:58.000Z | 2021-09-20T21:15:52.000Z | import sys
import json
import time
import requests
# Usage: simulate_predicts.py <endpoint-url> <host-header>
if len(sys.argv) < 3:
    raise Exception("No endpoint specified. ")
endpoint = sys.argv[1]
headers = {
    'Host': sys.argv[2]
}
# Load the sample payloads shipped alongside this script.
with open('input.json') as infile:
    sample_file = json.load(infile)
inputs = sample_file["instances"]
# Split inputs into chunks of size 15 and send them to the predict server
print("Sending prediction requests...")
time_before = time.time()
# NOTE(review): this first request posts the whole batch at once before
# the chunked loop below -- presumably a warm-up call; confirm intent.
res = requests.post(endpoint, json={"instances": inputs}, headers=headers)
for x in range(0, len(inputs), 15):
    # Fix: slice length now matches the loop step of 15.  The original
    # sliced inputs[x: x+20], re-sending the last five items of every
    # chunk and contradicting the comment above.
    query_inputs = inputs[x: x+15]
    payload = {"instances": query_inputs}
    res = requests.post(endpoint, json=payload, headers=headers)
    print(res)
    if not res.ok:
        res.raise_for_status()
print("TIME TAKEN: ", time.time() - time_before)
print("Last response: ", res.json())
| 25.212121 | 74 | 0.689904 |
496a50df8aab51c7339b1ff94cb179d8c9744ace | 1,272 | py | Python | sdc/ysdc_dataset_api/utils/serialization.py | sty61010/shifts | d3bb3086d8f2581f74644585701f4b1db4338483 | [
"Apache-2.0"
] | 156 | 2021-07-16T08:54:39.000Z | 2022-03-24T11:49:36.000Z | sdc/ysdc_dataset_api/utils/serialization.py | sty61010/shifts | d3bb3086d8f2581f74644585701f4b1db4338483 | [
"Apache-2.0"
] | 18 | 2021-07-21T14:02:46.000Z | 2022-02-26T04:07:12.000Z | sdc/ysdc_dataset_api/utils/serialization.py | sty61010/shifts | d3bb3086d8f2581f74644585701f4b1db4338483 | [
"Apache-2.0"
] | 41 | 2021-07-21T05:38:07.000Z | 2022-01-13T15:25:51.000Z | import io
import zlib
import numpy as np
def serialize_numpy(arr: np.ndarray, compress: bool = False) -> str:
    """Serializes numpy array to string with optional zlib compression.

    Args:
        arr (np.ndarray): Numpy array to serialize.
        compress (bool, optional): Whether to compress resulting string with zlib or not.
            Defaults to False.

    Returns:
        str: serialized string
    """
    assert isinstance(arr, np.ndarray)
    # Write the array into an in-memory buffer using numpy's .npy format.
    buffer = io.BytesIO()
    np.save(buffer, arr)
    return maybe_compress(buffer.getvalue(), compress)
def deserialize_numpy(serialized_string: str, decompress: bool = False) -> np.ndarray:
    """Deserializes numpy array from compressed string.

    Args:
        serialized_string (str): Serialized numpy array
        decompress (bool, optional): Whether to decompress string with zlib before loading.
            Defaults to False.

    Returns:
        np.ndarray: deserialized numpy array
    """
    # Renamed the local variable: it used to be called ``str`` and
    # shadowed the builtin type inside this function.
    raw = maybe_decompress(serialized_string, decompress)
    buf = io.BytesIO(raw)
    return np.load(buf)
| 27.06383 | 91 | 0.687893 |
496b0cdd9c9c0a2581d8be6db775211985c0614c | 278 | py | Python | hubspot/discovery/crm/extensions/videoconferencing/discovery.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/discovery/crm/extensions/videoconferencing/discovery.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/discovery/crm/extensions/videoconferencing/discovery.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | import hubspot.crm.extensions.videoconferencing as api_client
from ....discovery_base import DiscoveryBase
| 30.888889 | 68 | 0.78777 |