hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d203709221f68ac2daa8de8a8dbe4b58bcd9f4f3 | 497 | py | Python | Src/StdLib/Lib/test/xmltests.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | Src/StdLib/Lib/test/xmltests.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 1,074 | 2016-12-07T05:02:48.000Z | 2022-03-22T02:09:11.000Z | Src/StdLib/Lib/test/xmltests.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z | # Convenience test module to run all of the XML-related tests in the
# standard library.
import sys
import test.test_support
test.test_support.verbose = 0
runtest("test.test_minidom")
runtest("test.test_pyexpat")
runtest("test.test_sax")
runtest("test.test_xml_etree")
runtest("test.test_xml_etree_c")
runtest("test.test_xmllib")
runtest("test.test_xmlrpc")
| 22.590909 | 68 | 0.748491 |
d20425193c1b51cfe42ea596643380c8747b1847 | 1,275 | py | Python | memcnn/experiment/tests/test_factory.py | classner/memcnn | 107ea40945b2b0d312d05cab5b78633e5f977a52 | [
"MIT"
] | 224 | 2018-03-03T02:46:54.000Z | 2022-02-12T14:33:56.000Z | memcnn/experiment/tests/test_factory.py | classner/memcnn | 107ea40945b2b0d312d05cab5b78633e5f977a52 | [
"MIT"
] | 62 | 2018-04-28T01:25:14.000Z | 2021-11-25T13:20:57.000Z | memcnn/experiment/tests/test_factory.py | classner/memcnn | 107ea40945b2b0d312d05cab5b78633e5f977a52 | [
"MIT"
] | 25 | 2018-04-20T18:08:12.000Z | 2022-02-03T22:13:44.000Z | import pytest
import os
import memcnn.experiment.factory
from memcnn.config import Config
| 37.5 | 104 | 0.741176 |
d2045b61e5e8006918d4654b503671b6d4cfdf28 | 303 | py | Python | source/bluetooth/test_search_serial_port.py | Takahiro55555/CameraSystem | 53a77b7a7bd0c34b486d73af8ef2a49201a0bdaa | [
"MIT"
] | 1 | 2019-12-03T05:28:35.000Z | 2019-12-03T05:28:35.000Z | source/bluetooth/test_search_serial_port.py | Takahiro55555/CameraSystem | 53a77b7a7bd0c34b486d73af8ef2a49201a0bdaa | [
"MIT"
] | 88 | 2019-07-01T09:11:35.000Z | 2021-09-08T01:13:16.000Z | source/bluetooth/test_search_serial_port.py | Takahiro55555/CameraSystem | 53a77b7a7bd0c34b486d73af8ef2a49201a0bdaa | [
"MIT"
] | 5 | 2019-05-22T06:44:38.000Z | 2019-09-18T05:20:30.000Z | """
@file: test_search_serial_port.py
@author: Futa HIRAKOBA
@brief: search_serial_port.py
"""
from search_serial_port import search_com_ports, search_enabled_com_port
| 18.9375 | 72 | 0.808581 |
d2054031cc7f367ae05b0c0f073e7b256fa4a564 | 238 | py | Python | Aula 01/ConversaoMedidas.py | eduardojpsena/EstruturaDeDados-Python-IESP | 97c22fc1411dfdae2d1085e9a3ca0c334ee07988 | [
"MIT"
] | null | null | null | Aula 01/ConversaoMedidas.py | eduardojpsena/EstruturaDeDados-Python-IESP | 97c22fc1411dfdae2d1085e9a3ca0c334ee07988 | [
"MIT"
] | null | null | null | Aula 01/ConversaoMedidas.py | eduardojpsena/EstruturaDeDados-Python-IESP | 97c22fc1411dfdae2d1085e9a3ca0c334ee07988 | [
"MIT"
] | null | null | null | print("---CONVERSO DE MEDIDAS---")
valor_metros = float(input("Informe o valor em metros ser convertido: "))
valor_centimetros = valor_metros * 100
print("{} metros equivale a {} centimetros.".format(valor_metros, valor_centimetros)) | 39.666667 | 85 | 0.747899 |
d2057f4c0253aa5e357b86320d8d2148ad029e12 | 385 | py | Python | src/easymql/__init__.py | vivek-shrikhande/easy-mql | 8cbf6a77aed8230bd92cee5585227ea4a09001b8 | [
"MIT"
] | null | null | null | src/easymql/__init__.py | vivek-shrikhande/easy-mql | 8cbf6a77aed8230bd92cee5585227ea4a09001b8 | [
"MIT"
] | null | null | null | src/easymql/__init__.py | vivek-shrikhande/easy-mql | 8cbf6a77aed8230bd92cee5585227ea4a09001b8 | [
"MIT"
] | null | null | null | from pyparsing import ParseException
from easymql.exc import EasyMQLSyntaxError
from easymql.pipeline import Pipeline, encode
| 29.615385 | 85 | 0.724675 |
d205e00637b9718f14c4962c0430f40c178683e5 | 266 | py | Python | src/guildapi.py | nsde/discord-guildapi | b1303423e74c1370498e594429f3bf4aeae4ee95 | [
"MIT"
] | null | null | null | src/guildapi.py | nsde/discord-guildapi | b1303423e74c1370498e594429f3bf4aeae4ee95 | [
"MIT"
] | null | null | null | src/guildapi.py | nsde/discord-guildapi | b1303423e74c1370498e594429f3bf4aeae4ee95 | [
"MIT"
] | null | null | null | import requests
import json | 29.555556 | 90 | 0.733083 |
d2066abfbaca62c1d5be55ef5d80f560df075d0a | 409 | py | Python | smarthome/smarthomeproj/server/migrations/0011_auto_20210122_0256.py | nunocaseiro/smarthome-server-django | 711db6ff360061d861d9985264f753e0f7846327 | [
"Apache-2.0"
] | null | null | null | smarthome/smarthomeproj/server/migrations/0011_auto_20210122_0256.py | nunocaseiro/smarthome-server-django | 711db6ff360061d861d9985264f753e0f7846327 | [
"Apache-2.0"
] | null | null | null | smarthome/smarthomeproj/server/migrations/0011_auto_20210122_0256.py | nunocaseiro/smarthome-server-django | 711db6ff360061d861d9985264f753e0f7846327 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.3 on 2021-01-22 02:56
from django.db import migrations, models
| 21.526316 | 70 | 0.613692 |
d207522acb3ce4394972c46c3f9f025ef3ebed35 | 683 | py | Python | p2/core/tasks.py | BeryJu/p2 | 80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27 | [
"MIT"
] | null | null | null | p2/core/tasks.py | BeryJu/p2 | 80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27 | [
"MIT"
] | null | null | null | p2/core/tasks.py | BeryJu/p2 | 80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27 | [
"MIT"
] | null | null | null | """p2 core tasks"""
from p2.core.celery import CELERY_APP
from p2.lib.reflection import path_to_class
| 32.52381 | 72 | 0.648609 |
d207656cad5f592cc3b1825bcd0b8c7607785174 | 4,463 | py | Python | tests/keras_contrib/layers/test_convolutional.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | [
"MIT"
] | 7 | 2017-07-22T09:05:44.000Z | 2019-04-30T02:08:04.000Z | tests/keras_contrib/layers/test_convolutional.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | [
"MIT"
] | 1 | 2017-12-26T02:59:59.000Z | 2017-12-26T02:59:59.000Z | tests/keras_contrib/layers/test_convolutional.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | [
"MIT"
] | 11 | 2017-07-06T14:11:51.000Z | 2021-08-21T23:18:20.000Z | import pytest
import numpy as np
import itertools
from numpy.testing import assert_allclose
from keras_contrib.utils.test_utils import layer_test, keras_test
from keras.utils.conv_utils import conv_input_length
from keras import backend as K
from keras_contrib import backend as KC
from keras_contrib.layers import convolutional, pooling
from keras.models import Sequential
# TensorFlow does not support full convolution.
if K.backend() == 'theano':
_convolution_border_modes = ['valid', 'same']
else:
_convolution_border_modes = ['valid', 'same']
if __name__ == '__main__':
pytest.main([__file__])
| 37.191667 | 95 | 0.538203 |
d20883f007efa4a112403e5dc5f0370600e053b9 | 8,131 | py | Python | superpyrate/task_countfiles.py | willu47/superpyrate | 60ce6f98a00cac418f62ccac9a194023a4f4b37a | [
"MIT"
] | null | null | null | superpyrate/task_countfiles.py | willu47/superpyrate | 60ce6f98a00cac418f62ccac9a194023a4f4b37a | [
"MIT"
] | null | null | null | superpyrate/task_countfiles.py | willu47/superpyrate | 60ce6f98a00cac418f62ccac9a194023a4f4b37a | [
"MIT"
] | null | null | null | """Holds the luigi tasks which count the number of rows in the files
Records the number of clean and dirty rows in the AIS data,
writing these stats to the database and finally producing a report of the
statistics
1. Count the number of rows in the raw csv files (in ``files/unzipped/<archive>``)
2. Count the number of rows int the clean csv files (in ``files/cleancsv/``)
3. Write the clean rows in the clean column of ais_sources
4. Write the dirty (raw - clean) rows into the dirty column of ais_sources
"""
import luigi
from luigi.util import requires
from luigi.contrib.external_program import ExternalProgramTask
from luigi.postgres import CopyToTable, PostgresQuery
from superpyrate.pipeline import get_environment_variable, ProcessZipArchives, \
GetZipArchive, get_working_folder, \
RunQueryOnTable, GetCsvFile
from plumbum.cmd import wc
from glob import glob
import os
import logging
LOGGER = logging.getLogger(__name__)
logging.basicConfig(filename='reporting.log',
level=logging.DEBUG,
filemode='w',
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
class DoIt(luigi.Task):
"""
"""
folder_of_zips = luigi.Parameter(significant=True)
with_db = luigi.BoolParameter(significant=False)
| 39.663415 | 101 | 0.615422 |
d20a84d94f2ed93364b818533786034015f7b86f | 1,917 | py | Python | pyquil/api/__init__.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | [
"Apache-2.0"
] | 677 | 2017-01-09T23:20:22.000Z | 2018-11-26T10:57:49.000Z | pyquil/api/__init__.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | [
"Apache-2.0"
] | 574 | 2018-11-28T05:38:40.000Z | 2022-03-23T20:38:28.000Z | pyquil/api/__init__.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | [
"Apache-2.0"
] | 202 | 2018-11-30T06:36:28.000Z | 2022-03-29T15:38:18.000Z | ##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Sub-package for facilitating connections to the QVM / QPU.
"""
__all__ = [
"AbstractCompiler",
"BenchmarkConnection",
"EncryptedProgram",
"EngagementManager",
"get_qc",
"list_quantum_computers",
"local_forest_runtime",
"QAM",
"QAMExecutionResult",
"QCSClientConfiguration",
"QCSQuantumProcessor",
"QPU",
"QPUCompiler",
"QuantumComputer",
"QuantumExecutable",
"QVM",
"QVMCompiler",
"WavefunctionSimulator",
]
from qcs_api_client.client import QCSClientConfiguration
from pyquil.api._benchmark import BenchmarkConnection
from pyquil.api._compiler import QVMCompiler, QPUCompiler, QuantumExecutable, EncryptedProgram, AbstractCompiler
from pyquil.api._engagement_manager import EngagementManager
from pyquil.api._qam import QAM, QAMExecutionResult
from pyquil.api._qpu import QPU
from pyquil.api._quantum_computer import (
QuantumComputer,
list_quantum_computers,
get_qc,
local_forest_runtime,
)
from pyquil.api._qvm import QVM
from pyquil.api._wavefunction_simulator import WavefunctionSimulator
from pyquil.quantum_processor import QCSQuantumProcessor
| 33.631579 | 112 | 0.691706 |
d20aad59d161f70830e20fabfe7cc1b0d6c4b1b9 | 946 | py | Python | nodes/makeblock_ros_one.py | to4dy/makeblock-ros | 12b58195c9be3cc95c6398704a17ceb3a841813e | [
"MIT"
] | 7 | 2017-12-17T00:45:07.000Z | 2022-03-11T10:25:54.000Z | nodes/makeblock_ros_one.py | to4dy/makeblock-ros | 12b58195c9be3cc95c6398704a17ceb3a841813e | [
"MIT"
] | null | null | null | nodes/makeblock_ros_one.py | to4dy/makeblock-ros | 12b58195c9be3cc95c6398704a17ceb3a841813e | [
"MIT"
] | 3 | 2016-06-21T05:45:24.000Z | 2017-04-19T18:48:31.000Z | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import Float32
from megapi import *
from makeblock_ros.srv import *
bot = None
pub = rospy.Publisher('makeblock_ros_ultrasensor', Float32, queue_size=1)
s = rospy.Service('makeblock_ros_move_motors', MakeBlockMover,
handle_makeblock_motors)
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
| 20.12766 | 73 | 0.662791 |
d20acbdc55dd2187f4e70d6f0f36211cc6ddf2d9 | 9,347 | py | Python | bets-templates.py | longnow/longview | 9345faacec64f427eab43790abc165af6a572e3d | [
"BSD-2-Clause"
] | 82 | 2015-01-23T04:20:31.000Z | 2022-02-18T22:33:53.000Z | bets-templates.py | longnow/longview | 9345faacec64f427eab43790abc165af6a572e3d | [
"BSD-2-Clause"
] | 2 | 2015-03-27T22:24:46.000Z | 2017-02-20T08:19:12.000Z | bets-templates.py | longnow/longview | 9345faacec64f427eab43790abc165af6a572e3d | [
"BSD-2-Clause"
] | 7 | 2015-06-04T20:37:02.000Z | 2021-03-10T02:41:08.000Z | # Copyright (c) 2004, The Long Now Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# HTML template substitutions
#
# %n - nodeId (aka item number)
# %t - title
# %d - date string
# %1 [...] - positional arguments
# The HTML template used for a popup.
popupTemplate = """
<div class="node" id="node%n" onmouseout="javascript:hideNode('%n')">
<table cellpadding="0" cellspacing="0" border="0" width="100%">
<tr>
<td class="exp">
BET<br><span class="txt">%n</span></td>
<td class="exp" align="right">
%d
</td>
</tr>
</table>
<div class="txt-sm">
%1</div>
<table cellpadding="3" cellspacing="0" border="0" width="100%">
<tr>
<td class="exp" align="right">
AGREE
</td>
<td class="txt" align="left">
%2
</td>
</tr>
<tr>
<td class="exp" align="right">
DISAGREE
</td>
<td class="txt" align="left">
%3
</td>
</tr>
<tr>
<td class="exp" align="right">
STAKES
</td>
<td class="txt" align="left">
%4
</td>
</tr>
</table>
</div>
"""
notifyTemplate = """
<div class="node" id="node%n" onmouseout="javascript:hideNode('%n')">
<table cellpadding="0" cellspacing="0" border="0" width="100%">
<tr>
<td class="exp">
BET<br><span class="txt">%1</span></td>
<td class="exp" align="right">
REMEMBER AND REMIND
</td>
</tr>
</table>
<div class="txt-sm">
%2</div>
<table cellpadding="3" cellspacing="0" border="0" width="100%">
<tr>
<td class="exp" align="center">
%3
</td>
</tr>
</table>
</div>
"""
# this string gets written out in its entirety to styles.css
stylesheets = """
/* for the whole page, unless overridden */
body {
padding: 0;
margin: 0;
background-image: url("./img-static/bg.jpg");
}
/* Long Bets specific styles */
.exp {
font-size: 11px;
font-family: Verdana, Helvetica, sans-serif;
}
.txt-lg {
font-size: 16px;
font-family: Georgia, Times, serif;
}
.txt {
font-size: 14px;
font-family: Georgia, Times, serif;
}
.txt-sm {
font-size: 11px;
font-family: Georgia, Times, serif;
}
.txt-lt {
font-size: 14px;
font-family: Georgia, Times, serif;
color: #666666;
}
.node .txt-sm {
padding: 5px 0;
font-size: 12px;
}
.key {
width: 664px;
margin: 10px 0;
border: #ccc 1px solid;
}
.key td {
padding: 1px;
font-size: 11px;
width: 50%;
font-family: Verdana, Helvetica, sans-serif;
text-align: center;
}
/* links that have not been visited */
a:link {
color: #930;
text-decoration: none;
}
/* links that have already been visited */
a:visited {
color: #930;
text-decoration: none;
}
/* applied to a link when the cursor is hovering over it */
a:hover {
color: #c63;
text-decoration: underline;
}
/* the table at the very top of the page containing the logo image */
.logotable {
width: 100%; /* percent of the browser window occupied by the table */
margin: 0px;
padding: 0px;
}
/* the table data cell which contains the logo image */
.logo {
text-align: right;
background-color: #000;
border-bottom: 1px solid #996;
}
/* the table containing the title and navbar */
.titleandnav {
width: 100%; /* percent of the browser window occupied by the table */
}
/* the title cell itself */
.titlecell {
padding: 6px 10px; /* first value: top & bottom; second: left & right */
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
font-size: 16px;
border-top: 1px solid #996;
border-bottom: 1px solid #996;
color: #666;
}
/* the table cell which holds the navigation bar & surrounding whitespace */
.navcell {
text-align: center;
vertical-align: middle;
padding-left: 15px;
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
font-size: 10px;
color: #666;
}
/* table which holds the navigation bar & horizontal whitespace, but no
* vertical whitespace */
.navtable {
margin-left: auto;
margin-right: auto;
}
/* the dates on both ends of the navigation bar */
.navlabel {
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
font-size: 10px;
padding: 4px;
}
/* table cell that holds the "Long View Powered" image */
.power {
padding-left: 15px;
padding-right: 5px;
text-align: right;
}
/* row of dates labeling the X-axis of the timeline, at the top */
.ytabletop {
border-bottom: 1px dotted #996;
}
/* cell containing an individual date label on the X-axis of the timeline */
.ycell {
text-align: center;
vertical-align: top;
padding: 0;
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
font-size: 10px;
}
/* row of dates labeling the X-axis of the timeline, at the bottom */
.ytablebottom {
border-top: 1px dotted #996;
border-bottom: 1px solid #996;
}
/* table cell containing "Past", "Now", and "Future" at the top of the */
/* timeline*/
.pastnowcell {
text-align: right;
padding: 0;
}
/* the table containing the body of the timeline */
#datatable {
border-top: 1px #ddd solid;
border-right: 1px #ddd solid;
background-image: url('./img-generated/timeline-bg.png');
}
/* the background of each timeline bar */
.data {
padding-top: 1px;
padding-bottom: 1px;
background-position: 200px;
background-repeat: repeat-x;
}
/* the block that contains all of the timeline labels on the left side of
* the screen. */
#labels {
position: absolute;
top: 26px;
z-index: 3;
}
/* cell containing a single label on the left side of the screen */
.labelscell {
font-size: 10px;
font-weight: normal;
font-family: verdana, helvetica, arial, sans-serif; /* in order of desirability */
color: #999;
padding-top: 3px;
border: 0;
}
/* the popups themselves */
.node {
position: absolute;
visibility: hidden;
color: #333;
width: 200px;
z-index: 5;
border: 1px solid #999;
background-image: url(./img-static/popup-bg.gif);
padding: 6px;
}
/* The body of the popups (eg the HTML inside the table) */
.popupcell {
font-size: 10px;
font-weight: normal;
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
}
/* Popup titles */
.popuptitle {
font-size: 12px;
}
"""
# override the default header top matter from the lvhtml module
headerTop = """<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%s</title>
<link rel="stylesheet" href="./styles.css" />
<script language="javascript" type="text/javascript" src="./rollover.js"></script>
</head>
<body onload="loadimgs();">
<img src="./img-static/no.gif" alt="" width="1" height="25" border="0"><br>
<div align="center">
<table cellpadding="0" cellspacing="0" border="0" width="664">
<tr>
<td colspan="3">
<img src="./img-static/timeline.gif" alt="Timeline" width="664" height="38" border="0"></td>
</tr>
<tr>
<td class="exp" nowrap>
<img src="./img-static/no.gif" alt="" width="5" height="1" border="0">
<span class="txt"><b>%s</b></span><br>
<!-- longview.py unused value hack: %s - %s -->
« On the Record: <a href="http://www.longbets.com/bets" target="_top">Bets</a> | <a href="http://www.longbets.com/predictions" target="_top">Predictions</a></td>
<td class="navcell" align="right" nowrap>
<table class="navtable" cellpadding="0" cellspacing="0" border="0">
<tr>
<td class="navlabel">
%s</td>
<td nowrap="nowrap">\n"""
# another override
headerBottom = """</td>
<td class="navlabel">%s</td>
</tr>
</table></td>
<td class="power"><img src="img-static/longview-power.gif" alt="Powered by Long View" width="89" height="22" border="0" /></td>
</td>
</tr>
</table>
<table class="key">
<tr>
<td>
Votes: YES <img src="img-generated/key1.png" alt="" width="65" height="12"> NO</td>
<td>
Discussion Intensity: LESS <img src="img-generated/key2.png" alt="" width="65" height="12"> MORE</td>
</tr>
</table>
</div>
</body>
</html>
"""
| 23.484925 | 167 | 0.642559 |
d20af14dd3e3f451b0c30965586bb3662c6ee4a4 | 768 | py | Python | ansible/roles/kraken.config/filter_plugins/expand_config.py | yenicapotediaz/k2 | 90aeb6efd77371c388b1429fc443aa30673c7787 | [
"Apache-2.0"
] | 85 | 2016-10-06T23:15:14.000Z | 2017-09-15T00:52:25.000Z | ansible/roles/kraken.config/filter_plugins/expand_config.py | yenicapotediaz/k2 | 90aeb6efd77371c388b1429fc443aa30673c7787 | [
"Apache-2.0"
] | 739 | 2016-09-19T21:48:58.000Z | 2017-09-15T17:46:52.000Z | ansible/roles/kraken.config/filter_plugins/expand_config.py | yenicapotediaz/k2 | 90aeb6efd77371c388b1429fc443aa30673c7787 | [
"Apache-2.0"
] | 47 | 2016-09-22T21:32:12.000Z | 2017-09-14T21:00:53.000Z | import copy, os
from ansible import errors
| 26.482759 | 70 | 0.669271 |
d20cfcc3e7e361f935e2feabc8a3b8078a59377a | 2,514 | py | Python | jaxopt/_src/loop.py | ianwilliamson/jaxopt | 0ff6be8094aacb3bf5472a41d780e3f56fc8e0f8 | [
"Apache-2.0"
] | 2 | 2021-10-04T15:20:55.000Z | 2021-10-05T08:52:46.000Z | jaxopt/_src/loop.py | ianwilliamson/jaxopt | 0ff6be8094aacb3bf5472a41d780e3f56fc8e0f8 | [
"Apache-2.0"
] | null | null | null | jaxopt/_src/loop.py | ianwilliamson/jaxopt | 0ff6be8094aacb3bf5472a41d780e3f56fc8e0f8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loop utilities."""
import jax
import jax.numpy as jnp
def _while_loop_scan(cond_fun, body_fun, init_val, max_iter):
"""Scan-based implementation (jit ok, reverse-mode autodiff ok)."""
init = (init_val, cond_fun(init_val))
return jax.lax.scan(_fun, init, None, length=max_iter)[0][0]
def _while_loop_python(cond_fun, body_fun, init_val, maxiter):
"""Python based implementation (no jit, reverse-mode autodiff ok)."""
val = init_val
for _ in range(maxiter):
cond = cond_fun(val)
if not cond:
# When condition is met, break (not jittable).
break
val = body_fun(val)
return val
def _while_loop_lax(cond_fun, body_fun, init_val, maxiter):
"""lax.while_loop based implementation (jit by default, no reverse-mode)."""
return jax.lax.while_loop(_cond_fun, _body_fun, (0, init_val))[1]
def while_loop(cond_fun, body_fun, init_val, maxiter, unroll=False, jit=False):
"""A while loop with a bounded number of iterations."""
if unroll:
if jit:
fun = _while_loop_scan
else:
fun = _while_loop_python
else:
if jit:
fun = _while_loop_lax
else:
raise ValueError("unroll=False and jit=False cannot be used together")
if jit and fun is not _while_loop_lax:
# jit of a lax while_loop is redundant, and this jit would only
# constrain maxiter to be static where it is not required.
fun = jax.jit(fun, static_argnums=(0, 1, 3))
return fun(cond_fun, body_fun, init_val, maxiter)
| 30.289157 | 79 | 0.699682 |
d20e5a4fd52895393eb34015d45cba3558f08f7a | 8,407 | py | Python | official/recommendation/model_runner.py | decster/models | 82e783e3172f254b62dc4af08987754ebb7c348c | [
"Apache-2.0"
] | 3 | 2018-10-31T02:16:47.000Z | 2018-11-06T09:11:37.000Z | official/recommendation/model_runner.py | decster/models | 82e783e3172f254b62dc4af08987754ebb7c348c | [
"Apache-2.0"
] | null | null | null | official/recommendation/model_runner.py | decster/models | 82e783e3172f254b62dc4af08987754ebb7c348c | [
"Apache-2.0"
] | 1 | 2020-01-21T17:39:55.000Z | 2020-01-21T17:39:55.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains NcfModelRunner, which can train and evaluate an NCF model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import time
import tensorflow as tf
from tensorflow.contrib.compiler import xla
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
| 40.418269 | 93 | 0.692875 |
d20eb1e22a6672296afae7cc1ca61eef92581ba3 | 53,916 | py | Python | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/amberPrmTop.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/amberPrmTop.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | null | null | null | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/amberPrmTop.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | null | null | null | ## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
############################################################################
#
# Author: Ruth HUEY, Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2001
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/MolKit/amberPrmTop.py,v 1.32 2007/07/24 17:30:40 vareille Exp $
#
# $Id: amberPrmTop.py,v 1.32 2007/07/24 17:30:40 vareille Exp $
#
#from MolKit.molecule import Atom, AtomSet, Bond
from sff.amber import AmberParm
import numpy.oldnumeric as Numeric, types
from math import pi, sqrt, ceil, fabs
from string import split, strip, join
from os.path import basename
from MolKit.data.all_amino94_dat import all_amino94_dat
from MolKit.data.all_aminont94_dat import all_aminont94_dat
from MolKit.data.all_aminoct94_dat import all_aminoct94_dat
if __name__ == '__main__':
# load a protein and build bonds
from MolKit import Read
p = Read('sff/testdir/p1H.pdb')
p[0].buildBondsByDistance()
# build an Amber parameter description objects
from MolKit.amberPrmTop import ParameterDict
pd = ParameterDict()
from MolKit.amberPrmTop import Parm
prm = Parm()
prm.processAtoms(p.chains.residues.atoms)
| 34.037879 | 108 | 0.476167 |
d20f67ca5ace0109a27cb8bee9fd7724ffdbb6df | 2,342 | py | Python | main_model/example.py | benmaier/DigCT | 62fc3fddb7600e2a43761e08618b2e3df423569c | [
"MIT"
] | null | null | null | main_model/example.py | benmaier/DigCT | 62fc3fddb7600e2a43761e08618b2e3df423569c | [
"MIT"
] | null | null | null | main_model/example.py | benmaier/DigCT | 62fc3fddb7600e2a43761e08618b2e3df423569c | [
"MIT"
] | 1 | 2021-07-12T13:50:35.000Z | 2021-07-12T13:50:35.000Z | import numpy as np
from simulation import simulation_code
from tqdm import tqdm
np.random.seed(981736)
N = 10_000
n_meas = 100
kwargs = dict(
N = N,
q = 0.3,
a = 0.3,
R0 = 2.5,
quarantiningS = True,
parameter = {
'chi':1/2.5,
'recovery_rate' : 1/7,
'alpha' : 1/3,
'beta' : 1/2,
'number_of_contacts' : 20,
'x':0.17,
'I_0' : N*0.01,
'omega':1/10,
"y" : 0.1,
"z": 0.64,
"R0": 2.5,
"network_model":'er_network',
},
sampling_dt = 1,
time = 1e7,
)
import matplotlib.pyplot as pl
results_tracing = []
results_no_trac = []
for meas in tqdm(range(n_meas)):
kwargs['a'] = 0.3
_, result0 = simulation_code(kwargs)
result0 = get_epidemic(result0)
kwargs['a'] = 0.0
_, result1 = simulation_code(kwargs)
result1 = get_epidemic(result1)
results_tracing.append(result0)
results_no_trac.append(result1)
results_tracing = np.array(make_equal_length(results_tracing))
results_no_trac = np.array(make_equal_length(results_no_trac))
t0 = np.arange(np.shape(results_tracing)[1])
t1 = np.arange(np.shape(results_no_trac)[1])
mn0 = np.mean(results_tracing,axis=0)
mn1 = np.mean(results_no_trac,axis=0)
err0 = np.std(results_tracing,axis=0)
err1 = np.std(results_no_trac,axis=0)
err0low, md0, err0high = np.percentile(results_tracing,[25,50,75],axis=0)
err1low, md1, err1high = np.percentile(results_no_trac,[25,50,75],axis=0)
pl.plot(t0, md0, label='with tracing (a=0.3)')
pl.plot(t1, md1, label='without tracing')
pl.fill_between(t0, err0low, err0high, alpha=0.2)
pl.fill_between(t1, err1low, err1high, alpha=0.2)
pl.xlabel('time [d]')
pl.ylabel('prevalence')
pl.legend()
pl.gcf().savefig('example.png',dpi=300)
pl.show()
| 23.897959 | 79 | 0.62041 |
d2106f01efc43255c99ac9c1592bee8f1c926386 | 5,480 | py | Python | app/migrations/0002_appointment_doctor_patient_person_receptionist.py | sairamBikkina/sdp1 | e48cb01e8100259f95c16911f5fe6f843313464e | [
"MIT"
] | 5 | 2020-08-06T07:16:00.000Z | 2022-01-20T22:07:58.000Z | app/migrations/0002_appointment_doctor_patient_person_receptionist.py | sairamBikkina/sdp1 | e48cb01e8100259f95c16911f5fe6f843313464e | [
"MIT"
] | 2 | 2020-10-04T13:58:24.000Z | 2020-10-04T14:00:35.000Z | app/migrations/0002_appointment_doctor_patient_person_receptionist.py | sairamBikkina/sdp1 | e48cb01e8100259f95c16911f5fe6f843313464e | [
"MIT"
] | 3 | 2020-10-03T07:19:52.000Z | 2021-10-05T07:15:30.000Z | # Generated by Django 3.0.5 on 2020-05-24 10:19
import datetime
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
| 34.683544 | 87 | 0.387044 |
d210d3e4fc7f26c1bc84d6a2851b1aad30445d94 | 2,185 | py | Python | notebook/datetime_fromisoformat.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 174 | 2018-05-30T21:14:50.000Z | 2022-03-25T07:59:37.000Z | notebook/datetime_fromisoformat.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 5 | 2019-08-10T03:22:02.000Z | 2021-07-12T20:31:17.000Z | notebook/datetime_fromisoformat.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 53 | 2018-04-27T05:26:35.000Z | 2022-03-25T07:59:37.000Z | import datetime
# --- datetime.date.fromisoformat -------------------------------------
s = '2018-12-31'
d = datetime.date.fromisoformat(s)
print(d)
# 2018-12-31
print(type(d))
# <class 'datetime.date'>
# An incomplete date is rejected:
# print(datetime.date.fromisoformat('2018-12'))
# ValueError: Invalid isoformat string: '2018-12'
print(datetime.date.fromisoformat('2018-01-01'))
# 2018-01-01
# Zero-padding is mandatory:
# print(datetime.date.fromisoformat('2018-1-1'))
# ValueError: Invalid isoformat string: '2018-1-1'
# --- datetime.time.fromisoformat -------------------------------------
s = '05:00:30.001000'
t = datetime.time.fromisoformat(s)
print(t)
# 05:00:30.001000
print(type(t))
# <class 'datetime.time'>
# Hour-only strings parse fine:
print(datetime.time.fromisoformat('05'))
# 05:00:00
# ...but again only when zero-padded:
# print(datetime.time.fromisoformat('5:00:30'))
# ValueError: Invalid isoformat string: '5:00:30'
# --- datetime.datetime.fromisoformat ---------------------------------
s = '2018-12-31T05:00:30.001000'
dt = datetime.datetime.fromisoformat(s)
print(dt)
# 2018-12-31 05:00:30.001000
print(type(dt))
# <class 'datetime.datetime'>
# Any single character is accepted as the date/time separator:
print(datetime.datetime.fromisoformat('2018-12-31x05:00:30.001000'))
# 2018-12-31 05:00:30.001000
# ...but only one character:
# print(datetime.datetime.fromisoformat('2018-12-31xx05:00:30.001000'))
# ValueError: Invalid isoformat string: '2018-12-31xx05:00:30.001000'
# Missing time components default to zero:
print(datetime.datetime.fromisoformat('2018-12-31T05'))
# 2018-12-31 05:00:00
print(datetime.datetime.fromisoformat('2018-12-31'))
# 2018-12-31 00:00:00
# print(datetime.datetime.fromisoformat('2018-12-31T5:00'))
# ValueError: Invalid isoformat string: '2018-12-31T5:00'
# --- date/time classes reject full datetime strings ------------------
s = '2018-12-31T05:00:30.001000'
# print(datetime.date.fromisoformat(s))
# ValueError: Invalid isoformat string: '2018-12-31T05:00:30.001000'
# print(datetime.time.fromisoformat(s))
# ValueError: Invalid isoformat string: '2018-12-31T05:00:30.001000'
# Parse as datetime first, then project onto date/time:
d = datetime.datetime.fromisoformat(s).date()
print(d)
# 2018-12-31
print(type(d))
# <class 'datetime.date'>
t = datetime.datetime.fromisoformat(s).time()
print(t)
# 05:00:30.001000
print(type(t))
# <class 'datetime.time'>
# --- extended -> basic ISO format ------------------------------------
s = '2018-12-31T05:00:30'
s_basic = s.translate(str.maketrans('', '', '-:'))
print(s_basic)
# 20181231T050030
# Drop fractional seconds before stripping separators:
s = '2018-12-31T05:00:30.001000'
s_basic = s.partition('.')[0].translate(str.maketrans('', '', '-:'))
print(s_basic)
# 20181231T050030
# ...and back from basic to extended via strptime/isoformat:
s_ex = datetime.datetime.strptime(s_basic, '%Y%m%dT%H%M%S').isoformat()
print(s_ex)
# 2018-12-31T05:00:30
| 20.809524 | 71 | 0.707551 |
d211994f319cdf819a2e0d0b5d58c4101deb9cd5 | 418 | py | Python | app/main/models/hello_db.py | ZenithClown/flask-docker-template | cf5949fb6f448dd73cc287842b5deb1d5f7bd321 | [
"MIT"
] | null | null | null | app/main/models/hello_db.py | ZenithClown/flask-docker-template | cf5949fb6f448dd73cc287842b5deb1d5f7bd321 | [
"MIT"
] | 41 | 2021-09-01T17:31:47.000Z | 2022-03-28T12:13:12.000Z | app/main/models/hello_db.py | ZenithClown/flask-docker-template | cf5949fb6f448dd73cc287842b5deb1d5f7bd321 | [
"MIT"
] | 1 | 2021-12-22T07:25:08.000Z | 2021-12-22T07:25:08.000Z | # -*- encoding: utf-8 -*-
from .. import db
from ._base_model import ModelSchema
| 24.588235 | 93 | 0.665072 |
d2126b69bc34d19eeaa2b4aa3508f4499874a0f2 | 3,069 | py | Python | affineTransform.py | LuBru90/Facemorphing | ddeb8b0d368d62c66a032290cd756f0e3f3d6a81 | [
"Apache-2.0"
] | null | null | null | affineTransform.py | LuBru90/Facemorphing | ddeb8b0d368d62c66a032290cd756f0e3f3d6a81 | [
"Apache-2.0"
] | null | null | null | affineTransform.py | LuBru90/Facemorphing | ddeb8b0d368d62c66a032290cd756f0e3f3d6a81 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
# Script entry point; main() is expected to be defined elsewhere in this
# module (not visible in this excerpt).
if __name__ == "__main__":
    main()
| 38.848101 | 121 | 0.491691 |
d214f97afaf19189be80677ea4aa9be0be0607e7 | 4,219 | py | Python | demo_count.py | addtt/multi-object-datasets | d3b03ec56a9e971fed4d3519e8bfee5ed02ed9cb | [
"MIT"
] | 4 | 2020-01-06T08:50:04.000Z | 2021-12-06T08:41:13.000Z | demo_count.py | addtt/multi-object-datasets | d3b03ec56a9e971fed4d3519e8bfee5ed02ed9cb | [
"MIT"
] | 2 | 2021-06-08T20:48:25.000Z | 2021-09-08T01:35:58.000Z | demo_count.py | addtt/multi-object-datasets | d3b03ec56a9e971fed4d3519e8bfee5ed02ed9cb | [
"MIT"
] | 2 | 2020-11-19T14:20:29.000Z | 2021-01-12T12:00:44.000Z | import argparse
import os
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim.adamax import Adamax
from multiobject.pytorch import MultiObjectDataLoader, MultiObjectDataset
# Training hyperparameters for the counting demo.
epochs = 100  # passes over the training set
batch_size = 64  # minibatch size for the training loader
lr = 3e-4  # Adamax learning rate (see main())
# Path of the multi-dsprites dataset archive.
# NOTE(review): not referenced in the visible code -- main() builds its
# path from args.dataset_path instead; confirm whether this is still used.
dataset_filename = os.path.join(
    'dsprites',
    'multi_dsprites_color_012.npz')
# Alternative dataset: multi binary-MNIST (left commented out).
# dataset_filename = os.path.join(
#     'binary_mnist',
#     'multi_binary_mnist_012.npz')
def main():
    """Train and evaluate the object-counting model.

    Parses command-line arguments, builds the train/test datasets and
    loaders, then alternates one training epoch with one full pass over
    the test set, logging loss and accuracy along the way.
    """
    args = parse_args()
    dataset_path = os.path.join('generated', args.dataset_path)

    # Datasets and their loaders.
    print("loading dataset...")
    train_data = MultiObjectDataset(dataset_path, train=True)
    test_data = MultiObjectDataset(dataset_path, train=False)
    train_loader = MultiObjectDataLoader(
        train_data, batch_size=batch_size, shuffle=True, drop_last=True)
    test_loader = MultiObjectDataLoader(test_data, batch_size=100)

    # Model and optimizer on the available device.
    print("initializing model...")
    in_channels = train_data.x.shape[1]
    num_classes = 3  # hardcoded for dataset with 0 to 2 objects
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Model(in_channels, num_classes).to(device)
    optimizer = Adamax(model.parameters(), lr=lr)

    print("training starts")
    global_step = 0
    model.train()
    for epoch in range(1, epochs + 1):
        # One pass over the training set.
        for batch, targets in train_loader:
            loss, acc = forward(model, batch, targets, device)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            global_step += 1
            if global_step % 100 == 0:
                print("[{}] loss: {:.2g} acc: {:.2g}".format(
                    global_step, loss.item(), acc))

        # Full evaluation pass; metrics are batch-size-weighted averages.
        with torch.no_grad():
            model.eval()
            loss = acc = 0.
            for batch, targets in test_loader:
                batch_loss, batch_acc = forward(model, batch, targets, device)
                weight = len(batch) / len(test_data)
                loss += batch_loss.item() * weight
                acc += batch_acc * weight
            model.train()
            print("TEST [epoch {}] loss: {:.2g} acc: {:.2g}".format(
                epoch, loss, acc))
if __name__ == '__main__':
main()
| 27.756579 | 73 | 0.569803 |
d21501d0dc912be2f83952df41a003d90a5d9684 | 2,015 | py | Python | run_main_script.py | korombus/blender_battleVR_py | d0d0ccfabfa644fc97105e5cc99e86e37167cb55 | [
"MIT"
] | null | null | null | run_main_script.py | korombus/blender_battleVR_py | d0d0ccfabfa644fc97105e5cc99e86e37167cb55 | [
"MIT"
] | null | null | null | run_main_script.py | korombus/blender_battleVR_py | d0d0ccfabfa644fc97105e5cc99e86e37167cb55 | [
"MIT"
] | null | null | null | import bpy
import random
import math
## #############################################################
# Paths to the helper scripts that build each part of the scene.
FILE_ROOT_PATH = 'D:/blender_battleVR_py/'
setrendr_file_name = FILE_ROOT_PATH + "setting_render.py"
magicobj_file_name = FILE_ROOT_PATH + "magic_model.py"
fieldins_file_name = FILE_ROOT_PATH + "field_model.py"
wizardob_file_name = FILE_ROOT_PATH + "wizard_model.py"
witchcft_file_name = FILE_ROOT_PATH + "witchcraft_model.py"
camerast_file_name = FILE_ROOT_PATH + "camera_setting.py"
# SE (sound-effect) assets -- the sound tuples are commented out below.
SE_ROOT_PATH = FILE_ROOT_PATH + 'se/'
#sound_begin = (SE_ROOT_PATH + "_begin.wav", SE_ROOT_PATH + "_begin.wav")
#sound_bomb = (SE_ROOT_PATH + "_bomb.wav", SE_ROOT_PATH + "nc178345_bomb.wav")
# Magic-effect texture images handed to the witchcraft model script.
IMG_ROOT_PATH = FILE_ROOT_PATH + 'img/'
witchcraft_img_name = (
    IMG_ROOT_PATH + "magic_0.png",
    IMG_ROOT_PATH + "magic_1.png",
    IMG_ROOT_PATH + "magic_2.png",
    IMG_ROOT_PATH + "magic_3.png",
    IMG_ROOT_PATH + "magic_4.png"
)
# Animation length in frames (applied to the scene below).
FRAME_END = 500
##########################################################################
# Reset the scene: select every object...
bpy.ops.object.select_all(action='SELECT')
# ...and delete it. NOTE(review): the positional True is presumably the
# use_global flag -- confirm against the bpy.ops.object.delete API.
bpy.ops.object.delete(True)
# Recreate the sequence editor from scratch.
if bpy.context.scene.sequence_editor:
    bpy.context.scene.sequence_editor_clear()
bpy.context.scene.sequence_editor_create()
# Apply the configured animation length.
bpy.data.scenes["Scene"].frame_end = FRAME_END
# Run the helper scripts in order; this one has the literal token
# "FILE_ROOT_PATH" substituted with the actual root path before exec.
exec(compile(open(setrendr_file_name).read().replace("FILE_ROOT_PATH", FILE_ROOT_PATH), setrendr_file_name, 'exec'))
# Camera setup.
exec(compile(open(camerast_file_name).read(), camerast_file_name, 'exec'))
# Field model.
exec(compile(open(fieldins_file_name).read(), fieldins_file_name, 'exec'))
# Wizard model.
exec(compile(open(wizardob_file_name).read(), wizardob_file_name, 'exec'))
# Witchcraft model; "WITCHECRAFT_IMAGES" is replaced with the texture tuple.
exec(compile(open(witchcft_file_name).read().replace("WITCHECRAFT_IMAGES", str(witchcraft_img_name)), witchcft_file_name, 'exec'))
exec(compile(open(magicobj_file_name).read(), magicobj_file_name, 'exec')) | 31.984127 | 130 | 0.718114 |
d215f5660d06095bfa19474e13bb492e71765463 | 2,014 | py | Python | apps/genres/tests/__init__.py | GiannisClipper/payments | 94e08144597b3f4cd0de8485edf3f5535aeb9da6 | [
"MIT"
] | null | null | null | apps/genres/tests/__init__.py | GiannisClipper/payments | 94e08144597b3f4cd0de8485edf3f5535aeb9da6 | [
"MIT"
] | null | null | null | apps/genres/tests/__init__.py | GiannisClipper/payments | 94e08144597b3f4cd0de8485edf3f5535aeb9da6 | [
"MIT"
] | null | null | null | from django.test import TestCase
import copy
from django.contrib.auth import get_user_model
from funds.models import Fund
from genres.models import Genre
from users.tests import UserCreateMethods
from funds.tests import FundCreateMethods
from users.tests import USER_SAMPLES, ADMIN_SAMPLES
from funds.tests import FUND_SAMPLES
# Sample genre fixtures keyed by a two-digit int: the first digit is the
# owning user's id, the second an ordinal within that user. Each entry
# references the owning user, the fund it belongs to (by its sample key
# in FUND_SAMPLES), and the genre's own fields (code/name/is_income).
GENRE_SAMPLES = {
    # First key digit is equal to user id
    11: {'user': {'id': 1}, 'fund': {'key': 11}, 'code': '1', 'name': 'INCOME',
        'is_income': True},  # noqa: E127
    12: {'user': {'id': 1}, 'fund': {'key': 11}, 'code': '2', 'name': 'EXPENSES',
        'is_income': False},  # noqa: E127
    21: {'user': {'id': 2}, 'fund': {'key': 21}, 'code': 'ES', 'name': 'ESODA',
        'is_income': True},  # noqa: E127
    22: {'user': {'id': 2}, 'fund': {'key': 21}, 'code': 'EX', 'name': 'EXODA',
        'is_income': False},  # noqa: E127
}
| 31.968254 | 86 | 0.602781 |
d2160cde3b51571cda15a85e9fdd3c56dfb2cae0 | 4,881 | py | Python | rltorch/papers/DQN/hyperparams.py | Jjschwartz/rltorch | eeb2ad955f018d768db98c4a2be5da96a75579f6 | [
"MIT"
] | null | null | null | rltorch/papers/DQN/hyperparams.py | Jjschwartz/rltorch | eeb2ad955f018d768db98c4a2be5da96a75579f6 | [
"MIT"
] | null | null | null | rltorch/papers/DQN/hyperparams.py | Jjschwartz/rltorch | eeb2ad955f018d768db98c4a2be5da96a75579f6 | [
"MIT"
] | null | null | null | """Hyperparameters from paper """
import numpy as np
import torch.optim as optim
from .model import DQN, DuelingDQN
| 31.901961 | 77 | 0.591477 |
d2162729fc2afb100ad7e2d7244982b56598a414 | 822 | py | Python | scripts/problem0002.py | Joel301/Project_Euler | 2280dc19b8e0a2c956cf0d6db6c7d24eedd5e943 | [
"MIT"
] | null | null | null | scripts/problem0002.py | Joel301/Project_Euler | 2280dc19b8e0a2c956cf0d6db6c7d24eedd5e943 | [
"MIT"
] | null | null | null | scripts/problem0002.py | Joel301/Project_Euler | 2280dc19b8e0a2c956cf0d6db6c7d24eedd5e943 | [
"MIT"
] | null | null | null | #! python3
# -*- coding: utf-8 -*-
"""
Euler description from https://projecteuler.net/
Problem 0002
Each new term in the Fibonacci sequence is generated by adding the previous two
terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed
four million[4000000], find the sum of the even-valued terms.
"""
# Fibonacci list generator
# main function: same approach as problem0001
if __name__ == "__main__":
print(compute(4000000))
| 22.833333 | 79 | 0.644769 |
d216db91805a0649ebde91802222cf781d19168b | 1,090 | py | Python | pandas/main.py | monishshah18/python-cp-cheatsheet | a5514b08816959de1198156f7764c54a7a585f20 | [
"Apache-2.0"
] | 140 | 2020-10-21T13:23:52.000Z | 2022-03-31T15:09:45.000Z | pandas/main.py | stacykutyepov/python-cp-cheatsheet | a00a57e1b36433648d1cace331e15ff276cef189 | [
"Apache-2.0"
] | 1 | 2021-07-22T14:01:25.000Z | 2021-07-22T14:01:25.000Z | pandas/main.py | stacykutyepov/python-cp-cheatsheet | a00a57e1b36433648d1cace331e15ff276cef189 | [
"Apache-2.0"
] | 33 | 2020-10-21T14:17:02.000Z | 2022-03-25T11:25:03.000Z | """
Summarize a column total cases column and total deaths column
Country by country data in columns, sum up and match global totals
"""
import csv
import pandas
# Show every row and column when the DataFrame is printed.
pandas.set_option("display.max_rows", None, "display.max_columns", None)
# Columns read from the export; "# 9/27/2020" looks like a row-number/rank
# column -- presumably > 0 only for real country rows (aggregates are
# skipped by the idx > 0 checks below). TODO: confirm against the CSV.
col_list = ["Total Cases", "Country/ Other", "Total Deaths", "# 9/27/2020"]
# thousands=',' parses comma-grouped numbers (e.g. "1,000,000") as numeric.
df = pandas.read_csv("covidmilliondead.csv", usecols=col_list, thousands=',')
totalCases, totalDeaths = 0,0
# First pass: accumulate the global totals over country rows.
for idx, cases,deaths in zip(df["# 9/27/2020"], df["Total Cases"], df["Total Deaths"]):
    if idx > 0:
        totalCases += cases
        # "Total Deaths" may be missing/non-positive; only count real values.
        if deaths > 0:
            totalDeaths += deaths
# Second pass: print each country's share of the global totals.
for idx, country, cases, deaths in zip(df["# 9/27/2020"], df["Country/ Other"], df["Total Cases"], df["Total Deaths"]):
    if idx > 0:
        print("\n",country)
        print("Cases : ", cases, "/", totalCases, " %", "{:.5%}".format(cases/totalCases))
        if deaths > 0:
            print("Deaths : ", int(deaths), "/", totalDeaths, " %", "{:.5%}".format(deaths/totalDeaths))
        print("")
# Grand totals, printed last so they can be matched against the source.
print("Total Cases")
print(totalCases)
print("Total Deaths")
print(totalDeaths) | 34.0625 | 119 | 0.633945 |
d2179a39d18a821a8ac003b90306797cd588fe76 | 908 | py | Python | conradomateu/day08/day08.py | CloudCoders/AdventOfCode2017 | 5a52d1e89076eccb55686e4af5848de289309813 | [
"MIT"
] | 8 | 2017-12-11T18:22:52.000Z | 2017-12-13T00:50:24.000Z | conradomateu/day08/day08.py | CloudCoders/AdventOfCode2017 | 5a52d1e89076eccb55686e4af5848de289309813 | [
"MIT"
] | 8 | 2017-12-01T14:31:29.000Z | 2017-12-07T21:43:43.000Z | conradomateu/day08/day08.py | CloudCoders/AdventOfCode2017 | 5a52d1e89076eccb55686e4af5848de289309813 | [
"MIT"
] | null | null | null | import operator
# Instruction/comparison token -> operator function lookup used when
# interpreting the puzzle input: "inc"/"dec" update a value, the rest
# are comparison predicates.
ops = {
    "inc": operator.add,
    "dec": operator.sub,
    ">": operator.gt,
    "<": operator.lt,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    "<=": operator.le}
# Presumably tracks maxima observed during evaluation; filled in by code
# not visible in this excerpt.
maxs = []
# Presumably the register map (name -> value).
# NOTE(review): this shadows the builtin `dict`; renaming it would require
# updating the rest of the file (not visible here), so it is left as-is.
dict = {}
| 18.916667 | 71 | 0.544053 |
d21892bc6e13fbca51eb7154188132cae4f0e838 | 667 | py | Python | app/db/events.py | ilya-goldin/kanban-board-app | 3c7026aedb0e21eaccc26a2ac4a37f0b6a91a122 | [
"MIT"
] | null | null | null | app/db/events.py | ilya-goldin/kanban-board-app | 3c7026aedb0e21eaccc26a2ac4a37f0b6a91a122 | [
"MIT"
] | null | null | null | app/db/events.py | ilya-goldin/kanban-board-app | 3c7026aedb0e21eaccc26a2ac4a37f0b6a91a122 | [
"MIT"
] | null | null | null | import asyncpg
from fastapi import FastAPI
from loguru import logger
from app.core.settings.app import AppSettings
| 24.703704 | 69 | 0.731634 |
d21994c5a36ba9f1f16825926274957f83707bde | 912 | py | Python | Problem009/Python/solution_1.py | drocha87/ProjectEuler | c18407448aa4f05484191a0df1380e34f2b8c5d7 | [
"MIT"
] | 167 | 2015-08-12T19:32:03.000Z | 2022-03-25T12:26:43.000Z | Problem009/Python/solution_1.py | drocha87/ProjectEuler | c18407448aa4f05484191a0df1380e34f2b8c5d7 | [
"MIT"
] | 153 | 2016-02-16T02:05:31.000Z | 2020-11-06T15:35:51.000Z | Problem009/Python/solution_1.py | drocha87/ProjectEuler | c18407448aa4f05484191a0df1380e34f2b8c5d7 | [
"MIT"
] | 84 | 2015-08-12T20:54:04.000Z | 2022-02-27T05:14:53.000Z | #!/usr/bin/env python
# coding=utf-8
# Python Script
#
# Copyleft Manoel Vilela
#
#
from __future__ import print_function
"""
Special Pythagorean triplet
Problem 9
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
print(problem9(1000))
| 19.826087 | 78 | 0.582237 |
d2201ef9718699e7cd1fdb19d37ed6f30c51724b | 1,248 | py | Python | contrib/automation_tests/orbit_load_presets.py | vwbaker/orbit | 361cc416d1b3ecbc07318275c1bdbc1bb1bc9651 | [
"BSD-2-Clause"
] | 2 | 2020-07-31T08:18:58.000Z | 2021-12-26T06:43:07.000Z | contrib/automation_tests/orbit_load_presets.py | jayant99acharya/orbit | f713721e33448a6b0dc8ea4c5aa587855337e32c | [
"BSD-2-Clause"
] | 3 | 2022-02-15T02:46:06.000Z | 2022-02-28T01:28:39.000Z | contrib/automation_tests/orbit_load_presets.py | jayant99acharya/orbit | f713721e33448a6b0dc8ea4c5aa587855337e32c | [
"BSD-2-Clause"
] | 1 | 2021-03-10T15:21:19.000Z | 2021-03-10T15:21:19.000Z | """
Copyright (c) 2020 The Orbit Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""
from absl import app
from core.orbit_e2e import E2ETestSuite
from test_cases.connection_window import FilterAndSelectFirstProcess, ConnectToStadiaInstance
from test_cases.symbols_tab import LoadAndVerifyHelloGgpPreset
"""Apply two presets in Orbit using pywinauto.
Before this script is run there needs to be a gamelet reserved and
"hello_ggp_standalone" has to be started. Two presets named
draw_frame_in_hello_ggp_1_52.opr and ggp_issue_frame_token_in_hello_ggp_1_52
(hooking the functions DrawFrame and GgpIssueFrameToken) need to exist in the
preset folder.
The script requires absl and pywinauto. Since pywinauto requires the bitness of
the python installation to match the bitness of the program under test it needs
to by run from 64 bit python.
"""
if __name__ == '__main__':
app.run(main)
| 32 | 93 | 0.786058 |
d22021e322a81ec24f4d2957e1994d21c7ec3963 | 52 | py | Python | interrogatio/shortcuts/__init__.py | ffaraone/interrogatio | 8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1 | [
"BSD-3-Clause"
] | 5 | 2019-02-19T13:10:39.000Z | 2022-03-04T19:11:04.000Z | interrogatio/shortcuts/__init__.py | ffaraone/interrogatio | 8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1 | [
"BSD-3-Clause"
] | 11 | 2020-03-24T16:58:41.000Z | 2021-12-14T10:19:17.000Z | interrogatio/shortcuts/__init__.py | ffaraone/interrogatio | 8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1 | [
"BSD-3-Clause"
] | 2 | 2019-05-31T08:36:26.000Z | 2020-12-18T17:58:50.000Z | from interrogatio.shortcuts.dialogs import * # noqa | 52 | 52 | 0.807692 |
d220977b89635aa8f8397e7f63e18931cf662876 | 609 | py | Python | skit_pipelines/components/extract_tgz.py | skit-ai/skit-pipelines | d692582107aee81b1bb4aebcf169f7260ac956b5 | [
"MIT"
] | null | null | null | skit_pipelines/components/extract_tgz.py | skit-ai/skit-pipelines | d692582107aee81b1bb4aebcf169f7260ac956b5 | [
"MIT"
] | 4 | 2022-03-22T14:17:46.000Z | 2022-03-24T16:22:23.000Z | skit_pipelines/components/extract_tgz.py | skit-ai/skit-pipelines | d692582107aee81b1bb4aebcf169f7260ac956b5 | [
"MIT"
] | null | null | null | from typing import Union
import kfp
from kfp.components import InputPath, OutputPath
from skit_pipelines import constants as pipeline_constants
extract_tgz_op = kfp.components.create_component_from_func(
extract_tgz_archive, base_image=pipeline_constants.BASE_IMAGE
)
| 22.555556 | 65 | 0.766831 |
d220ea28079528b416680ff1ccebd74a80b37141 | 4,438 | py | Python | python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_input_defaults.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 1 | 2021-04-27T19:49:59.000Z | 2021-04-27T19:49:59.000Z | python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_input_defaults.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 7 | 2022-03-16T06:55:04.000Z | 2022-03-18T07:03:25.000Z | python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_input_defaults.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | null | null | null | import pytest
from dagster import (
DagsterInvalidDefinitionError,
InputDefinition,
Nothing,
Optional,
composite_solid,
execute_pipeline,
execute_solid,
lambda_solid,
pipeline,
)
# we can't catch bad default_values except for scalars until runtime since the type_check function depends on
# a context that has access to resources etc.
| 25.36 | 109 | 0.677332 |
d2213ea96c7a47974d92d29c00540c2195a53bed | 69 | py | Python | vivid/__init__.py | blacktanktop/vivid | e85837bcd86575f8a275517250dd026aac3e451f | [
"BSD-2-Clause-FreeBSD"
] | 39 | 2020-05-13T18:13:25.000Z | 2022-03-02T10:46:53.000Z | vivid/__init__.py | blacktanktop/vivid | e85837bcd86575f8a275517250dd026aac3e451f | [
"BSD-2-Clause-FreeBSD"
] | 29 | 2020-05-13T18:04:09.000Z | 2022-02-27T04:43:18.000Z | vivid/__init__.py | blacktanktop/vivid | e85837bcd86575f8a275517250dd026aac3e451f | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2020-05-13T19:17:01.000Z | 2020-10-28T21:29:42.000Z | from .core import BaseBlock
from .runner import Runner, create_runner | 34.5 | 41 | 0.84058 |
d2214310a3d3e2da5645867f809ad278174b1b1c | 473 | py | Python | rsa.py | overrkill/security | cd473013652903d6b21fa83f2c57a07f289078e6 | [
"MIT"
] | 1 | 2020-05-08T07:32:16.000Z | 2020-05-08T07:32:16.000Z | rsa.py | overrkill/security | cd473013652903d6b21fa83f2c57a07f289078e6 | [
"MIT"
] | null | null | null | rsa.py | overrkill/security | cd473013652903d6b21fa83f2c57a07f289078e6 | [
"MIT"
] | null | null | null | import math as m
# Textbook RSA key generation and encrypt/decrypt round trip.
# Educational only -- not secure; assumes p and q are prime and num < n.
p = int(input("enter a prime integer p "))
q = int(input("enter a prime integer q "))
num = int(input("enter a number to encrypt "))
n = p * q  # public modulus
z = (p - 1) * (q - 1)  # Euler's totient of n (valid for prime p, q)
# Smallest public exponent e coprime with the totient.
for e in range(2, z):
    if m.gcd(e, z) == 1:
        break
# Private exponent d = e^-1 (mod z), found as the exact quotient of the
# first multiple of e of the form 1 + i*z. The original scanned only
# i in 1..9 (d could stay undefined) and used int(x / e), whose float
# division loses precision for large x; iterate until found and use
# exact integer division instead. Since gcd(e, z) == 1, a solution is
# guaranteed to exist.
i = 1
while True:
    x = 1 + i * z
    if x % e == 0:
        d = x // e
        break
    i += 1
# Three-argument pow performs modular exponentiation without building the
# huge intermediate num**e / ctt**d values.
ctt = pow(num, e, n)  # ciphertext
ptt = pow(ctt, d, n)  # decrypted plaintext (== num when num < n)
print("PUBLIC-KEY({},{}) PRIVATE-KEY({},{})".format(n, e, n, d))
print("cipher \n{}".format(ctt))
print("plaintext \n{}".format(ptt))
| 17.518519 | 61 | 0.610994 |
d221a299320cc8e2a6ab063e29d7c98428b76ee2 | 831 | py | Python | python_2_script/komand_python_2_script/actions/run/action.py | GreyNoise-Intelligence/insightconnect-plugins | 2ba3121d42fd96e1267bb095bc76b962678c1f56 | [
"MIT"
] | null | null | null | python_2_script/komand_python_2_script/actions/run/action.py | GreyNoise-Intelligence/insightconnect-plugins | 2ba3121d42fd96e1267bb095bc76b962678c1f56 | [
"MIT"
] | null | null | null | python_2_script/komand_python_2_script/actions/run/action.py | GreyNoise-Intelligence/insightconnect-plugins | 2ba3121d42fd96e1267bb095bc76b962678c1f56 | [
"MIT"
] | null | null | null | import komand
from .schema import RunInput, RunOutput
# Custom imports below
| 22.459459 | 87 | 0.545126 |
d221e2f598eaeab4c5c60286a3134659beef83e8 | 636 | py | Python | config/configSample.py | snipeso/sample_psychopy | 332cd34cf2c584f9ba01302050964649dd2e5367 | [
"Linux-OpenIB"
] | null | null | null | config/configSample.py | snipeso/sample_psychopy | 332cd34cf2c584f9ba01302050964649dd2e5367 | [
"Linux-OpenIB"
] | 3 | 2021-06-02T00:56:48.000Z | 2021-09-08T01:35:53.000Z | config/configSample.py | snipeso/sample_psychopy | 332cd34cf2c584f9ba01302050964649dd2e5367 | [
"Linux-OpenIB"
] | null | null | null | from config.updateConfig import UpdateConfig
# Task-specific configuration fragment merged into the shared CONF below.
sampleCONF = {
    "task": {
        "name": "sample",
    },
    "instructions": {
        "text": "Give instructions",
        "startPrompt": "Press any key to continue. Press q to quit.",
        "alarm": "horn.wav",
        "questionnaireReminder": "answerQuestionnaire.wav"
    },
    "stimuli": {
        # Background colour per run version (main/demo/debug).
        "backgroundColor": {"versionMain": "black", "versionDemo": "blue", "versionDebug": "gray"},
    },
}
# Trigger codes for this task (presumably event markers -- confirm).
sampleTriggers = {
    "example": 10
}
# Merge the fragments into the global configuration.
# NOTE(review): "updateCofig" is a typo for "updateConfig"; left unchanged
# in case other (unseen) code references this module-level name.
updateCofig = UpdateConfig()
updateCofig.addContent(sampleCONF)
updateCofig.addTriggers(sampleTriggers)
CONF = updateCofig.getConfig()
| 22.714286 | 99 | 0.630503 |
d2222f7d6b30cad257fa79d950b134ab33ead31c | 2,994 | py | Python | oneflow/python/test/onnx/util.py | basicv8vc/oneflow | 2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3 | [
"Apache-2.0"
] | 1 | 2020-10-13T03:03:40.000Z | 2020-10-13T03:03:40.000Z | oneflow/python/test/onnx/util.py | basicv8vc/oneflow | 2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3 | [
"Apache-2.0"
] | null | null | null | oneflow/python/test/onnx/util.py | basicv8vc/oneflow | 2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import onnxruntime as ort
import onnx
from collections import OrderedDict
import tempfile
import os
import shutil
| 33.640449 | 89 | 0.671343 |
d2233790f33ba2cc856d503da044f2647bccf7b5 | 237 | py | Python | pymodule1/Hello1.py | debjava/pymodule1 | 8e5f63660f0b835709896cc50ed1147b386422a2 | [
"MIT"
] | null | null | null | pymodule1/Hello1.py | debjava/pymodule1 | 8e5f63660f0b835709896cc50ed1147b386422a2 | [
"MIT"
] | null | null | null | pymodule1/Hello1.py | debjava/pymodule1 | 8e5f63660f0b835709896cc50ed1147b386422a2 | [
"MIT"
] | null | null | null | '''
Created on Mar 30, 2019
@author: PIKU
'''
# Script entry point. justSayHello() and getHello() are expected to be
# defined earlier in this module (not visible in this excerpt).
if __name__ == '__main__':
    justSayHello()
    x = getHello()
    print(x)
| 11.85 | 27 | 0.523207 |
d22400f5a3ef8a9ceac1f66b5070a0a5f8fc69d4 | 1,090 | py | Python | scripts/Evaluation_Metrics/mean_average.py | Mr-TalhaIlyas/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using | dc1b645ad1a3a00ef650b170a4ac4c26ab0d687a | [
"CC-BY-4.0"
] | null | null | null | scripts/Evaluation_Metrics/mean_average.py | Mr-TalhaIlyas/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using | dc1b645ad1a3a00ef650b170a4ac4c26ab0d687a | [
"CC-BY-4.0"
] | null | null | null | scripts/Evaluation_Metrics/mean_average.py | Mr-TalhaIlyas/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using | dc1b645ad1a3a00ef650b170a4ac4c26ab0d687a | [
"CC-BY-4.0"
] | 1 | 2021-03-29T01:49:49.000Z | 2021-03-29T01:49:49.000Z | from Evaluation_Metrics.Average_Precision import ElevenPointInterpolatedAP
from Evaluation_Metrics.New_Metric import TP_FP | 22.244898 | 74 | 0.547706 |
d22588027964a9ce9520023258895efa1631a6bd | 5,001 | py | Python | src/peter_sslers/lib/errors.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | 35 | 2016-04-21T18:55:31.000Z | 2022-03-30T08:22:43.000Z | src/peter_sslers/lib/errors.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | 8 | 2018-05-23T13:38:49.000Z | 2021-03-19T21:05:44.000Z | src/peter_sslers/lib/errors.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | 2 | 2016-08-18T21:07:11.000Z | 2017-01-11T09:47:40.000Z |
# class TransitionError(_UrlSafeException):
# pass
# class OperationsContextError(_UrlSafeException):
# pass
| 20.084337 | 118 | 0.659868 |
d22743bfb3140f3685546e3e673c4427883f8ae7 | 771 | py | Python | tips-lib/tools/ordo/cc.py | cosmoss-jigu/tips | 386b992894363b535876020d1e60aa95f3d05f7c | [
"Apache-2.0"
] | 13 | 2021-07-16T07:52:15.000Z | 2022-02-13T10:52:46.000Z | tips-lib/tools/ordo/cc.py | cosmoss-jigu/tips | 386b992894363b535876020d1e60aa95f3d05f7c | [
"Apache-2.0"
] | null | null | null | tips-lib/tools/ordo/cc.py | cosmoss-jigu/tips | 386b992894363b535876020d1e60aa95f3d05f7c | [
"Apache-2.0"
] | 5 | 2021-08-09T13:16:23.000Z | 2022-03-09T08:50:19.000Z | #!/usr/bin/env python3
import sys
offset_table = []
if __name__ == '__main__':
main()
| 17.133333 | 69 | 0.460441 |
d2293531f48224d20922b0077cb19bb8cfd631bb | 18,212 | py | Python | cognitive_services/__main__.py | cleveranjos/Rapid-ML-Gateway | 10a14abfce3351791331642c47eddfbf622e76d2 | [
"MIT"
] | 3 | 2020-07-15T19:45:31.000Z | 2020-09-30T16:15:48.000Z | cognitive_services/__main__.py | cleveranjos/Rapid-ML-Gateway | 10a14abfce3351791331642c47eddfbf622e76d2 | [
"MIT"
] | 12 | 2020-07-15T17:00:24.000Z | 2021-01-19T21:02:00.000Z | cognitive_services/__main__.py | cleveranjos/Rapid-ML-Gateway | 10a14abfce3351791331642c47eddfbf622e76d2 | [
"MIT"
] | 2 | 2020-07-15T18:59:02.000Z | 2020-10-07T17:22:52.000Z | #! /usr/bin/env python3
import os
import sys
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PARENT_DIR, 'generated'))
sys.path.append(os.path.join(PARENT_DIR, 'helper_functions'))
import argparse
import json
import logging
import logging.config
import inspect, time
from websocket import create_connection
import socket
import re
from concurrent import futures
from datetime import datetime
import requests, uuid
import configparser
import ServerSideExtension_pb2 as SSE
import grpc
import qlist
import cognitive_services as cs
from google.protobuf.json_format import MessageToDict
from ssedata import ArgType, FunctionType, ReturnType
# import helper .py files
#from scripteval import ScriptEval
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
config = configparser.ConfigParser()
def EvaluateScript(self, request, context):
    """
    Evaluate a script request.

    This plugin supports full script functionality, that is, all function
    types and all data types.

    :param request: the script evaluation request (forwarded unchanged).
    :param context: the gRPC context; its invocation metadata carries the
        serialized script request header.
    :return: whatever self.ScriptEval.EvaluateScript produces.
    """
    logging.debug('In EvaluateScript: Main')
    # Parse the script request header out of the gRPC invocation metadata.
    metadata = dict(context.invocation_metadata())
    # BUGFIX: was logging.debug('Metadata {}', metadata) -- '{}' is not a
    # %-style placeholder, so the metadata was never interpolated into the
    # message. Lazy %s arguments also skip formatting when DEBUG is off.
    logging.debug('Metadata %s', metadata)
    header = SSE.ScriptRequestHeader()
    header.ParseFromString(metadata['qlik-scriptrequestheader-bin'])
    logging.debug('Header is : %s', header)
    logging.debug('Request is : %s', request)
    logging.debug('Context is: %s', context)
    # Delegate the actual evaluation to the script-evaluation helper.
    return self.ScriptEval.EvaluateScript(header, request, context)
def GetCapabilities(self, request, context):
    """
    Build and return this plugin's capabilities message.

    Neither *request* nor *context* is inspected here, but gRPC supplies
    both arguments on every call, so the signature must accept them to
    avoid client-side errors about too many parameters.

    :param request: the request, not used in this method.
    :param context: the context, not used in this method.
    :return: an SSE.Capabilities message (script evaluation enabled,
        plugin identity, plus one entry per user-defined function).
    """
    logging.info('GetCapabilities')
    # Base message: allow script evaluation and identify the plugin.
    capabilities = SSE.Capabilities(
        allowScript=True,
        pluginIdentifier='Qlik Rapid API Gateway - Partner Engineering',
        pluginVersion='v0.1.0')
    # Load the user-defined function declarations from the JSON file.
    with open(self.function_definitions) as json_file:
        declared_functions = json.load(json_file)['Functions']
    # Mirror each declaration into the capabilities message.
    for spec in declared_functions:
        entry = capabilities.functions.add()
        entry.name = spec['Name']
        entry.functionId = spec['Id']
        entry.functionType = spec['Type']
        entry.returnType = spec['ReturnType']
        # Parameters are registered sorted by name.
        for param_name, param_type in sorted(spec['Params'].items()):
            entry.params.add(name=param_name, dataType=param_type)
        logging.info('Adding to capabilities: {}({})'.format(entry.name,
                                                             [p.name for p in entry.params]))
    return capabilities
def ExecuteFunction(self, request_iterator, context):
    """
    Execute a function call by dispatching to the implementing method.

    The function id carried by the gRPC context selects an entry in the
    JSON definition file; that entry's "QRAP_Type" (prefixed with '_')
    names the physical method, which is looked up in self.functions and
    invoked with the original arguments.

    :param request_iterator: an iterable sequence of Row.
    :param context: the context, carrying the function id.
    :return: an iterable sequence of Row.
    """
    func_id = self._get_function_id(context)
    logging.info(self._get_call_info(context))
    # Call corresponding function
    logging.info('ExecuteFunctions (functionId: {})' .format(func_id))
    #self.functions[func_id]))
    # NOTE(review): the file handle opened here is never explicitly closed.
    current_function_def = (json.load(open(self.function_definitions))['Functions'])[func_id]
    logging.debug(current_function_def)
    # Publish the logical (Qlik-facing) function name as a module-level
    # global for use elsewhere in this module (not visible here).
    global q_function_name
    q_function_name = current_function_def["Name"]
    logging.debug('Logical Method Called is: {}' .format(q_function_name))
    # '_' + QRAP_Type is the physical implementation method's name.
    current_qrap_type = current_function_def["QRAP_Type"]
    qrag_function_name ='_' + current_qrap_type
    logging.debug('This is the type of QRAG Method Name: {}' .format(current_qrap_type))
    logging.debug('Physical Method Called is: {}' .format(qrag_function_name))
    # Map the physical method name back to its id in self.functions.
    qrag_id = qlist.find_key(self.functions, qrag_function_name)
    logging.debug('QRAG ID: {}' .format(qrag_id))
    # Also published as a module-level global (same caveat as above).
    global function_name
    function_name = self.functions[qrag_id]
    return getattr(self, self.functions[qrag_id])(request_iterator, context)
def Serve(self, port, pem_dir):
"""
Sets up the gRPC Server with insecure connection on port
:param port: port to listen on.
:param pem_dir: Directory including certificates
:return: None
"""
# Create gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
SSE.add_ConnectorServicer_to_server(self, server)
if pem_dir:
# Secure connection
with open(os.path.join(pem_dir, 'sse_server_key.pem'), 'rb') as f:
private_key = f.read()
with open(os.path.join(pem_dir, 'sse_server_cert.pem'), 'rb') as f:
cert_chain = f.read()
with open(os.path.join(pem_dir, 'root_cert.pem'), 'rb') as f:
root_cert = f.read()
credentials = grpc.ssl_server_credentials([(private_key, cert_chain)], root_cert, True)
server.add_secure_port('[::]:{}'.format(port), credentials)
logging.info('*** Running server in secure mode on port: {} ***'.format(port))
else:
# Insecure connection
server.add_insecure_port('[::]:{}'.format(port))
logging.info('*** Running server in insecure mode on port: {} ***'.format(port))
# Start gRPC server
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Locate qrag.ini relative to this script so the config is found even
    # when the script is launched from a different working directory.
    conf_file = os.path.join(os.path.dirname(
        os.path.abspath(__file__)), 'config', 'qrag.ini')
    #config.read(os.path.join(os.path.dirname(__file__), 'config', 'qrag.ini'))
    logging.debug(conf_file)
    logging.info('Location of qrag.ini {}' .format(conf_file))
    config.read(conf_file)
    # The default port comes from the [base] section of qrag.ini but can be
    # overridden on the command line with --port.
    port = config.get('base', 'port')
    parser.add_argument('--port', nargs='?', default=port)
    parser.add_argument('--pem_dir', nargs='?')
    parser.add_argument('--definition_file', nargs='?', default='functions.json')
    args = parser.parse_args()
    # need to locate the file when script is called from outside it's location dir.
    def_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.definition_file)
    #print(def_file)
    # Build the extension service from the function definitions and serve
    # until interrupted (Serve blocks).
    calc = ExtensionService(def_file)
    logging.info('*** Server Configurations Port: {}, Pem_Dir: {}, def_file {} TimeStamp: {} ***'.format(args.port, args.pem_dir, def_file,datetime.now().isoformat()))
    calc.Serve(args.port, args.pem_dir)
d2296fe0c90ef20ef9cee97c8335c9349c8e3dec | 1,534 | py | Python | spirecomm/spire/card.py | ysjin94/Slaying_the_Spire_AI | 172b2e44b9da81f35cbdfa1ee0fd2a4ecbc66634 | [
"MIT"
] | null | null | null | spirecomm/spire/card.py | ysjin94/Slaying_the_Spire_AI | 172b2e44b9da81f35cbdfa1ee0fd2a4ecbc66634 | [
"MIT"
] | null | null | null | spirecomm/spire/card.py | ysjin94/Slaying_the_Spire_AI | 172b2e44b9da81f35cbdfa1ee0fd2a4ecbc66634 | [
"MIT"
] | 2 | 2020-07-13T18:21:46.000Z | 2020-08-04T21:18:10.000Z | from enum import Enum
| 26.448276 | 156 | 0.582138 |
d229bf33f366491dd645f2b26164b3b0a59e7d44 | 114 | py | Python | src/typeDefs/lineFlowSumm.py | nagasudhirpulla/wrldc_scada_mumbai_dashboard | bc107ef47568781b588316f0c5c0c0d2a08adac8 | [
"MIT"
] | null | null | null | src/typeDefs/lineFlowSumm.py | nagasudhirpulla/wrldc_scada_mumbai_dashboard | bc107ef47568781b588316f0c5c0c0d2a08adac8 | [
"MIT"
] | null | null | null | src/typeDefs/lineFlowSumm.py | nagasudhirpulla/wrldc_scada_mumbai_dashboard | bc107ef47568781b588316f0c5c0c0d2a08adac8 | [
"MIT"
] | null | null | null | from typing import TypedDict
| 14.25 | 31 | 0.719298 |
d22a005c486e400a70fdda2609e473e34cb98a87 | 1,280 | py | Python | eval/user.py | hscspring/chatbot | 9d0bc91db0d8834a1a75cba3edcd3133191e80af | [
"Apache-2.0"
] | null | null | null | eval/user.py | hscspring/chatbot | 9d0bc91db0d8834a1a75cba3edcd3133191e80af | [
"Apache-2.0"
] | null | null | null | eval/user.py | hscspring/chatbot | 9d0bc91db0d8834a1a75cba3edcd3133191e80af | [
"Apache-2.0"
] | null | null | null | import os
import random
import numpy as np
import torch
from chatbot_agent.nlu import BERTNLU
from chatbot_agent.policy.rule import RulePolicy
from chatbot_agent.nlg import TemplateNLG
from chatbot_agent.agent import PipelineAgent
from chatbot_agent.analyzer import Analyzer
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print("root: ", root)
user_nlu = BERTNLU(
model_dir=os.path.join(root, "model/sys_context"),
vocab_dir=os.path.join(root, "data/agent/vocab"),
)
user_dst = None
user_policy = RulePolicy(
goal_model_path=os.path.join(root, "model/goal/new_goal_model.pkl"),
db_path=os.path.join(root, "data/agent/db"),
vocab_path=os.path.join(root, "data/agent/vocab/"),
character="usr",
)
user_nlg = TemplateNLG(
is_user=True,
template_dir=os.path.join(root, "data/agent/template")
)
user_agent = PipelineAgent(user_nlu, user_dst, user_policy, user_nlg, name='user')
analyzer = Analyzer(
db_path=os.path.join(root, "data/agent/db"),
user_agent=user_agent,
dataset='multiwoz'
)
text = "How about rosa's bed and breakfast ? Their postcode is cb22ha."
nlu_res = user_nlu.predict(text)
print(nlu_res) | 26.666667 | 82 | 0.742188 |
d22b9934bc74f943c4699852c43f6be8c7246c45 | 3,027 | py | Python | insights/parsers/tests/test_zipl_conf.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 121 | 2017-05-30T20:23:25.000Z | 2022-03-23T12:52:15.000Z | insights/parsers/tests/test_zipl_conf.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 1,977 | 2017-05-26T14:36:03.000Z | 2022-03-31T10:38:53.000Z | insights/parsers/tests/test_zipl_conf.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 244 | 2017-05-30T20:22:57.000Z | 2022-03-26T10:09:39.000Z | from insights.parsers.zipl_conf import ZiplConf
from insights.tests import context_wrap
from insights.parsers import ParseException
import pytest
ZIPL_CONF = """
[defaultboot]
defaultauto
prompt=1
timeout=5
default=linux
target=/boot
[linux]
image=/boot/vmlinuz-3.10.0-693.el7.s390x
ramdisk=/boot/initramfs-3.10.0-693.el7.s390x.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0 LANG=en_US.UTF-8"
[linux-0-rescue-a27932c8d57248e390cee3798bbd3709]
image=/boot/vmlinuz-0-rescue-a27932c8d57248e390cee3798bbd3709
ramdisk=/boot/initramfs-0-rescue-a27932c8d57248e390cee3798bbd3709.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0"
[other]
image=/boot/vmlinuz
ramdisk=/boot/initramfs.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100
# Configuration for dumping to SCSI disk
# Separate IPL and dump partitions
[dumpscsi]
target=/boot
dumptofs=/dev/sda2
parameters="dump_dir=/mydumps dump_compress=none dump_mode=auto"
# Menu containing two DASD boot configurations
:menu1
1=linux
2=linux-0-rescue-a27932c8d57248e390cee3798bbd3709
default=1
prompt=1
timeout=30
""".strip()
ZIPL_CONF_INVALID = """
prompt=1
timeout=5
default=linux
[linux]
image=/boot/vmlinuz-3.10.0-693.el7.s390x
ramdisk=/boot/initramfs-3.10.0-693.el7.s390x.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0 LANG=en_US.UTF-8"
""".strip()
| 41.465753 | 269 | 0.720846 |
d22d10f837e5ad288e126f1c5e79e0d962cba280 | 6,560 | py | Python | tests/services/http_service.py | the-gw/tomodachi | a1e2efc1abe6f4e2de4a580e58184323660b4299 | [
"MIT"
] | null | null | null | tests/services/http_service.py | the-gw/tomodachi | a1e2efc1abe6f4e2de4a580e58184323660b4299 | [
"MIT"
] | null | null | null | tests/services/http_service.py | the-gw/tomodachi | a1e2efc1abe6f4e2de4a580e58184323660b4299 | [
"MIT"
] | null | null | null | import asyncio
import os
import signal
import tomodachi
from typing import Any, Dict, Tuple, Callable, Union # noqa
from aiohttp import web
from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler
from tomodachi.discovery.dummy_registry import DummyRegistry
| 33.989637 | 131 | 0.611738 |
d22d16cc4c908be77ff9ce274ee5534ee91f29e1 | 13,624 | py | Python | mantrid/loadbalancer.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | [
"BSD-3-Clause"
] | 30 | 2015-01-01T00:32:47.000Z | 2021-09-07T20:25:01.000Z | mantrid/loadbalancer.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | [
"BSD-3-Clause"
] | null | null | null | mantrid/loadbalancer.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | [
"BSD-3-Clause"
] | 9 | 2015-05-12T05:09:12.000Z | 2021-12-29T19:07:01.000Z | import eventlet
import errno
import logging
import traceback
import mimetools
import resource
import json
import os
import sys
import argparse
from eventlet import wsgi
from eventlet.green import socket
from .actions import Unknown, Proxy, Empty, Static, Redirect, NoHosts, Spin
from .config import SimpleConfig
from .management import ManagementApp
from .stats_socket import StatsSocket
from .greenbody import GreenBody
if __name__ == "__main__":
Balancer.main()
| 38.485876 | 153 | 0.545435 |
d22daea1e02414a246423f9065c5355093e77a88 | 18,989 | py | Python | pyhelp/managers.py | FHuchet/pyhelp | 9d658f5c6f6d8aee8e528ca9946a40eac0ff3a68 | [
"MIT"
] | 1 | 2020-07-20T20:32:15.000Z | 2020-07-20T20:32:15.000Z | pyhelp/managers.py | FHuchet/pyhelp | 9d658f5c6f6d8aee8e528ca9946a40eac0ff3a68 | [
"MIT"
] | null | null | null | pyhelp/managers.py | FHuchet/pyhelp | 9d658f5c6f6d8aee8e528ca9946a40eac0ff3a68 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 PyHelp Project Contributors
# https://github.com/jnsebgosselin/pyhelp
#
# This file is part of PyHelp.
# Licensed under the terms of the GNU General Public License.
# ---- Standard Library Imports
import os
import os.path as osp
# ---- Third Party imports
import numpy as np
import geopandas as gpd
import netCDF4
import pandas as pd
# ---- Local Libraries Imports
from pyhelp.preprocessing import write_d10d11_allcells, format_d10d11_inputs
from pyhelp.processing import run_help_allcells
from pyhelp.utils import savedata_to_hdf5
from pyhelp.weather_reader import (
save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP,
read_cweeds_file, join_daily_cweeds_wy2_and_wy3)
FNAME_CONN_TABLES = 'connect_table.npy'
# ---- Connect tables
def _setup_connect_tables(self):
"""Setup the connect tables dictionary."""
if osp.exists(self.path_connect_tables):
self.connect_tables = np.load(self.path_connect_tables).item()
else:
self.connect_tables = {}
def _save_connect_tables(self):
"""Save the connect tables dictionary to a numpy binary file."""
np.save(self.path_connect_tables, self.connect_tables)
# ---- HELP grid
def load_grid(self, path_togrid):
"""
Load the grid that contains the infos required to evaluate regional
groundwater recharge with HELP.
"""
self.grid = load_grid_from_csv(path_togrid)
return self.grid
# ---- Input files creation
def generate_d13_from_cweeds(self, d13fname, fpath_cweed2, fpath_cweed3,
cellnames=None):
"""
Generate the HELP D13 input file for solar radiation from wy2 and
wy3 CWEEDS files at a given location.
"""
d13fpath = osp.join(self.inputdir, d13fname)
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
print('Reading CWEEDS files...', end=' ')
daily_wy2 = read_cweeds_file(fpath_cweed2, format_to_daily=True)
daily_wy3 = read_cweeds_file(fpath_cweed3, format_to_daily=True)
wy23_df = join_daily_cweeds_wy2_and_wy3(daily_wy2, daily_wy3)
indexes = np.where((wy23_df['Years'] >= self.year_range[0]) &
(wy23_df['Years'] <= self.year_range[1]))[0]
print('done')
print('Generating HELP D13 file for solar radiation...', end=' ')
save_solrad_to_HELP(d13fpath,
wy23_df['Years'][indexes],
wy23_df['Irradiance'][indexes],
'CAN_QC_MONTREAL-INTL-A_7025251',
wy23_df['Latitude'])
print('done')
if self.year_range[1] > np.max(wy23_df['Years']):
print("Warning: there is no solar radiation data after year %d."
% np.max(wy23_df['Years']))
if self.year_range[0] < np.min(wy23_df['Years']):
print("Warning: there is no solar radiation data before year %d."
% np.min(wy23_df['Years']))
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
d13_connect_table = {cid: d13fpath for cid in cellnames}
self.connect_tables['D13'] = d13_connect_table
self._save_connect_tables()
print("done")
def generate_d10d11_input_files(self, cellnames=None, sf_edepth=1,
sf_ulai=1):
"""Prepare the D10 and D11 input datafiles for each cell."""
d10d11_inputdir = osp.join(self.inputdir, 'd10d11_input_files')
if not osp.exists(d10d11_inputdir):
os.makedirs(d10d11_inputdir)
# Only keep the cells that are going to be run in HELP because we
# don't need the D10 or D11 input files for those that aren't.
cellnames = self.get_run_cellnames(cellnames)
d10data, d11data = format_d10d11_inputs(self.grid, cellnames,
sf_edepth, sf_ulai)
# Write the D10 and D11 input files.
d10_conn_tbl, d11_conn_tbl = write_d10d11_allcells(
d10d11_inputdir, d10data, d11data)
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
self.connect_tables['D10'] = d10_conn_tbl
self.connect_tables['D11'] = d11_conn_tbl
self._save_connect_tables()
print("done")
def generate_d4d7_from_MDELCC_grid(self, path_netcdf_dir, cellnames=None):
"""
Prepare the D4 and D7 input datafiles for each cell from the
interpolated grid of the MDDELCC.
"""
d4d7_inputdir = osp.join(self.inputdir, 'd4d7_input_files')
if not osp.exists(d4d7_inputdir):
os.makedirs(d4d7_inputdir)
cellnames = self.get_run_cellnames(cellnames)
N = len(cellnames)
# Get the latitudes and longitudes of the resulting cells.
lat_dd, lon_dd = self.get_latlon_for_cellnames(cellnames)
# Generate the connectivity table between the HELP grid and the
# MDDELCC interpolated daily weather grid.
print('Generating the connectivity table for each cell...', end=' ')
meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
d4_conn_tbl = {}
d7_conn_tbl = {}
data = []
for i, cellname in enumerate(cellnames):
lat_idx, lon_idx = meteo_manager.get_idx_from_latlon(
lat_dd[i], lon_dd[i])
d4fname = osp.join(
d4d7_inputdir, '%03d_%03d.D4' % (lat_idx, lon_idx))
d7fname = osp.join(
d4d7_inputdir, '%03d_%03d.D7' % (lat_idx, lon_idx))
d4_conn_tbl[cellnames[i]] = d4fname
d7_conn_tbl[cellnames[i]] = d7fname
data.append([lat_idx, lon_idx, d4fname, d7fname])
print('done')
# Fetch the daily weather data from the netCDF files.
data = np.unique(data, axis=0)
lat_indx = data[:, 0].astype(int)
lon_idx = data[:, 1].astype(int)
years = range(self.year_range[0], self.year_range[1]+1)
tasavg, precip, years = meteo_manager.get_data_from_idx(
lat_indx, lon_idx, years)
# Convert and save the weather data to D4 and D7 HELP input files.
N = len(data)
for i in range(N):
print(("\rGenerating HELP D4 and D7 files for location " +
"%d of %d (%0.1f%%)...") % (i+1, N, (i+1)/N * 100), end=' ')
lat = meteo_manager.lat[lat_indx[i]]
lon = meteo_manager.lon[lon_idx[i]]
d4fname, d7fname = data[i, 2], data[i, 3]
city = 'Meteo Grid at lat/lon %0.1f ; %0.1f' % (lat, lon)
# Fill -999 with 0 in daily precip.
precip_i = precip[:, i]
precip_i[precip_i == -999] = 0
# Fill -999 with linear interpolation in daily air temp.
tasavg_i = tasavg[:, i]
time_ = np.arange(len(tasavg_i))
indx = np.where(tasavg_i != -999)[0]
tasavg_i = np.interp(time_, time_[indx], tasavg_i[indx])
if not osp.exists(d4fname):
save_precip_to_HELP(d4fname, years, precip_i, city)
if not osp.exists(d7fname):
save_airtemp_to_HELP(d7fname, years, tasavg_i, city)
print('done')
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
self.connect_tables['D4'] = d4_conn_tbl
self.connect_tables['D7'] = d7_conn_tbl
self._save_connect_tables()
print('done')
def run_help_for(self, path_outfile=None, cellnames=None, tfsoil=0):
"""
Run help for the cells listed in cellnames and save the result in
an hdf5 file.
"""
# Convert from Celcius to Farenheight
tfsoil = (tfsoil * 1.8) + 32
tempdir = osp.join(self.inputdir, ".temp")
if not osp.exists(tempdir):
os.makedirs(tempdir)
run_cellnames = self.get_run_cellnames(cellnames)
cellparams = {}
for cellname in run_cellnames:
fpath_d4 = self.connect_tables['D4'][cellname]
fpath_d7 = self.connect_tables['D7'][cellname]
fpath_d13 = self.connect_tables['D13'][cellname]
fpath_d10 = self.connect_tables['D10'][cellname]
fpath_d11 = self.connect_tables['D11'][cellname]
fpath_out = osp.abspath(osp.join(tempdir, str(cellname) + '.OUT'))
daily_out = 0
monthly_out = 1
yearly_out = 0
summary_out = 0
unit_system = 2 # IP if 1 else SI
simu_nyear = self.year_range[1] - self.year_range[0] + 1
cellparams[cellname] = (fpath_d4, fpath_d7, fpath_d13, fpath_d11,
fpath_d10, fpath_out, daily_out,
monthly_out, yearly_out, summary_out,
unit_system, simu_nyear, tfsoil)
output = run_help_allcells(cellparams)
if path_outfile:
savedata_to_hdf5(output, path_outfile)
return output
# ---- Utilities
def get_water_cellnames(self, cellnames):
"""
Take a list of cellnames and return only those that are considered
to be in a surface water area.
"""
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
# Only keep the cells for which context is 0.
cellnames = self.grid['cid'][cellnames][self.grid['context'] == 0]
return cellnames.tolist()
def get_run_cellnames(self, cellnames):
"""
Take a list of cellnames and return only those that are in the grid
and for which HELP can be run.
"""
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
# Only keep the cells that are going to be run in HELP because we
# don't need the D4 or D7 input files for those that aren't.
cellnames = self.grid['cid'][cellnames][self.grid['run'] == 1].tolist()
return cellnames
def get_latlon_for_cellnames(self, cells):
"""
Return a numpy array with latitudes and longitudes of the provided
cells cid. Latitude and longitude for cids that are missing from
the grid are set to nan.
"""
lat = np.array(self.grid['lat_dd'].reindex(cells).tolist())
lon = np.array(self.grid['lon_dd'].reindex(cells).tolist())
return lat, lon
class NetCDFMeteoManager(object):
def setup_ncfile_list(self):
"""Read all the available netCDF files in dirpath_netcdf."""
self.ncfilelist = []
for file in os.listdir(self.dirpath_netcdf):
if file.endswith('.nc'):
self.ncfilelist.append(osp.join(self.dirpath_netcdf, file))
def get_idx_from_latlon(self, latitudes, longitudes, unique=False):
"""
Get the i and j indexes of the grid meshes from a list of latitude
and longitude coordinates. If unique is True, only the unique pairs of
i and j indexes will be returned.
"""
try:
lat_idx = [np.argmin(np.abs(self.lat - lat)) for lat in latitudes]
lon_idx = [np.argmin(np.abs(self.lon - lon)) for lon in longitudes]
if unique:
ijdx = np.vstack({(i, j) for i, j in zip(lat_idx, lon_idx)})
lat_idx = ijdx[:, 0].tolist()
lon_idx = ijdx[:, 1].tolist()
except TypeError:
lat_idx = np.argmin(np.abs(self.lat - latitudes))
lon_idx = np.argmin(np.abs(self.lon - longitudes))
return lat_idx, lon_idx
def get_data_from_latlon(self, latitudes, longitudes, years):
"""
Return the daily minimum, maximum and average air temperature and daily
precipitation
"""
lat_idx, lon_idx = self.get_idx_from_latlon(latitudes, longitudes)
return self.get_data_from_idx(lat_idx, lon_idx, years)
def load_grid_from_csv(path_togrid):
"""
Load the csv that contains the infos required to evaluate regional
groundwater recharge with HELP.
"""
print('Reading HELP grid from csv...', end=' ')
grid = pd.read_csv(path_togrid)
print('done')
fname = osp.basename(path_togrid)
req_keys = ['cid', 'lat_dd', 'lon_dd', 'run']
for key in req_keys:
if key not in grid.keys():
raise KeyError("No attribute '%s' found in %s" % (key, fname))
# Make sure that cid is a str.
grid['cid'] = np.array(grid['cid']).astype(str)
# Set 'cid' as the index of the dataframe.
grid.set_index(['cid'], drop=False, inplace=True)
return grid
| 37.015595 | 79 | 0.596609 |
d22e790f560b51447016ed3ce2c5663688b5fd74 | 6,131 | py | Python | tests/unit/test_types.py | OvalMoney/momapper | 9bcf1909a80677cab831132444be27fa4adaa2a5 | [
"MIT"
] | null | null | null | tests/unit/test_types.py | OvalMoney/momapper | 9bcf1909a80677cab831132444be27fa4adaa2a5 | [
"MIT"
] | null | null | null | tests/unit/test_types.py | OvalMoney/momapper | 9bcf1909a80677cab831132444be27fa4adaa2a5 | [
"MIT"
] | null | null | null | from decimal import Decimal
import pytest
from bson import Decimal128
from momapper import MappedClass, Field
from momapper.mongodb.collection import MappedCollection
from momapper.types import (
DecimalType,
ValidationError,
IntType,
FloatType,
StringType,
ByteType,
BoolType,
ListType,
DictType,
)
def test_decimal_type_if_missing(mongo_client):
doc = DocWithDecimalRequired()
assert isinstance(doc.amount, Decimal)
assert isinstance(doc._document["amount"], Decimal128)
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDecimalRequired
)
doc_id = collection.insert_one(doc).inserted_id
fetched_doc = collection.find_one({"_id": doc_id})
assert isinstance(fetched_doc.amount, Decimal)
assert isinstance(fetched_doc._document["amount"], Decimal128)
assert doc.amount == fetched_doc.amount
| 31.280612 | 88 | 0.669222 |
d230b8b07301d92ab203c4ea79e6dcb73031cdf8 | 36 | py | Python | deepleaps/workspace/src/ipc/CustomCommand.py | Longseabear/deep-leaps-pytorch | abcb87f3079c0612bde4a4f94c75d7c05d5aee3a | [
"MIT"
] | 1 | 2021-02-27T18:00:39.000Z | 2021-02-27T18:00:39.000Z | deepleaps/workspace/src/ipc/CustomCommand.py | Longseabear/deep-leaps-pytorch | abcb87f3079c0612bde4a4f94c75d7c05d5aee3a | [
"MIT"
] | null | null | null | deepleaps/workspace/src/ipc/CustomCommand.py | Longseabear/deep-leaps-pytorch | abcb87f3079c0612bde4a4f94c75d7c05d5aee3a | [
"MIT"
] | null | null | null | import deepleaps.ipc.RunningCommand
| 18 | 35 | 0.888889 |
d230ba96d95fc33b542202e8343f1394390c32cd | 26,878 | py | Python | sharpy/solvers/dynamiccoupled.py | ostodieck/sharpy | b85aa1c001a0ec851af4eb259cce7c01dfa68b9e | [
"BSD-3-Clause"
] | 1 | 2020-07-27T05:15:35.000Z | 2020-07-27T05:15:35.000Z | sharpy/solvers/dynamiccoupled.py | briandesilva/sharpy | aed86428ff88fd14d36cabd91cf7e04b5fc9a39a | [
"BSD-3-Clause"
] | null | null | null | sharpy/solvers/dynamiccoupled.py | briandesilva/sharpy | aed86428ff88fd14d36cabd91cf7e04b5fc9a39a | [
"BSD-3-Clause"
] | null | null | null | import ctypes as ct
import time
import copy
import numpy as np
import sharpy.aero.utils.mapping as mapping
import sharpy.utils.cout_utils as cout
import sharpy.utils.solver_interface as solver_interface
import sharpy.utils.controller_interface as controller_interface
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
import sharpy.utils.algebra as algebra
import sharpy.structure.utils.xbeamlib as xbeam
import sharpy.utils.exceptions as exc
| 43.775244 | 230 | 0.613625 |
d232def19f888f5ef15eb9c21425eef07dc01fdd | 4,734 | py | Python | pony/orm/tests/test_generator_db_session.py | ProgHaj/pony | 52720af1728ab2931364be8615e18ad8714a7c9e | [
"Apache-2.0"
] | 2,628 | 2015-01-02T17:55:28.000Z | 2022-03-31T10:36:42.000Z | pony/orm/tests/test_generator_db_session.py | ProgHaj/pony | 52720af1728ab2931364be8615e18ad8714a7c9e | [
"Apache-2.0"
] | 525 | 2015-01-03T20:30:08.000Z | 2022-03-23T12:30:01.000Z | pony/orm/tests/test_generator_db_session.py | ProgHaj/pony | 52720af1728ab2931364be8615e18ad8714a7c9e | [
"Apache-2.0"
] | 256 | 2015-01-02T17:55:31.000Z | 2022-03-20T17:01:37.000Z | from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.core import local
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
def test6(self):
gen = f()
cache = next(gen)
self.assertTrue(cache.is_alive)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 1000)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 2000)
self.assertEqual(local.db_session, None)
try: next(gen)
except StopIteration:
self.assertFalse(cache.is_alive)
else:
self.fail()
if __name__ == '__main__':
unittest.main()
| 25.451613 | 113 | 0.555978 |
d234e5a37645a98c004023879e482d81ecedb1c6 | 725 | py | Python | private_sharing/migrations/0008_featuredproject.py | danamlewis/open-humans | 9b08310cf151f49032b66ddd005bbd47d466cc4e | [
"MIT"
] | 57 | 2016-09-01T21:55:52.000Z | 2022-03-27T22:15:32.000Z | private_sharing/migrations/0008_featuredproject.py | danamlewis/open-humans | 9b08310cf151f49032b66ddd005bbd47d466cc4e | [
"MIT"
] | 464 | 2015-03-23T18:08:28.000Z | 2016-08-25T04:57:36.000Z | private_sharing/migrations/0008_featuredproject.py | danamlewis/open-humans | 9b08310cf151f49032b66ddd005bbd47d466cc4e | [
"MIT"
] | 25 | 2017-01-24T16:23:27.000Z | 2021-11-07T01:51:42.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-05 01:20
from django.db import migrations, models
import django.db.models.deletion
| 30.208333 | 133 | 0.623448 |
d235a418647a421cc3cde687c03b74bacf4239b5 | 5,759 | py | Python | Tests/Validation/Optimization/test_zdt3.py | magnetron/pyleecan | 2a3338f4ab080ad6488b5ab8746c3fea1f36f177 | [
"Apache-2.0"
] | 1 | 2021-02-26T12:28:45.000Z | 2021-02-26T12:28:45.000Z | Tests/Validation/Optimization/test_zdt3.py | magnetron/pyleecan | 2a3338f4ab080ad6488b5ab8746c3fea1f36f177 | [
"Apache-2.0"
] | null | null | null | Tests/Validation/Optimization/test_zdt3.py | magnetron/pyleecan | 2a3338f4ab080ad6488b5ab8746c3fea1f36f177 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Test Pyleecan optimization module using ZitzlerDebThiele's function N. 3
"""
import pytest
from ....definitions import PACKAGE_NAME
from ....Tests.Validation.Machine.SCIM_001 import SCIM_001
from ....Classes.InputCurrent import InputCurrent
from ....Classes.MagFEMM import MagFEMM
from ....Classes.Simu1 import Simu1
from ....Classes.Output import Output
from ....Classes.OptiDesignVar import OptiDesignVar
from ....Classes.OptiObjFunc import OptiObjFunc
from ....Classes.OptiConstraint import OptiConstraint
from ....Classes.OptiProblem import OptiProblem
from ....Classes.ImportMatrixVal import ImportMatrixVal
from ....Classes.ImportGenVectLin import ImportGenVectLin
from ....Classes.OptiGenAlgNsga2Deap import OptiGenAlgNsga2Deap
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import random
| 30.310526 | 94 | 0.581177 |
d2366db96566571009998f46fd017359e1980f42 | 325 | py | Python | comm_lib/import_lib.py | GUTLY/machine_learning_in_action | 7820c948014c615ed10f693f03ea116a0f7d6b96 | [
"Apache-2.0"
] | null | null | null | comm_lib/import_lib.py | GUTLY/machine_learning_in_action | 7820c948014c615ed10f693f03ea116a0f7d6b96 | [
"Apache-2.0"
] | null | null | null | comm_lib/import_lib.py | GUTLY/machine_learning_in_action | 7820c948014c615ed10f693f03ea116a0f7d6b96 | [
"Apache-2.0"
] | null | null | null | """
@Time : 12/4/2020 13:57
@Author : Young lee
@File : import_lib
@Project : machine_learning_in_action
"""
import collections
import math
import os
import random
import sys
import tarfile
import time
import zipfile
import operator
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
| 16.25 | 37 | 0.775385 |
d236f9020f43723fb7080a085f23e82a9664de09 | 590 | py | Python | example/example.py | fmilthaler/HTMLParser | ebe343796e32a25726b6659742196ceaab30bb3d | [
"MIT"
] | null | null | null | example/example.py | fmilthaler/HTMLParser | ebe343796e32a25726b6659742196ceaab30bb3d | [
"MIT"
] | null | null | null | example/example.py | fmilthaler/HTMLParser | ebe343796e32a25726b6659742196ceaab30bb3d | [
"MIT"
] | null | null | null | from htmlparser import HTMLParser
import pandas
# Here we scrap a page from Wikipedia, parse it for tables, and convert the first table found into a `pandas.DataFrame`.
url = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
hp = HTMLParser(url)
# scrapping the webpage
page = hp.scrap_url()
# extracting only tables from the webpage
element = 'table'
params = {'class': 'wikitable sortable'}
elements = hp.get_page_elements(page, element=element, params=params)
# get a pandas.DataFrame from the (first) html table
df = hp.parse_html_table(elements[0])
print(df.columns.values)
| 36.875 | 120 | 0.772881 |
d2374979329fc2d21717d5eca2294d35f3c0c1d9 | 2,099 | py | Python | project_name/common/models.py | brevetech/breve_drf_template | 125e476810641f919296cb878980f91f4c091cf2 | [
"MIT"
] | null | null | null | project_name/common/models.py | brevetech/breve_drf_template | 125e476810641f919296cb878980f91f4c091cf2 | [
"MIT"
] | 17 | 2021-04-05T00:22:13.000Z | 2022-01-11T04:53:47.000Z | project_name/common/models.py | brevetech/breve_drf_template | 125e476810641f919296cb878980f91f4c091cf2 | [
"MIT"
] | 1 | 2022-01-07T05:48:19.000Z | 2022-01-07T05:48:19.000Z | from django.db import models
# https://stackoverflow.com/questions/1737017/django-auto-now-and-auto-now-add/1737078#1737078
from {{project_name}}.common.enums import PersonSexEnum
| 32.292308 | 97 | 0.682706 |
d2387686143e714809862b9c318c59cf934f177d | 4,881 | py | Python | PikaBus/tools/PikaTools.py | alexbodn/PikaBus | 5faf2e48f4d4deecb4428707f94bcf72a81cc3ee | [
"MIT"
] | 7 | 2020-03-21T12:22:18.000Z | 2022-02-10T11:43:51.000Z | PikaBus/tools/PikaTools.py | alexbodn/PikaBus | 5faf2e48f4d4deecb4428707f94bcf72a81cc3ee | [
"MIT"
] | null | null | null | PikaBus/tools/PikaTools.py | alexbodn/PikaBus | 5faf2e48f4d4deecb4428707f94bcf72a81cc3ee | [
"MIT"
] | 1 | 2021-06-21T10:56:56.000Z | 2021-06-21T10:56:56.000Z | from typing import Union, List
import pika
import pika.exceptions
import time
import logging
| 38.132813 | 132 | 0.629379 |
d23a8dd5865bbf7ea08abcad56ee55962f12112f | 16,087 | py | Python | roundup/backends/blobfiles.py | Noschvie/roundup | 996377ed0d12c69a01c7565dc5f47d6fb0ccaf19 | [
"MIT"
] | 1 | 2015-12-17T08:09:28.000Z | 2015-12-17T08:09:28.000Z | roundup/backends/blobfiles.py | Noschvie/roundup | 996377ed0d12c69a01c7565dc5f47d6fb0ccaf19 | [
"MIT"
] | null | null | null | roundup/backends/blobfiles.py | Noschvie/roundup | 996377ed0d12c69a01c7565dc5f47d6fb0ccaf19 | [
"MIT"
] | 1 | 2015-07-10T08:16:24.000Z | 2015-07-10T08:16:24.000Z | #
# Copyright (c) 2001 Bizar Software Pty Ltd (http://www.bizarsoftware.com.au/)
# This module is free software, and you may redistribute it and/or modify
# under the same terms as Python, so long as this copyright message and
# disclaimer are retained in their original form.
#
# IN NO EVENT SHALL BIZAR SOFTWARE PTY LTD BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING
# OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# BIZAR SOFTWARE PTY LTD SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS"
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
"""This module exports file storage for roundup backends.
Files are stored into a directory hierarchy.
"""
__docformat__ = 'restructuredtext'
import os
# vim: set filetype=python ts=4 sw=4 et si
| 39.525799 | 82 | 0.654752 |
d23c5a7f0d13366045cfa8ea9d83ec4de2417ed0 | 1,467 | py | Python | LeetCode/E2 - Add Two Numbers/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | LeetCode/E2 - Add Two Numbers/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | LeetCode/E2 - Add Two Numbers/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | """solution.py"""
# Definition for singly-linked list.
| 23.66129 | 68 | 0.445808 |
d23c85c65422eeb7798338451574df0f59e40725 | 1,984 | py | Python | networking_mlnx/dhcp/mlnx_dhcp.py | stackhpc/networking-mlnx | 6a297fd040ff09e26e477b90f2fb229dc6a691b2 | [
"Apache-2.0"
] | null | null | null | networking_mlnx/dhcp/mlnx_dhcp.py | stackhpc/networking-mlnx | 6a297fd040ff09e26e477b90f2fb229dc6a691b2 | [
"Apache-2.0"
] | null | null | null | networking_mlnx/dhcp/mlnx_dhcp.py | stackhpc/networking-mlnx | 6a297fd040ff09e26e477b90f2fb229dc6a691b2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron.agent.linux import dhcp
| 34.807018 | 78 | 0.681452 |
d23df24d42dc33a797b2ad6f76f674f1c588ed01 | 679 | py | Python | solution/practice/algorithms/warmup/plus-minus/solution.py | benevolentPreta/HackerRank_Py3 | 03c4bd9e2db2d91645b72b62b060d73f5ec7e437 | [
"BSD-2-Clause"
] | null | null | null | solution/practice/algorithms/warmup/plus-minus/solution.py | benevolentPreta/HackerRank_Py3 | 03c4bd9e2db2d91645b72b62b060d73f5ec7e437 | [
"BSD-2-Clause"
] | 1 | 2020-06-06T19:56:54.000Z | 2020-06-06T19:56:54.000Z | solution/practice/algorithms/warmup/plus-minus/solution.py | benevolentPreta/HackerRank_Py3 | 03c4bd9e2db2d91645b72b62b060d73f5ec7e437 | [
"BSD-2-Clause"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the plusMinus function below.
def plusMinus(arr):
    """Print the fraction of positive, negative and zero elements of *arr*,
    one ratio per line, in that order.
    """
    total = len(arr)
    positives = sum(1 for value in arr if value > 0)
    negatives = sum(1 for value in arr if value < 0)
    zeros = total - positives - negatives
    print(positives / total)
    print(negatives / total)
    print(zeros / total)
print(float((zero/size)))
if __name__ == '__main__':
    # HackerRank stdin protocol: first line is the element count.
    n = int(input())  # unused: plusMinus recomputes len(arr) itself
    arr = list(map(int, input().rstrip().split()))
    plusMinus(arr)
| 17.868421 | 51 | 0.564065 |
d23e3eac1aa7a46a82d21a527d06862f245b4e29 | 4,273 | py | Python | youtube_dl/extractor/gorillavid.py | builder07/ytdl | 2c0a5d50af7ecc7302c813d649ee72dcd457a50a | [
"Unlicense"
] | null | null | null | youtube_dl/extractor/gorillavid.py | builder07/ytdl | 2c0a5d50af7ecc7302c813d649ee72dcd457a50a | [
"Unlicense"
] | null | null | null | youtube_dl/extractor/gorillavid.py | builder07/ytdl | 2c0a5d50af7ecc7302c813d649ee72dcd457a50a | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
encode_dict,
int_or_none,
)
| 33.645669 | 110 | 0.528902 |
d23f4d942f6df091ea30d280bbf61284f173aee1 | 7,552 | py | Python | Tests/test_GenBank_unittest.py | cbrueffer/biopython | 1ffb1d92d4735166089e28ac07ee614d5ec80070 | [
"PostgreSQL"
] | null | null | null | Tests/test_GenBank_unittest.py | cbrueffer/biopython | 1ffb1d92d4735166089e28ac07ee614d5ec80070 | [
"PostgreSQL"
] | null | null | null | Tests/test_GenBank_unittest.py | cbrueffer/biopython | 1ffb1d92d4735166089e28ac07ee614d5ec80070 | [
"PostgreSQL"
] | null | null | null | # Copyright 2013 by Kai Blin.
# Revisions copyright 2015 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import unittest
import warnings
from os import path
from Bio import BiopythonParserWarning
from Bio import GenBank
from Bio import SeqIO
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 50.346667 | 121 | 0.659428 |
d24018cb7c01fc32bd606207dd5f57d954a62e7b | 6,618 | py | Python | segtrain/trainer/trainer.py | parthi-bharathi/semantic-image-segmentation | 5dd34db4d74b0fe3d6cc9033a0e55ddf6e73420c | [
"Apache-2.0"
] | 2 | 2020-08-26T00:13:37.000Z | 2022-01-07T07:59:59.000Z | segtrain/trainer/trainer.py | parthi-bharathi/semantic-image-segmentation | 5dd34db4d74b0fe3d6cc9033a0e55ddf6e73420c | [
"Apache-2.0"
] | 1 | 2020-10-20T13:37:29.000Z | 2020-10-27T09:59:32.000Z | segtrain/trainer/trainer.py | parthi-bharathi/semantic-image-segmentation | 5dd34db4d74b0fe3d6cc9033a0e55ddf6e73420c | [
"Apache-2.0"
] | 1 | 2022-03-02T10:57:37.000Z | 2022-03-02T10:57:37.000Z | import os
import tensorflow.keras.backend as K
from dataflow import (
BatchData, RepeatedData, MultiProcessRunnerZMQ)
from tensorflow.keras.callbacks import Callback, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from tensorflow.keras.callbacks import LearningRateScheduler
from .modelcheckpoint import CustomModelCheckpointCallback
import tensorflow as tf
| 39.159763 | 135 | 0.630553 |
d24182845a6b7e4d2904f9bc95447b5c4c1ca7fd | 1,570 | py | Python | turtle/pyramid.py | luscra0/Turtle-Experiments | df9693c871dd176673667c231f7f81250a479348 | [
"MIT"
] | null | null | null | turtle/pyramid.py | luscra0/Turtle-Experiments | df9693c871dd176673667c231f7f81250a479348 | [
"MIT"
] | 6 | 2021-08-30T01:08:10.000Z | 2021-08-30T23:04:55.000Z | turtle/pyramid.py | luscra0/Turtle-Shape-Thingy | df9693c871dd176673667c231f7f81250a479348 | [
"MIT"
] | null | null | null | import turtle
import math
from time import sleep
screen = turtle.Screen()
t1 = turtle.Turtle()
t1.hideturtle()
# Pyramid geometry parameters.
pyramid_base_sides = 4
pyramid_height = 200
pyramid_width = 100
# Per-frame rotation toggles for the two axes.
spin_x = True
spin_y = True
pyramid_pos = [0, 0]
# Evenly spaced base-corner angles (degrees); apex line angles.
pyramid_x_angles = [x for x in range(15, 375, 360//pyramid_base_sides)]
pyramid_y_angles = [80, 260]
# NOTE(review): draw_pyramid is not visible in this chunk; it presumably
# renders the pyramid from the globals above -- confirm in the full file.
draw_pyramid(t1, True)
# Animation loop: redraw, advance every angle by one degree (wrapping at
# 360), refresh the screen, then clear for the next frame.
while True:
    draw_pyramid(t1)
    if spin_x:
        for i in range(len(pyramid_x_angles)):
            pyramid_x_angles[i] += 1
            if pyramid_x_angles[i] >= 360:
                pyramid_x_angles[i] -= 360
    if spin_y:
        for i in range(len(pyramid_y_angles)):
            pyramid_y_angles[i] += 1
            if pyramid_y_angles[i] >= 360:
                pyramid_y_angles[i] -= 360
    screen.update()
    sleep(.01)
    t1.clear()
d2423e50a292004365a346d8a0b8d79733015061 | 5,791 | py | Python | docker_leash/config.py | docker-leash/docker-leash | d98c0a98ddecac2c9775e839d1e64382b811a3cf | [
"MIT"
] | 1 | 2018-01-15T12:29:20.000Z | 2018-01-15T12:29:20.000Z | docker_leash/config.py | docker-leash/docker-leash | d98c0a98ddecac2c9775e839d1e64382b811a3cf | [
"MIT"
] | 92 | 2018-01-12T21:04:42.000Z | 2018-04-08T17:25:26.000Z | docker_leash/config.py | docker-leash/docker-leash | d98c0a98ddecac2c9775e839d1e64382b811a3cf | [
"MIT"
] | 2 | 2018-01-13T16:52:54.000Z | 2020-04-24T22:45:46.000Z | # vim:set ts=4 sw=4 et:
'''
Config
======
'''
import re
from .action_mapper import Action
from .checks_list import Checks
from .exceptions import ConfigurationException
| 30.967914 | 105 | 0.583319 |
d242ed9d3520b1a1062f3207cee3beda75ae982b | 1,039 | py | Python | printapp/migrations/0002_auto_20180217_1917.py | sumanlearning/potpapa2018 | 1557dd5aca645cb55a08e5b92623804e51fa8dfe | [
"Unlicense"
] | null | null | null | printapp/migrations/0002_auto_20180217_1917.py | sumanlearning/potpapa2018 | 1557dd5aca645cb55a08e5b92623804e51fa8dfe | [
"Unlicense"
] | null | null | null | printapp/migrations/0002_auto_20180217_1917.py | sumanlearning/potpapa2018 | 1557dd5aca645cb55a08e5b92623804e51fa8dfe | [
"Unlicense"
] | null | null | null | # Generated by Django 2.0.2 on 2018-02-17 12:17
import datetime
from django.db import migrations, models
| 29.685714 | 107 | 0.599615 |
d243084a9d78e560bb874101db60f382836bb734 | 7,569 | py | Python | waller.py | fredrikwahlberg/harvesters | 205dadeb3b6e25203843e71b95cb99aaf840c712 | [
"MIT"
] | 1 | 2018-02-20T16:34:26.000Z | 2018-02-20T16:34:26.000Z | waller.py | fredrikwahlberg/harvesters | 205dadeb3b6e25203843e71b95cb99aaf840c712 | [
"MIT"
] | null | null | null | waller.py | fredrikwahlberg/harvesters | 205dadeb3b6e25203843e71b95cb99aaf840c712 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Fredrik Wahlberg <fredrik.wahlberg@it.uu.se>
"""
import requests
import json
import os.path
import re
if __name__=='__main__':
    # Manual smoke test for the Waller harvester (class defined earlier in
    # the full file): build it against a local cache directory, presumably
    # fetch data via populate(), persist, and print a summary.
    datapath = os.path.expanduser("~/tmp/Waller")
    db = Waller(datapath=datapath, verbose=1)
    db.populate()
    db.save()
    print(db)
| 35.369159 | 104 | 0.492139 |
d244090a382037591d1f8d9a0c4ab8297cd9b302 | 701 | py | Python | helper_functions_class.py | lucaschatham/lambdata | 125087c521847e4f7659a4c8e34008994f3fb01b | [
"MIT"
] | null | null | null | helper_functions_class.py | lucaschatham/lambdata | 125087c521847e4f7659a4c8e34008994f3fb01b | [
"MIT"
] | null | null | null | helper_functions_class.py | lucaschatham/lambdata | 125087c521847e4f7659a4c8e34008994f3fb01b | [
"MIT"
] | null | null | null | """
Here are two different functions used for common data cleaning tasks.
You can use these functions to load data into a pandas Dataframe.
"""
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
| 22.612903 | 84 | 0.673324 |
d245456046b81bffbc996ce46fc7291edbaf4e36 | 870 | py | Python | services/web/apps/crm/supplierprofile/views.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | services/web/apps/crm/supplierprofile/views.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | services/web/apps/crm/supplierprofile/views.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# crm.supplierprofile application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.extdocapplication import ExtDocApplication
from noc.crm.models.supplierprofile import SupplierProfile
from noc.core.translation import ugettext as _
| 33.461538 | 71 | 0.558621 |
d24561aa431196a52ec81712ae5c3dded61222c7 | 2,849 | py | Python | all-python-codes/bagels/main.py | abdussalam02/py-projects | 653ba4e6923ee1f55a64aef23174515c1db68758 | [
"MIT"
] | null | null | null | all-python-codes/bagels/main.py | abdussalam02/py-projects | 653ba4e6923ee1f55a64aef23174515c1db68758 | [
"MIT"
] | null | null | null | all-python-codes/bagels/main.py | abdussalam02/py-projects | 653ba4e6923ee1f55a64aef23174515c1db68758 | [
"MIT"
] | null | null | null | from random import shuffle
NUM_DIGIT = 3
MAX_GUESSES = 10


def get_secret_num():
    """Return a secret number: a string of NUM_DIGIT distinct random digits."""
    digits = list("0123456789")
    shuffle(digits)  # random order; first NUM_DIGIT entries become the secret
    return "".join(digits[:NUM_DIGIT])
def get_clues(guess, secret_num):
    """Return the Pico/Fermi/Bagels clue string for *guess* vs *secret_num*.

    Fermi: right digit, right place.  Pico: right digit, wrong place.
    Bagels: no digit matches at all.  Clues are sorted alphabetically so
    their order leaks no positional information.
    """
    if guess == secret_num:
        return "You got it!"
    clues = []
    for position, digit in enumerate(guess):
        if digit == secret_num[position]:
            clues.append("Fermi")
        elif digit in secret_num:
            clues.append("Pico")
    if not clues:
        return "Bagels"
    clues.sort()
    return " ".join(clues)
if __name__ == "__main__":
main()
| 30.634409 | 107 | 0.570727 |
d24580d757e7e7fcbb4b8b0a5b6d34e117acf284 | 2,652 | py | Python | NetEmbs/DataProcessing/unique_signatures.py | AlexWorldD/NetEmbs | ea3dc5769e2feb728dac8f21ec677a9807def3df | [
"Apache-2.0"
] | 1 | 2021-09-02T16:47:27.000Z | 2021-09-02T16:47:27.000Z | NetEmbs/DataProcessing/unique_signatures.py | AlexWorldD/NetEmbs | ea3dc5769e2feb728dac8f21ec677a9807def3df | [
"Apache-2.0"
] | null | null | null | NetEmbs/DataProcessing/unique_signatures.py | AlexWorldD/NetEmbs | ea3dc5769e2feb728dac8f21ec677a9807def3df | [
"Apache-2.0"
] | 1 | 2019-12-25T08:38:55.000Z | 2019-12-25T08:38:55.000Z | # encoding: utf-8
__author__ = 'Aleksei Maliutin'
"""
unique_signatures.py
Created by lex at 2019-03-28.
"""
import pandas as pd
from NetEmbs.CONFIG import N_DIGITS
def get_signature(df: pd.DataFrame) -> pd.Series:
    """
    Aggregation function over GroupBy object: to extract unique signature for the given business process.

    For a simple 1-1 flow (exactly two rows) the raw ``amount`` column is
    used; for larger processes the Credit/Debit columns are used instead and
    the pairs are sorted by account name so equal processes always produce
    the same string.

    Parameters
    ----------
    df : DataFrame
        Unique business process as GroupBy DataFrame

    Returns
    -------
    Pandas Series with ID and Signature
    """
    signature_l = list()
    signature_r = list()
    if df.shape[0] == 2:
        # 1-1 flow: pair each side's account name with the amount, rounded
        # to N_DIGITS so float noise cannot split identical signatures.
        signature_l = list(
            zip(df["FA_Name"][df["Credit"] > 0.0].values, df["amount"][df["Credit"] > 0.0].values.round(N_DIGITS)))
        signature_r = list(
            zip(df["FA_Name"][df["Debit"] > 0.0].values, df["amount"][df["Debit"] > 0.0].values.round(N_DIGITS)))
    elif df.shape[0] > 2:
        # Business process includes more that 2 transactions, hence, can use relative amount for creation signature
        signature_l = sorted(
            list(
                zip(df["FA_Name"][df["Credit"] > 0.0].values, df["Credit"][df["Credit"] > 0.0].values.round(N_DIGITS))),
            key=lambda x: x[0])
        signature_r = sorted(
            list(zip(df["FA_Name"][df["Debit"] > 0.0].values, df["Debit"][df["Debit"] > 0.0].values.round(N_DIGITS))),
            key=lambda x: x[0])
    # NOTE(review): a single-row group falls through with two empty lists and
    # yields the signature "([], [])" -- presumably intentional; confirm.
    return pd.Series({"ID": df["ID"].values[0], "Signature": str((signature_l, signature_r))})
def get_signature_df(df: pd.DataFrame) -> pd.DataFrame:
    """Build a DataFrame of (ID, Signature) rows, one per unique signature.

    Parameters
    ----------
    df : DataFrame to be processed

    Returns
    -------
    DataFrame with ID and Signature columns, deduplicated on Signature
    """
    per_process = df.groupby("ID", as_index=False).apply(get_signature)
    unique_signatures = per_process.drop_duplicates(subset=["Signature"])
    return unique_signatures
def leave_unique_business_processes(df: pd.DataFrame) -> pd.DataFrame:
    """Filter *df* down to the business processes with unique signatures.

    Parameters
    ----------
    df : DataFrame to be processed

    Returns
    -------
    DataFrame containing only rows whose ID survived signature deduplication
    """
    unique_ids = get_signature_df(df)
    return unique_ids.merge(df, on="ID", how="left")
| 34 | 120 | 0.633107 |
d248471875d205a42c77cea45df52d51bb8e0b18 | 6,008 | py | Python | books/api/RecurringInvoicesApi.py | harshal-choudhari/books-python-wrappers | 43616ee451a78ef2f02facc1cfb1d7f1121a1464 | [
"MIT"
] | 1 | 2021-04-21T06:40:48.000Z | 2021-04-21T06:40:48.000Z | books/api/RecurringInvoicesApi.py | harshal-choudhari/books-python-wrappers | 43616ee451a78ef2f02facc1cfb1d7f1121a1464 | [
"MIT"
] | null | null | null | books/api/RecurringInvoicesApi.py | harshal-choudhari/books-python-wrappers | 43616ee451a78ef2f02facc1cfb1d7f1121a1464 | [
"MIT"
] | 1 | 2021-04-21T07:31:47.000Z | 2021-04-21T07:31:47.000Z | #$Id$#
from books.util.ZohoHttpClient import ZohoHttpClient
from books.parser.RecurringInvoiceParser import RecurringInvoiceParser
from .Api import Api
from json import dumps
base_url = Api().base_url + 'recurringinvoices/'
parser = RecurringInvoiceParser()
zoho_http_client = ZohoHttpClient()
| 33.19337 | 89 | 0.636152 |
d249639feb0e944a523bdb5fe34255236bfa3990 | 661 | py | Python | api/settings/local.py | hartliddell/api | 73d44d2271c01fe7540fedeee9174c4032cbbbc0 | [
"MIT"
] | null | null | null | api/settings/local.py | hartliddell/api | 73d44d2271c01fe7540fedeee9174c4032cbbbc0 | [
"MIT"
] | null | null | null | api/settings/local.py | hartliddell/api | 73d44d2271c01fe7540fedeee9174c4032cbbbc0 | [
"MIT"
] | null | null | null | """Define the django settings for a local setup."""
from .base import * # noqa
# SECURITY WARNING: don't run with debug turned on in production!
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGE THIS!!!'
# Allow all host headers
# SECURITY WARNING: don't run with this setting in production!
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# CORS settings.
# https://github.com/ottoyiu/django-cors-headers#cors_origin_allow_all
CORS_ORIGIN_ALLOW_ALL = True
| 33.05 | 70 | 0.747352 |
d24974e9a9f24d16218c96318a69ab049db6dc83 | 1,457 | py | Python | scripts/010_smultixcan/utils/ukb_gtex_variants_intersection/compute_intersection_ukb_gtex_variants.py | miltondp/phenomexcan | 38390ac21987f1e72835c42919c53abd1a35cb7e | [
"MIT"
] | 3 | 2020-12-07T15:06:41.000Z | 2021-05-25T06:03:38.000Z | scripts/010_smultixcan/utils/ukb_gtex_variants_intersection/compute_intersection_ukb_gtex_variants.py | miltondp/phenomexcan | 38390ac21987f1e72835c42919c53abd1a35cb7e | [
"MIT"
] | 1 | 2020-07-01T14:45:38.000Z | 2020-07-01T15:15:55.000Z | scripts/010_smultixcan/utils/ukb_gtex_variants_intersection/compute_intersection_ukb_gtex_variants.py | miltondp/phenomexcan | 38390ac21987f1e72835c42919c53abd1a35cb7e | [
"MIT"
] | 1 | 2020-08-20T13:23:40.000Z | 2020-08-20T13:23:40.000Z | #!/usr/bin/env python
import os
import argparse
import sqlite3
from glob import glob
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--gtex-models-dir', type=str, required=True)
parser.add_argument('--variants-file-with-gtex-id', type=str, required=True)
parser.add_argument('--output-file', type=str, required=True)
args = parser.parse_args()
all_models = glob(os.path.join(args.gtex_models_dir, '*.db'))
assert len(all_models) == 49, len(all_models)
all_variants_ids = set()
for m in all_models:
print(f'Processing {m}')
with sqlite3.connect(m) as conn:
df = pd.read_sql('select varID from weights', conn)['varID']
all_variants_ids.update(set(df.values))
print(f'Read {len(all_variants_ids)} unique variants in GTEx models')
print(f'Reading {args.variants_file_with_gtex_id}')
variants_gtexid = pd.read_csv(args.variants_file_with_gtex_id, sep='\t', usecols=['panel_variant_id'], squeeze=True).dropna()
variants_gtexid = set(variants_gtexid.values)
print(f' Read {len(variants_gtexid)} variants')
print('Merging GTEx and other variants')
merged_variants = variants_gtexid.intersection(all_variants_ids)
print(f'Final number of merged variants: {len(merged_variants)}')
print(f'Coverage of GTEx variants: {(len(merged_variants) / len(all_variants_ids)) * 100:.2f}%')
print(f'Writing to {args.output_file}')
pd.DataFrame({'rsid': list(merged_variants)}).to_csv(args.output_file, index=False)
| 33.883721 | 125 | 0.753603 |
d24abb7e1be3b51950c14587cbae8b44aa330b06 | 5,676 | py | Python | h/security/predicates.py | hypothesis/h | 92c1a326c305a3d94fe48f87402135fd7beb6a20 | [
"BSD-2-Clause"
] | 2,103 | 2015-01-07T12:47:49.000Z | 2022-03-29T02:38:25.000Z | h/security/predicates.py | hypothesis/h | 92c1a326c305a3d94fe48f87402135fd7beb6a20 | [
"BSD-2-Clause"
] | 4,322 | 2015-01-04T17:18:01.000Z | 2022-03-31T17:06:02.000Z | h/security/predicates.py | hypothesis/h | 92c1a326c305a3d94fe48f87402135fd7beb6a20 | [
"BSD-2-Clause"
] | 389 | 2015-01-24T04:10:02.000Z | 2022-03-28T08:00:16.000Z | """
Define authorization predicates.
These are functions which accept an `Identity` object and a context object and
return a truthy value. These represent building blocks of our permission map
which define when people do, or don't have permissions.
For example a predicate might define "group_created_by_user" which is only
true when a user is present, a group is present and the user created that
group.
"""
from itertools import chain
from h.models.group import JoinableBy, ReadableBy, WriteableBy
def requires(*parent_predicates):
    """
    Decorate a predicate to say it requires other predicates to be True first.

    The decorated predicate gains a ``requires`` attribute holding its
    parents; ``_expand_predicate`` reads that attribute to execute parents
    before the predicate itself.

    :param parent_predicates: A list of predicates that have to be true for
        this predicate to be true as well.
    """
    # Bug fix: the original chunk returned a name ``decorator`` that was
    # never defined (NameError at call time). Reconstructed to attach the
    # parents, matching the hasattr(predicate, "requires") check below.
    def decorator(predicate):
        predicate.requires = parent_predicates
        return predicate

    return decorator
# Identity things
# The `@requires` here means that this predicate needs `authenticate` to be
# True before it's True. It also avoids attribute errors if identity is None
# Users
def user_found(_identity, context):
    """True-ish when the context carries a (truthy) user attribute."""
    if not hasattr(context, "user"):
        return False
    return context.user
# Annotations
def annotation_found(_identity, context):
    """True-ish when the context carries a (truthy) annotation attribute."""
    if not hasattr(context, "annotation"):
        return False
    return context.annotation
# Groups
def group_found(_identity, context):
    """True-ish when the context carries a (truthy) group attribute."""
    if not hasattr(context, "group"):
        return False
    return context.group
def group_not_found(_identity, context):
    """True when the context has no group attribute, or the group is falsy."""
    if hasattr(context, "group"):
        return not context.group
    return True
def resolve_predicates(mapping):
    """
    Expand predicates with requirements into concrete lists of predicates.

    Every clause in the permission map is rewritten so each predicate's
    parents appear before it, and no predicate occurs twice in a clause.
    """
    resolved = {}
    for permission, clauses in mapping.items():
        resolved[permission] = [_expand_clause(clause) for clause in clauses]
    return resolved
def _expand_clause(clause):
    """Generate all of the predicates + parents in a clause without dupes."""
    already_emitted = set()
    expanded = []
    # _expand_predicate yields each predicate's ancestry (parents first);
    # the shared set guarantees nothing is emitted twice for this clause.
    for predicate in clause:
        expanded.extend(_expand_predicate(predicate, already_emitted))
    return expanded
def _expand_predicate(predicate, seen_before):
"""Generate all of the parents and the predicate in parents first order."""
if hasattr(predicate, "requires"):
for parent in predicate.requires:
yield from _expand_predicate(parent, seen_before)
if predicate not in seen_before:
seen_before.add(predicate)
yield predicate
| 27.687805 | 88 | 0.767442 |
d24c807fe0e09931fae3e0caaf649694c890f3db | 3,325 | py | Python | gdm/planing_tool/models/empresas.py | Deonstudios/GDM | ad6c8182d3e70a6c4d1490f452b2c16e12dc85d8 | [
"Apache-2.0"
] | null | null | null | gdm/planing_tool/models/empresas.py | Deonstudios/GDM | ad6c8182d3e70a6c4d1490f452b2c16e12dc85d8 | [
"Apache-2.0"
] | null | null | null | gdm/planing_tool/models/empresas.py | Deonstudios/GDM | ad6c8182d3e70a6c4d1490f452b2c16e12dc85d8 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from planing_tool.models.plazas import State, Country, City
from django.contrib.gis.db.models import PointField
from datetime import datetime, timedelta
from libs.currency_utils import CurrencyUtils
from django.contrib.gis.db.models.manager import GeoManager
from django.db import models
from simple_history.models import HistoricalRecords
from django.utils.translation import ugettext as _
from autoslug import AutoSlugField
# Choices for an entity's activity type (integer key stored in the DB);
# labels are Spanish strings wrapped in ugettext for translation.
ACTIVITY_CHOICES = (
    (0, _(u'Empresa')),  # company
    (1, _(u'Comerciante Individual')),  # individual trader
    (2, _(u'Profesional')),  # professional
    (3, _(u'Productor')),  # producer
)
| 29.166667 | 79 | 0.67218 |
d24ca4e55e2ea29a960fa8ecd6a05a6ef87a0584 | 8,346 | py | Python | network.py | tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning | fb9b20181dd3e3273fcbc28144d60f01185ceffd | [
"MIT"
] | 12 | 2020-07-24T13:21:35.000Z | 2021-11-08T10:13:24.000Z | network.py | tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning | fb9b20181dd3e3273fcbc28144d60f01185ceffd | [
"MIT"
] | null | null | null | network.py | tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning | fb9b20181dd3e3273fcbc28144d60f01185ceffd | [
"MIT"
] | 7 | 2020-07-24T13:28:44.000Z | 2021-11-08T10:13:25.000Z | import os
import tensorflow as tf
from util import masked_softmax
| 46.88764 | 122 | 0.642104 |
d24d2defb1725aab6afee3638c1358468609f75a | 32,111 | py | Python | tests/test_reusable_executor.py | hoodmane/loky | 00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b | [
"BSD-3-Clause"
] | 153 | 2020-01-29T07:26:58.000Z | 2022-03-31T23:30:55.000Z | tests/test_reusable_executor.py | hoodmane/loky | 00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b | [
"BSD-3-Clause"
] | 98 | 2020-01-17T09:14:16.000Z | 2022-03-10T15:32:14.000Z | tests/test_reusable_executor.py | hoodmane/loky | 00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b | [
"BSD-3-Clause"
] | 22 | 2020-01-17T09:26:38.000Z | 2022-02-02T09:27:59.000Z | import os
import sys
import gc
import ctypes
import psutil
import pytest
import warnings
import threading
from time import sleep
from multiprocessing import util, current_process
from pickle import PicklingError, UnpicklingError
from distutils.version import LooseVersion
import loky
from loky import cpu_count
from loky import get_reusable_executor
from loky.process_executor import _RemoteTraceback, TerminatedWorkerError
from loky.process_executor import BrokenProcessPool, ShutdownExecutorError
from loky.reusable_executor import _ReusablePoolExecutor
import cloudpickle
from ._executor_mixin import ReusableExecutorMixin
from .utils import TimingWrapper, id_sleep, check_python_subprocess_call
from .utils import filter_match
cloudpickle_version = LooseVersion(cloudpickle.__version__)
# Compat windows
if sys.platform == "win32":
from signal import SIGTERM as SIGKILL
libc = ctypes.cdll.msvcrt
else:
from signal import SIGKILL
from ctypes.util import find_library
libc = ctypes.CDLL(find_library("c"))
try:
import numpy as np
except ImportError:
np = None
# Backward compat for python2 cPickle module
PICKLING_ERRORS = (PicklingError,)
try:
import cPickle
PICKLING_ERRORS += (cPickle.PicklingError,)
except ImportError:
pass
def clean_warning_registry():
    """Safe way to reset warnings: restore default filters and wipe every
    loaded module's ``__warningregistry__`` cache."""
    warnings.resetwarnings()
    registry_attr = "__warningregistry__"
    # Snapshot the module table: clearing registries must not race with
    # imports mutating sys.modules during iteration.
    for _name, module in list(sys.modules.items()):
        if hasattr(module, registry_attr):
            getattr(module, registry_attr).clear()
def wait_dead(worker, n_tries=1000, delay=0.001):
    """Wait for process pid to die.

    Polls ``worker.exitcode`` up to *n_tries* times, sleeping *delay*
    seconds between polls; raises RuntimeError if it never becomes set.
    """
    for _attempt in range(n_tries):
        if worker.exitcode is not None:
            return
        sleep(delay)
    raise RuntimeError("Process %d failed to die for at least %0.3fs" %
                       (worker.pid, delay * n_tries))
def crash():
    """Induces a segfault (kills the calling process with a real SIGSEGV)."""
    # faulthandler._sigsegv() deliberately triggers a genuine segmentation
    # fault -- used by the test suite to simulate a hard worker crash.
    import faulthandler
    faulthandler._sigsegv()
def exit():
    """Induces a sys exit with exitcode 0 (note: shadows the builtin exit)."""
    # Equivalent to sys.exit(0), which itself raises SystemExit(0).
    raise SystemExit(0)
def c_exit(exitcode=0):
    """Induces a libc exit with exitcode 0.

    Terminates the process through the C runtime, bypassing Python's normal
    shutdown (atexit handlers, finalizers) entirely.
    """
    libc.exit(exitcode)
def sleep_then_check_pids_exist(arg):
    """Sleep for some time, then report whether every given pid still exists.

    *arg* is a (sleep_seconds, pids) pair; returns True only if all pids
    are alive after the sleep.
    """
    duration, pids = arg
    sleep(duration)
    # Materialize all checks (no short-circuit), mirroring the original
    # accumulate-with-&= behaviour.
    checks = [psutil.pid_exists(pid) for pid in pids]
    return all(checks)
def kill_friend(pid, delay=0):
    """Send SIGKILL to process *pid* after an optional *delay* in seconds.

    On Windows, SIGKILL is aliased to SIGTERM at module import (see the
    compat block at the top of the file).
    """
    sleep(delay)
    try:
        os.kill(pid, SIGKILL)
    except (PermissionError, ProcessLookupError) as e:
        if psutil.pid_exists(pid):
            # The target is still alive yet we could not kill it: surface it.
            util.debug("Fail to kill an alive process?!?")
            raise e
        # Benign race: the process died before our signal landed.
        util.debug("process {} was already dead".format(pid))
def raise_error(etype=UnpicklingError, message=None):
    """Raise an exception of type *etype* constructed with *message*."""
    error = etype(message)
    raise error
def return_instance(cls):
    """Instantiate *cls* with no arguments and return the new object."""
    instance = cls()
    return instance
def do_nothing(arg):
    """Ignore *arg* and always return True (checks argument passing only)."""
    return True
def test_deadlock_kill(self):
    """Test deadlock recovery for reusable_executor.

    Hard-kills a worker of a live executor and checks that (1) the executor
    transitions to the broken state and (2) the factory can subsequently
    hand out a fresh, working executor.
    """
    executor = get_reusable_executor(max_workers=1, timeout=None)
    # trigger the spawning of the worker process
    executor.submit(sleep, 0.1)
    worker = next(iter(executor._processes.values()))
    # Resizing while a job runs must emit exactly one warning and wait.
    with pytest.warns(UserWarning) as recorded_warnings:
        executor = get_reusable_executor(max_workers=2, timeout=None)
    assert len(recorded_warnings) == 1
    expected_msg = ("Trying to resize an executor with running jobs:"
                    " waiting for jobs completion before resizing.")
    assert recorded_warnings[0].message.args[0] == expected_msg
    # Kill the original worker (SIGKILL; aliased to SIGTERM on Windows).
    os.kill(worker.pid, SIGKILL)
    wait_dead(worker)
    # wait for the executor to be able to detect the issue and set itself
    # in broken state:
    sleep(.5)
    # New submissions on the broken executor must fail loudly.
    with pytest.raises(TerminatedWorkerError,
                       match=filter_match(r"SIGKILL")):
        executor.submit(id_sleep, 42, 0.1).result()
    # the get_reusable_executor factory should be able to create a new
    # working instance
    executor = get_reusable_executor(max_workers=2, timeout=None)
    assert executor.submit(id_sleep, 42, 0.).result() == 42
| 38.687952 | 79 | 0.64112 |
d24e25a2e5e83961161f51930a9dbcf5a8859141 | 3,781 | py | Python | modules/common/parsers/timetable_parser.py | hgyoseo/hdmeal | f6f96c9190701b38eb6f08e2238f4f5214b95d3b | [
"MIT"
] | 2 | 2020-03-01T13:15:57.000Z | 2020-03-25T18:53:21.000Z | modules/common/parsers/timetable_parser.py | hgyoseo/hdmeal | f6f96c9190701b38eb6f08e2238f4f5214b95d3b | [
"MIT"
] | null | null | null | modules/common/parsers/timetable_parser.py | hgyoseo/hdmeal | f6f96c9190701b38eb6f08e2238f4f5214b95d3b | [
"MIT"
] | null | null | null | #
#
#
#
#
#
# Copyright 2019-2020, Hyungyo Seo
# timetable_parser.py - .
import datetime
import json
import os
import urllib.error
import urllib.request
from itertools import groupby
from modules.common import conf, log
#
NEIS_OPENAPI_TOKEN = conf.configs['Tokens']['NEIS']  # NEIS Open API token
ATPT_OFCDC_SC_CODE = conf.configs['School']['NEIS']['ATPT_OFCDC_SC_CODE']  # education-office code
SD_SCHUL_CODE = conf.configs['School']['NEIS']['SD_SCHUL_CODE']  # school code
# Module-level cache, presumably filled by the parser below -- confirm.
timetable = {}
# Manual-run entry point for debugging.
if __name__ == "__main__":
    # NOTE(review): parse() is defined elsewhere in this module (not visible
    # in this chunk); argument meanings should be confirmed in the full file.
    print(parse(3, 11, 2019, 10, 25, "****DEBUG****", True))
| 37.81 | 122 | 0.524729 |
d24e88624ecd17dbeb714acc8fe1596a1a4493c1 | 34,597 | py | Python | gittle/gittle.py | justecorruptio/gittle | e046fe4731ebe4168884e51ac5baa26c79f0567d | [
"Apache-2.0"
] | 1 | 2016-09-10T15:21:30.000Z | 2016-09-10T15:21:30.000Z | gittle/gittle.py | justecorruptio/gittle | e046fe4731ebe4168884e51ac5baa26c79f0567d | [
"Apache-2.0"
] | null | null | null | gittle/gittle.py | justecorruptio/gittle | e046fe4731ebe4168884e51ac5baa26c79f0567d | [
"Apache-2.0"
] | null | null | null | # From the future
from __future__ import absolute_import
# Python imports
import os
import copy
import logging
from hashlib import sha1
from shutil import rmtree
from functools import partial, wraps
# Dulwich imports
from dulwich.repo import Repo as DulwichRepo
from dulwich.client import get_transport_and_path
from dulwich.index import build_index_from_tree, changes_from_tree
from dulwich.objects import Tree, Blob
from dulwich.server import update_server_info
# Funky imports
import funky
# Local imports
from gittle.auth import GittleAuth
from gittle.exceptions import InvalidRemoteUrl
from gittle import utils
# Exports
__all__ = ('Gittle',)
# Guarantee that a diretory exists
# Useful decorators
# A better way to do this in the future would maybe to use Mixins
def ref_walker(self, ref=None):
    """
    Very simple, basic walker over the history of *ref* (defaults to HEAD).
    """
    target = ref if ref else 'HEAD'
    head_sha = self._commit_sha(target)
    return self.repo.revision_history(head_sha)
def branch_walker(self, branch):
    """Walk the history of *branch*, falling back to the default branch."""
    name = branch if branch else self.DEFAULT_BRANCH
    return self.ref_walker(self._format_ref_branch(name))
def commit_info(self, start=0, end=None, branch=None):
    """Return a list of commit-info dicts for *branch*.

    :param start: index of the first commit to include.
    :param end: index one past the last commit (``None`` means "to the end").
    :param branch: branch to walk; defaults to the repo's default branch.
    :returns: list of dicts as produced by ``utils.git.commit_info``,
        empty when the repository has no commits yet.
    """
    if not self.has_commits:
        return []
    commits = [utils.git.commit_info(entry) for entry in self.branch_walker(branch)]
    # Fix: honour *start* even when *end* is not given.  The old code
    # returned the full list whenever *end* was falsy, silently ignoring
    # *start*; ``commits[start:None]`` slices to the end of the list.
    return commits[start:end]
def commits(self):
    """Return the SHA of every commit reported by ``commit_info``."""
    shas = []
    for info in self.commit_info():
        shas.append(info['sha'])
    return shas
# Generate a branch selector (used for pushing)
# Get the absolute path for a file in the git repo
# Get the relative path from the absolute path
def get_client(self, origin_uri=None, **kwargs):
    """Build a dulwich transport client for *origin_uri* (or the stored remote).

    Returns a ``(client, remote_path)`` pair.  Raises ``InvalidRemoteUrl``
    when neither *origin_uri* nor ``self.origin_uri`` is set.
    """
    remote = origin_uri or self.origin_uri
    if not remote:
        raise InvalidRemoteUrl()
    # Precedence: auth settings first, caller overrides next, and the
    # repo's activity-report hook always wins (same order as before).
    opts = dict(self.authenticator.kwargs())
    opts.update(kwargs)
    opts['report_activity'] = self.report_activity
    return get_transport_and_path(remote, **opts)
def push_to(self, origin_uri, branch_name=None, progress=None, progress_stderr=None):
    """Push the selected branch to the remote at *origin_uri*.

    :param origin_uri: remote URL to push to.
    :param branch_name: branch to push; a "wants" selector is built for it.
    :param progress: callback passed to dulwich's ``send_pack``.
    :param progress_stderr: callback forwarded to the transport client.
    :returns: whatever ``client.send_pack`` returns (new refs mapping).
    """
    # Selector decides which refs the remote should end up with.
    selector = self._wants_branch(branch_name=branch_name)
    client, remote_path = self.get_client(origin_uri, progress_stderr=progress_stderr)
    return client.send_pack(
        remote_path,
        selector,
        self.repo.object_store.generate_pack_contents,
        progress=progress
    )
# Like: git push
# Not recommended at ALL ... !!!
# Like: git pull
def _commit(self, committer=None, author=None, message=None, files=None, tree=None, *args, **kwargs):
    """Low-level commit helper shared by the public commit methods.

    When no pre-built *tree* is supplied, the given *files* (or all
    modified files) are staged first; otherwise the tree is committed
    as-is.  *committer*/*author* are dicts formatted via
    ``_format_userinfo``.  Extra args are forwarded to dulwich's
    ``Repo.do_commit``.
    """
    if not tree:
        # If no tree then stage files
        modified_files = files or self.modified_files
        # Side effect: logs the staged set at WARNING level (kept as-is).
        logging.warning("STAGING : %s" % modified_files)
        self.add(modified_files)
    # Messages
    message = message or self.DEFAULT_MESSAGE
    author_msg = self._format_userinfo(author)
    committer_msg = self._format_userinfo(committer)
    return self.repo.do_commit(
        message=message,
        author=author_msg,
        committer=committer_msg,
        encoding='UTF-8',
        tree=tree,
        *args, **kwargs
    )
def _tree_from_structure(self, structure):
    """Build and store a dulwich ``Tree`` from a list of file dicts.

    Each entry must provide ``'data'`` and ``'name'`` (ASCII-encodable
    strings) plus ``'mode'``.  Malformed or non-ASCII entries are skipped.
    Returns the new tree's SHA.

    TODO : Support directories (only flat structures are handled).
    """
    tree = Tree()
    for file_info in structure:
        # Fix: catch only the expected failures instead of a bare
        # ``except`` that silently swallowed everything (including
        # KeyboardInterrupt/SystemExit on Python 2).
        try:
            data = file_info['data'].encode('ascii')
            name = file_info['name'].encode('ascii')
            mode = file_info['mode']
        except (KeyError, AttributeError, UnicodeError):
            # Skip entries with missing keys or non-ASCII content.
            continue
        blob = Blob()
        blob.data = data
        # Store the file's contents...
        self.repo.object_store.add_object(blob)
        # ...then reference the blob from the tree entry.
        tree.add(
            name,
            mode,
            blob.id
        )
    # Store the tree itself.
    self.repo.object_store.add_object(tree)
    return tree.id
# Like: git commmit -a
def commit_structure(self, name=None, email=None, message=None, structure=None, *args, **kwargs):
    """Commit an in-memory file *structure* directly.

    Mainly useful for bare repositories, e.g. writing an initial commit so
    a freshly created repo can be cloned and worked on right away.
    Returns ``None`` when *structure* is empty.
    """
    if not structure:
        return None
    identity = {'name': name, 'email': email}
    tree = self._tree_from_structure(structure)
    return self._commit(
        *args,
        committer=identity,
        author=identity,
        message=message,
        tree=tree,
        **kwargs
    )
# Push all local commits
# and pull all remote commits
"""
@property
@funky.transform(set)
def modified_staged_files(self):
"Checks if the file has changed since last commit"
timestamp = self.last_commit.commit_time
index = self.index
return [
f
for f in self.tracked_files
if index[f][1][0] > timestamp
]
"""
# Return a list of tuples
# representing the changed elements in the git tree
"""
@property
@funky.transform(set)
def modified_files(self):
return self.modified_staged_files | self.modified_unstaged_files
"""
# Like: git add
# Like: git rm
# Like: git mv
def checkout_all(self, commit_sha=None):
    """Rebuild the working index from *commit_sha* (defaults to HEAD)."""
    target = commit_sha or self.head
    return self._checkout_tree(self._commit_tree(target))
def checkout(self, commit_sha=None, files=None):
    """Checkout only a select amount of files.

    NOTE(review): this is a stub -- it normalises its arguments and then
    returns ``self`` without touching the working tree.  Use
    ``checkout_all`` for an actual checkout.
    """
    commit_sha = commit_sha or self.head
    files = files or []
    return self
def _to_commit(self, commit_obj):
    """Allow methods to accept either a SHA string or a dulwich Commit.

    A string is looked up in the repo; anything else is returned as-is.
    NOTE(review): ``basestring`` makes this Python-2 only.
    """
    if isinstance(commit_obj, basestring):
        return self.repo[commit_obj]
    return commit_obj
def _commit_sha(self, commit_obj):
    """Extract a dulwich commit's SHA.

    Accepts a raw SHA string (returned unchanged), a ref name (resolved
    through the repo), or a Commit object (its ``.id`` is returned).
    NOTE(review): ``basestring`` makes this Python-2 only.
    """
    if utils.git.is_sha(commit_obj):
        return commit_obj
    elif isinstance(commit_obj, basestring):
        # Can't use self[commit_obj] -- that would recurse back here.
        commit_obj = self.repo[commit_obj]
    return commit_obj.id
def _blob_data(self, sha):
    """Return the raw content of the blob stored under *sha*."""
    return self[sha].data
# Get the nth parent back for a given commit
def get_parent_commit(self, commit, n=None):
    """Recursively get the *n*-th first-parent ancestor of *commit*.

    Warning: parents aren't necessarily the previous commits in log order.
    :param commit: SHA string or dulwich Commit object.
    :param n: how many parent hops to take (default 1).
    :returns: the ancestor's SHA string.
    """
    if n is None:
        n = 1
    commit = self._to_commit(commit)
    parents = commit.parents
    if n <= 0 or not parents:
        # Base case: no hops left (or a root commit) -- return a SHA.
        return self._commit_sha(commit)
    # Always follow the first parent (merge second-parents are ignored).
    parent_sha = parents[0]
    parent = self[parent_sha]
    # Recur with one fewer hop remaining.
    return self.get_parent_commit(parent, n - 1)
def _commit_tree(self, commit_sha):
    """Return the ``tree`` attribute of the commit at *commit_sha*.

    NOTE(review): in dulwich ``Commit.tree`` is the tree's SHA, not the
    Tree object itself -- confirm against callers.
    """
    return self[commit_sha].tree
def diff_working(self, ref=None, filter_binary=True):
    """Diff the current working directory against HEAD (or *ref*).

    Delegates to ``utils.git.diff_changes_paths`` with the entries that
    changed relative to *ref*; binary files are filtered out by default.
    """
    return utils.git.diff_changes_paths(
        self.repo.object_store,
        self.path,
        self._changed_entries(ref=ref),
        filter_binary=filter_binary
    )
def get_commit_files(self, commit_sha, parent_path=None, is_tree=None, paths=None):
    """Return file info for a commit (recursing into sub-directories).

    :param commit_sha: commit SHA -- or a tree SHA when *is_tree* is True
        (used internally for the recursive calls).
    :param parent_path: path prefix accumulated during recursion.
    :param is_tree: treat *commit_sha* as a tree SHA instead of a commit.
    :param paths: optional whitelist; only these sub-paths are returned.
    :returns: dict of the following format::

        {
            "directory/filename.txt": {
                'name': 'filename.txt',
                'path': "directory/filename.txt",
                'sha': "xxxxxxxxxxxxxxxxxxxx",
                'data': "blablabla",
                'mode': <git mode int>,
            },
            ...
        }
    """
    # Default values
    context = {}
    is_tree = is_tree or False
    parent_path = parent_path or ''
    if is_tree:
        tree = self[commit_sha]
    else:
        tree = self[self._commit_tree(commit_sha)]
    for mode, path, sha in tree.entries():
        # Directories recurse with the extended path prefix.
        if mode == self.MODE_DIRECTORY:
            context.update(
                self.get_commit_files(sha, parent_path=os.path.join(parent_path, path), is_tree=True, paths=paths)
            )
            continue
        subpath = os.path.join(parent_path, path)
        # Only add the files we want (whitelist, when given).
        if not(paths is None or subpath in paths):
            continue
        # Add file entry
        context[subpath] = {
            'name': path,
            'path': subpath,
            'mode': mode,
            'sha': sha,
            'data': self._blob_data(sha),
        }
    return context
def file_versions(self, path):
    """Return one commit-info dict per distinct version of *path*.

    Walks the history in ``commit_info`` order and keeps the first commit
    seen for each distinct blob SHA of the file.  Each returned dict is the
    commit info with the file's info attached under the ``'file'`` key.
    """
    versions = []
    commits_info = self.commit_info()
    seen_shas = set()
    for commit in commits_info:
        try:
            files = self.get_commit_files(commit['sha'], paths=[path])
            # list(...) keeps this working on Python 3 (dict views are not
            # indexable); IndexError still signals "file absent here".
            file_path, file_data = list(files.items())[0]
        except IndexError:
            continue
        file_sha = file_data['sha']
        if file_sha in seen_shas:
            continue
        else:
            seen_shas.add(file_sha)
        # Attach the file info to the commit record.
        commit['file'] = file_data
        # Fix: append the commit (with file info attached), not the bare
        # file dict -- the old code set commit['file'] and then discarded
        # the commit, making that assignment a dead store.
        versions.append(commit)
    return versions
def _diff_between(self, old_commit_sha, new_commit_sha, diff_function=None, filter_binary=True):
    """Internal method for getting a diff between two commits.

    Please use the public ``.diff`` method unless you have very specific
    needs.  *diff_function* receives ``(object_store, old_tree, new_tree)``
    plus the *filter_binary* flag.
    """
    # If commit is the first commit (new_commit_sha == old_commit_sha)
    # then compare against an empty tree instead.
    if new_commit_sha == old_commit_sha:
        old_tree = Tree()
    else:
        old_tree = self._commit_tree(old_commit_sha)
    new_tree = self._commit_tree(new_commit_sha)
    return diff_function(self.repo.object_store, old_tree, new_tree, filter_binary=filter_binary)
def changes(self, *args, **kwargs):
    """List the changes between two SHAs.

    Returns a list of lists of tuples::

        [
            [
                (oldpath, newpath), (oldmode, newmode), (oldsha, newsha)
            ],
            ...
        ]
    """
    return self.diff(*args, **dict(kwargs, diff_type='changes'))
def add_ref(self, new_ref, old_ref):
    """Copy an existing ref under a new name and refresh server info."""
    refs = self.repo.refs
    refs[new_ref] = refs[old_ref]
    self.update_server_info()
def remove_ref(self, ref_name):
    """Delete *ref_name*; return ``False`` when it does not exist."""
    refs = self.repo.refs
    if ref_name not in refs:
        return False
    del refs[ref_name]
    self.update_server_info()
    return True
def create_branch(self, base_branch, new_branch, tracking=None):
    """Create *new_branch* from *base_branch*.

    Prefers a local *base_branch*; otherwise forks the matching branch on
    the *tracking* remote.

    :param base_branch: branch to fork from (local or remote).
    :param new_branch: name of the branch to create.
    :param tracking: remote to look on; defaults to ``self.DEFAULT_REMOTE``.
    :returns: the new branch's ref name.
    :raises Exception: if *new_branch* already exists, or *base_branch*
        cannot be found locally or on the remote.
    """
    # Fix: honour the caller-supplied remote.  The old code unconditionally
    # overwrote *tracking* with DEFAULT_REMOTE, making the parameter dead.
    tracking = tracking or self.DEFAULT_REMOTE
    # Already exists?
    if new_branch in self.branches:
        raise Exception("branch %s already exists" % new_branch)
    # NOTE(review): os.path.sep is '\\' on Windows while git refs always
    # use '/'; kept as-is to preserve existing behaviour.
    remote_branch = os.path.sep.join([tracking, base_branch])
    if base_branch in self.branches:
        # Fork a local branch.
        base_ref = self._format_ref_branch(base_branch)
    elif remote_branch in self.remote_branches:
        # Fork the remote-tracking branch.  TODO : set up tracking config.
        base_ref = self._format_ref_remote(remote_branch)
    else:
        raise Exception("Can not find the branch named '%s' to fork either locally or in '%s'" % (base_branch, tracking))
    # Creating a branch is just copying the base ref under the new name.
    new_ref = self._format_ref_branch(new_branch)
    self.add_ref(new_ref, base_ref)
    return new_ref
def remove_branch(self, branch_name):
    """Delete the local branch *branch_name* (returns remove_ref's result)."""
    return self.remove_ref(self._format_ref_branch(branch_name))
def switch_branch(self, branch_name, tracking=None, create=None):
    """Change the current branch (creating it from *tracking* if missing).

    NOTE(review): the *create* flag is normalised below but never read --
    a missing branch is always auto-created regardless of its value.
    """
    if create is None:
        create = True
    # Auto-create the branch when it doesn't exist locally.
    if not branch_name in self.branches:
        self.create_branch(branch_name, branch_name, tracking=tracking)
    # Get branch reference
    branch_ref = self._format_ref_branch(branch_name)
    # Point HEAD at the branch (symbolic ref, like `git symbolic-ref`).
    self.repo.refs.set_symbolic_ref('HEAD', branch_ref)
    if self.is_working:
        # Non-bare repo: wipe the working tree...
        self.clean_working()
        # ...then materialise the files of the new branch.
        self.checkout_all()
def clean_working(self):
    """Purge the working tree (everything except .git).

    Used by ``checkout_all`` to get clean branch switching.
    """
    return self.clean()
def commit_ls(self, ref, subpath=None):
    """List a "directory" at *subpath* as of commit *ref*.

    With no *subpath* (or a root path) the tree root is listed one level
    deep; otherwise the structure below *subpath* is returned.
    """
    tree_sha = self._commit_tree(ref)
    if not subpath or subpath in self.ROOT_PATHS:
        return self._get_fs_structure(tree_sha, depth=1)
    return self._get_fs_structure_by_path(tree_sha, subpath)
def commit_file(self, ref, path):
    """Return the file-info dict for *path* as of commit *ref*.

    Raises ``IndexError`` when the commit does not contain *path*.
    """
    # list(...) keeps this working on Python 3, where dict.items() is a
    # view and does not support indexing (the old ``.items()[0]`` was
    # Python-2 only).
    files = self.get_commit_files(ref, paths=[path])
    name, info = list(files.items())[0]
    return info
def __hash__(self):
    """Hash by repository path.

    Required so memoization on these objects keys correctly (the default
    identity hash would defeat the cache).
    """
    return hash(self.path)
# Convenience aliases for the public API.  The targets (clone_bare,
# commit_info, changes_count, recent_contributors) are defined earlier in
# the full module -- not all are visible in this chunk.
fork = clone_bare
log = commit_info
diff_count = changes_count
contributors = recent_contributors
| 29.394223 | 125 | 0.598144 |
d24ee59db0447d71e371a28fd126b436b147eeac | 992 | py | Python | testg.py | dcn01/AndroidDropFrameAnalysis | 630d75dc999a8d1e4eec71edc0a1220334166d0a | [
"MIT"
] | 2 | 2018-12-10T03:49:03.000Z | 2018-12-10T13:43:26.000Z | testg.py | dcn01/AndroidDropFrameAnalysis | 630d75dc999a8d1e4eec71edc0a1220334166d0a | [
"MIT"
] | null | null | null | testg.py | dcn01/AndroidDropFrameAnalysis | 630d75dc999a8d1e4eec71edc0a1220334166d0a | [
"MIT"
] | null | null | null | # fpsAllFrameRead = open("profileAllFrame.txt", "r")
# profileDataReadList =[]
# t = []
# for line in fpsAllFrameRead.readlines():
# profileDataReadList.append(line)
#
# for line in profileDataReadList:
# splitByComma = line.split(",")
# l = len(splitByComma)
# print str(l)
# NOTE(review): under Python 2, 1000/60 is integer division (== 16), so
# a == 2.15 here; the print statement below makes this file Python-2-only
# anyway, so those semantics are presumably intended.
a = 34.4/(1000/60)
print str(a)
# fin = ""
# c = 0
# e = len(willBeInsertIntoSqlList)
# for tmplist in willBeInsertIntoSqlList:
# splitByT = tmplist.split("\t")
# if c==0:
# fin = fin +"{"
#
# if c==e -1:
# fin = fin+str(c)+":{\"Draw\":"+splitByT[1]+",\"Prepare\":"+splitByT[2]+",\"Process\":"+splitByT[3]+",\"Execute\":"+splitByT[4].strip()+"}}"
# else:
# fin = fin+str(c)+":{\"Draw\":"+splitByT[1]+",\"Prepare\":"+splitByT[2]+",\"Process\":"+splitByT[3]+",\"Execute\":"+splitByT[4].strip()+"},"
#
# c = c+1
# fin = "var person_data = "+fin+";\nvar svg_width = 88350;"
# dataWrite = open("./output/js/data.js", "w")
# dataWrite.write(fin)
| 31 | 149 | 0.5625 |
d24f47bb348b9648ed9893766e4cb276bd461df6 | 452 | py | Python | app/core/urls.py | vatsamail/django-profiles | d9738fcb129e4f50ecde28126f5ffcccdf1999e0 | [
"MIT"
] | 1 | 2019-05-24T14:22:04.000Z | 2019-05-24T14:22:04.000Z | app/core/urls.py | vatsamail/django-profiles | d9738fcb129e4f50ecde28126f5ffcccdf1999e0 | [
"MIT"
] | 9 | 2020-06-05T18:17:48.000Z | 2022-03-11T23:21:33.000Z | app/core/urls.py | vatsamail/django-profiles | d9738fcb129e4f50ecde28126f5ffcccdf1999e0 | [
"MIT"
] | 1 | 2018-06-22T05:54:58.000Z | 2018-06-22T05:54:58.000Z | from django.urls import include, path, re_path
from . import views
from django.contrib.auth.views import (
login,
logout,
password_reset,
password_reset_done,
password_reset_confirm,
password_reset_complete,
)
app_name = 'core'
urlpatterns = [
path('', views.HomeView.as_view(), name='home'),
re_path(r'friending/(?P<operation>.+)/(?P<pk>\d+)/$', views.friending, name='friend_unfriend'),
]
| 26.588235 | 99 | 0.64823 |
d250a6fd3bfdb7ab11ae4c2f8ffe9bfe5c487a4e | 745 | py | Python | Python/lab2/temp_convert_FtoC.py | varuneagle555/BSA-STEM-Merit-Badge-Week | 04da40973c99eb64184bb98b58d8bf87b337456c | [
"MIT"
] | 3 | 2016-03-22T07:05:35.000Z | 2021-01-08T21:46:32.000Z | Python/lab2/temp_convert_FtoC.py | varuneagle555/BSA-STEM-Merit-Badge-Week | 04da40973c99eb64184bb98b58d8bf87b337456c | [
"MIT"
] | null | null | null | Python/lab2/temp_convert_FtoC.py | varuneagle555/BSA-STEM-Merit-Badge-Week | 04da40973c99eb64184bb98b58d8bf87b337456c | [
"MIT"
] | 4 | 2017-02-10T22:21:18.000Z | 2022-02-20T01:06:25.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""temp_convert.py: Convert temperature F to C."""
# initialize looping variable, assume yes as first answer
continueYN = "Y"
while continueYN.upper() == "Y":
    # get temperature input from the user, and prompt them for what we expect
    # (int() raises ValueError on non-numeric input -- uncaught by design)
    degF = int(raw_input("Enter temperature in degrees Fahrenheit (F) to convert: "))
    # NOTE(review): Python-2 integer division -- "* 5/9" truncates
    # (e.g. 51F -> 10, not 10.56); confirm whether float output is wanted.
    degC = (degF - 32) * 5/9
    print "Temperature in degrees C is: {temp}".format(temp=degC)
    # check for temperature below freezing...
    if degC < 0:
        print "Pack long underwear!"
    # check for it being a very hot day (uses the original Fahrenheit value)...
    if degF > 100:
        print "Remember to hydrate!"
    continueYN = raw_input("Would you like to enter another (Y/N)? ")
d25277187f27f31c782ae6f4bfb336436c74c318 | 2,197 | py | Python | test/connector/exchange/wazirx/test_wazirx_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | test/connector/exchange/wazirx/test_wazirx_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | test/connector/exchange/wazirx/test_wazirx_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | #!/usr/bin/env python
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../../")))
import conf
from hummingbot.connector.exchange.wazirx.wazirx_api_order_book_data_source import WazirxAPIOrderBookDataSource
from hummingbot.connector.exchange.wazirx.wazirx_user_stream_tracker import WazirxUserStreamTracker
from hummingbot.connector.exchange.wazirx.wazirx_auth import WazirxAuth
import asyncio
from hummingbot.core.utils.async_utils import safe_ensure_future
import logging
import unittest
# Pairs exercised by the user-stream tracker test.
trading_pairs = ["BTC-INR", "ZRX-INR"]
if __name__ == "__main__":
    # NOTE(review): main() is defined elsewhere in the full file (not
    # visible in this chunk).
    main()
| 35.435484 | 115 | 0.690942 |
d252d60d44fc7e878fae2a2e799df7cff950fbd9 | 597 | py | Python | setup.py | jaspershen/getDB | 6f767279775e201f9505bb1e98dd141ffe0335f7 | [
"MIT"
] | null | null | null | setup.py | jaspershen/getDB | 6f767279775e201f9505bb1e98dd141ffe0335f7 | [
"MIT"
] | null | null | null | setup.py | jaspershen/getDB | 6f767279775e201f9505bb1e98dd141ffe0335f7 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(name='getDB',
version='0.0.4',
description="This module can be used to download HMDB and KEGG database.",
license='MIT',
author='Xiaotao Shen',
author_email='shenxt1990@163.com',
url='https://github.com/jaspershen/getDB',
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=['requests', 'pandas', 'bs4', 'numpy'],
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
]
) | 35.117647 | 80 | 0.624791 |
d2536eb6f6ea1a24212cca3d6076bd9bd30877a3 | 7,706 | py | Python | lib/pts2angmap.py | samsafadi/PointRCNN | 761d4cadb3e634dc0994f2e95318240c37fbb485 | [
"MIT"
] | 1 | 2020-11-16T20:11:26.000Z | 2020-11-16T20:11:26.000Z | lib/pts2angmap.py | samsafadi/PointRCNN | 761d4cadb3e634dc0994f2e95318240c37fbb485 | [
"MIT"
] | null | null | null | lib/pts2angmap.py | samsafadi/PointRCNN | 761d4cadb3e634dc0994f2e95318240c37fbb485 | [
"MIT"
] | null | null | null |
"""
modified from sparsify.py file. This file gnerate angle map of [H=64,W=1024,4] from velodyne lidar bins
To run this:
python3 pts2angmap.py --calib_path '/root/gdrive/My Drive/PointRCNN/data/KITTI/object/training/calib/'\
--image_path '/root/gdrive/My Drive/PointRCNN/data/KITTI/object/training/image_2/' --ptc_path '/root/gdrive/My Drive/PointRCNN/data/KITTI/object/training/velodyne/'\
--split_file '/root/gdrive/My Drive/PointRCNN/data/KITTI/ImageSets/train.txt' --output_path '/root/gdrive/My Drive/PointRCNN/data/KITTI/object/training/angle_map/' --W 1024 --slice 1 --H 64
git config --global user.email "zhaoguangyuan@ucla.edu"
git config --global user.name "zhaoguangyuan123"
"""
import argparse
import os.path as osp
import time
import numpy as np
import torch
from tqdm.auto import tqdm
from data_utils.kitti_object import *
from data_utils.kitti_util import rotz, Calibration, load_image, load_velo_scan
from multiprocessing import Process, Queue, Pool
def pto_ang_map(data_idx, velo_points, H=64, W=512, slice=1, line_spec=None,
                get_lines=False, fill_in_line=None, fill_in_spec=None,
                fill_in_slice=None):
    """Project an (N, 4) velodyne point cloud into an (H', W, 4) angle map.

    Each output cell holds (x, y, z, intensity) of a point that landed in
    that (elevation, azimuth) bin; empty cells are -1.  The map spans a
    90-degree horizontal FOV and a 0.4*64-degree vertical FOV.

    :param data_idx: sample identifier; kept for interface compatibility,
        not used in the computation.
    :param velo_points: float array of shape (N, 4): x, y, z, intensity.
        (An integer array would truncate the 1e-6 zero-guards -- use floats.)
    :param H: the row num of the full-resolution depth map (64, 32, 16...).
    :param W: the col num of the depth map.
    :param slice: keep every *slice*-th row (name shadows the ``slice``
        builtin; kept for interface compatibility).
    :param line_spec: explicit row indices to keep; overrides *slice*.
    :param get_lines: accepted for interface compatibility; has no effect
        on the returned value.
    :param fill_in_line: optional data written into the map before row
        selection, at *fill_in_spec* rows or every *fill_in_slice*-th row.
    :returns: float array of shape (H', W, 4).
    """
    # Angular width of one vertical / horizontal bin.
    dtheta = np.radians(0.4 * 64.0 / H)
    dphi = np.radians(90.0 / W)

    x, y, z, i = velo_points[:, 0], velo_points[:, 1], velo_points[:, 2], velo_points[:, 3]
    d = np.sqrt(x ** 2 + y ** 2 + z ** 2)  # range
    r = np.sqrt(x ** 2 + y ** 2)           # ground-plane range
    # Guard against division by zero for points at the origin.
    d[d == 0] = 0.000001
    r[r == 0] = 0.000001

    # Azimuth bin: 0 at +45 degrees, clamped to [0, W).
    phi = np.radians(45.) - np.arcsin(y / r)
    phi_ = (phi / dphi).astype(int)
    phi_[phi_ < 0] = 0
    phi_[phi_ >= W] = W - 1

    # Elevation bin: 0 at +2 degrees above horizontal, clamped to [0, H).
    theta = np.radians(2.) - np.arcsin(z / d)
    theta_ = (theta / dtheta).astype(int)
    theta_[theta_ < 0] = 0
    theta_[theta_ >= H] = H - 1

    # Scatter points into the map; untouched cells stay at -1.  When
    # several points share a bin, the last one written wins.
    depth_map = - np.ones((H, W, 4))
    depth_map[theta_, phi_, 0] = x
    depth_map[theta_, phi_, 1] = y
    depth_map[theta_, phi_, 2] = z
    depth_map[theta_, phi_, 3] = i

    # Optionally overwrite selected rows with externally supplied data.
    if fill_in_line is not None:
        if fill_in_spec is not None:
            depth_map[fill_in_spec] = fill_in_line
        else:
            depth_map[::fill_in_slice, :, :] = fill_in_line

    # Row selection: explicit indices take precedence over uniform slicing.
    if line_spec is not None:
        depth_map = depth_map[line_spec, :, :]
    else:
        depth_map = depth_map[::slice, :, :]

    # Fix: the old code built an unused ``depth_map_lines`` copy when
    # *get_lines* was set and then discarded it; that dead work is removed
    # (the returned value is unchanged).
    return depth_map
if __name__ == '__main__':
    # CLI: paths to KITTI calib/image/velodyne data plus angle-map geometry.
    parser = argparse.ArgumentParser("Generate sparse pseudo-LiDAR points")
    parser.add_argument('--calib_path', type=str,
                        help='path to calibration files')
    parser.add_argument('--image_path', type=str,
                        help='path to image files')
    parser.add_argument('--ptc_path', type=str,
                        help='path to point cloud files')
    parser.add_argument('--output_path', type=str,
                        help='path to sparsed point cloud files')
    # Angle-map geometry: keep every slice-th of H rows, W azimuth bins.
    parser.add_argument('--slice', default=1, type=int)
    parser.add_argument('--H', default=64, type=int)
    parser.add_argument('--W', default=1024, type=int)
    parser.add_argument('--D', default=700, type=int)
    parser.add_argument('--store_line_map_dir', type=str, default=None)
    parser.add_argument('--line_spec', type=int, nargs='+', default=None)
    parser.add_argument('--fill_in_map_dir', type=str, default=None)
    parser.add_argument('--fill_in_spec', type=int,
                        nargs='+', default=None)
    parser.add_argument('--fill_in_slice', type=int, default=None)
    parser.add_argument('--split_file', type=str)
    parser.add_argument('--threads', type=int, default=4)
    args = parser.parse_args()
    # NOTE(review): gen_sparse_points_all is defined elsewhere in the full
    # file (not visible in this chunk).
    gen_sparse_points_all(args)
| 34.401786 | 194 | 0.611601 |
d2537e3317890ddaef34e1cff80e0e43d3fa3866 | 13,481 | py | Python | testsuite/conversion.py | buganini/bsdconv | 7830f4ebef9b04f9877a21f24a7705a48a4812c4 | [
"BSD-2-Clause"
] | 33 | 2015-01-25T12:04:04.000Z | 2021-12-12T23:16:55.000Z | testsuite/conversion.py | buganini/bsdconv | 7830f4ebef9b04f9877a21f24a7705a48a4812c4 | [
"BSD-2-Clause"
] | 14 | 2015-11-19T20:52:39.000Z | 2021-06-15T03:18:31.000Z | testsuite/conversion.py | buganini/bsdconv | 7830f4ebef9b04f9877a21f24a7705a48a4812c4 | [
"BSD-2-Clause"
] | 5 | 2016-07-27T15:30:39.000Z | 2020-07-06T11:52:15.000Z | # -*- coding: utf-8 -*-
import sys
import urllib
from bsdconv import Bsdconv
iotest=[
["big5:utf-8","\xa5\x5c\x5c\xaf\xe0","\"],
["big5-5c,big5:utf-8","\xa5\x5c\x5c\xaf\xe0",""],
["utf-8:big5-5c,big5","","\xa5\x5c\x5c\xaf\xe0"],
["_cp950:utf-8","\xa5\x5c\xaf\xe0",""],
["utf-8:_cp950,ascii","",""],
["utf-8:_uao250,ascii","","\x95\xed"],
["utf-8:big5,cp950-trans","","\xb4\xfa\xb8\xd5"],
["ascii,3f:ascii","testtest","test??????test"],
["ascii,any#0137:ascii","testtest","test777777test"],
["utf-8:ascii,3f","testtest","test??test"],
["utf-8:ascii,any#38","testtest","test88test"],
["utf-8:uao250|_cp950,ascii,3f:utf-8","","??"],
["utf-8:uao250|_cp950,ascii,sub:utf-8","",""],
["cns11643:utf-8","1234\x00\x01\x60\x41\x00\x01\x66\x5cabcd","1234abcd"],
["utf-8:cns11643","1234abcd","1234\x00\x01\x60\x41\x00\x01\x66\x5cabcd"],
["ansi-control,utf-8:split:bsdconv-keyword,bsdconv","a\033[1mb","0161,1B5B316D,0162,"],
["ascii-named-html-entity:utf-8","ü",""],
["ascii-numeric-html-entity:utf-8","測試",""],
["utf-8:ascii-hex-numeric-html-entity","\n","測
"],
["utf-8:ascii-dec-numeric-html-entity","\n","測 "],
["utf-8:ascii-named-html-entity","","Ç"],
["bsdconv:utf-8","016e2c",""],
["bsdconv:utf-8","016e2c,018a66",""],
["utf-8:bsdconv","\n","016E2C010A"],
["utf-8:pass","\n","\x01\x6e\x2c\x01\x0a"],
["utf-8:raw","\n","\x6e\x2c\x8a\x66\x0a"],
["bsdconv-keyword,utf-8:bsdconv-keyword,bsdconv|bsdconv-keyword,bsdconv:bsdconv-keyword,utf-8",",\t,\n",",\t,\n"],
["byte:byte","\xaa\xbb\xcc\xdd","\xaa\xbb\xcc\xdd"],
["escape:utf-8","%u6e2c",""],
["escape:split:bsdconv-keyword,bsdconv","%u6e2c%e8%a9%a6","016E2C,03E8,03A9,03A6,"],
["escape:pass#mark&for=unicode,byte|pass#unmark,utf-8:utf-8","%u6e2c%e8%a9%a6",""],
["escape,utf-8:pass#mark&for=unicode,byte|pass#unmark,big5:utf-8","%u6e2c%b8%d5",""],
["escape,ascii-numeric-html-entity,utf-8:pass#mark&for=unicode,byte|pass#unmark,big5:utf-8","%u6e2c%b8%d5功",""],
["escape:pass#mark&for=unicode,byte|pass#unmark,utf-8:utf-8","\\346\\270\\254\\350\\251\\246",""],
["utf-8:ascii,ascii-escaped-unicode","test","test\\u6E2C\\u8A66"],
["utf-8:ascii-html-cns11643-img","","<img class=\"cns11643_img\" src=\"http://www.cns11643.gov.tw/AIDB/png.do?page=1&code=6041\" />"],
["utf-8:ascii-html-info","\n","<a href=\"http://www.cns11643.gov.tw/AIDB/query_general_view.do?page=1&code=6041\"><img src=\"http://www.cns11643.gov.tw/AIDB/png.do?page=1&code=6041\" /></a><a href=\"http://www.fileformat.info/info/unicode/char/0A/index.htm\"><img class=\"unicode_img\" src=\"http://www.unicode.org/cgi-bin/refglyph?24-A\" /></a>"],
["utf-8:ascii-html-unicode-img","","<img class=\"unicode_img\" src=\"http://www.unicode.org/cgi-bin/refglyph?24-6E2C\" />"],
["utf-8:null","blah",""],
["utf-8:ambiguous-pad:utf-8"," 2"," 2"],
["utf-8:ambiguous-unpad:utf-8"," 2"," 2"],
["ansi-control,byte:big5-defrag:byte,ansi-control|skip,big5:split:bsdconv-keyword,bsdconv","\xaf\033[1m\xe0","0180FD,1B5B316D,"],
["utf-8:chewing:utf-8","abcxyz","abcxyz"],
["utf-8:chewing:han-pinyin:utf-8","","ce4shi4"],
["utf-8:kana-phonetic:utf-8","","doraemon"],
["ascii:alias-from:ascii","BIG5","UAO250"],
["ascii:alias-from:ascii","UAO250","ASCII,_UAO250"],
["ascii:alias-from:ascii","LOCALE","UTF-8"],
["ascii:alias-from:ascii","UTF-8","ASCII,_UTF-8"],
["ascii:alias-to:ascii","BIG5","CP950"],
["ascii:alias-to:ascii","CP950","_CP950,ASCII"],
["utf-8:cns11643:split:bsdconv-keyword,bsdconv","","02016041,0201665C,"],
["bsdconv:unicode:split:bsdconv-keyword,bsdconv","02016041,0201665C","016E2C,018A66,"],
["utf-8:upper:utf-8","testTEST","TESTTEST"],
["utf-8:lower:utf-8","testTEST","testtest"],
["utf-8:full:utf-8","testTEST1234",""],
["utf-8:half:utf-8","","testTEST1234"],
["utf-8:upsidedown:utf-8","FUNNY",""],
["utf-8:unix:utf-8","a\r\nb","a\nb"],
["utf-8:mac:utf-8","a\r\nb","a\rb"],
["utf-8:win:utf-8","a\nb","a\r\nb"],
["utf-8:nl2br:utf-8","a\nb","a<br />b"],
["utf-8:trim-width#22&ambi-as-wide:utf-8","",""],
["utf-8:trim-width#22:utf-8","",""],
["utf-8:trim-width#10&ambiguous-as-wide:utf-8","32",""],
["utf-8:zh-strings:utf-8","abdefghij","\n\n"],
["utf-8:zhcn:utf-8","",""],
["utf-8:zhtw:utf-8","",""],
["utf-8:zhtw:zhtw-words:utf-8","",""],
["utf-8:whitespace-derail:zhtw:zhtw-words:whitespace-rerail:utf-8"," "," "],
["utf-8:zh-decomp:zh-comp:utf-8","",""],
["utf-8:ibm-37","EBCDIC test","\xc5\xc2\xc3\xc4\xc9\xc3\x40\xa3\x85\xa2\xa3"],
["utf-8:ibm-37|ibm-37:utf-8","EBCDIC test","EBCDIC test"],
["utf-8:ibm-930|ibm-930:utf-8","",""],
["utf-8:ibm-933|ibm-933:utf-8","",""],
["utf-8:ibm-935|ibm-935:utf-8","",""],
["utf-8:ibm-937|ibm-937:utf-8","",""],
["utf-8:ibm-939|ibm-939:utf-8","",""],
["utf-8:gb18030|gb18030:utf-8","",""],
["utf-8:ascii,escape#for=unicode&mode=16&prefix=2575","ab","%u6E2Ca%u8A66b%u597D"],
["utf-8:big5|ascii,byte:ascii,escape#for=byte&mode=hex&prefix=5c78","ab","\\xB4\\xFAa\\xB8\\xD5b\\xA6n"],
["utf-8:big5|ascii,byte:ascii,escape#for=byte&mode=oct&prefix=5c","ab","\\264\\372a\\270\\325b\\246n"],
["utf-8:big5,pass#for=unicode&mark|pass#unmark,ascii,byte:ascii,url","test","%B4%FAtest%u5586%B8%D5"],
["utf-8:ascii,escape#for=unicode&prefix=2623&mode=10&suffix=3b","test","測test喆試"],
["utf-8:upper:utf-8","a","A"],
["utf-8:lower:utf-8","A","a"],
["utf-8:nfd:utf-8","","a"],
["utf-8:nfc:utf-8","a",""],
["utf-8:nfkd:utf-8","","aDza"],
["utf-8:nfkc:utf-8","a","Da"],
["ascii,any#019644.012F:utf-8","AB","A///B"],
["utf-8:pass,zh-decomp:insert#after=002c:bsdconv-keyword,bsdconv","","014E0D,015927,014E0D,018981,"],
["utf-8:pass#limit=2,zh-decomp:insert#after=002c:bsdconv-keyword,bsdconv","","014E0D,015927,048D,040107,0476,"],
["bsdconv:nfd:_nf-order:insert#after=002c:bsdconv-keyword,bsdconv","011e9b,010323","01017F,010323,010307,"],
["utf-8:_nf-hangul-decomposition:utf-8","",""],
["utf-8:casefold:utf-8","Ab","abssss"],
["utf-8:replace#0142.0143=0132.0133:utf-8","ABCD","A23D"],
["utf-8:strings#min-len=2:utf-8","aababcabcd","ab\nabc\nabcd\n"],
["utf-8:strings#min-len=2&before=0128&after=0129.010a:utf-8","aababcabcd","(ab)\n(abc)\n(abcd)\n"],
["utf-8:whitespace-derail:zhtw:zhtw-words:whitespace-rerail:utf-8"," "," "],
["fallback-unicode:insert#after=002c:bsdconv-keyword,bsdconv", "\xe8","01E8,"],
["cp950-uda:insert#after=002c:bsdconv-keyword,bsdconv", "\xfa\x40\xfe\xfe\x8e\x40\xa0\xfe\x81\x40\x8d\xfe\xc6\xa1\xc8\xfe", "01E000,01E310,01E311,01EEB7,01EEB8,01F6B0,01F6B1,01F848,"],
["_utf-8:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xB0\x80", ""],
["_utf-8#cesu:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xB0\x80", "01010400,"],
["_utf-8#loose:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xB0\x80", "01D801,01DC00,"],
["_utf-8#cesu,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81", "013F,013F,013F,"],
["_utf-8#cesu,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80", "013F,013F,013F,"],
["_utf-8#cesu,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xe9\x99\x84", "013F,013F,013F,019644,"],
["_utf-8#cesu,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xe9\x99\x84", "013F,013F,013F,019644,"],
["_utf-8#loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xe9\x99\x84", "01D801,019644,"],
["_utf-8#loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xe9\x99\x84", "01DC00,019644,"],
["_utf-8#cesu&loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xe9\x99\x84", "01D801,019644,"],
["_utf-8#cesu&loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xe9\x99\x84", "01DC00,019644,"],
["_utf-8#cesu&loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xA0\x81", "01D801,01D801,"],
["_utf-8#cesu&loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xED\xB0\x80", "01DC00,01DC00,"],
["_utf-8#loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xA0\x81", "01D801,01D801,"],
["_utf-8#loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xED\xB0\x80", "01DC00,01DC00,"],
["_utf-8:insert#after=002c:bsdconv-keyword,bsdconv", "\xf0\x80\x80\xaf", ""],
["_utf-8#overlong:insert#after=002c:bsdconv-keyword,bsdconv", "\xf0\x80\x80\xaf", "012F,"],
["_utf-8#super:insert#after=002c:bsdconv-keyword,bsdconv", "\xf8\x80\x80\x80\xaf", ""],
["_utf-8#super&overlong:insert#after=002c:bsdconv-keyword,bsdconv", "\xf8\x80\x80\x80\xaf", "012F,"],
["_utf-8#super,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc1\xbf,\xe0\x9f\xbf,\xf0\x8f\xbf\xbf,\xf8\x87\xbf\xbf\xbf,\xfc\x83\xbf\xbf\xbf\xbf", "013F,013F,012C,013F,013F,013F,012C,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,013F,"],
["_utf-8#super&overlong,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc1\xbf,\xe0\x9f\xbf,\xf0\x8f\xbf\xbf,\xf8\x87\xbf\xbf\xbf,\xfc\x83\xbf\xbf\xbf\xbf", "017F,012C,0107FF,012C,01FFFF,012C,011FFFFF,012C,0103FFFFFF,"],
["_utf-8#overlong,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc1\xbf,\xe0\x9f\xbf,\xf0\x8f\xbf\xbf,\xf8\x87\xbf\xbf\xbf,\xfc\x83\xbf\xbf\xbf\xbf", "017F,012C,0107FF,012C,01FFFF,012C,013F,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,013F,"],
["_utf-8,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc0\x80,\xe0\x80\x80,\xf0\x80\x80\x80,\xf8\x80\x80\x80\x80,\xfc\x80\x80\x80\x80\x80", "013F,013F,012C,013F,013F,013F,012C,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,013F,"],
["_utf-8#nul&overlong&super,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc0\x80,\xe0\x80\x80,\xf0\x80\x80\x80,\xf8\x80\x80\x80\x80,\xfc\x80\x80\x80\x80\x80", "0100,012C,0100,012C,0100,012C,0100,012C,0100,"],
]
countertest=[
["utf-8:width:null","123",{"FULL":2,"AMBI":1,"HALF":3}],
["utf-8:count:null","123",{"COUNT":6}],
["utf-8:count#blah:null","123",{"BLAH":6}],
["utf-8:count#for=lala&for=cjk:null","123abc",{"COUNT":2}],
]
# Conversion vectors: run every (conversion-spec, input, expected-output)
# triple through Bsdconv and compare the result byte-for-byte.
passed=True
for c, i, o in iotest:
    p=Bsdconv(c)
    if not p:
        # The conversion chain failed to instantiate (bad spec / codec).
        print(Bsdconv.error())
        print("Test failed at %s" % repr([c, i, o]))
        del p
        passed=False
        continue
    r=p.conv(i)
    if o != r:
        print("Test failed at %s" % repr([c, i, o]))
        print("expected(%d): %s" % (len(o), repr(o)))
        print("result(%d): %s" % (len(r), repr(r)))
        passed=False
    del p
# Counter vectors: convert the input and compare the selected counters.
for c, d, i in countertest:
    p=Bsdconv(c)
    if not p:
        print(Bsdconv.error())
        # NOTE(review): `o` here is left over from the previous loop, so
        # this message prints stale data; probably meant repr([c, d, i]).
        print("Test failed at %s" % repr([c, i, o]))
        passed=False
        continue
    p.conv(d)
    r=p.counter()
    for k in i:
        if i[k] != r[k]:
            print("Test failed at %s" % repr([c, d, i]))
            print("expected: %s" % repr(i))
            print("result: %s" % repr(r))
            passed=False
    del p
url=""
f_map=open("tmp/map.txt")
for l in f_map:
l=l.strip().split("\t")
if l[0]=="NormalizationTest.txt":
url=l[1]
break
nt=open("tmp/NormalizationTest.txt")
toSRC=Bsdconv("bsdconv:insert#after=002c:bsdconv-keyword,bsdconv")
toNFC=Bsdconv("bsdconv:nfc:insert#after=002c:bsdconv-keyword,bsdconv")
toNFD=Bsdconv("bsdconv:nfd:insert#after=002c:bsdconv-keyword,bsdconv")
toNFKC=Bsdconv("bsdconv:nfkc:insert#after=002c:bsdconv-keyword,bsdconv")
toNFKD=Bsdconv("bsdconv:nfkd:insert#after=002c:bsdconv-keyword,bsdconv")
print("Normalization Tests: #"+url)
ln = 0
for l in nt:
    ln += 1
    # NOTE(review): lines from file iteration keep their '\n', so this
    # "if not l" never fires; harmless since '#'/'@' checks follow.
    if not l:
        continue
    if l[0]=="#":
        # Comment line in NormalizationTest.txt
        continue
    if l[0]=="@":
        # Section header (e.g. "@Part0") -- echo it.
        print("\t"+l.strip())
        continue
    # Columns c1..c5 are the five normalization-related forms of one test.
    c1,c2,c3,c4,c5,comment=l.strip().split(";",5)
    # bnf() is defined elsewhere in the full file (not visible here).
    c1=bnf(c1)
    c2=bnf(c2)
    c3=bnf(c3)
    c4=bnf(c4)
    c5=bnf(c5)
nftest=[
#NFC
[toSRC.conv(c2), toNFC.conv(c1), "c2 == toNFC(c1)"],
[toNFC.conv(c1), toNFC.conv(c2), "toNFC(c1) == toNFC(c2)"],
[toNFC.conv(c2), toNFC.conv(c3), "toNFC(c2) == toNFC(c3)"],
[toSRC.conv(c4), toNFC.conv(c4), "c4 == toNFC(c4)"],
[toNFC.conv(c4), toNFC.conv(c5), "toNFC(c4) == toNFC(c5)"],
#NFD
[toSRC.conv(c3), toNFD.conv(c1), "c3 == toNFD(c1)"],
[toNFD.conv(c1), toNFD.conv(c2), "toNFD(c1) == toNFD(c2)"],
[toNFD.conv(c2), toNFD.conv(c3), "toNFD(c2) == toNFD(c3)"],
[toSRC.conv(c5), toNFD.conv(c4), "c5 == toNFD(c4)"],
[toNFD.conv(c4), toNFD.conv(c5), "toNFD(c4) == toNFD(c5)"],
#NFKC
[toSRC .conv(c4), toNFKC.conv(c1), "c4 == toNFKC(c1)"],
[toNFKC.conv(c1), toNFKC.conv(c2), "toNFKC(c1) == toNFKC(c2)"],
[toNFKC.conv(c2), toNFKC.conv(c3), "toNFKC(c2) == toNFKC(c3)"],
[toNFKC.conv(c3), toNFKC.conv(c4), "toNFKC(c3) == toNFKC(c4)"],
[toNFKC.conv(c4), toNFKC.conv(c5), "toNFKC(c4) == toNFKC(c5)"],
#NFKD
[toSRC .conv(c5), toNFKD.conv(c1)," c5 == toNFKD(c1)"],
[toNFKD.conv(c1), toNFKD.conv(c2), "toNFKD(c1) == toNFKD(c2)"],
[toNFKD.conv(c2), toNFKD.conv(c3), "toNFKD(c2) == toNFKD(c3)"],
[toNFKD.conv(c3), toNFKD.conv(c4), "toNFKD(c3) == toNFKD(c4)"],
[toNFKD.conv(c4), toNFKD.conv(c5), "toNFKD(c4) == toNFKD(c5)"],
]
for a,b,desc in nftest:
if a!=b:
print ln, "Failed: ", desc, a, "!=", b, comment
print("Conversion tests finished.")
| 53.284585 | 350 | 0.656925 |
d2554278f5d4ba5a87659a474ac65fdd8acaa5a1 | 2,488 | py | Python | apps/cloud/odc/apps/cloud/thredds_to_tar.py | robbibt/odc-tools | e2df2c9ef65dbd5652d97cd88617989b4b724814 | [
"Apache-2.0"
] | null | null | null | apps/cloud/odc/apps/cloud/thredds_to_tar.py | robbibt/odc-tools | e2df2c9ef65dbd5652d97cd88617989b4b724814 | [
"Apache-2.0"
] | null | null | null | apps/cloud/odc/apps/cloud/thredds_to_tar.py | robbibt/odc-tools | e2df2c9ef65dbd5652d97cd88617989b4b724814 | [
"Apache-2.0"
] | null | null | null | import tarfile
import click
import requests
from odc.io.tar import tar_mode, add_txt_file
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
from urllib.parse import urlparse
from thredds_crawler.crawl import Crawl
if __name__ == '__main__':
cli()
| 33.621622 | 115 | 0.663987 |
d25543f2eb84e1a829ecf2a781633ed4850daa4c | 599 | py | Python | examples/ec2/tests/config.py | dabble-of-devops-biodeploy/terraform-aws-batch | 9d075163821f81f33d6be767820d1db20b45eb8e | [
"Apache-2.0"
] | 3 | 2021-12-07T18:10:16.000Z | 2022-02-04T09:15:31.000Z | examples/ec2/tests/config.py | dabble-of-devops-biodeploy/terraform-aws-batch | 9d075163821f81f33d6be767820d1db20b45eb8e | [
"Apache-2.0"
] | null | null | null | examples/ec2/tests/config.py | dabble-of-devops-biodeploy/terraform-aws-batch | 9d075163821f81f33d6be767820d1db20b45eb8e | [
"Apache-2.0"
] | 1 | 2022-02-22T01:48:38.000Z | 2022-02-22T01:48:38.000Z | DATA_S3 = "bioanalyze-ec2-test-nf-rnaseq-06o3qdtm7v"
JOB_S3 = DATA_S3
# These come from the terraform code in auto-deployment/terraform
ECR = "dabbleofdevops/nextflow-rnaseq-tutorial"
COMPUTE_ENVIRONMENT = "bioanalyze-ec2-test-nf-rnaseq"
JOB_DEF_NAME = "bioanalyze-ec2-test-nf-rnaseq"
JOB_QUEUE_NAME = "bioanalyze-ec2-test-nf-rnaseq-default-job-queue"
JOB_ROLE = "arn:aws:iam::018835827632:role/bioanalyze-ec2-test-nf-rnaseq-batch_execution_role"
SECRET_NAME = "bioanalyze-ec2-test-nf-rnaseq"
SECRET_ARN = "arn:aws:secretsmanager:us-east-1:018835827632:secret:bioanalyze-ec2-test-nf-rnaseq-Zg7kMY" | 49.916667 | 104 | 0.806344 |
d255a8c98ce6037d15065ccd226fd922085a64a0 | 4,067 | py | Python | adios-1.9.0/wrappers/numpy/example/utils/ncdf2bp.py | swatisgupta/Adaptive-compression | b97a1d3d3e0e968f59c7023c7367a7efa9f672d0 | [
"BSD-2-Clause"
] | null | null | null | adios-1.9.0/wrappers/numpy/example/utils/ncdf2bp.py | swatisgupta/Adaptive-compression | b97a1d3d3e0e968f59c7023c7367a7efa9f672d0 | [
"BSD-2-Clause"
] | null | null | null | adios-1.9.0/wrappers/numpy/example/utils/ncdf2bp.py | swatisgupta/Adaptive-compression | b97a1d3d3e0e968f59c7023c7367a7efa9f672d0 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Example:
$ python ./ncdf2bp.py netcdf_file
"""
from adios import *
from scipy.io import netcdf
import numpy as np
import sys
import os
import operator
if len(sys.argv) < 2:
usage()
sys.exit(0)
##fname = "MERRA100.prod.assim.tavg3_3d_mst_Cp.19791010.SUB.nc"
fname = sys.argv[1]
fout = '.'.join(fname.split('.')[:-1]) + ".bp"
tname = "time"
if len(sys.argv) > 2:
tname = sys.argv[2]
## Open NetCDF file
f = netcdf.netcdf_file(fname, 'r')
## Check dimension
assert (all(map(lambda x: x is not None,
[ val for k, val in f.dimensions.items()
if k != tname])))
## Two types of variables : time-dependent or time-independent
dimvar = {n:v for n,v in f.variables.items() if n in f.dimensions.keys()}
var = {n:v for n,v in f.variables.items() if n not in f.dimensions.keys()}
tdepvar = {n:v for n,v in var.items() if tname in v.dimensions}
tindvar = {n:v for n,v in var.items() if tname not in v.dimensions}
## Time dimension
if len(tdepvar) > 0:
assert (len(set([v.dimensions.index(tname) for v in tdepvar.values()]))==1)
tdx = tdepvar.values()[0].dimensions.index(tname)
assert (all([v.data.shape[tdx] for v in tdepvar.values()]))
tdim = tdepvar.values()[0].shape[tdx]
else:
tdim = 1
## Init ADIOS without xml
init_noxml()
allocate_buffer(BUFFER_ALLOC_WHEN.NOW, 100)
gid = declare_group ("group", tname, FLAG.YES)
select_method (gid, "POSIX1", "verbose=3", "")
d1size = 0
for name, val in f.dimensions.items():
if name == tname:
continue
print "Dimension : %s (%d)" % (name, val)
define_var (gid, name, "", DATATYPE.integer, "", "", "")
d1size += 4
"""
d2size = 0
for name, var in dimvar.items():
if name == tname:
continue
if name in f.dimensions.keys():
name = "v_" + name
print "Dim variable : %s (%s)" % (name, ','.join(var.dimensions))
define_var (gid, name, "", np2adiostype(var.data.dtype.type),
','.join(var.dimensions),
"",
"")
d2size += var.data.size * var.data.dtype.itemsize
"""
v1size = 0
for name, var in tindvar.items():
print "Variable : %s (%s)" % (name, ','.join(var.dimensions))
define_var (gid, name, "", np2adiostype(var.data.dtype.type),
','.join(var.dimensions),
"",
"")
v1size += var.data.size * var.data.dtype.itemsize
v2size = 0
for name, var in tdepvar.items():
print "Variable : %s (%s)" % (name, ','.join(var.dimensions))
define_var (gid, name, "", np2adiostype(var.data.dtype.type),
','.join(var.dimensions),
','.join([dname for dname in var.dimensions
if dname != tname]),
"0,0,0")
v2size += var.data.size * var.data.dtype.itemsize / tdim
## Clean old file
if os.access(fout, os.F_OK):
os.remove(fout)
for it in range(tdim):
print
print "Time step : %d" % (it)
fd = open("group", fout, "a")
groupsize = d1size + v1size + v2size
set_group_size(fd, groupsize)
for name, val in f.dimensions.items():
if name == tname:
continue
print "Dimension writing : %s (%d)" % (name, val)
write_int(fd, name, val)
for name, var in tindvar.items():
try:
arr = np.array(var.data,
dtype=var.data.dtype.type)
print "Time independent variable writing : %s %s" % (name, arr.shape)
write(fd, name, arr)
except ValueError:
print "Skip:", name
for name, var in tdepvar.items():
try:
arr = np.array(var.data.take([it], axis=tdx),
dtype=var.data.dtype)
print "Time dependent variable writing : %s %s" % (name, arr.shape)
write(fd, name, arr)
except ValueError:
print "Skip:", name
close(fd)
f.close()
finalize()
print
print "Done. Saved:", fout
| 27.666667 | 81 | 0.572904 |
d256dc1971a485e302633a36903b74f4a74ac3ab | 2,322 | py | Python | airflow/operators/hive_operator.py | nirmeshk/airflow | 4556450b88ef7682a006e9125131a5bb3a91df00 | [
"Apache-2.0"
] | 1 | 2021-03-02T20:08:53.000Z | 2021-03-02T20:08:53.000Z | airflow/operators/hive_operator.py | nirmeshk/airflow | 4556450b88ef7682a006e9125131a5bb3a91df00 | [
"Apache-2.0"
] | null | null | null | airflow/operators/hive_operator.py | nirmeshk/airflow | 4556450b88ef7682a006e9125131a5bb3a91df00 | [
"Apache-2.0"
] | null | null | null | import logging
import re
from airflow.hooks import HiveCliHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
| 33.652174 | 77 | 0.656331 |
d2571cfece71be4e3c7267fd9fb5b654ad0b459f | 1,042 | py | Python | classification/prepare_model.py | JSC-NIIAS/TwGoA4aij2021 | 9f011f506748435190f8e4e635820c8208144b94 | [
"MIT"
] | null | null | null | classification/prepare_model.py | JSC-NIIAS/TwGoA4aij2021 | 9f011f506748435190f8e4e635820c8208144b94 | [
"MIT"
] | null | null | null | classification/prepare_model.py | JSC-NIIAS/TwGoA4aij2021 | 9f011f506748435190f8e4e635820c8208144b94 | [
"MIT"
] | null | null | null | import os
import yaml
import segmentation_models_pytorch as smp
import torch
import argparse
import torch.nn as nn
import timm
from model_wrapper import Classification_model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--hyp', type=str, default='configs/baseline_signal.yaml', help='hyperparameters path')
opt = parser.parse_args()
prepare_model(opt)
| 41.68 | 177 | 0.764875 |
d257693b9fe3b1d9ae0d3ac5245b8412f3de31ea | 8,259 | py | Python | KarpuzTwitterApp/logic.py | bounswe/bounswe2018group5 | d547820bfb3070da3e3935a64429e4c45aef6098 | [
"MIT"
] | 10 | 2018-03-18T20:33:39.000Z | 2021-03-03T07:37:33.000Z | KarpuzTwitterApp/logic.py | bounswe/bounswe2018group5 | d547820bfb3070da3e3935a64429e4c45aef6098 | [
"MIT"
] | 254 | 2018-02-07T15:52:26.000Z | 2019-01-08T04:11:47.000Z | KarpuzTwitterApp/logic.py | bounswe/bounswe2018group5 | d547820bfb3070da3e3935a64429e4c45aef6098 | [
"MIT"
] | 5 | 2018-03-01T13:28:45.000Z | 2021-05-24T11:07:06.000Z | from requests import get
from utils.TwitterService import TwitterService
import tweepy
from decouple import config
def get_tweets_with_location_and_query(search_params):
    """Search tweets within a geographic area that match a query string.

    Args:
        search_params: dict with the required keys
            'geocode'     -- "lat,long,<distance>km" (distance a positive float),
            'result_type' -- one of 'popular', 'recent', 'mixed',
            'q'           -- the query string,
            'count'       -- 25, 50 or 100, as an int or digit string.
            The dict is forwarded verbatim to the Twitter search API.

    Returns:
        {'response': True, 'tweets': [...]} on success, otherwise
        {'response': False, 'errors': <message or Twitter API errors>}.
    """
    # Required-parameter validation, checked in this fixed order.
    # (Typo "containe" in the original q/count messages fixed for consistency.)
    required = (
        ('geocode', 'Parameter must contain geocode'),
        ('result_type', 'Parameter must contain result_type'),
        ('q', 'Parameter must contain query(q)'),
        ('count', 'Parameter must contain count'),
    )
    for key, message in required:
        if key not in search_params:
            return {'response': False, 'errors': message}

    geocode = search_params['geocode']
    result_type = search_params['result_type']
    count = search_params['count']

    # --- geocode: "lat,long,<distance>km" ---------------------------------
    parts = geocode.split(',')
    if geocode == '' or len(parts) != 3:
        return {
            'response': False,
            'errors': "GeoCode must include three values lat/long/distance. Distance must include km."
        }
    lat, long_, perimeter = parts
    if perimeter[-2:] != 'km':
        return {'response': False, 'errors': "Distance must include km."}
    try:
        # Re-raise ValueError so non-positive values share the parse-error path.
        if float(perimeter[:-2]) <= 0:
            raise ValueError
    except ValueError:
        return {'response': False, 'errors': "Distance must be positive float."}
    try:
        float(lat)
        float(long_)
    except ValueError:
        return {'response': False, 'errors': "Lat and Long must be float."}

    # --- result_type ------------------------------------------------------
    if result_type not in ['popular', 'recent', 'mixed']:
        return {'response': False, 'errors': "Result type must be in ['popular', 'recent', 'mixed']."}

    # --- count: int or digit string, limited to 25/50/100 -----------------
    # (Fixed: the original called count.isdigit() on any non-int value and
    # raised AttributeError for e.g. floats; now that path returns an error.)
    if isinstance(count, int):
        pass
    elif isinstance(count, str) and count.isdigit():
        count = int(count)
    else:
        return {'response': False, 'errors': "Count must be integer."}
    if count not in [25, 50, 100]:
        return {'response': False, 'errors': "Count type must be in [25, 50, 100]."}

    # --- call the Twitter standard search API -----------------------------
    # NOTE: search_params is forwarded as-is; the normalized `count` above is
    # used only for validation, matching the original behavior.
    search_url = '{}1.1/search/tweets.json'.format(TwitterService().get_base_url())
    search_response = get(search_url, headers=TwitterService().get_request_headers(), params=search_params)

    # Any non-200 status is surfaced as the API's own error payload.
    if search_response.status_code != 200:
        return {'response': False, 'errors': search_response.json()['errors']}

    tweets = search_response.json()['statuses']
    return {'response': True, 'tweets': tweets}
def search_tweets(query):
    """Run a standard Twitter search for *query* and return up to 20 tweets.

    Returns {'response': True, 'tweets': [...]} on success, otherwise
    {'response': False, 'errors': <error payload from the Twitter API>}.
    """
    service = TwitterService()
    endpoint = '{}1.1/search/tweets.json'.format(service.get_base_url())

    response = get(
        endpoint,
        headers=service.get_request_headers(),
        params={'q': query, 'count': 20},
    )

    payload = response.json()
    # Any non-200 status is surfaced as the API's own error payload.
    if response.status_code != 200:
        return {'response': False, 'errors': payload['errors']}

    return {'response': True, 'tweets': payload['statuses']}
| 36.544248 | 128 | 0.683134 |
d258b7f764b2791ef696f1cad34e04a51316c183 | 4,511 | py | Python | StepperComms.py | MicaelJarniac/StepperComms | 53336a3733c1b5bb30b3d001b7fe3414f9c3fab9 | [
"MIT"
] | null | null | null | StepperComms.py | MicaelJarniac/StepperComms | 53336a3733c1b5bb30b3d001b7fe3414f9c3fab9 | [
"MIT"
] | null | null | null | StepperComms.py | MicaelJarniac/StepperComms | 53336a3733c1b5bb30b3d001b7fe3414f9c3fab9 | [
"MIT"
] | null | null | null | # Required imports
import os
import sys
import serial
import time
sys.path.append(os.path.dirname(os.path.expanduser('~/projects/Python-Playground/Debug'))) # Update path accordingly
from Debug.Debug import Debug
# Declare debug
debug = Debug(True, 3).prt # Simplifies debugging messages
# Message building blocks
RW_CMD = 0x80 # Validation check
TRANSFER_SIZE_MASK = 0x3f # Masks bits used for transfer size
BYTE_MASK = 0xff # Masks 1 byte
RW_MASK = 0x40 # Bit used for defining if 'read' or 'write' command type
READ = 1 # Command of type 'read'
WRITE = 0 # 'write'
ID_AMOUNT = 38 # Amount of remote variables
# Message size
CMD_ADDR_SIZE = 1
CMD_INFO_SIZE = 1 + CMD_ADDR_SIZE # 1 byte (basic info & transfer size) + 1 byte (address)
CMD_DATA_SIZE = 61 # 61 bytes (data)
CMD_BUFF_SIZE = CMD_INFO_SIZE + CMD_DATA_SIZE # Command info + command data
# Message buffer and related
OutCmdBuffer = [None] * CMD_BUFF_SIZE # Initializes the buffer with given size
# TODO Remove not used var
OutCmdBufferId = 0 # Holds the current buffer position
# Message parameters
CmdType = WRITE # Command type ('read' or 'write')
CmdSize = 0 # size
CmdAddr = 0 # address
CmdData = [None] * CMD_DATA_SIZE # data
# Serial configuration parameters
SerPort = "/dev/serial0" # Device
SerBaud = 9600 # Baud rate
SerTout = 1 # Timeout
SerDelay = 0.05 # Delay between quick writes
# Declare serial
ser = serial.Serial(
port = SerPort, # Serial port configurable above
baudrate = SerBaud, # Baudrate configurable above
bytesize = serial.EIGHTBITS, # Byte size hardcoded 8 bits
parity = serial.PARITY_NONE, # Parity hardcoded no parity
stopbits = serial.STOPBITS_TWO, # Stop bits hardcoded 2 stopbits
timeout = SerTout, # Timeout configurable above
xonxoff = False, # ? hardcoded false
rtscts = False, # ? hardcoded false
dsrdtr = False, # ? hardcoded false
write_timeout = SerTout, # Write timeout configurable above
inter_byte_timeout = None) # ? hardcoded none
# Remote variables
RemoteVars = [None] * ID_AMOUNT # Stores received variables
# TODO Read message
# Main loop
while True:
# Clear serial in and out buffers
ser.reset_input_buffer()
ser.reset_output_buffer()
# Placeholders
CmdType = WRITE
CmdSize = 1
CmdAddr = 31
CmdData[0] = 0x1
BuildMessage()
SendMessage()
debug("\n")
| 38.228814 | 144 | 0.552871 |