Dataset schema - each record lists the metadata fields below, separated by " | ", followed by the file content and three per-file statistics:

hexsha: string (length 40) | size: int64 (5 to 2.06M) | ext: string (11 classes) | lang: string (1 class)
max_stars_repo_path: string (length 3 to 251) | max_stars_repo_name: string (length 4 to 130) | max_stars_repo_head_hexsha: string (length 40 to 78) | max_stars_repo_licenses: list (length 1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (length 24, nullable) | max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 3 to 251) | max_issues_repo_name: string (length 4 to 130) | max_issues_repo_head_hexsha: string (length 40 to 78) | max_issues_repo_licenses: list (length 1 to 10) | max_issues_count: int64 (1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime: string (length 24, nullable) | max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 3 to 251) | max_forks_repo_name: string (length 4 to 130) | max_forks_repo_head_hexsha: string (length 40 to 78) | max_forks_repo_licenses: list (length 1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (length 24, nullable) | max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 1 to 1.05M) | avg_line_length: float64 (1 to 1.02M) | max_line_length: int64 (3 to 1.04M) | alphanum_fraction: float64 (0 to 1)
1be7ab6f787e652d44d15533e2b5246954d6801d | 932 | py | Python | tests/test_parse_icao24bit.py | Collen-Roller/arp | 08eaa2dda3adb1dbd600597a6d03603669c8e06d | ["MIT"] | 2 | 2020-10-28T17:03:14.000Z | 2021-01-27T10:44:33.000Z | tests/test_parse_icao24bit.py | Collen-Roller/arp | 08eaa2dda3adb1dbd600597a6d03603669c8e06d | ["MIT"] | 8 | 2020-12-08T16:42:43.000Z | 2020-12-29T00:41:33.000Z | tests/test_parse_icao24bit.py | Collen-Roller/arp | 08eaa2dda3adb1dbd600597a6d03603669c8e06d | ["MIT"] | 1 | 2020-12-09T20:35:52.000Z | 2020-12-09T20:35:52.000Z | import unittest
from flydenity import Parser
if __name__ == "__main__":
unittest.main()
| 35.846154 | 133 | 0.713519 |
1be82da5cbe879b6b36fe90dd23217980058a69e | 465 | py | Python | ever/util/_main.py | Bobholamovic/ever | f38060674a40ed53072b9d9be99cc656a830398f | ["Apache-2.0"] | 22 | 2021-08-21T00:13:18.000Z | 2022-03-28T19:38:10.000Z | ever/util/_main.py | Bobholamovic/ever | f38060674a40ed53072b9d9be99cc656a830398f | ["Apache-2.0"] | 2 | 2021-09-01T06:28:38.000Z | 2021-12-06T07:17:57.000Z | ever/util/_main.py | Bobholamovic/ever | f38060674a40ed53072b9d9be99cc656a830398f | ["Apache-2.0"] | 6 | 2021-08-21T06:32:47.000Z | 2022-02-10T07:41:29.000Z | import os
| 19.375 | 56 | 0.597849 |
1bea69b9a810613a8cdcc7d4cd5f8e74e2b87b61 | 687 | py | Python | resthelper/tests/test_build_url.py | rklonner/resthelper | c129a7ff3efb5447aeb9794142c4d640261d962d | ["MIT"] | null | null | null | resthelper/tests/test_build_url.py | rklonner/resthelper | c129a7ff3efb5447aeb9794142c4d640261d962d | ["MIT"] | null | null | null | resthelper/tests/test_build_url.py | rklonner/resthelper | c129a7ff3efb5447aeb9794142c4d640261d962d | ["MIT"] | null | null | null | import unittest
from resthelper.utils import build_restful_url
if __name__ == '__main__':
unittest.main() | 32.714286 | 68 | 0.622999 |
1beb0ef06d9c6f7de745f499f7af1a9f705e4a88 | 929 | py | Python | sendsms/backends/rq.py | this-is-the-bard/django-sendsms | 8944b7d276f91b019ad6aa2e7e29324fa107fa01 | ["MIT"] | null | null | null | sendsms/backends/rq.py | this-is-the-bard/django-sendsms | 8944b7d276f91b019ad6aa2e7e29324fa107fa01 | ["MIT"] | null | null | null | sendsms/backends/rq.py | this-is-the-bard/django-sendsms | 8944b7d276f91b019ad6aa2e7e29324fa107fa01 | ["MIT"] | null | null | null | """ python-rq based backend
This backend will send your messages asynchronously with python-rq.
Before using this backend, make sure that django-rq is installed and
configured.
Usage
-----
In settings.py
SENDSMS_BACKEND = 'sendsms.backends.rq.SmsBackend'
RQ_SENDSMS_BACKEND = 'actual.backend.to.use.SmsBackend'
"""
from sendsms.api import get_connection
from sendsms.backends.base import BaseSmsBackend
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django_rq import job
RQ_SENDSMS_BACKEND = getattr(settings, 'RQ_SENDSMS_BACKEND', None)
if not RQ_SENDSMS_BACKEND:
raise ImproperlyConfigured('Set RQ_SENDSMS_BACKEND')
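# Note: failing here at import time surfaces a missing RQ_SENDSMS_BACKEND
# setting as soon as Django loads this backend, rather than on the first
# send attempt.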
| 22.119048 | 68 | 0.787944 |
1bed3f78be12183f03bd98f78582fb16d8457339 | 2435 | py | Python | venv/Lib/site-packages/openpyxl/worksheet/errors.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 5079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | venv/Lib/site-packages/openpyxl/worksheet/errors.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 1623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | venv/Lib/site-packages/openpyxl/worksheet/errors.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 2033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #Autogenerated schema
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
String,
Bool,
Sequence,
)
from openpyxl.descriptors.excel import CellRange
| 25.904255 | 64 | 0.631622 |
1bee0a3b08699aa37d40800889d795e3cdf9fb23 | 2918 | py | Python | cwbot/kolextra/request/ItemDescriptionRequest.py | zeryl/RUcwbot | 734716506066da599fcbc96d0a815a5e30f6e077 | ["BSD-3-Clause"] | null | null | null | cwbot/kolextra/request/ItemDescriptionRequest.py | zeryl/RUcwbot | 734716506066da599fcbc96d0a815a5e30f6e077 | ["BSD-3-Clause"] | 1 | 2019-04-15T02:48:19.000Z | 2019-04-15T03:02:36.000Z | cwbot/kolextra/request/ItemDescriptionRequest.py | rlbond86/cwbot | 2432a9c9d048b7600b53d5cb8f7ef608c6613258 | ["BSD-3-Clause"] | null | null | null | from kol.request.GenericRequest import GenericRequest
from kol.manager import PatternManager
import re
| 42.911765 | 100 | 0.675805 |
1beeb9bf708d482300442a926d31325bbdca0e33 | 619 | py | Python | SmartMove/SmartConnector/cpapi/utils.py | themichaelasher/SmartMove | 074c6e1a854fdfc21fb292e575a869719d56c5d5 | ["Apache-2.0"] | 24 | 2018-03-15T09:00:51.000Z | 2022-03-17T05:19:47.000Z | SmartMove/SmartConnector/cpapi/utils.py | themichaelasher/SmartMove | 074c6e1a854fdfc21fb292e575a869719d56c5d5 | ["Apache-2.0"] | 8 | 2020-01-20T15:44:42.000Z | 2021-10-18T05:39:04.000Z | SmartMove/SmartConnector/cpapi/utils.py | themichaelasher/SmartMove | 074c6e1a854fdfc21fb292e575a869719d56c5d5 | ["Apache-2.0"] | 22 | 2018-06-04T20:36:41.000Z | 2022-03-16T17:10:44.000Z | import json
import sys
def compatible_loads(json_data):
"""
    Function json.loads in Python 3.0 - 3.5 can't handle bytes, so this function handles it.
    :param json_data: raw JSON as str or bytes
:return: unicode (str if it's python 3)
"""
if isinstance(json_data, bytes) and (3, 0) <= sys.version_info < (3, 6):
json_data = json_data.decode("utf-8")
return json.loads(json_data)
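# Example: on any supported Python version the calls below are equivalent:
# compatible_loads(b'{"a": 1}') == compatible_loads('{"a": 1}') == {"a": 1}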
def get_massage_from_io_error(error):
"""
    :param error: the IOError to extract a message from
:return: error message
"""
if sys.version_info >= (3, 0):
return error.strerror
else:
return error.message
| 24.76 | 92 | 0.610662 |
1bef48d1f47271bb3d6c33f78c3cf6b32220029d | 3578 | py | Python | VokeScan.py | DaduVoke/VokeScan | a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c | ["MIT"] | 2 | 2021-12-05T04:00:50.000Z | 2022-03-24T17:53:26.000Z | VokeScan.py | DaduVoke/VokeScan | a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c | ["MIT"] | null | null | null | VokeScan.py | DaduVoke/VokeScan | a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c | ["MIT"] | null | null | null | import sys,time
from colorama import Fore, Back, Style
print(Fore.RED + ". tool- -DaduVoke- @2021")
import socket
import _thread
import time
try:
scan = Core()
scan.GetData(input(" IP URL\n"))
print(bcolors.WARNING,":",Core.mode,"\n :",Core.ipurl,"\n :",Core.network_speed,bcolors.ENDC)
print(bcolors.BOLD," ...",bcolors.ENDC)
for count in range(0,Core.mode):
time.sleep(Core.network_speed)
_thread.start_new_thread(scan.Start_Scan, (count,count+1))
if count > Core.mode:
exit(0)
except Exception as e:
print (e)
| 18.162437 | 139 | 0.488262 |
1bef4c913e56949ae48100d1d528ebecb2bb01d8 | 53296 | py | Python | agent/src/clacks/agent/objects/object.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | ["ZPL-2.1"] | 2 | 2015-01-26T07:15:19.000Z | 2015-11-09T13:42:11.000Z | agent/src/clacks/agent/objects/object.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | ["ZPL-2.1"] | null | null | null | agent/src/clacks/agent/objects/object.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | ["ZPL-2.1"] | null | null | null | # This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
The object base class.
"""
import copy
import zope.event
import pkg_resources
import os
from lxml import etree
from lxml.builder import E
from logging import getLogger
from zope.interface import Interface, implements
from clacks.common import Environment
from clacks.common.utils import N_, is_uuid
from clacks.common.components import PluginRegistry
from clacks.common.error import ClacksErrorHandler as C
from clacks.agent.objects.backend.registry import ObjectBackendRegistry
from clacks.agent.exceptions import ObjectException
# Status
STATUS_OK = 0
STATUS_CHANGED = 1
# Register the errors handled by us
C.register_codes(dict(
CREATE_NEEDS_BASE=N_("Creation of '%(location)s' lacks a base DN"),
READ_BACKEND_PROPERTIES=N_("Error reading properties for backend '%(backend)s'"),
ATTRIBUTE_BLOCKED_BY=N_("Attribute is blocked by %(source)s==%(value)s"),
ATTRIBUTE_READ_ONLY=N_("Attribute is read only"),
ATTRIBUTE_MANDATORY=N_("Attribute is mandatory"),
ATTRIBUTE_INVALID_CONSTANT=N_("Value is invalid - expected one of %(elements)s"),
ATTRIBUTE_INVALID_LIST=N_("Value is invalid - expected a list"),
ATTRIBUTE_INVALID=N_("Value is invalid - expected value of type '%(type)s'"),
ATTRIBUTE_CHECK_FAILED=N_("Value is invalid"),
ATTRIBUTE_NOT_UNIQUE=N_("Value is not unique (%(value)s)"),
ATTRIBUTE_NOT_FOUND=N_("Attribute not found"),
OBJECT_MODE_NOT_AVAILABLE=N_("Mode '%(mode)s' is not available for base objects"),
OBJECT_MODE_BASE_AVAILABLE=N_("Mode '%(mode)s' is only available for base objects"),
OBJECT_NOT_SUB_FOR=N_("Object of type '%(ext)s' cannot be added as to the '%(base)s' container"),
OBJECT_REMOVE_NON_BASE_OBJECT=N_("Cannot remove non base object"),
OBJECT_MOVE_NON_BASE_OBJECT=N_("Cannot move non base object"),
OBJECT_BASE_NO_RETRACT=N_("Base object cannot be retracted"),
FILTER_INVALID_KEY=N_("Invalid key '%(key)s' for filter '%(filter)s'"),
FILTER_MISSING_KEY=N_("Missing key '%(key)s' after processing filter '%(filter)s'"),
FILTER_NO_LIST=N_("Filter '%(filter)s' did not return a %(type)s value - a list was expected"),
ATTRIBUTE_DEPEND_LOOP=N_("Potential loop in attribute dependencies")
))
from clacks.agent.objects.proxy import ObjectProxy
| 38.287356 | 158 | 0.564001 |
1bef7a1aa389a58d40ce648d1ed75a0579e889d3 | 8752 | py | Python | tests/test_benchmark.py | fossabot/BIRL | 62e91523ac5797a13a7b78b9869ccfdf61cc60d8 | ["BSD-3-Clause"] | null | null | null | tests/test_benchmark.py | fossabot/BIRL | 62e91523ac5797a13a7b78b9869ccfdf61cc60d8 | ["BSD-3-Clause"] | null | null | null | tests/test_benchmark.py | fossabot/BIRL | 62e91523ac5797a13a7b78b9869ccfdf61cc60d8 | ["BSD-3-Clause"] | null | null | null | """
Testing default benchmarks in single thred and parallel configuration
Check whether it generates correct outputs and resulting values
Copyright (C) 2017-2019 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import argparse
import logging
import os
import shutil
import sys
import unittest
try: # python 3
from unittest.mock import patch
except ImportError: # python 2
from mock import patch
import numpy as np
import pandas as pd
from numpy.testing import assert_raises, assert_array_almost_equal
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from birl.utilities.data_io import update_path, save_config_yaml
from birl.utilities.dataset import args_expand_parse_images
from birl.utilities.experiments import parse_arg_params, try_decorator
from birl.benchmark import ImRegBenchmark
from birl.bm_template import BmTemplate
PATH_ROOT = os.path.dirname(update_path('birl'))
PATH_DATA = update_path('data-images')
PATH_CSV_COVER_MIX = os.path.join(PATH_DATA, 'pairs-imgs-lnds_mix.csv')
PATH_CSV_COVER_ANHIR = os.path.join(PATH_DATA, 'pairs-imgs-lnds_histol.csv')
# logging.basicConfig(level=logging.INFO)
| 38.725664 | 114 | 0.63654 |
1bf02d45108f641ace7558443cc9e030c46ebd2f | 65 | py | Python | python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py | pradyotprksh/development_learning | b6c5494196842f3c273965063815ad222a18b4da | ["MIT"] | 9 | 2021-09-03T06:20:48.000Z | 2022-03-19T12:43:30.000Z | python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py | pradyotprksh/development_learning | b6c5494196842f3c273965063815ad222a18b4da | ["MIT"] | null | null | null | python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py | pradyotprksh/development_learning | b6c5494196842f3c273965063815ad222a18b4da | ["MIT"] | 6 | 2021-08-16T01:13:36.000Z | 2022-03-19T12:44:10.000Z | from .errors_exception_handling import errors_exception_handling
| 32.5 | 64 | 0.923077 |
1bf0738223b67b02abba6e6aa0c92e93cd84b652 | 470 | py | Python | mtstub.py | shimniok/rockblock | 7a84d7da7df34c2dbe1a288fb6de24558eb4485f | ["MIT"] | 1 | 2020-05-30T01:29:06.000Z | 2020-05-30T01:29:06.000Z | mtstub.py | shimniok/rockblock | 7a84d7da7df34c2dbe1a288fb6de24558eb4485f | ["MIT"] | 1 | 2017-10-16T03:30:55.000Z | 2018-01-14T19:05:43.000Z | mtstub.py | shimniok/rockblock | 7a84d7da7df34c2dbe1a288fb6de24558eb4485f | ["MIT"] | 1 | 2019-08-05T10:31:46.000Z | 2019-08-05T10:31:46.000Z | #!/usr/bin/env python
##################################################################################################
## mtstub.py
##
## emulates rockblock api so I don't have to burn credits testing...
##################################################################################################
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import config
print "Content-type: plain/text"
print
form = cgi.FieldStorage()
print "OK,12345"
| 24.736842 | 98 | 0.417021 |
1bf2d4c209e500db17a5c6d33e7442b5b858b75b | 343 | py | Python | sum.py | PraghadeshManivannan/Built-in-Functions-Python | a3120641e03e7be8e1408dd467997ad6fdf04d87 | ["MIT"] | null | null | null | sum.py | PraghadeshManivannan/Built-in-Functions-Python | a3120641e03e7be8e1408dd467997ad6fdf04d87 | ["MIT"] | null | null | null | sum.py | PraghadeshManivannan/Built-in-Functions-Python | a3120641e03e7be8e1408dd467997ad6fdf04d87 | ["MIT"] | null | null | null |
#sum(iterable, start=0, /)
#Return the sum of a 'start' value (default: 0) plus an iterable of numbers
#When the iterable is empty, return the start value.
'''This function is intended specifically for use with numeric values and may
reject non-numeric types.'''
a = [1,3,5,7,9,4,6,2,8]
print(sum(a))
print(sum(a,start = 4))
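# Expected output: 45, then 49 (the same nine values summed with start=4)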
| 24.5 | 78 | 0.676385 |
1bf4cd25d9e85b2b0cb4131798b2cd2ef33b36d7 | 10926 | py | Python | idaes/apps/matopt/materials/lattices/diamond_lattice.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | ["RSA-MD"] | 112 | 2019-02-11T23:16:36.000Z | 2022-03-23T20:59:57.000Z | idaes/apps/matopt/materials/lattices/diamond_lattice.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | ["RSA-MD"] | 621 | 2019-03-01T14:44:12.000Z | 2022-03-31T19:49:25.000Z | idaes/apps/matopt/materials/lattices/diamond_lattice.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | ["RSA-MD"] | 154 | 2019-02-01T23:46:33.000Z | 2022-03-23T15:07:10.000Z | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
# === AUXILIARY METHODS
def _getPointType(self, P):
return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4
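# _getPointType classifies a fractional point by the residue of
# 4*(Px + Py + Pz) mod 4, separating the four distinct site types of the
# diamond unit cell.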
# === PROPERTY EVALUATION METHODS
# NOTE: inherited from UnitCellLattice
# def isOnLattice(self,P):
# === BASIC QUERY METHODS
def getLayerSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return self.Diamond100LayerSpacing
elif MI in ['110', '101', '011']:
return self.Diamond110LayerSpacing
elif MI == '111':
return self.Diamond111LayerSpacing
elif MI in ['112', '121', '211']:
return self.Diamond112LayerSpacing
else:
raise NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')
    raise ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')
def getShellSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001', '110', '101', '011', '111']:
return self.IAD * sqrt(8) / sqrt(3)
elif MI in ['112', '121', '211']:
return self.IAD * sqrt(2) / sqrt(3)
else:
raise NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')
    raise ValueError('The input direction is not correct.')
def getUniqueLayerCount(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return 4
elif MI in ['110', '101', '011']:
return 2
elif MI == '111':
return 3
elif MI in ['112', '121', '211']:
return 6
else:
raise NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')
    raise ValueError('The input direction is not correct.')
| 43.185771 | 114 | 0.507139 |
1bf4f3ec8b611663d899f073f4f41ae66286507f | 12055 | py | Python | elateridae_baits.py | AAFC-BICoE/elateridae-ortholog-baitset | 8e17212a26539dfd79b414ffe8f243a906d32149 | ["MIT"] | null | null | null | elateridae_baits.py | AAFC-BICoE/elateridae-ortholog-baitset | 8e17212a26539dfd79b414ffe8f243a906d32149 | ["MIT"] | null | null | null | elateridae_baits.py | AAFC-BICoE/elateridae-ortholog-baitset | 8e17212a26539dfd79b414ffe8f243a906d32149 | ["MIT"] | null | null | null | # coding: utf8
"""
Ortholog-based bait design script for creating Elateridae ortholog baits suitable for submission to myBaits.
Compares t_coffee AA alignment scores with nucleotide tranalignments to find conserved blocks.
Author: Jackson Eyres jackson.eyres@canada.ca
License: MIT
Copyright: Government of Canada
"""
import glob
import os
from Bio import AlignIO, SeqIO
import time
import argparse
import random
def main():
"""
    Main function to run the Elateridae bait designer
:return:
"""
parser = argparse.ArgumentParser(description='Processes T_Coffee AA alignments to generate a ortholog bait set')
parser.add_argument('-o', type=str, required=True,
help='Output Directory')
parser.add_argument('-i', type=str, required=True,
help='T_Coffee Directory containing aa based .score_ascii files')
parser.add_argument('-n', type=str, required=True,
help='Directory containing tranalign nucleotide alignments')
# parser.add_argument('-p', type=str, required=True,
# help='Priorities File for Staphylinidae')
args = parser.parse_args()
print("Starting Staphylinidae Ortholog Bait Design".format(args.o))
print(args.o, args.i, args.n)
dict_of_max_sums = longest_exon_length(args.i)
sum_file = write_sums(args.o, dict_of_max_sums)
blocks_dir = extract_conserved_blocks(sum_file, args.n, args.o)
window_ranges = [600]
for window in window_ranges:
filtered_blocks_dir = filter_blocks(blocks_dir, args.o, window)
processed_blocks_dir = filtered_blocks_dir
# Original was going to stagger tile the baits, but bait manufacturer inherently does this
# tiled_blocks_dir = tile_blocks(filtered_blocks_dir, args.o, window)
# processed_blocks_dir = tiled_blocks_dir
merge_baits(processed_blocks_dir, args.o, "Elateridae", window)
def extract_conserved_blocks(sum_file, alignment_directory, results_directory):
"""
Takes an AA T_coffee alignment score_ascii file, the corresponding nt fasta tranalign file, and the sum file to
Extract out a conserved block
:param sum_file:
:param alignment_directory:
:param results_directory:
:return: Output Directory of conserved blocks
"""
output_directory = os.path.join(results_directory, "conserved_blocks")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with open(sum_file) as f:
lines = f.readlines()
lines.pop(0)
for line in lines:
list_of_seqs = []
split = line.rstrip().split(",")
name = split[0].replace(".aa.summarized.score_ascii", "_tranaligned.fa")
window_range = int(split[2])*3
index = int(split[3])*3
file_path = os.path.join(alignment_directory, name)
if os.path.isfile(file_path):
with open(file_path) as g:
alignments = AlignIO.read(g, "fasta")
for alignment in alignments:
list_of_seqs.append(alignment[index:index + window_range])
orthogroup = split[0].split(".")[0]
file_name = "{}_block.fasta".format(orthogroup)
file_path = os.path.join(output_directory, file_name)
with open(file_path, "w") as h:
for seq in list_of_seqs:
h.write(seq.format("fasta"))
return output_directory
def longest_exon_length(directory):
"""
Scans t_coffee alignments in score_ascii format for a region of between 75-2000 positions in length that is
highly conserved, and sorts by the degree of conservation into an output file
:param directory: Directory of T_coffee results (containing score_ascii and aln files)
:return: Dictionary of Orthogroups with a 300bp region TCS scores above 2400
"""
increments = [150, 200]
increments_rev = increments[::-1]
dict_of_max_sums = {}
files = glob.glob(os.path.join(directory, "*.score_ascii"))
count = 0
for file in files:
count += 1
if count % 100 == 0:
print(count)
# Scans an alignment and converts the cons string of numbers into a continous list of numbers
number_string = ""
with open(file) as f:
number_of_specimens = f.read().count(":") - 4
f.seek(0)
if number_of_specimens < 5:
print("Skipping {} Due to Low Specimen Count".format(file))
continue
for line in f:
if line.startswith("cons") and ":" not in line:
number = line.rstrip().split(" ")[-1]
number_string += number
number_list = [int(i) for i in number_string]
# Scans number list for sequence containing the highest window range of conserved bases within 95% of max
# TCS score for said window range aka 9*Window Range
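        # Worked example: for window_range=200 the maximum possible TCS sum is
        # 9 * 200 = 1800, so a window qualifies once its sum reaches
        # 0.95 * 1800 = 1710 or better.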
# Sort the list so the highest score block within the window range is first. If the window range
# has 95% quality or higher, add it to dictionary and move on to next file, otherwise decrease
# window range and try again
for window_range in increments_rev:
list_of_sums = []
if len(number_list) > window_range:
for i in range(0, len(number_list) - window_range):
the_sum = sum(number_list[i:i + window_range])
list_of_sums.append((the_sum, window_range, i))
sorted_list = sorted(list_of_sums, reverse=True, key=lambda element: (element[0]))
if float(sorted_list[0][0]) >= float(9 * window_range * .95):
if os.path.basename(file) not in dict_of_max_sums:
dict_of_max_sums[os.path.basename(file)] = sorted_list[0]
break
return dict_of_max_sums
def write_sums(directory, dict_of_max_sums):
"""
Writes the dictionary of all ortholog T_coffee scores/sums to csv file
:param directory:
:param dict_of_max_sums:
:return:
"""
if not os.path.exists(directory):
os.makedirs(directory)
timestr = time.strftime("%Y%m%d-%H%M%S")
file_name = "Conserved_Exons_Sums_{}.csv".format(timestr)
file_path = os.path.join(directory, file_name)
# Sorts dictionary into a list by score sum and then window length
sorted_x = sorted(dict_of_max_sums.items(), reverse=True, key=lambda x: (x[1][0], x[1][1]))
print("Writing T_Coffee score analysis to {}".format(file_path))
with open(file_path, "w") as f:
f.write("Orthogroup,Sum,Window,Index\n")
for entry in sorted_x:
f.write("{},{},{},{}\n".format(entry[0], entry[1][0], entry[1][1], entry[1][2]))
return file_path
def filter_blocks(directory, results_dir, window):
"""
Filters blocks generated by longest exon length and write sum functions based on various criteria
:param directory: Directory of fasta blocks to filter
:param results_dir: Parent Result Folder
:param window: Minimum length of a conserved block in basepairs
:return: Output Directory of filtered blocks
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "filtered_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
total_seq_length = 0
total_after_gap_removal = 0
total_sequences = 0
gene_count = 0
# For each block/file extract out sequences that meet the following critiera:
# Part of Priority List = 1
# Minimum Length of Window size in basepairs
# Gaps represent less than 20% of sequence
# Block contains atleast 5 sequences from priority list = 1
for fasta in fastas:
seqs = []
with open(fasta) as f:
file_name = os.path.basename(fasta).replace(".fasta", "_filtered.fasta")
for seq in SeqIO.parse(f, 'fasta'):
gaps = seq.seq.count("-")
gap_percent = float(gaps / len(seq.seq))
if gap_percent > 0.20:
pass
else:
if len(seq.seq) >= window:
seqs.append(seq)
if len(seqs) < 5:
pass
else:
gene_count += 1
# Randomly take 3 contigs from the bait set to ensure even distribution of species across all orthologs
random.shuffle(seqs)
seqs = seqs[:3]
total_sequences += len(seqs)
for seq in seqs:
total_seq_length += len(seq.seq)
seq.seq = seq.seq.ungap(gap="-")
total_after_gap_removal += len(seq.seq)
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
print("Total Genes: {}, "
"Total Sequences: {}, "
"Total Length in bp: {}, "
"After Gap Removal: {}".format(gene_count, total_sequences, total_seq_length, total_after_gap_removal))
return output_dir
def tile_blocks(directory, results_dir, window):
"""
Takes a prefiltered block generated by the filtered_blocks function and tiles each bait
The first 0, 40 or 80 basepairs of each sequence are removed so the baits tile amongst each other
:param directory:
:param results_dir:
:param window:
:return:
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "tiled_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for fasta in fastas:
seqs = []
with open(fasta) as f:
count = 0
for seq in SeqIO.parse(f, 'fasta'):
seq.description = ""
# Remove the first 0, 40 or 80 basepairs of the sequence every 3rd time
count += 1
if count == 1:
pass
if count == 2:
seq.seq = seq.seq[40:]
if count == 3:
seq.seq = seq.seq[80:]
count = 0
seqs.append(seq)
file_name = os.path.basename(fasta).replace("_block_filtered", "_block_tiled")
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
def merge_baits(directory, results_dir, prefix, window):
"""
    Merges multi-fastas in the input directory into a single multi-fasta file. Could be accomplished with bash cat, but
using biopython ensures each fasta entry is formatted correctly
:param directory: Input directory of fastas
:param results_dir: Output Parent directory
:param prefix: Name of the output file
:param window:
:return:
"""
output_dir = os.path.join(results_dir, "final_baits")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fastas = glob.glob(os.path.join(directory, "*.fasta"))
seqs = []
total_dna = 0
total_seqs = 0
total_orthologs = 0
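    # The loop below stops adding orthologs once roughly 3.9 Mbp of total
    # bait sequence has been collected.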
for fasta in fastas:
if total_dna > 3900000:
break
total_orthologs += 1
with open(fasta) as f:
for seq in SeqIO.parse(f, 'fasta'):
total_seqs += 1
total_dna += len(seq.seq)
seq.description = ""
seqs.append(seq)
file_name = "{}-{}-final-baits.fasta".format(prefix, window)
new_file = os.path.join(output_dir, file_name)
print("Bait File {} "
"with Total Orthologs {}, "
"Total Seqs {}, Total_Dna {} bp".format(new_file, total_orthologs, total_seqs, total_dna))
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
if __name__ == "__main__":
main()
| 36.41994 | 117 | 0.616093 |
1bf638f00910b809a7d45e1aeabdb75e4e5aef9c | 1361 | py | Python | poilab.py | octeufer/Annotate_Optimize | 32d9cecc0159882d3f962990aba07168c4a023f5 | ["Apache-2.0"] | null | null | null | poilab.py | octeufer/Annotate_Optimize | 32d9cecc0159882d3f962990aba07168c4a023f5 | ["Apache-2.0"] | null | null | null | poilab.py | octeufer/Annotate_Optimize | 32d9cecc0159882d3f962990aba07168c4a023f5 | ["Apache-2.0"] | null | null | null | import sys
import numpy as np
sys.path.append("d:/data/annooptimize")
import triangle
import time
tinternal = list()
| 38.885714 | 87 | 0.722998 |
1bf69ac1479d462fb413d5e64a7b2f979173894e | 5091 | py | Python | t_core/tc_python/xrule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | ["MIT"] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | t_core/tc_python/xrule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | ["MIT"] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | t_core/tc_python/xrule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | ["MIT"] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z |
from util.infinity import INFINITY
from tc_python.arule import ARule
from t_core.rollbacker import Rollbacker
from t_core.resolver import Resolver
| 37.433824 | 111 | 0.575329 |
1bf74b762d2902af1c8ee402ce83c52345c29025 | 5266 | py | Python | tests/commonsense/semantic_lexicon_knowledge/ai2_lexicon_test.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | ["Apache-2.0"] | 84 | 2018-06-02T02:00:53.000Z | 2022-03-13T12:17:42.000Z | tests/commonsense/semantic_lexicon_knowledge/ai2_lexicon_test.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | ["Apache-2.0"] | 3 | 2018-10-31T00:28:31.000Z | 2020-05-12T01:06:53.000Z | tests/commonsense/semantic_lexicon_knowledge/ai2_lexicon_test.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | ["Apache-2.0"] | 13 | 2018-09-14T20:37:51.000Z | 2021-03-23T09:24:49.000Z | from unittest import TestCase
from propara.commonsense.semantic_lexicon_knowledge.ai2_lexicon import AI2Lexicon, AI2LexiconPredicate, AI2LexiconArg, AI2LexiconIndications, \
AI2LexiconPattern
| 71.162162 | 143 | 0.681732 |
1bf7f1bc739f582663b9e33d97b9d4189cae0d04 | 473 | py | Python | fitbit/__init__.py | erichilarysmithsr/python-fitbit | 38cf916d0318aedc91b31d15431fa9c49a13d15f | ["Apache-2.0"] | null | null | null | fitbit/__init__.py | erichilarysmithsr/python-fitbit | 38cf916d0318aedc91b31d15431fa9c49a13d15f | ["Apache-2.0"] | null | null | null | fitbit/__init__.py | erichilarysmithsr/python-fitbit | 38cf916d0318aedc91b31d15431fa9c49a13d15f | ["Apache-2.0"] | null | null | null | # -*- coding: utf-8 -*-
"""
Fitbit API Library
------------------
:copyright: 2012-2015 ORCAS.
:license: BSD, see LICENSE for more details.
"""
from .api import Fitbit, FitbitOauthClient, FitbitOauth2Client
# Meta.
__title__ = 'fitbit'
__author__ = 'Issac Kelly and ORCAS'
__author_email__ = 'bpitcher@orcasinc.com'
__copyright__ = 'Copyright 2012-2015 ORCAS'
__license__ = 'Apache 2.0'
__version__ = '0.1.3'
__release__ = '0.1.3'
# Module namespace.
all_tests = []
| 18.192308 | 62 | 0.684989 |
1bf7f576395a0ca86f448e1c60010a3d363f6af6 | 468 | py | Python | bitcoinExchange/exchange/api/urls.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | ["MIT"] | 1 | 2021-09-08T16:39:07.000Z | 2021-09-08T16:39:07.000Z | bitcoinExchange/exchange/api/urls.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | ["MIT"] | null | null | null | bitcoinExchange/exchange/api/urls.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | ["MIT"] | null | null | null | from django.urls import include, path
from exchange.api.views import LatestOrdersListAPIView, OrderViewSet, ProfileAPIView
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'orders', OrderViewSet, basename='orders')
urlpatterns = [
path('profile/', ProfileAPIView.as_view(), name='profile-detail'),
path('orders/latest/', LatestOrdersListAPIView.as_view(), name='orders-latest'),
path('', include(router.urls))
]
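# With DefaultRouter, the include above exposes orders/ (list/create) and
# orders/<pk>/ (retrieve/update/destroy) for OrderViewSet, alongside the two
# explicit paths registered before it.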
| 36 | 84 | 0.767094 |
1bf8ddafa4dc0ba6cd6a406c255c3270696943bb | 848 | py | Python | kevin/aggregate/process_html.py | toddoh/thisisallabout_backend | a0c7bad675bd3fff97f99c3e2b49f19a1fef7640 | ["MIT"] | null | null | null | kevin/aggregate/process_html.py | toddoh/thisisallabout_backend | a0c7bad675bd3fff97f99c3e2b49f19a1fef7640 | ["MIT"] | 5 | 2021-03-18T22:18:49.000Z | 2022-03-11T23:40:56.000Z | kevin/aggregate/process_html.py | toddoh/thisisallabout_backend | a0c7bad675bd3fff97f99c3e2b49f19a1fef7640 | ["MIT"] | 1 | 2019-10-16T19:29:12.000Z | 2019-10-16T19:29:12.000Z | from bs4 import BeautifulSoup
import requests
import re | 38.545455 | 173 | 0.660377 |
1bf9ff44f1b06f0e0c18c710168ee340dcb2a97f | 869 | py | Python | cfmacro/_resources/examples/lambda.py | gchiesa/cfmacro | 9c546b7930a54a9b44efffdf87401726981e1b2a | ["MIT"] | null | null | null | cfmacro/_resources/examples/lambda.py | gchiesa/cfmacro | 9c546b7930a54a9b44efffdf87401726981e1b2a | ["MIT"] | 1 | 2019-07-30T08:49:20.000Z | 2019-07-30T08:49:20.000Z | cfmacro/_resources/examples/lambda.py | gchiesa/cfmacro | 9c546b7930a54a9b44efffdf87401726981e1b2a | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
from cfmacro.processors import SgProcessor
from cfmacro.core.engine import ProcessorEngine
from cfmacro.core.template import TemplateProcessor
def lambda_handler(event, context):
"""
Implement a core handler for security groups ingress / egress
:param event:
:param context:
:return:
"""
print(f'event received: {event}')
processor_engine = ProcessorEngine()
processor_engine.register_processor(SgProcessor)
template_processor = TemplateProcessor(processor_engine)
result = template_processor.process(fragment=event['fragment'],
template_params=event['templateParameterValues']).to_dict()
print(f'event processed. Result: \n{result}')
return {
"requestId": event['requestId'],
"status": "success",
"fragment": result
}
| 28.966667 | 99 | 0.674338 |
1bfcaa846cbe80234230889e864b2dd049be6c62 | 8038 | py | Python | tf2qa/predict_long.py | mikelkl/TF2-QA | 3bca786d26565335df45538714532d6d3c070a2b | ["MIT"] | 17 | 2020-01-29T10:31:07.000Z | 2022-01-10T03:36:00.000Z | tf2qa/predict_long.py | mikelkl/TF2-QA | 3bca786d26565335df45538714532d6d3c070a2b | ["MIT"] | null | null | null | tf2qa/predict_long.py | mikelkl/TF2-QA | 3bca786d26565335df45538714532d6d3c070a2b | ["MIT"] | 4 | 2021-01-27T15:42:45.000Z | 2021-12-12T20:41:51.000Z | import torch
import argparse
from roberta_modeling import RobertaJointForLong
from transformers.modeling_roberta import RobertaConfig, RobertaModel
from torch.utils.data import TensorDataset, SequentialSampler, DataLoader
import utils
from tqdm import tqdm
import os
import json
import collections
import pickle
import pandas as pd
from utils_nq import read_candidates_from_one_split, compute_long_pred
from roberta_long_preprocess import InputLongFeatures
RawResult = collections.namedtuple("RawResult",
["unique_id",
"long_start_logits",
"long_end_logits"])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_ids", default="0,1,2,3,4,5,6,7", type=str)
parser.add_argument("--eval_batch_size", default=128, type=int)
parser.add_argument("--n_best_size", default=20, type=int)
parser.add_argument("--max_answer_length", default=30, type=int)
parser.add_argument("--float16", default=True, type=bool)
parser.add_argument("--bert_config_file", default='roberta_large/config.json', type=str)
parser.add_argument("--init_restore_dir", default='check_points/roberta-large-long-V00/best_checkpoint.pth', type=str)
parser.add_argument("--predict_file", default='data/simplified-nq-test.jsonl', type=str)
parser.add_argument("--output_dir", default='check_points/roberta-large-long-V00',
type=str)
parser.add_argument("--predict_feat", default='dataset/test_data_maxlen512_roberta_tfidf_features.bin',
type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
print("device %s n_gpu %d" % (device, n_gpu))
print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))
bert_config = RobertaConfig.from_json_file(args.bert_config_file)
model = RobertaJointForLong(RobertaModel(bert_config), bert_config)
utils.torch_show_all_params(model)
utils.torch_init_model(model, args.init_restore_dir)
if args.float16:
model.half()
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
dataset, features = load_cached_data(feature_dir=args.predict_feat, output_features=True, evaluate=True)
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
print("***** Running evaluation *****")
print(" Num examples =", len(dataset))
print(" Batch size =", args.eval_batch_size)
all_results = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
input_ids, input_mask, segment_ids, example_indices = batch
inputs = {'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids}
start_logits, end_logits = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = str(eval_feature.unique_id)
result = RawResult(unique_id=unique_id,
long_start_logits=start_logits[i].cpu().numpy(),
long_end_logits=end_logits[i].cpu().numpy())
all_results.append(result)
pickle.dump(all_results, open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'wb'))
# all_results = pickle.load(open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'rb'))
print("Going to candidates file")
candidates_dict = read_candidates_from_one_split(args.predict_file)
print("Compute_pred_dict")
nq_pred_dict = compute_long_pred(candidates_dict, features, all_results, args.n_best_size)
output_prediction_file = os.path.join(args.output_dir, 'test_predictions.json')
print("Saving predictions to", output_prediction_file)
with open(output_prediction_file, 'w') as f:
json.dump({'predictions': list(nq_pred_dict.values())}, f)
# make_submission(output_prediction_file, args.output_dir)
| 43.923497 | 122 | 0.686365 |
1bfcf985c108d567ad3614fe9d2baeec4a87e0f1 | 9385 | py | Python | city-infrastructure-platform/settings.py | City-of-Helsinki/city-infrastructure-platform | c14513a9e54405412085f1047f91ec58b263eac0 | ["CC0-1.0"] | 2 | 2020-11-23T22:08:58.000Z | 2022-03-02T13:13:20.000Z | city-infrastructure-platform/settings.py | City-of-Helsinki/city-infrastructure-platform | c14513a9e54405412085f1047f91ec58b263eac0 | ["CC0-1.0"] | 170 | 2019-12-31T13:37:04.000Z | 2022-03-12T14:03:35.000Z | city-infrastructure-platform/settings.py | City-of-Helsinki/city-infrastructure-platform | c14513a9e54405412085f1047f91ec58b263eac0 | ["CC0-1.0"] | 3 | 2020-05-08T05:58:02.000Z | 2022-03-15T16:07:25.000Z | """
Django settings for city-infrastructure-platform project.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import sentry_sdk
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext_lazy as _
from helusers.defaults import SOCIAL_AUTH_PIPELINE # noqa: F401
from sentry_sdk.integrations.django import DjangoIntegration
from .utils import git_version
# Set up .env file
checkout_dir = environ.Path(__file__) - 2
assert os.path.exists(checkout_dir("manage.py"))
parent_dir = checkout_dir.path("..")
if parent_dir() != "/" and os.path.isdir(parent_dir("etc")):
env_file = parent_dir("etc/env")
default_var_root = parent_dir("var")
else:
env_file = checkout_dir(".env")
default_var_root = checkout_dir("var")
BASE_DIR = checkout_dir()
env = environ.Env(
DEBUG=(bool, False),
TIER=(str, "dev"), # one of: prod, qa, stage, test, dev
SECRET_KEY=(str, ""),
VAR_ROOT=(str, default_var_root),
ALLOWED_HOSTS=(list, []),
TRUST_X_FORWARDED_HOST=(bool, False),
DATABASE_URL=(
str,
"postgis:///city-infrastructure-platform",
),
CACHE_URL=(str, "locmemcache://"),
EMAIL_URL=(str, "consolemail://"),
SENTRY_DSN=(str, ""),
AZURE_DEPLOYMENT=(bool, False),
AZURE_ACCOUNT_KEY=(str, False),
AZURE_CONTAINER=(str, False),
AZURE_ACCOUNT_NAME=(str, False),
OIDC_AUTHENTICATION_ENABLED=(bool, True),
SOCIAL_AUTH_TUNNISTAMO_KEY=(str, None),
SOCIAL_AUTH_TUNNISTAMO_SECRET=(str, None),
OIDC_API_TOKEN_AUTH_AUDIENCE=(str, None),
OIDC_API_TOKEN_AUTH_ISSUER=(str, None),
TOKEN_AUTH_MAX_TOKEN_AGE=(int, 600),
OIDC_ENDPOINT=(str, None),
HELUSERS_ADGROUPS_CLAIM=(str, "groups"),
LOGGING_AUTH_DEBUG=(bool, False),
OVERLAY_SOURCE_URL=(str, "https://geoserver.hel.fi/geoserver/city-infra/wms"),
BASEMAP_SOURCE_URL=(str, "https://kartta.hel.fi/ws/geoserver/avoindata/wms"),
STATIC_URL=(str, "/static/"),
MEDIA_URL=(str, "/media/"),
)
if os.path.exists(env_file):
env.read_env(env_file)
SOCIAL_AUTH_TUNNISTAMO_KEY = env("SOCIAL_AUTH_TUNNISTAMO_KEY")
SOCIAL_AUTH_TUNNISTAMO_SECRET = env("SOCIAL_AUTH_TUNNISTAMO_SECRET")
HELUSERS_ADGROUPS_CLAIM = env("HELUSERS_ADGROUPS_CLAIM")
SOCIAL_AUTH_ID_TOKEN_IN_END_SESSION = False
if env("OIDC_ENDPOINT"):
SOCIAL_AUTH_TUNNISTAMO_OIDC_ENDPOINT = env("OIDC_ENDPOINT")
OIDC_API_TOKEN_AUTH = {
"AUDIENCE": env("OIDC_API_TOKEN_AUTH_AUDIENCE"),
"ISSUER": env("OIDC_API_TOKEN_AUTH_ISSUER"),
}
# General settings
DEBUG = env("DEBUG")
OIDC_AUTHENTICATION_ENABLED = env("OIDC_AUTHENTICATION_ENABLED")
TIER = env("TIER")
SECRET_KEY = env("SECRET_KEY")
if DEBUG and not SECRET_KEY:
SECRET_KEY = "xxx"
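# Dev-only fallback; outside DEBUG a real SECRET_KEY must be supplied via the
# environment.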
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
if OIDC_AUTHENTICATION_ENABLED and (
not SOCIAL_AUTH_TUNNISTAMO_KEY
or not SOCIAL_AUTH_TUNNISTAMO_SECRET
or not OIDC_API_TOKEN_AUTH["AUDIENCE"]
or not OIDC_API_TOKEN_AUTH["ISSUER"]
):
raise ImproperlyConfigured("Authentication not configured properly")
CACHES = {"default": env.cache()}
vars().update(env.email_url()) # EMAIL_BACKEND etc.
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {"class": "logging.NullHandler"},
},
"loggers": {
"django": {"handlers": ["console"], "level": "INFO"},
"helusers": {
"handlers": ["console"],
"level": "DEBUG" if env("LOGGING_AUTH_DEBUG") else "INFO",
"propagate": False,
},
},
}
# Application definition
DJANGO_APPS = [
"helusers",
"social_django",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
]
THIRD_PARTY_APPS = [
"django_extensions",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"drf_yasg",
"django_filters",
"auditlog",
]
LOCAL_APPS = [
"users.apps.UsersConfig",
"traffic_control.apps.TrafficControlConfig",
"map.apps.MapConfig",
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
AUTHENTICATION_BACKENDS = (
"helusers.tunnistamo_oidc.TunnistamoOIDCAuth",
"django.contrib.auth.backends.ModelBackend",
)
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "/admin/"
LOGOUT_REDIRECT_URL = "/admin/login/"
SOCIAL_AUTH_TUNNISTAMO_AUTH_EXTRA_ARGUMENTS = {"ui_locales": "fi"}
WAGTAIL_SITE_NAME = _("City Infrastructure Platform")
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
MIDDLEWARE = [
"deployment.middleware.HealthCheckMiddleware",
"azure_client_ip.middleware.AzureClientIPMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.locale.LocaleMiddleware",
"auditlog.middleware.AuditlogMiddleware",
]
ROOT_URLCONF = "city-infrastructure-platform.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [checkout_dir("templates"), checkout_dir("map-view/build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "city-infrastructure-platform.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "fi"
LANGUAGES = [("fi", _("Finnish")), ("en", _("English"))]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
var_root = env.path("VAR_ROOT")
STATIC_ROOT = var_root("static")
MEDIA_ROOT = var_root("media")
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
STATICFILES_DIRS = [checkout_dir("map-view/build/static")]
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# fe. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Django REST Framework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"helusers.oidc.ApiTokenAuthentication",
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
"PAGE_SIZE": 20,
"OIDC_LEEWAY": env("TOKEN_AUTH_MAX_TOKEN_AGE"),
"GROUP_CLAIM_NAME": "groups",
}
# django-cors
if DEBUG:
CORS_ORIGIN_ALLOW_ALL = True
# Azure CLIENT_IP middleware
AZURE_DEPLOYMENT = env.bool("AZURE_DEPLOYMENT")
if AZURE_DEPLOYMENT:
AZURE_ACCOUNT_KEY = env.str("AZURE_ACCOUNT_KEY")
AZURE_CONTAINER = env.str("AZURE_CONTAINER")
AZURE_ACCOUNT_NAME = env.str("AZURE_ACCOUNT_NAME")
DEFAULT_FILE_STORAGE = "storages.backends.azure_storage.AzureStorage"
# Sentry-SDK
SENTRY_DSN = env.str("SENTRY_DSN")
VERSION = git_version()
if SENTRY_DSN:
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()], release=VERSION)
# Custom settings
SRID = 3879 # the spatial reference id used for geometries
OVERLAY_SOURCE_URL = env.str("OVERLAY_SOURCE_URL")
BASEMAP_SOURCE_URL = env.str("BASEMAP_SOURCE_URL")
LOCALE_PATHS = [
"./templates/locale",
]
| 31.599327 | 90 | 0.716356 |
1bfd7e8367e5e96a626394bb27f0b9266054e693 | 1184 | py | Python | test/tc/tet_tc_base_predict_multiclass.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | ["Apache-2.0"] | 115 | 2021-08-29T04:28:40.000Z | 2022-03-29T22:57:48.000Z | test/tc/tet_tc_base_predict_multiclass.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | ["Apache-2.0"] | 2 | 2022-01-14T01:52:07.000Z | 2022-03-04T11:40:10.000Z | test/tc/tet_tc_base_predict_multiclass.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | ["Apache-2.0"] | 18 | 2021-09-23T06:41:10.000Z | 2022-03-22T04:37:05.000Z | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/25 19:30
# @author : Mo
# @function: predict model, -
# linux
import platform
import json
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
path_sys = os.path.join(path_root, "pytorch_nlu", "pytorch_textclassification")
print(path_root)
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from tcPredict import TextClassificationPredict
if __name__ == "__main__":
path_config = "../output/text_classification/model_ERNIE/tc.config"
tcp = TextClassificationPredict(path_config)
texts = [{"text": "1919.34"},
{"text": ""},
{"text": ""},
{"text": ", "},
]
res = tcp.predict(texts, logits_type="sigmoid")
print(res)
while True:
print(":")
question = input()
res = tcp.predict([{"text": question}], logits_type="sigmoid")
print(res)
| 32 | 104 | 0.663007 |
1bfe4bc4102702ea9b7f00d3aaa75d9c6a870a4d | 5645 | py | Python | tests/test_create_spreadsheet_values.py | Tunous/StringSheet | 3b0bd00db6ae780c523524b71774f6d3da44435f | ["MIT"] | 14 | 2017-09-17T12:41:39.000Z | 2020-12-15T07:42:55.000Z | tests/test_create_spreadsheet_values.py | Tunous/StringSheet | 3b0bd00db6ae780c523524b71774f6d3da44435f | ["MIT"] | 10 | 2017-09-12T20:06:47.000Z | 2021-03-22T17:16:10.000Z | tests/test_create_spreadsheet_values.py | Tunous/StringSheet | 3b0bd00db6ae780c523524b71774f6d3da44435f | ["MIT"] | 1 | 2017-09-18T04:55:34.000Z | 2017-09-18T04:55:34.000Z | import unittest
from stringsheet.parser import create_spreadsheet_values
from stringsheet.parser import create_language_sheet_values
from stringsheet.parser import parse_resources
if __name__ == '__main__':
unittest.main()
| 46.270492 | 80 | 0.478654 |
1bff3ce09a664d524ed5b17fd85a06acad12da24 | 195 | py | Python | libs/imgutils.py | EpicKiwi/projet-datascience | 90b59fc674fc2146634d1c1681f9b65083a7aa91 | ["MIT"] | null | null | null | libs/imgutils.py | EpicKiwi/projet-datascience | 90b59fc674fc2146634d1c1681f9b65083a7aa91 | ["MIT"] | null | null | null | libs/imgutils.py | EpicKiwi/projet-datascience | 90b59fc674fc2146634d1c1681f9b65083a7aa91 | ["MIT"] | 2 | 2020-01-14T07:53:50.000Z | 2020-01-14T12:24:35.000Z | import cv2 | 27.857143 | 64 | 0.769231 |
1bff51099f471eb1158044ba33a024f093e0aed7 | 3079 | py | Python | bin/nsa_fail/nsa_fail.py | changhoonhahn/SEDflow | 4561ecfe3a38cc4c25df263d971a87e8a83f88ce | ["MIT"] | 18 | 2022-03-16T03:11:04.000Z | 2022-03-30T16:01:42.000Z | bin/nsa_fail/nsa_fail.py | changhoonhahn/SEDflow | 4561ecfe3a38cc4c25df263d971a87e8a83f88ce | ["MIT"] | null | null | null | bin/nsa_fail/nsa_fail.py | changhoonhahn/SEDflow | 4561ecfe3a38cc4c25df263d971a87e8a83f88ce | ["MIT"] | null | null | null | import os, sys
import numpy as np
from sedflow import obs as Obs
from sedflow import train as Train
from provabgs import infer as Infer
from provabgs import models as Models
####################################################
# input
####################################################
sample = sys.argv[1]
itrain = int(sys.argv[2])
nhidden = int(sys.argv[3])
nblocks = int(sys.argv[4])
niter = int(sys.argv[5])
i0 = int(sys.argv[6])
i1 = int(sys.argv[7])
####################################################
# compile NSA failures
####################################################
# u, g, r, i, z, sigma_u, sigma_g, sigma_r, sigma_i, sigma_z, redshift
y_nsa = Obs.load_nsa_data(test_set=False)
igals = np.load('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')
# convert to flux
y_flux = Train.mag2flux(y_nsa[:,:5])
y_ivar = Train.sigma_mag2flux(y_nsa[:,5:10], y_nsa[:,:5])**-2
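# sigma_mag2flux propagates the magnitude errors to flux errors; the squared
# inverse turns them into inverse variances.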
y_zred = y_nsa[:,-1]
####################################################
# setup inference
####################################################
# SPS parameter priors
prior_sps = Infer.load_priors([
Infer.UniformPrior(7., 12.5, label='sed'),
Infer.FlatDirichletPrior(4, label='sed'), # flat dirichilet priors
Infer.UniformPrior(0., 1., label='sed'), # burst fraction
Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2
Infer.UniformPrior(-2., 1., label='sed') # uniform priors on dust_index
])
# SPS model
m_sps = Models.NMF(burst=True, emulator=True)
for i in range(i0, i1+1):
run_mcmc(igals[i])
| 33.107527 | 92 | 0.528743 |
400075fe46c49c54066ef8f12574919b2debe75a | 2709 | py | Python | studio/gs_provider.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | ["Apache-2.0"] | null | null | null | studio/gs_provider.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | ["Apache-2.0"] | null | null | null | studio/gs_provider.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | ["Apache-2.0"] | null | null | null | import json
import time
import re
from .keyvalue_provider import KeyValueProvider
from .gcloud_artifact_store import GCloudArtifactStore
from .util import timeit
| 31.137931 | 79 | 0.51495 |
40007ef606785b22cbc7c72b9274d6584b3f3fb5 | 46830 | py | Python | gslib/tests/test_ls.py | MikeJeffrey/gsutil | 12f4258540ee83aee255ec1baf50e7e6faee10e2 | ["Apache-2.0"] | null | null | null | gslib/tests/test_ls.py | MikeJeffrey/gsutil | 12f4258540ee83aee255ec1baf50e7e6faee10e2 | ["Apache-2.0"] | null | null | null | gslib/tests/test_ls.py | MikeJeffrey/gsutil | 12f4258540ee83aee255ec1baf50e7e6faee10e2 | ["Apache-2.0"] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ls command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from datetime import datetime
import os
import posixpath
import re
import stat
import subprocess
import sys
import time
import gslib
from gslib.commands import ls
from gslib.cs_api_map import ApiSelector
from gslib.project_id import PopulateProjectId
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.testcase.integration_testcase import SkipForXML
from gslib.tests.util import CaptureStdout
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import RUN_S3_TESTS
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_MD5
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY1_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY2_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import TEST_ENCRYPTION_KEY3_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY4
from gslib.tests.util import TEST_ENCRYPTION_KEY4_SHA256_B64
from gslib.tests.util import unittest
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import UTF8
from gslib.utils.ls_helper import PrintFullInfoAboutObject
from gslib.utils.retry_util import Retry
from gslib.utils.system_util import IS_WINDOWS
from six import add_move, MovedModule
add_move(MovedModule('mock', 'mock', 'unittest.mock'))
from six.moves import mock
KMS_XML_SKIP_MSG = ('gsutil does not support KMS operations for S3 buckets, '
'or listing KMS keys with the XML API.')
BUCKET_LOCK_SKIP_MSG = ('gsutil does not support bucket lock operations for '
'S3 buckets or listing retention policy with XML API.')
def test_one_object_with_L(self):
"""Tests listing one object with -L."""
obj_uri = self.CreateObject(contents=b'foo')
# Ensure that creation and update don't take place in the same second.
time.sleep(2)
# Check that the creation time, rather than the updated time, is displayed.
self.RunGsUtil(['setmeta', '-h', 'x-goog-meta-foo:bar', suri(obj_uri)])
find_time_created_re = re.compile(
r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
find_time_updated_re = re.compile(
r'^\s*Update time:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
time_created_match = re.search(find_time_created_re, stdout)
time_updated_match = re.search(find_time_updated_re, stdout)
time_created = time_created_match.group('time_created_val')
self.assertIsNotNone(time_created)
time_created = time.strptime(time_created, '%a, %d %b %Y %H:%M:%S %Z')
if self.test_api == ApiSelector.XML:
# XML API has no concept of updated time.
self.assertIsNone(time_updated_match)
elif self.test_api == ApiSelector.JSON:
time_updated = time_updated_match.group('time_updated_val')
self.assertIsNotNone(time_updated)
time_updated = time.strptime(time_updated, '%a, %d %b %Y %H:%M:%S %Z')
self.assertGreater(time_updated, time_created)
def test_subdir(self):
"""Tests listing a bucket subdirectory."""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
# Use @Retry as hedge against bucket listing eventual consistency.
_Check1()
def test_subdir_nocontents(self):
"""Tests listing a bucket subdirectory using -d.
Result will display subdirectory names instead of contents. Uses a wildcard
to show multiple matching subdirectories.
"""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
k3_uri = bucket_uri.clone_replace_name('dir/foo2')
k3_uri.set_contents_from_string('foo')
k4_uri = bucket_uri.clone_replace_name('dir2/foo3')
k4_uri.set_contents_from_string('foo2')
# Use @Retry as hedge against bucket listing eventual consistency.
_Check1()
def test_versioning(self):
"""Tests listing a versioned bucket."""
bucket1_uri = self.CreateBucket(test_objects=1)
bucket2_uri = self.CreateVersionedBucket(test_objects=1)
self.AssertNObjectsInBucket(bucket1_uri, 1, versioned=True)
bucket_list = list(bucket1_uri.list_bucket())
objuri = [
bucket1_uri.clone_replace_key(key).versionless_uri
for key in bucket_list
][0]
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
_Check2()
def test_etag(self):
"""Tests that listing an object with an etag."""
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
etag = obj_uri.get_key().etag.strip('"\'')
# Use @Retry as hedge against bucket listing eventual consistency.
_Check1()
_Check2()
_Check3()
def test_labels(self):
"""Tests listing on a bucket with a label/tagging configuration."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# No labels are present by default.
self.assertRegex(stdout, r'Labels:\s+None')
# Add a label and check that it shows up.
self.RunGsUtil(['label', 'ch', '-l', 'labelkey:labelvalue', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
label_regex = re.compile(r'Labels:\s+\{\s+"labelkey":\s+"labelvalue"\s+\}',
re.MULTILINE)
self.assertRegex(stdout, label_regex)
# TODO(b/135700569): Stop skipping this once this field is available to all
# projects.
def test_list_sizes(self):
"""Tests various size listing options."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, contents=b'x' * 2048)
# Use @Retry as hedge against bucket listing eventual consistency.
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
_Check2()
# Use @Retry as hedge against bucket listing eventual consistency.
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
_Check4()
# Use @Retry as hedge against bucket listing eventual consistency.
_Check5()
def test_list_acl(self):
"""Tests that long listing includes an ACL."""
key_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertIn('ACL:', stdout)
self.assertNotIn('ACCESS DENIED', stdout)
def test_list_gzip_content_length(self):
"""Tests listing a gzipped object."""
file_size = 10000
file_contents = b'x' * file_size
fpath = self.CreateTempFile(contents=file_contents, file_name='foo.txt')
key_uri = self.CreateObject()
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath), suri(key_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
_Check1()
def test_output_chopped(self):
"""Tests that gsutil still succeeds with a truncated stdout."""
bucket_uri = self.CreateBucket(test_objects=2)
# Run Python with the -u flag so output is not buffered.
gsutil_cmd = [
sys.executable, '-u', gslib.GSUTIL_PATH, 'ls',
suri(bucket_uri)
]
# Set bufsize to 0 to make sure output is not buffered.
p = subprocess.Popen(gsutil_cmd, stdout=subprocess.PIPE, bufsize=0)
# Immediately close the stdout pipe so that gsutil gets a broken pipe error.
p.stdout.close()
p.wait()
# Make sure it still exited cleanly.
self.assertEqual(p.returncode, 0)
def test_recursive_list_trailing_slash(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/',
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/foo/', stdout)
def test_wildcard_prefix(self):
"""Tests that an object name with a wildcard does not infinite loop."""
bucket_uri = self.CreateBucket()
wildcard_folder_object = 'wildcard*/'
object_matching_folder = 'wildcard10/foo'
self.CreateObject(bucket_uri=bucket_uri,
object_name=wildcard_folder_object,
contents=b'foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name=object_matching_folder,
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 2)
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'wildcard*')],
return_stderr=True,
expected_status=1)
self.assertIn(
'Cloud folder %s%s contains a wildcard' %
(suri(bucket_uri), '/wildcard*/'), stderr)
# Listing with a flat wildcard should still succeed.
# Use @Retry as hedge against bucket listing eventual consistency.
_Check()
def test_non_ascii_project_fails(self):
    # 'ã' is a representative non-ASCII project id.
    stderr = self.RunGsUtil(['ls', '-p', 'ã', 'gs://fobarbaz'],
                            expected_status=1,
                            return_stderr=True)
self.assertIn('Invalid non-ASCII', stderr)
def set_default_kms_key_on_bucket(self, bucket_uri):
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(
keyring_fqn, testcase.KmsTestingResources.CONSTANT_KEY_NAME)
# Make sure that the service account for the desired bucket's parent project
# is authorized to encrypt with the key above.
self.RunGsUtil(['kms', 'encryption', '-k', key_fqn, suri(bucket_uri)])
return key_fqn
| 44.137606 | 103 | 0.688661 |
4001312cef0d9f28268935ec40cf1f39b54d853e | 131 | py | Python | onadata/libs/utils/audit.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | onadata/libs/utils/audit.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | onadata/libs/utils/audit.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals, print_function, division, absolute_import
HOME_ACCESSED = "home-accessed"
| 32.75 | 82 | 0.824427 |
4001b461738a1a675ced54e42a87a9e7681bbab2 | 2,217 | py | Python | places/management/commands/load_places.py | aevtikheev/dvmn-yandex-afisha | 7112977d6615124412b7e7ffc4abdcaa969b4078 | [
"MIT"
] | null | null | null | places/management/commands/load_places.py | aevtikheev/dvmn-yandex-afisha | 7112977d6615124412b7e7ffc4abdcaa969b4078 | [
"MIT"
] | null | null | null | places/management/commands/load_places.py | aevtikheev/dvmn-yandex-afisha | 7112977d6615124412b7e7ffc4abdcaa969b4078 | [
"MIT"
] | null | null | null | import logging
from urllib.parse import unquote, urlparse
from pathlib import PurePosixPath
import requests
from requests.exceptions import ReadTimeout, ConnectionError, HTTPError
from django.core.management.base import BaseCommand
from django.core.files.base import ContentFile
from places.models import Place, Image
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
| 39.589286 | 90 | 0.59991 |
4002a9f7b6d3888657a9b000e3fb8c2cb6fac5dd | 18,227 | py | Python | gslib/utils/ls_helper.py | dickmao/gsutil | 3b61bf0e6188f65f78c72c79ea3cb69e9c61da4b | [
"Apache-2.0"
] | 1 | 2021-09-11T23:58:39.000Z | 2021-09-11T23:58:39.000Z | gslib/utils/ls_helper.py | shinfan/gsutil | 45b5fc020bed44c6342fe70ce8b081aa222d9213 | [
"Apache-2.0"
] | null | null | null | gslib/utils/ls_helper.py | shinfan/gsutil | 45b5fc020bed44c6342fe70ce8b081aa222d9213 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions and class for listing commands such as ls and du."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import fnmatch
import sys
import six
from gslib.cloud_api import EncryptionException
from gslib.exception import CommandException
from gslib.plurality_checkable_iterator import PluralityCheckableIterator
from gslib.storage_url import GenerationFromUrlAndString
from gslib.utils.constants import S3_ACL_MARKER_GUID
from gslib.utils.constants import S3_DELETE_MARKER_GUID
from gslib.utils.constants import S3_MARKER_GUIDS
from gslib.utils.constants import UTF8
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.translation_helper import AclTranslation
from gslib.utils import text_util
from gslib.wildcard_iterator import StorageUrlFromString
ENCRYPTED_FIELDS = [
'md5Hash',
'crc32c',
]
UNENCRYPTED_FULL_LISTING_FIELDS = [
'acl',
'cacheControl',
'componentCount',
'contentDisposition',
'contentEncoding',
'contentLanguage',
'contentType',
'customTime',
'kmsKeyName',
'customerEncryption',
'etag',
'eventBasedHold',
'generation',
'metadata',
'metageneration',
'retentionExpirationTime',
'size',
'storageClass',
'temporaryHold',
'timeCreated',
'timeDeleted',
'timeStorageClassUpdated',
'updated',
]
def MakeMetadataLine(label, value, indent=1):
"""Returns a string with a vertically aligned label and value.
Labels of the same indentation level will start at the same column. Values
will all start at the same column (unless the combined left-indent and
label length is excessively long). If a value spans multiple lines,
indentation will only be applied to the first line. Example output from
several calls:
Label1: Value (default indent of 1 was used)
Sublabel1: Value (used indent of 2 here)
Label2: Value
Args:
label: The label to print in the first column.
value: The value to print in the second column.
indent: (4 * indent) spaces will be placed before the label.
Returns:
A string with a vertically aligned label and value.
"""
return '{}{}'.format(((' ' * indent * 4) + label + ':').ljust(28), value)
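# Quick illustration of the alignment behaviour (not part of the original
# module; with the default indent the value column starts at character 28):
#   >>> MakeMetadataLine('Content-Length', 1024)
#   '    Content-Length:         1024'
#   >>> MakeMetadataLine('Hash (md5)', 'abc==', indent=2)
#   '        Hash (md5):         abc=='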
def PrintBucketHeader(bucket_listing_ref): # pylint: disable=unused-argument
"""Default function for printing headers for buckets.
Header is printed prior to listing the contents of the bucket.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET.
"""
pass
def PrintDir(bucket_listing_ref):
"""Default function for printing buckets or prefixes.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
text_util.print_to_fd(bucket_listing_ref.url_string)
# pylint: disable=unused-argument
def PrintDirSummary(num_bytes, bucket_listing_ref):
"""Off-by-default function for printing buckets or prefix size summaries.
Args:
num_bytes: Number of bytes contained in the directory.
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
pass
def PrintDirHeader(bucket_listing_ref):
"""Default function for printing headers for prefixes.
Header is printed prior to listing the contents of the prefix.
Args:
bucket_listing_ref: BucketListingRef of type PREFIX.
"""
text_util.print_to_fd('{}:'.format(bucket_listing_ref.url_string))
def PrintNewLine():
"""Default function for printing new lines between directories."""
text_util.print_to_fd()
# pylint: disable=too-many-statements
def PrintFullInfoAboutObject(bucket_listing_ref, incl_acl=True):
"""Print full info for given object (like what displays for gsutil ls -L).
Args:
bucket_listing_ref: BucketListingRef being listed.
Must have ref_type OBJECT and a populated root_object
with the desired fields.
incl_acl: True if ACL info should be output.
Returns:
Tuple (number of objects, object_length)
Raises:
Exception: if calling bug encountered.
"""
url_str = bucket_listing_ref.url_string
storage_url = StorageUrlFromString(url_str)
obj = bucket_listing_ref.root_object
if (obj.metadata and
S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
num_bytes = obj.size
num_objs = 1
text_util.print_to_fd('{}:'.format(url_str))
if obj.timeCreated:
text_util.print_to_fd(
MakeMetadataLine('Creation time',
obj.timeCreated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.updated:
text_util.print_to_fd(
MakeMetadataLine('Update time',
obj.updated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if (obj.timeStorageClassUpdated and
obj.timeStorageClassUpdated != obj.timeCreated):
text_util.print_to_fd(
MakeMetadataLine(
'Storage class update time',
obj.timeStorageClassUpdated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.storageClass:
text_util.print_to_fd(MakeMetadataLine('Storage class', obj.storageClass))
if obj.temporaryHold:
text_util.print_to_fd(MakeMetadataLine('Temporary Hold', 'Enabled'))
if obj.eventBasedHold:
text_util.print_to_fd(MakeMetadataLine('Event-Based Hold', 'Enabled'))
if obj.retentionExpirationTime:
text_util.print_to_fd(
MakeMetadataLine(
'Retention Expiration',
obj.retentionExpirationTime.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.kmsKeyName:
text_util.print_to_fd(MakeMetadataLine('KMS key', obj.kmsKeyName))
if obj.cacheControl:
text_util.print_to_fd(MakeMetadataLine('Cache-Control', obj.cacheControl))
if obj.contentDisposition:
text_util.print_to_fd(
MakeMetadataLine('Content-Disposition', obj.contentDisposition))
if obj.contentEncoding:
text_util.print_to_fd(
MakeMetadataLine('Content-Encoding', obj.contentEncoding))
if obj.contentLanguage:
text_util.print_to_fd(
MakeMetadataLine('Content-Language', obj.contentLanguage))
text_util.print_to_fd(MakeMetadataLine('Content-Length', obj.size))
text_util.print_to_fd(MakeMetadataLine('Content-Type', obj.contentType))
if obj.componentCount:
text_util.print_to_fd(
MakeMetadataLine('Component-Count', obj.componentCount))
if obj.customTime:
text_util.print_to_fd(MakeMetadataLine('Custom-Time', obj.customTime))
if obj.timeDeleted:
text_util.print_to_fd(
MakeMetadataLine('Noncurrent time',
obj.timeDeleted.strftime('%a, %d %b %Y %H:%M:%S GMT')))
marker_props = {}
if obj.metadata and obj.metadata.additionalProperties:
non_marker_props = []
for add_prop in obj.metadata.additionalProperties:
if add_prop.key not in S3_MARKER_GUIDS:
non_marker_props.append(add_prop)
else:
marker_props[add_prop.key] = add_prop.value
if non_marker_props:
text_util.print_to_fd(MakeMetadataLine('Metadata', ''))
for ap in non_marker_props:
ap_key = '{}'.format(ap.key)
ap_value = '{}'.format(ap.value)
meta_data_line = MakeMetadataLine(ap_key, ap_value, indent=2)
text_util.print_to_fd(meta_data_line)
if obj.customerEncryption:
if not obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', 'encrypted'))
if not obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', 'encrypted'))
text_util.print_to_fd(
MakeMetadataLine('Encryption algorithm',
obj.customerEncryption.encryptionAlgorithm))
text_util.print_to_fd(
MakeMetadataLine('Encryption key SHA256',
obj.customerEncryption.keySha256))
if obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', obj.crc32c))
if obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', obj.md5Hash))
text_util.print_to_fd(MakeMetadataLine('ETag', obj.etag.strip('"\'')))
if obj.generation:
generation_str = GenerationFromUrlAndString(storage_url, obj.generation)
text_util.print_to_fd(MakeMetadataLine('Generation', generation_str))
if obj.metageneration:
text_util.print_to_fd(MakeMetadataLine('Metageneration',
obj.metageneration))
if incl_acl:
# JSON API won't return acls as part of the response unless we have
# full control scope
if obj.acl:
text_util.print_to_fd(
MakeMetadataLine('ACL', AclTranslation.JsonFromMessage(obj.acl)))
elif S3_ACL_MARKER_GUID in marker_props:
text_util.print_to_fd(
MakeMetadataLine('ACL', marker_props[S3_ACL_MARKER_GUID]))
else:
# Empty ACLs are possible with Bucket Policy Only and no longer imply
# ACCESS DENIED anymore.
text_util.print_to_fd(MakeMetadataLine('ACL', '[]'))
return (num_objs, num_bytes)
def PrintObject(bucket_listing_ref):
"""Default printing function for objects.
Args:
bucket_listing_ref: BucketListingRef of type OBJECT.
Returns:
(num_objects, num_bytes).
"""
try:
text_util.print_to_fd(bucket_listing_ref.url_string)
except IOError as e:
# Windows throws an IOError 0 here for object names containing Unicode
# chars. Ignore it.
if not (IS_WINDOWS and e.errno == 0):
raise
return (1, 0)
| 36.971602 | 80 | 0.681242 |
4003c8b3ef448fd698e5fde8ffd4368a0004acc2 | 2,250 | py | Python | app/config/cnMysql.py | itay-moav/rahl_commander | 79b9bb7d16f4f9511820d0e0ffcbba6ee8e0e42b | [
"MIT"
] | 1 | 2016-12-19T16:09:02.000Z | 2016-12-19T16:09:02.000Z | app/config/cnMysql.py | itay-moav/rahl_commander | 79b9bb7d16f4f9511820d0e0ffcbba6ee8e0e42b | [
"MIT"
] | 19 | 2015-01-08T18:34:13.000Z | 2018-02-26T14:51:22.000Z | app/config/cnMysql.py | itay-moav/rahl_commander | 79b9bb7d16f4f9511820d0e0ffcbba6ee8e0e42b | [
"MIT"
] | null | null | null | '''
Created on Dec 28, 2021
@author: Itay
Abstracting the DB connection piece
'''
import mysql.connector as My
from app import logging as L
from app import exceptions as exceptions | 34.615385 | 158 | 0.636444 |
4004bec8c10906a7cd716dc8ff33d14546f3a2fe | 1,527 | py | Python | src/detector/pre_process_test_data.py | DomGonthier/PecheFantome | d031a8fe5faa2ef35f2c1dbb8241281ffda22429 | [
"MIT"
] | null | null | null | src/detector/pre_process_test_data.py | DomGonthier/PecheFantome | d031a8fe5faa2ef35f2c1dbb8241281ffda22429 | [
"MIT"
] | 8 | 2020-02-19T20:03:44.000Z | 2022-02-03T19:27:24.000Z | src/detector/pre_process_test_data.py | DomGonthier/PecheFantome | d031a8fe5faa2ef35f2c1dbb8241281ffda22429 | [
"MIT"
] | 3 | 2020-02-19T19:02:19.000Z | 2021-12-14T14:06:25.000Z | import os
from tqdm import tqdm
import cv2
import numpy as np
# pre-process test data:
path = "raw_test_data/"
list_width = []
list_height = []
list_image = []
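# NOTE: the original pre_process() definition is not present in this file as
# extracted; the sketch below is a hypothetical reconstruction, inferred only
# from how path, list_width, list_height and list_image are used below.
def pre_process():
    for file_name in tqdm(os.listdir(path)):
        image = cv2.imread(os.path.join(path, file_name))
        if image is None:  # skip unreadable or non-image files
            continue
        height, width = image.shape[:2]
        list_width.append(width)
        list_height.append(height)
        list_image.append(image)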
pre_process()
for image in list_image:
print(image.shape)
| 31.163265 | 115 | 0.59201 |
4004f14ddc4bfb878b0872bfe2604774deea7bcf | 4,934 | py | Python | tensorflow/python/training/localhost_cluster_performance_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | 101 | 2016-12-03T11:40:52.000Z | 2017-12-23T02:02:03.000Z | tensorflow/python/training/localhost_cluster_performance_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | 9 | 2016-12-14T03:27:46.000Z | 2017-09-13T02:29:07.000Z | tensorflow/python/training/localhost_cluster_performance_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | 47 | 2016-12-04T12:37:24.000Z | 2018-01-14T18:13:07.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import portpicker
import tensorflow as tf
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return their servers."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)]
ps_servers = [
tf.train.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)]
return workers, ps_servers
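# Example usage (illustrative only; relies on the TF1-style API imported above):
#   workers, ps_servers = create_local_cluster(num_workers=2, num_ps=1)
#   # Each returned object is a tf.train.Server; a session can then target
#   # one of the workers, e.g. tf.Session(workers[0].target).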
if __name__ == "__main__":
tf.test.main()
| 37.097744 | 80 | 0.694366 |
4007ccb371063c993bd22bb2370d18838e357a3f | 3,218 | py | Python | extractor/util.py | bcskda/vk-archive-deepercopy | 3619b94eb3e0f5f67860022cdfb2074e457c0cd2 | [
"Unlicense"
] | 1 | 2020-04-24T09:24:31.000Z | 2020-04-24T09:24:31.000Z | extractor/util.py | bcskda/vk-archive-deepercopy | 3619b94eb3e0f5f67860022cdfb2074e457c0cd2 | [
"Unlicense"
] | null | null | null | extractor/util.py | bcskda/vk-archive-deepercopy | 3619b94eb3e0f5f67860022cdfb2074e457c0cd2 | [
"Unlicense"
] | null | null | null | import functools
import glob
import itertools
import logging
import os
from progressbar import progressbar
import re
import requests
from typing import List
def alphanumeric_glob(pattern: str):
"""Glob and sort alpahnumerically. Limitations: exactly one `*', no `?', file names with single extention."""
matches = glob.glob(pattern)
asterisk_pos = pattern.find('*')
matches.sort(key=lambda name: int(name[asterisk_pos:name.rfind('.')]))
return matches
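# Illustration (hypothetical file names): for files shot1.png ... shot12.png,
#   alphanumeric_glob('shot*.png')
# returns ['shot1.png', 'shot2.png', ..., 'shot12.png'], whereas plain
# sorted(glob.glob('shot*.png')) would place 'shot12.png' before 'shot2.png'.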
| 38.771084 | 134 | 0.657551 |
400afc4da001a8c030925a65e03f44b9ed050772 | 1,637 | py | Python | setup.py | gillins/pyshepseg | bfa8d157d610bf4f581a2500d0afb42d4f92d59b | [
"MIT"
] | 5 | 2021-02-03T05:02:56.000Z | 2022-01-31T07:55:20.000Z | setup.py | gillins/pyshepseg | bfa8d157d610bf4f581a2500d0afb42d4f92d59b | [
"MIT"
] | 14 | 2021-02-03T04:18:48.000Z | 2022-01-24T03:50:22.000Z | setup.py | gillins/pyshepseg | bfa8d157d610bf4f581a2500d0afb42d4f92d59b | [
"MIT"
] | 13 | 2021-02-03T03:41:17.000Z | 2022-01-24T04:21:23.000Z | #Copyright 2021 Neil Flood and Sam Gillingham. All rights reserved.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without restriction,
#including without limitation the rights to use, copy, modify,
#merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
#ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
#CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from numpy.distutils.core import setup
import pyshepseg
setup(name='pyshepseg',
version=pyshepseg.SHEPSEG_VERSION,
description='Python implementation of the image segmentation algorithm described by Shepherd et al',
author='Neil Flood and Sam Gillingham',
scripts=['bin/test_pyshepseg.py', 'bin/test_pyshepseg_tiling.py',
'bin/test_pyshepseg_subset.py'],
packages=['pyshepseg'],
license='LICENSE.txt',
url='https://github.com/ubarsc/pyshepseg'
)
| 46.771429 | 106 | 0.756261 |
400c696eb52726be2cb58df8b7625711faea5a60 | 3,846 | py | Python | src/utils.py | daochenzha/SimTSC | 6e3200510e8e464049eab95db9540afdaf397f9c | [
"MIT"
] | 23 | 2022-01-06T05:15:35.000Z | 2022-03-28T08:08:14.000Z | src/utils.py | daochenzha/SimTSC | 6e3200510e8e464049eab95db9540afdaf397f9c | [
"MIT"
] | 2 | 2022-02-10T02:22:35.000Z | 2022-03-28T16:45:17.000Z | src/utils.py | daochenzha/SimTSC | 6e3200510e8e464049eab95db9540afdaf397f9c | [
"MIT"
] | 5 | 2022-01-09T08:58:24.000Z | 2022-01-19T09:52:43.000Z | import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def read_dataset_from_npy(path):
""" Read dataset from .npy file
"""
data = np.load(path, allow_pickle=True)
return data[()]['X'], data[()]['y'], data[()]['train_idx'], data[()]['test_idx']
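# The .npy file is expected to hold a single pickled dict (illustrative):
#   np.save('toy.npy', {'X': X, 'y': y, 'train_idx': tr_idx, 'test_idx': te_idx})
#   X, y, train_idx, test_idx = read_dataset_from_npy('toy.npy')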
def read_dataset(ucr_root_dir, dataset_name, shot):
""" Read univariate dataset from UCR
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
y_train = df_train.values[:, 0].astype(np.int64)
y_test = df_test.values[:, 0].astype(np.int64)
y = np.concatenate((y_train, y_test))
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test))
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
X[np.isnan(X)] = 0
std_ = X.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=1, keepdims=True)) / std_
# add a dimension to make it multivariate with one dimension
X = X.reshape((X.shape[0], 1, X.shape[1]))
return X, y, train_idx, test_idx
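# Example (hypothetical paths): a 1-shot split keeps at most one labelled
# training sample per class in train_idx:
#   X, y, train_idx, test_idx = read_dataset('datasets/UCR', 'Coffee', shot=1)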
def read_multivariate_dataset(root_dir, dataset_name, shot):
""" Read multivariate dataset
"""
X = np.load(os.path.join(root_dir, dataset_name+".npy"), allow_pickle=True)
y = np.loadtxt(os.path.join(root_dir, dataset_name+'_label.txt'))
y = y.astype(np.int64)
dim = X[0].shape[0]
max_length = 0
for _X in X:
if _X.shape[1] > max_length:
max_length = _X.shape[1]
X_list = []
for i in range(len(X)):
_X = np.zeros((dim, max_length))
_X[:, :X[i].shape[1]] = X[i]
X_list.append(_X)
X = np.array(X_list, dtype=np.float32)
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
std_ = X.std(axis=2, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=2, keepdims=True)) / std_
return X, y, train_idx, test_idx
def read_X(ucr_root_dir, dataset_name):
""" Read the raw time-series
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test), axis=0)
return X
| 29.584615 | 103 | 0.621945 |
400d71727dfe67b72a8bc6849bc10bc05b88d55b | 17,458 | py | Python | mpinterfaces/mat2d/friction/analysis.py | yw-fang/MPInterfaces | ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e | [
"MIT"
] | 56 | 2015-06-23T03:03:18.000Z | 2022-02-06T16:41:34.000Z | mpinterfaces/mat2d/friction/analysis.py | yw-fang/MPInterfaces | ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e | [
"MIT"
] | 21 | 2015-09-03T17:50:18.000Z | 2022-03-01T02:26:34.000Z | mpinterfaces/mat2d/friction/analysis.py | joshgabriel/MPInterfaces | 2799ae161fa94c78842092fb24ef468607afa465 | [
"MIT"
] | 50 | 2015-09-17T19:09:36.000Z | 2021-11-15T19:13:20.000Z | from __future__ import print_function, division, unicode_literals
import os
import warnings
import numpy as np
from scipy import interpolate
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.core.structure import Structure
from pymatgen import Element
from pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator as VE
__author__ = "Michael Ashton"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Michael Ashton"
__email__ = "ashtonmv@gmail.com"
__status__ = "Production"
__date__ = "March 3, 2017"
def get_corrugation_factor(structure):
"""
Calculate the "corrugation factor" for a 2D material.
The corrugation factor is defined as the sum of the
outer hemispheres of ionic radii of the atoms on the
material's top and bottom surfaces, divided by the
planar area of the whole unit cell's 001 plane. Top
and bottom corrugation factors are returned
separately in the final dictionary. In general,
a larger corrugation factor means a smoother surface.
Args:
structure (Structure): Pymatgen Structure object.
Returns:
corrugation_factors (dict): Dictionary of "top"
and "bottom" corrugation factors, e.g.
{"top": top_corrugation_factor,
"bottom": bottom_corrugation_factor}
"""
sites = structure.sites
valences = VE(structure).valences
formatted_valences = {}
for e in valences:
if "+" in e or "-" in e:
try:
# Some element names have a number followed
# by a plus or minus, e.g. "O2-"
int(e[-2])
element = e[:-2]
except:
# Others are simply a plus or minus, e.g. "Cl-"
element = e[:-1]
else:
element = e
formatted_valences[Element(element)] = valences[e]
all_z_coords = [s.coords[2] for s in sites]
max_z = max(all_z_coords)
min_z = min(all_z_coords)
top_layer = [s for s in sites if abs(s.coords[2] - max_z) < 0.1]
bottom_layer = [s for s in sites if abs(s.coords[2] - min_z) < 0.1]
pi = np.pi
top_sphere_area = 0
bottom_sphere_area = 0
for site in top_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
top_sphere_area += 2*pi*r*r
for site in bottom_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
bottom_sphere_area += 2*pi*r*r
lattice = structure.lattice
area = abs(np.cross(lattice._matrix[0], lattice._matrix[1])[2])
corrugation = {"top": top_sphere_area / area,
"bottom": bottom_sphere_area / area}
return corrugation
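# Example usage (a sketch; assumes a local POSCAR file for a 2D material,
# and uses the Structure class already imported above):
#   s = Structure.from_file('POSCAR')
#   factors = get_corrugation_factor(s)
#   print(factors['top'], factors['bottom'])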
def plot_gamma_surface(fmt='pdf'):
"""
Collect the energies from a grid of static energy
calculations to plot the Gamma surface between two layers of the 2D
material.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
lattice = Structure.from_file('POSCAR').lattice
area = np.cross(lattice._matrix[0], lattice._matrix[1])[2]
ax = plt.figure(figsize=(n_divs_x * 1.2, n_divs_y * 1.2)).gca()
ax.set_xlim(0, n_divs_x + 1)
ax.set_ylim(0, n_divs_y + 1)
energies = []
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
not_converged = []
for x in x_values:
energies.append([])
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy / area
energies[x].append(energy)
except:
not_converged.append('{}x{}'.format(x, y))
energies[x].append(0)
os.chdir('../')
energies[x].append(energies[x][0])
energies.append([])
# ENERGY_ARRAY[n_divs_x] = ENERGY_ARRAY[0]
if not_converged:
warnings.warn('{} did not converge.'.format(not_converged))
for coords in not_converged:
energies[int(coords.split('x')[0])][int(coords.split('x')[1])] = energy
minima = []
maxima = []
for x in x_values:
minima.append(min(energies[x]))
maxima.append(max(energies[x]))
abs_minimum = min(minima)
abs_maximum = max(maxima)
for x in range(n_divs_x + 1):
for y in range(n_divs_y + 1):
# Plot all energies relative to the global minimum.
scaled_energy = energies[x][y] - abs_minimum
if '{}x{}'.format(x, y) in not_converged:
color_code = 'w'
else:
color_code = plt.cm.jet(
scaled_energy/(abs_maximum - abs_minimum))
ax.add_patch(plt.Rectangle((x, y), width=1, height=1,
facecolor=color_code, linewidth=0))
# Get rid of annoying ticks.
ax.axes.get_yaxis().set_ticks([])
ax.axes.get_xaxis().set_ticks([])
os.chdir('../../')
plt.savefig('gamma_surface.{}'.format(fmt), transparent=True)
plt.close()
def get_number_of_surface_atoms():
"""
Count the number of atoms at a 2D material's surface. This
enables energy and force calculations to be normalized to
the number of surface atoms.
Returns:
int. Number of surface atoms (top + bottom) for both
layers in the bilayer model.
"""
structure = Structure.from_file('friction/lateral/POSCAR')
heights = np.array([site.z for site in structure.sites])
max_height = max(heights)
min_height = min(heights)
n_atoms_top = len([height for height in heights if max_height - height < 0.1])
n_atoms_bottom = len([height for height in heights if height - min_height < 0.1])
return (n_atoms_top + n_atoms_bottom) * 2
def get_basin_and_peak_locations():
"""
Find which directories inside 'friction/lateral' represent
the minimum (basin) and maximum (peak) energy stacking
configurations.
Returns:
tuple. Of the form (basin, peak).
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
abs_maximum = -np.Infinity
abs_minimum = np.Infinity
for x in x_values:
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy
if energy < abs_minimum:
basin = dir
abs_minimum = energy
if energy > abs_maximum:
peak = dir
abs_maximum = energy
except:
pass
os.chdir('../')
os.chdir('../../')
    return (basin, peak)
def plot_friction_force(fmt='pdf'):
"""
Plot the sinusoidal curve of delta E between basin and saddle
points for each normal spacing dz.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
f, (ax1, ax2) = plt.subplots(2, figsize=(16, 16))
spacings = sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)])
spc_range = spacings[-1] - spacings[0] + 0.1
for spacing in spacings:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2 +
(start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
ax1.plot(x, sinx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax1.set_xticklabels(ax1.get_xticks(), family='serif', fontsize=18)
ax1.set_yticklabels(ax1.get_yticks(), family='serif', fontsize=18)
ax1.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax1.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', family='serif', fontsize=24)
ax2.plot(x, cosx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax2.set_xticklabels(ax2.get_xticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax2.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_f\/(eV/\AA)}$', family='serif', fontsize=24)
os.chdir('../')
ax1.legend(loc='upper right')
ax2.legend(loc='upper right')
os.chdir('../../')
plt.savefig('F_f.{}'.format(fmt))
def plot_normal_force(basin_dir, fmt='pdf'):
"""
Plot the LJ-like curve of the energy at the basin point
as a function of normal spacing dz.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
fig = plt.figure(figsize=(16, 10))
ax = fig.gca()
ax2 = ax.twinx()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
ax.set_xlim(spacings[0], spacings[-1])
ax.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0))
ax2.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0.9))
E_z = ax.plot(xnew, ynew, color=plt.cm.jet(0),
linewidth=4, label=r'$\mathrm{E(z)}$')
F_N = ax2.plot(spacings, [-y for y in ynew_slope], color=plt.cm.jet(0.9),
linewidth=4, label=r'$\mathrm{F_N}$')
ax.set_ylim(ax.get_ylim())
ax.set_xticklabels(ax.get_xticks(), family='serif', fontsize=18)
ax.set_yticklabels(ax.get_yticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax.set_xlabel(r'$\mathrm{z\/(\AA)}$', fontsize=24)
ax.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_N\/(eV/\AA)}$', fontsize=24)
data = E_z + F_N
labs = [l.get_label() for l in data]
ax.legend(data, labs, loc='upper right', fontsize=24)
ax.plot(spacings, E, linewidth=0, marker='o', color=plt.cm.jet(0),
markersize=10, markeredgecolor='none')
os.chdir('../../')
plt.savefig('F_N.{}'.format(fmt))
def plot_mu_vs_F_N(basin_dir, fmt='pdf'):
"""
Plot friction coefficient 'mu' vs. F_Normal.
mu = F_friction / F_Normal.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
fig = plt.figure(figsize=(16, 10))
# ax = fig.gca()
# ax2 = ax.twinx()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd()) if
os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
# xnew = np.arange(spacings[0], spacings[-1], 0.001)
# ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
sorted_dirs = sorted([float(spc) for spc in os.listdir(os.getcwd())
if os.path.isdir(spc)])
for spacing in sorted_dirs:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
ax = plt.figure().gca()
ax.plot(F_N, mu, linewidth=2, marker='o', markeredgecolor='none',
markersize=3, color=plt.cm.jet(0))
plt.savefig('mu_vs_F_N.{}'.format(fmt))
def get_mu_vs_F_N(basin_dir):
"""
Essentially the same function as plotting, but without the plot.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
Returns:
dic: Of the form {'F_N': F_N, 'mu': mu, 'F_f': F_f}, where
forces are in nN.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
# Convert eV.A to nN
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
for spacing in sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)]):
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
try:
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
-
Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
except:
print('One or more jobs in {}/ have not converged.'.format(spacing))
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
return {'F_N': F_N, 'mu': mu, 'F_f': F_f}
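# Typical workflow (illustrative; assumes the friction/lateral and
# friction/normal directory trees from the corresponding VASP runs exist
# under the current working directory):
#   basin, peak = get_basin_and_peak_locations()
#   plot_gamma_surface(fmt='png')
#   plot_normal_force(basin, fmt='png')
#   friction_data = get_mu_vs_F_N(basin)  # {'F_N': ..., 'mu': ..., 'F_f': ...}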
| 32.815789 | 94 | 0.592966 |
400f0a8fc2e264478738eb502734b3f76efaa361 | 1,380 | py | Python | aiopylimit/tests/test_aiopylimit.py | zealotous/aiopylimit | 0f93a06e751b97959835187a05311deaffaed9d8 | [
"Apache-2.0"
] | 4 | 2019-05-09T12:39:14.000Z | 2022-01-05T20:36:06.000Z | aiopylimit/tests/test_aiopylimit.py | zealotous/aiopylimit | 0f93a06e751b97959835187a05311deaffaed9d8 | [
"Apache-2.0"
] | null | null | null | aiopylimit/tests/test_aiopylimit.py | zealotous/aiopylimit | 0f93a06e751b97959835187a05311deaffaed9d8 | [
"Apache-2.0"
] | 1 | 2022-01-05T19:56:49.000Z | 2022-01-05T19:56:49.000Z | from aiopylimit import AIOPyRateLimit
from aiopylimit import AIOPyRateLimitException
import asynctest
import asyncio
| 39.428571 | 72 | 0.642754 |
4010464a9caf650b2a6706b3ea8adb7b2458ae14 | 5,772 | py | Python | bookworm/platform_services/_win32/tesseract_download.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 18 | 2019-07-19T22:12:15.000Z | 2020-08-26T17:45:19.000Z | bookworm/platform_services/_win32/tesseract_download.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 44 | 2019-07-15T10:17:00.000Z | 2020-07-26T11:22:53.000Z | bookworm/platform_services/_win32/tesseract_download.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 9 | 2019-09-03T13:13:31.000Z | 2020-08-25T13:55:27.000Z | # coding: utf-8
import sys
import shutil
import requests
import wx
from pathlib import Path
from urllib.parse import urljoin, urlsplit
from tempfile import TemporaryFile
from zipfile import ZipFile
from bookworm import typehints as t
from bookworm import app
from bookworm.http_tools import RemoteJsonResource, HttpResource
from bookworm.ocr_engines.tesseract_ocr_engine import (
TesseractOcrEngine,
get_tesseract_path,
)
from bookworm.logger import logger
log = logger.getChild(__name__)
BRANCH = "develop"
TESSERACT_VERSION_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/version"
if app.arch == "x86":
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x86.zip"
else:
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x64.zip"
FAST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_fast/main/{lang_code}.traineddata"
BEST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_best/main/{lang_code}.traineddata"
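# The traineddata URL templates are parameterized by a Tesseract language
# code, e.g. (illustrative):
#   url = FAST_TRAINEDDATA_DOWNLOAD_URL.format(lang_code='eng')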
| 26 | 139 | 0.573458 |
4010dc640b95065e204f3d03308d81598d5d3d22 | 2,448 | py | Python | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_proj.py
---------
    Date                 : November 2017
    Copyright            : (C) 2017 by Médéric Ribreux
    Email                : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'November 2017'
__copyright__ = '(C) 2017, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.core import QgsProcessingParameterString
| 36 | 75 | 0.561683 |
401141d52ec8be8928fc937b5ae582051fa62e45 | 1,919 | py | Python | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | [
"Apache-2.0"
] | null | null | null | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | [
"Apache-2.0"
] | null | null | null | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
from devsim.python_packages.simple_physics import *
import diode_common
device="diode2d"
region="Bulk"
diode_common.Create2DGmshMesh(device, region)
# this is the devsim format
write_devices (file="gmsh_diode2d_out.msh")
diode_common.SetParameters(device=device, region=region)
####
#### NetDoping
####
node_model(device=device, region=region, name="Acceptors", equation="1.0e18*step(0.5e-5-y);")
node_model(device=device, region=region, name="Donors" , equation="1.0e18*step(y-0.5e-5);")
node_model(device=device, region=region, name="NetDoping", equation="Donors-Acceptors;")
diode_common.InitialSolution(device, region)
####
#### Initial DC solution
####
solve(type="dc", absolute_error=1.0, relative_error=1e-12, maximum_iterations=30)
###
### Drift diffusion simulation at equilibrium
###
diode_common.DriftDiffusionInitialSolution(device, region)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=50)
v = 0.0
while v < 0.51:
set_parameter(device=device, name=GetContactBiasName("top"), value=v)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=30)
PrintCurrents(device, "top")
PrintCurrents(device, "bot")
v += 0.1
write_devices(file="gmsh_diode2d.dat", type="tecplot")
write_devices(file="gmsh_diode2d_dd.msh", type="devsim")
| 30.951613 | 93 | 0.755602 |
40114e46f1a2c773c276da8bbeeb5529999aac68 | 470 | py | Python | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | [
"Apache-2.0"
] | null | null | null | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | [
"Apache-2.0"
] | null | null | null | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
import os
path_base_default = os.getenv('ASTRO_IMAGING_DATA_PATH', default='./')
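# NOTE: the Paths dataclass definition was truncated out of this snippet; the
# minimal stand-in below (the field name is an assumption) only exists so
# that paths_default can be constructed.
@dataclass
class Paths:
    path_base: str = path_base_default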
paths_default = Paths()
| 22.380952 | 70 | 0.66383 |
4011b94aee384459cb359f2d52855f8d32eb9b50 | 8,018 | py | Python | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | [
"MIT"
] | 8 | 2021-06-07T11:02:38.000Z | 2022-03-17T11:30:28.000Z | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | [
"MIT"
] | null | null | null | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from typing import Tuple
import argparse
from src.loadopts import *
METHOD = "RobOC-AT"
SAVE_FREQ = 5
PRINT_FREQ = 20
FMT = "{description}={scale}-{leverage}" \
"={learning_policy}-{optimizer}-{lr}" \
"={attack}-{epsilon:.4f}-{stepsize}-{steps}" \
"={batch_size}={transform}"
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str)
parser.add_argument("dataset", type=str)
# for orthogonal classifier
parser.add_argument("--scale", type=float, default=10.,
help="the length of weights")
parser.add_argument("--leverage", type=float, default=0.15,
help="the hyper-parameter governs the relative weight between clean and adversarial samples")
# adversarial training settings
parser.add_argument("--attack", type=str, default="pgd-squared")
parser.add_argument("--epsilon", type=float, default=8/255)
parser.add_argument("--stepsize", type=float, default=0.25,
help="pgd:rel_stepsize, cwl2:step_size, deepfool:overshoot, bb:lr")
parser.add_argument("--steps", type=int, default=10)
# basic settings
parser.add_argument("--loss", type=str, default="square")
parser.add_argument("--optimizer", type=str, choices=("sgd", "adam"), default="sgd")
parser.add_argument("-mom", "--momentum", type=float, default=0.9,
help="the momentum used for SGD")
parser.add_argument("-beta1", "--beta1", type=float, default=0.9,
help="the first beta argument for Adam")
parser.add_argument("-beta2", "--beta2", type=float, default=0.999,
help="the second beta argument for Adam")
parser.add_argument("-wd", "--weight_decay", type=float, default=5e-4,
help="weight decay")
parser.add_argument("-lr", "--lr", "--LR", "--learning_rate", type=float, default=0.1)
parser.add_argument("-lp", "--learning_policy", type=str, default="default",
help="learning rate schedule defined in config.py")
parser.add_argument("--epochs", type=int, default=180)
parser.add_argument("-b", "--batch_size", type=int, default=128)
parser.add_argument("--transform", type=str, default='default',
help="the data augmentation which will be applied during training.")
parser.add_argument("--resume", action="store_true", default=False)
parser.add_argument("--progress", action="store_true", default=False,
help="show the progress if true")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("-m", "--description", type=str, default="RobOC-AT")
opts = parser.parse_args()
opts.description = FMT.format(**opts.__dict__)
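# Example invocation (illustrative; valid model/dataset names depend on what
# src.loadopts supports):
#   python AT.py resnet18 cifar10 --epochs 180 -lr 0.1 -b 128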
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
from src.utils import mkdirs, readme
cfg = load_cfg()
mkdirs(cfg.info_path, cfg.log_path)
readme(cfg.info_path, opts)
readme(cfg.log_path, opts, mode="a")
writter = SummaryWriter(log_dir=cfg.log_path, filename_suffix=METHOD)
main(**cfg)
cfg['coach'].save(cfg.info_path)
writter.close()
| 33.974576 | 109 | 0.669494 |
4012033dc557a9acee5693b0291d1d05afe295c0 | 680 | py | Python | notesapp/api_v1/models.py | kampkelly/drf_template | 44cda3fd4ebf0dc073a46205b392d5e783d9ceea | [
"MIT"
] | null | null | null | notesapp/api_v1/models.py | kampkelly/drf_template | 44cda3fd4ebf0dc073a46205b392d5e783d9ceea | [
"MIT"
] | null | null | null | notesapp/api_v1/models.py | kampkelly/drf_template | 44cda3fd4ebf0dc073a46205b392d5e783d9ceea | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 23.448276 | 80 | 0.733824 |
401276c3187f1d2baed3d5f8ab8ae0afba6d8f18 | 8,534 | py | Python | src/main_TS_tsconv_jma.py | inoue0406/radarJMA | f8996c3fe201f97d414fc96c4abfc6f930738d47 | [
"MIT"
] | 6 | 2018-12-20T00:32:17.000Z | 2021-05-24T08:29:08.000Z | src/main_TS_tsconv_jma.py | inoue0406/radarJMA | f8996c3fe201f97d414fc96c4abfc6f930738d47 | [
"MIT"
] | null | null | null | src/main_TS_tsconv_jma.py | inoue0406/radarJMA | f8996c3fe201f97d414fc96c4abfc6f930738d47 | [
"MIT"
] | 4 | 2018-09-20T07:08:03.000Z | 2020-06-07T21:43:31.000Z | # seq2seq LSTM (no-convolutional model) for time series prediction
import numpy as np
import torch
import torchvision
import torch.utils.data as data
import torchvision.transforms as transforms
import pandas as pd
import h5py
import os
import sys
import json
import time
import pdb
from jma_timeseries_dataset import *
from scaler import *
from train_valid_epoch_tsconv import *
from utils import Logger
from opts_ts import parse_opts
if __name__ == '__main__':
# parse command-line options
opt = parse_opts()
print(opt)
# create result dir
if not os.path.exists(opt.result_path):
os.mkdir(opt.result_path)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file)
# generic log file
logfile = open(os.path.join(opt.result_path, 'log_run.txt'),'w')
logfile.write('Start time:'+time.ctime()+'\n')
tstart = time.time()
# model information
modelinfo = open(os.path.join(opt.result_path, 'model_info.txt'),'w')
# prepare scaler for data
if opt.data_scaling == 'linear':
scl = LinearScaler()
if opt.data_scaling == 'root':
scl = RootScaler()
if not opt.no_train:
# loading datasets
train_dataset = JMATSConvDataset(csv_data=opt.train_data_path,
csv_anno=opt.train_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
valid_dataset = JMATSConvDataset(csv_data=opt.valid_data_path,
csv_anno=opt.valid_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
#tstdata = next(iter(train_dataset))
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=False)
if opt.model_name == 'seq2seq':
# lstm seq2seq model
CONV_HID_DIM = 32
INPUT_DIM = 1 + CONV_HID_DIM
OUTPUT_DIM = 1
HID_DIM = 512
N_LAYERS = 3
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
from models.seq2seq_convlstm_ts import *
enc = Encoder(INPUT_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2SeqConv(enc, dec, CONV_HID_DIM, device='cuda').cuda()
if opt.transfer_path != 'None':
# Use pretrained weights for transfer learning
print('loading pretrained model:',opt.transfer_path)
model = torch.load(opt.transfer_path)
modelinfo.write('Model Structure \n')
modelinfo.write(str(model))
count_parameters(model,modelinfo)
# modelinfo.close()
if opt.loss_function == 'MSE':
loss_fn = torch.nn.MSELoss()
# Type of optimizers adam/rmsprop
if opt.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
elif opt.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=opt.learning_rate)
# learning rate scheduler
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=opt.lr_decay)
# Prep logger
train_logger = Logger(
os.path.join(opt.result_path, 'train.log'),
['epoch', 'loss', 'lr'])
train_batch_logger = Logger(
os.path.join(opt.result_path, 'train_batch.log'),
['epoch', 'batch', 'loss', 'lr'])
valid_logger = Logger(
os.path.join(opt.result_path, 'valid.log'),
['epoch', 'loss'])
# training
for epoch in range(1,opt.n_epochs+1):
if epoch < 10:
# freeze conv_encoder for first 10 epochs
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = False
else:
# unfreeze conv_encoder for the rest
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = True
count_parameters(model,modelinfo)
#import pdb;pdb.set_trace()
# step scheduler
scheduler.step()
# training & validation
train_epoch(epoch,opt.n_epochs,train_loader,model,loss_fn,optimizer,
train_logger,train_batch_logger,opt,scl)
valid_epoch(epoch,opt.n_epochs,valid_loader,model,loss_fn,
valid_logger,opt,scl)
if epoch % opt.checkpoint == 0:
# save the trained model for every checkpoint
# (1) as binary
torch.save(model,os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.model' % epoch))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.dict' % epoch))
# save the trained model
# (1) as binary
torch.save(model,os.path.join(opt.result_path, 'trained_seq2seq.model'))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path, 'trained_seq2seq.dict'))
# test datasets if specified
if opt.test:
if opt.no_train:
#load pretrained model from results directory
model_fname = os.path.join(opt.result_path, opt.test_model_fname)
print('loading pretrained model:',model_fname)
model = torch.load(model_fname)
loss_fn = torch.nn.MSELoss()
# prepare loader
test_dataset = JMATSConvDataset(csv_data=opt.test_data_path,
csv_anno=opt.test_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
transform=None)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=opt.batch_size,
batch_size=3, # small batch size used
num_workers=7,
drop_last=True,
shuffle=False)
# testing for the trained model
test_epoch(test_loader,model,loss_fn,opt,scl)
# output elapsed time
logfile.write('End time: '+time.ctime()+'\n')
tend = time.time()
tdiff = float(tend-tstart)/3600.0
logfile.write('Elapsed time[hours]: %f \n' % tdiff)
| 40.832536 | 99 | 0.522498 |
40131096d61db66fe053946df5d75b8d65c51a7a | 556 | py | Python | bell2014/energy/prob_abs_s.py | dmaugis/intrinsic | e223fc8abceb2bf26f9a7752d72afe598ac4e1fd | [
"MIT"
] | 134 | 2015-01-04T04:54:19.000Z | 2021-10-16T07:39:02.000Z | bell2014/energy/prob_abs_s.py | dmaugis/intrinsic | e223fc8abceb2bf26f9a7752d72afe598ac4e1fd | [
"MIT"
] | 10 | 2016-07-30T21:45:11.000Z | 2021-03-03T14:12:50.000Z | bell2014/energy/prob_abs_s.py | dmaugis/intrinsic | e223fc8abceb2bf26f9a7752d72afe598ac4e1fd | [
"MIT"
] | 34 | 2015-01-14T16:39:27.000Z | 2021-10-31T11:29:50.000Z | import numpy as np
| 30.888889 | 85 | 0.591727 |
4013520787f6cc9bbf08df7635faa9848889aff8 | 13,500 | py | Python | onemsdk/parser/tag.py | mvnm/onemsdk | d6293c632d15af3b044f130343899d3b242e287a | [
"MIT"
] | null | null | null | onemsdk/parser/tag.py | mvnm/onemsdk | d6293c632d15af3b044f130343899d3b242e287a | [
"MIT"
] | 6 | 2019-07-05T07:54:03.000Z | 2019-09-30T10:47:10.000Z | onemsdk/parser/tag.py | mvnm/onemsdk | d6293c632d15af3b044f130343899d3b242e287a | [
"MIT"
] | 2 | 2019-08-30T07:36:48.000Z | 2020-01-13T01:40:06.000Z | import inspect
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Union, Type, Optional, Dict, Any
from pydantic import BaseModel
from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException
from .node import Node
__all__ = ['Tag', 'HeaderTag', 'FooterTag', 'BrTag', 'UlTag', 'LiTag', 'FormTag',
'SectionTag', 'InputTagAttrs', 'InputTag', 'FormTagAttrs', 'PTag', 'ATag',
'ATagAttrs', 'get_tag_cls', 'SectionTagAttrs', 'LiTagAttrs', 'InputTagType']
HeaderTag.update_forward_refs()
FooterTag.update_forward_refs()
InputTag.update_forward_refs()
LabelTag.update_forward_refs()
ATag.update_forward_refs()
LiTag.update_forward_refs()
UlTag.update_forward_refs()
PTag.update_forward_refs()
BrTag.update_forward_refs()
SectionTag.update_forward_refs()
FormTag.update_forward_refs()
_map_tag_cls = {}
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and issubclass(obj, Tag):
_map_tag_cls[obj.Config.tag_name] = obj
| 28.421053 | 91 | 0.605333 |
4014970fe4ab56a4d4e3af8b117a8432e328801e | 12,113 | py | Python | backend/syntax/rule.py | austinmarsray/Ccompiler | b3ef61283e33d06294c491b71586a945c38c6e54 | [
"MIT"
] | null | null | null | backend/syntax/rule.py | austinmarsray/Ccompiler | b3ef61283e33d06294c491b71586a945c38c6e54 | [
"MIT"
] | null | null | null | backend/syntax/rule.py | austinmarsray/Ccompiler | b3ef61283e33d06294c491b71586a945c38c6e54 | [
"MIT"
] | null | null | null |
"""
1. program -> define-list
2. define-list -> define define-list
| empty
3. define -> type ID define-type
4. define-type -> var-define-follow
| fun-define-follow
5. var-define-follow -> ;
| [ NUM ] ;
6. type -> int
| void
7. fun-define-follow -> ( params ) code-block
8. params -> param-list
| empty
9. param-list -> param param-follow
10. param-follow -> , param param-follow
| empty
11. param -> type ID array-subscript
12. array-subscript -> [ ]
| empty
13. code-block -> { local-define-list code-list }
14. local-define-list -> local-var-define local-define-list
| empty
15. local-var-define -> type ID var-define-follow
16. code-list -> code code-list
| empty
17. code -> normal-statement
| selection-statement
| iteration-statement
| return-statement
18. normal-statement -> ;
| ID normal-statement-follow
19. normal-statement-follow -> var-follow = expression ;
| call-follow ;
20. call-follow -> ( call-params )
21. call-params -> call-param-list
| empty
22. call-param-list -> expression call-param-follow
23. call-param-follow -> , expression call-param-follow
| empty
24. selection-statement -> if ( expression ) { code-list } selection-follow
25. selection-follow -> else { code-list }
| empty
26. iteration-statement -> while ( expression ) iteration-follow
27. iteration-follow -> { code-list }
| code
28. return-statement -> return return-follow
29. return-follow -> ;
| expression ;
30. var-follow -> [ expression ]
| empty
31. expression -> additive-expr expression-follow
32. expression-follow -> rel-op additive-expr
| empty
33. rel-op -> <=
| <
| >
| >=
| ==
| !=
34. additive-expr -> term additive-expr-follow
35. additive-expr-follow -> add-op term additive-expr-follow
| empty
36. add-op -> +
| -
37. term -> factor term-follow
38. term-follow -> mul-op factor term-follow
| empty
39. mul-op -> *
| /
40. factor -> ( expression )
| ID id-factor-follow | NUM
41. id-factor-follow -> var-follow
| ( args )
42. args -> arg-list
| empty
43. arg-list -> expression arg-list-follow
44. arg-list-follow -> , expression arg-list-follow
| empty
"""
# terminal symbols
terminal_sign_type = [
'else',
'if',
'int',
'return',
'void',
'while',
'addition',
'subtraction',
'multiplication',
'division',
'bigger',
'bigger-equal',
'smaller',
'smaller-equal',
'equal',
'not-equal',
'evaluate',
'semicolon',
'comma',
'left-parentheses',
'right-parentheses',
'left-bracket',
'right-bracket',
'left-brace',
'right-brace',
'id',
'num',
# 'pound'
'pound'
]
# non-terminal symbols
non_terminal_sign_type = [
'program',
'define-list',
'define',
'define-type',
'var-define-follow',
'type',
'fun-define-follow',
'params',
'param-list',
'param-follow',
'param',
'array-subscript',
'code-block',
'local-define-list',
'local-var-define',
'code-list',
'code',
'normal-statement',
'normal-statement-follow',
'call-follow',
'call-params',
'call-param-list',
'call-param-follow',
'selection-statement',
'selection-follow',
'iteration-statement',
'iteration-follow',
'return-statement',
'return-follow',
# 'eval-statement',
# 'var',
'var-follow',
'expression',
'expression-follow',
'rel-op',
'additive-expr',
'additive-expr-follow',
'add-op',
'term',
'term-follow',
'mul-op',
'factor',
'id-factor-follow',
'args',
'arg-list',
'arg-list-follow'
]
# productions (numbered to match the grammar above)
productions = [
# 0
Production('program', ['define-list']),
# 1
Production('define-list', ['define', 'define-list']),
Production('define-list', []),
# 2
Production('define', ['type', 'id', 'define-type']),
# 3
Production('define-type', ['var-define-follow']),
Production('define-type', ['fun-define-follow']),
# 4
Production('var-define-follow', ['semicolon']),
Production('var-define-follow', ['left-bracket', 'num', 'right-bracket', 'semicolon']),
# 5
Production('type', ['int']),
Production('type', ['void']),
# 6
Production('fun-define-follow', ['left-parentheses', 'params', 'right-parentheses', 'code-block']),
# 7
Production('params', ['param-list']),
Production('params', []),
# 8
Production('param-list', ['param', 'param-follow']),
# 9
Production('param-follow', ['comma', 'param', 'param-follow']),
Production('param-follow', []),
# 10
Production('param', ['type', 'id', 'array-subscript']),
# 11
Production('array-subscript', ['left-bracket', 'right-bracket']),
Production('array-subscript', []),
# 12
Production('code-block', ['left-brace', 'local-define-list', 'code-list', 'right-brace']),
# 13
Production('local-define-list', ['local-var-define', 'local-define-list']),
Production('local-define-list', []),
# 14
Production('local-var-define', ['type', 'id', 'var-define-follow']),
# 15
Production('code-list', ['code', 'code-list']),
Production('code-list', []),
# 16
Production('code', ['normal-statement']),
Production('code', ['selection-statement']),
Production('code', ['iteration-statement']),
Production('code', ['return-statement']),
# Production('normal-statement', ['eval-statement', 'semicolon']),
# Production('normal-statement', ['semicolon']),
# 17
Production('normal-statement', ['semicolon']),
Production('normal-statement', ['id', 'normal-statement-follow']),
# 18
Production('normal-statement-follow', ['var-follow', 'evaluate', 'expression', 'semicolon']),
Production('normal-statement-follow', ['call-follow', 'semicolon']),
# 19
Production('call-follow', ['left-parentheses', 'call-params', 'right-parentheses']),
# 20
Production('call-params', ['call-param-list']),
Production('call-params', []),
# 21
Production('call-param-list', ['expression', 'call-param-follow']),
# 22
Production('call-param-follow', ['comma', 'expression', 'call-param-follow']),
Production('call-param-follow', []),
# 23
Production('selection-statement',
['if', 'left-parentheses', 'expression', 'right-parentheses', 'left-brace',
'code-list', 'right-brace', 'selection-follow']),
# 24
Production('selection-follow', ['else', 'left-brace', 'code-list', 'right-brace']),
Production('selection-follow', []),
# 25
Production('iteration-statement', ['while', 'left-parentheses', 'expression',
'right-parentheses', 'iteration-follow']),
# 26
Production('iteration-follow', ['left-brace', 'code-list', 'right-brace']),
Production('iteration-follow', ['code']),
# 27
Production('return-statement', ['return', 'return-follow']),
# 28
Production('return-follow', ['semicolon']),
Production('return-follow', ['expression', 'semicolon']),
# Production('eval-statement', ['var', 'evaluate', 'expression']),
# Production('var', ['id', 'var-follow']),
# 29
Production('var-follow', ['left-bracket', 'expression', 'right-bracket']),
Production('var-follow', []),
# 30
Production('expression', ['additive-expr', 'expression-follow']),
# 31
Production('expression-follow', ['rel-op', 'additive-expr']),
Production('expression-follow', []),
# 32
Production('rel-op', ['smaller-equal']),
Production('rel-op', ['smaller']),
Production('rel-op', ['bigger']),
Production('rel-op', ['bigger-equal']),
Production('rel-op', ['equal']),
Production('rel-op', ['not-equal']),
# 33
Production('additive-expr', ['term', 'additive-expr-follow']),
# 34
Production('additive-expr-follow', ['add-op', 'term', 'additive-expr-follow']),
Production('additive-expr-follow', []),
# 35
Production('add-op', ['addition']),
Production('add-op', ['subtraction']),
# 36
Production('term', ['factor', 'term-follow']),
# 37
Production('term-follow', ['mul-op', 'factor', 'term-follow']),
Production('term-follow', []),
# 38
Production('mul-op', ['multiplication']),
Production('mul-op', ['division']),
# 39
Production('factor', ['left-parentheses', 'expression', 'right-parentheses']),
Production('factor', ['id', 'id-factor-follow']),
Production('factor', ['num']),
# 40
Production('id-factor-follow', ['var-follow']),
Production('id-factor-follow', ['left-parentheses', 'args', 'right-parentheses']),
# 41
Production('args', ['arg-list']),
Production('args', []),
# 42
Production('arg-list', ['expression', 'arg-list-follow']),
Production('arg-list-follow', ['comma', 'expression', 'arg-list-follow']),
Production('arg-list-follow', [])
]
# grammar start symbol
grammar_start = Sign('program') | 29.834975 | 103 | 0.536366 |
4015db6712f5e331d7a0bca4b41018047675a6cf | 24,566 | py | Python | redash/models.py | slachiewicz/redash | 84d95272f31885be00fbeef0cdbf6ddae6037f5d | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-06-27T07:40:51.000Z | 2019-06-27T07:40:51.000Z | redash/models.py | slachiewicz/redash | 84d95272f31885be00fbeef0cdbf6ddae6037f5d | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2021-03-20T05:38:23.000Z | 2021-03-20T05:38:23.000Z | redash/models.py | slachiewicz/redash | 84d95272f31885be00fbeef0cdbf6ddae6037f5d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import json
import hashlib
import logging
import os
import threading
import time
import datetime
import itertools
import peewee
from passlib.apps import custom_app_context as pwd_context
from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase
from flask.ext.login import UserMixin, AnonymousUserMixin
import psycopg2
from redash import utils, settings, redis_connection
from redash.query_runner import get_query_runner
db = Database()
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return self.password_hash and pwd_context.verify(password, self.password_hash)
class ActivityLog(BaseModel):
QUERY_EXECUTION = 1
id = peewee.PrimaryKeyField()
user = peewee.ForeignKeyField(User)
type = peewee.IntegerField()
activity = peewee.TextField()
created_at = DateTimeTZField(default=datetime.datetime.now)
class DataSource(BaseModel):
id = peewee.PrimaryKeyField()
name = peewee.CharField(unique=True)
type = peewee.CharField()
options = peewee.TextField()
queue_name = peewee.CharField(default="queries")
scheduled_queue_name = peewee.CharField(default="queries")
created_at = DateTimeTZField(default=datetime.datetime.now)
def should_schedule_next(previous_iteration, now, schedule):
if schedule.isdigit():
ttl = int(schedule)
next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
else:
hour, minute = schedule.split(':')
hour, minute = int(hour), int(minute)
# The following logic is needed for cases like the following:
# - The query scheduled to run at 23:59.
# - The scheduler wakes up at 00:01.
# - Using naive implementation of comparing timestamps, it will skip the execution.
normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)
if normalized_previous_iteration > previous_iteration:
previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)
return now > next_iteration
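# Illustrative calls (values made up): a digit string is a TTL in seconds,
# while "HH:MM" schedules one run per day at that wall-clock time.
# should_schedule_next(previous_run, datetime.datetime.now(), "3600")   # hourly
# should_schedule_next(previous_run, datetime.datetime.now(), "23:59")  # daily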
class Query(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
data_source = peewee.ForeignKeyField(DataSource)
latest_query_data = peewee.ForeignKeyField(QueryResult, null=True)
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
query = peewee.TextField()
query_hash = peewee.CharField(max_length=32)
api_key = peewee.CharField(max_length=40)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
last_modified_by = peewee.ForeignKeyField(User, null=True, related_name="modified_queries")
is_archived = peewee.BooleanField(default=False, index=True)
schedule = peewee.CharField(max_length=10, null=True)
def pre_save(self, created):
super(Query, self).pre_save(created)
self.query_hash = utils.gen_query_hash(self.query)
self._set_api_key()
if self.last_modified_by is None:
self.last_modified_by = self.user
def __unicode__(self):
return unicode(self.id)
class Dashboard(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
slug = peewee.CharField(max_length=140, index=True)
name = peewee.CharField(max_length=100)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
layout = peewee.TextField()
dashboard_filters_enabled = peewee.BooleanField(default=False)
is_archived = peewee.BooleanField(default=False, index=True)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = utils.slugify(self.name)
tries = 1
while self.select().where(Dashboard.slug == self.slug).first() is not None:
self.slug = utils.slugify(self.name) + "_{0}".format(tries)
tries += 1
super(Dashboard, self).save(*args, **kwargs)
def __unicode__(self):
return u"%s=%s" % (self.id, self.name)
class Visualization(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
type = peewee.CharField(max_length=100)
query = peewee.ForeignKeyField(Query, related_name='visualizations')
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
options = peewee.TextField()
class Widget(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True)
text = peewee.TextField(null=True)
width = peewee.IntegerField()
options = peewee.TextField()
dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)
# unused; kept for backward compatability:
type = peewee.CharField(max_length=100, null=True)
query_id = peewee.IntegerField(null=True)
class Event(BaseModel):
user = peewee.ForeignKeyField(User, related_name="events", null=True)
action = peewee.CharField()
object_type = peewee.CharField()
object_id = peewee.CharField(null=True)
additional_properties = peewee.TextField(null=True)
created_at = DateTimeTZField(default=datetime.datetime.now)
all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
| 33.790922 | 121 | 0.620492 |
40163fa4a642e9716f853bee7c3624573ecfac17 | 10,112 | py | Python | xclib/classifier/ova.py | sushantsondhi/pyxclib | ecdfab6b72f9a02892eee617f45bef73c928ca81 | [
"MIT"
] | 4 | 2019-07-11T14:43:22.000Z | 2019-08-08T19:12:53.000Z | xclib/classifier/ova.py | kunaldahiya/xclib | b40e4dd49533ac78231a12f8af362e7f8c6f5df2 | [
"MIT"
] | null | null | null | xclib/classifier/ova.py | kunaldahiya/xclib | b40e4dd49533ac78231a12f8af362e7f8c6f5df2 | [
"MIT"
] | null | null | null | import numpy as np
import time
import logging
from .base import BaseClassifier
import scipy.sparse as sp
from ._svm import train_one
from functools import partial
from ..utils import sparse
from ..data import data_loader
from ._svm import train_one, _get_liblinear_solver_type
from joblib import Parallel, delayed
from ..utils.matrix import SMatrix
from tqdm import tqdm
| 37.313653 | 79 | 0.598497 |
40179a2e52133e978bed3c8e59ac4742ba5dae20 | 6,555 | py | Python | ipgroup_test.py | RyPeck/python-ipgroup | 8fb1037d886a52127e7231f051403396dcb4dc60 | [
"Apache-2.0"
] | 1 | 2015-01-10T18:34:51.000Z | 2015-01-10T18:34:51.000Z | ipgroup_test.py | RyPeck/python-ipgroup | 8fb1037d886a52127e7231f051403396dcb4dc60 | [
"Apache-2.0"
] | null | null | null | ipgroup_test.py | RyPeck/python-ipgroup | 8fb1037d886a52127e7231f051403396dcb4dc60 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import ipaddress
import random
import unittest
import ipgroup
if __name__ == "__main__":
unittest.main()
| 31.666667 | 79 | 0.438139 |
4017c147f527555c7fa69c7bf75c0f142e6a0a28 | 2,566 | py | Python | progress.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
] | 2 | 2022-03-07T06:41:35.000Z | 2022-03-11T04:26:40.000Z | progress.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
] | 1 | 2022-02-22T02:08:06.000Z | 2022-02-22T02:08:06.000Z | progress.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
] | 1 | 2022-02-21T19:47:10.000Z | 2022-02-21T19:47:10.000Z | from dataclasses import dataclass
from dol import Dol
from asm_section_list import AsmSection, AsmSectionType
def calc_generic_progress(dol: Dol, asm_list: list[AsmSection]):
# Sum up code/data in ASM
asm_code_size = 0
asm_data_size = 0
for section in asm_list:
if section.type == AsmSectionType.CODE:
asm_code_size += section.size
elif section.type == AsmSectionType.DATA:
asm_data_size += section.size
else:
assert False, f"Invalid section type ({section.type})!"
# Dol sizes
dol_code_size = dol.code_size()
dol_data_size = dol.data_size()
# Decompiled sizes
decomp_code_size = dol_code_size - asm_code_size
decomp_data_size = dol_data_size - asm_data_size
# Percentages
code_percent = decomp_code_size / dol_code_size
data_percent = decomp_data_size / dol_data_size
print(f"Code sections: {decomp_code_size} / {dol_code_size} bytes in src ({code_percent:%})")
print(f"Data sections: {decomp_data_size} / {dol_data_size} bytes in src ({data_percent:%})")
def calc_slice_progress(slices: SliceGroup, asm_list: list[AsmSection]):
asm_slice_size = 0
for section in asm_list:
if slices.contains_section(section):
if section.type == AsmSectionType.CODE:
asm_slice_size += section.size
elif section.type == AsmSectionType.DATA:
asm_slice_size += section.size
else:
assert False, f"Invalid section type ({section.type})!"
# Dol sizes
dol_slice_size = slices.total_size()
# Decompiled sizes
decomp_slice_size = dol_slice_size - asm_slice_size
# Percentages
slice_percent = decomp_slice_size / dol_slice_size
print(f"\t{slices.name}: {decomp_slice_size} / {dol_slice_size} bytes in src ({slice_percent:%})") | 33.763158 | 102 | 0.66212 |
4018589aba6937e4ecc7ee0d948bf2a417774d03 | 13,993 | py | Python | main_qm9.py | maxxxzdn/en_flows | 04ed4dd45431cafcd23f8bf5199a47f917a72058 | [
"MIT"
] | null | null | null | main_qm9.py | maxxxzdn/en_flows | 04ed4dd45431cafcd23f8bf5199a47f917a72058 | [
"MIT"
] | null | null | null | main_qm9.py | maxxxzdn/en_flows | 04ed4dd45431cafcd23f8bf5199a47f917a72058 | [
"MIT"
] | null | null | null | import utils
import argparse
import wandb
from os.path import join
from qm9 import dataset
from qm9 import losses
from qm9.models import get_optim, get_model
from flows.utils import assert_mean_zero_with_mask, remove_mean_with_mask,\
assert_correctly_masked
import torch
import time
import pickle
import numpy as np
import qm9.visualizer as vis
from qm9.analyze import analyze_stability_for_molecules
from qm9.utils import prepare_context
from qm9.sampling import sample_chain, sample
from qm9 import mol_dim
parser = argparse.ArgumentParser(description='SE3')
parser.add_argument('--exp_name', type=str, default='debug_10')
parser.add_argument('--model', type=str, default='egnn_dynamics',
help='our_dynamics | schnet | simple_dynamics | '
'kernel_dynamics | egnn_dynamics |gnn_dynamics')
parser.add_argument('--n_epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--brute_force', type=eval, default=False,
help='True | False')
parser.add_argument('--actnorm', type=eval, default=True,
help='True | False')
parser.add_argument('--break_train_epoch', type=eval, default=False,
help='True | False')
parser.add_argument('--dp', type=eval, default=True,
help='True | False')
parser.add_argument('--condition_time', type=eval, default=True,
help='True | False')
parser.add_argument('--clip_grad', type=eval, default=True,
help='True | False')
parser.add_argument('--trace', type=str, default='hutch',
help='hutch | exact')
parser.add_argument('--n_layers', type=int, default=6,
help='number of layers')
parser.add_argument('--nf', type=int, default=64,
help='number of layers')
parser.add_argument('--ode_regularization', type=float, default=1e-3)
parser.add_argument('--dataset', type=str, default='qm9',
help='qm9 | qm9_positional')
parser.add_argument('--dequantization', type=str, default='argmax_variational',
help='uniform | variational | argmax_variational')
parser.add_argument('--tanh', type=eval, default=True,
help='use tanh in the coord_mlp')
parser.add_argument('--attention', type=eval, default=True,
help='use attention in the EGNN')
parser.add_argument('--n_report_steps', type=int, default=1)
parser.add_argument('--wandb_usr', type=str, default='')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--save_model', type=eval, default=True,
help='save model')
parser.add_argument('--generate_epochs', type=int, default=1,
                    help='interval (in epochs) between sample generations')
parser.add_argument('--num_workers', type=int, default=0, help='Number of workers for the dataloader')
parser.add_argument('--test_epochs', type=int, default=1)
parser.add_argument('--physics', type=int, default=0, help='Minimize energy loss or not')
parser.add_argument('--data_augmentation', type=eval, default=False,
                    help='apply data augmentation to the training data')
parser.add_argument('--x_aggregation', type=str, default='sum',
help='sum | mean')
parser.add_argument("--conditioning", nargs='+', default=[],
help='multiple arguments can be passed, '
'including: homo | onehot | lumo | num_atoms | etc. '
'usage: "--conditioning H_thermo homo onehot H_thermo"')
parser.add_argument('--resume', type=str, default=None,
help='')
parser.add_argument('--start_epoch', type=int, default=0,
help='')
args, unparsed_args = parser.parse_known_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
dtype = torch.float32
if args.resume is not None:
exp_name = args.exp_name + '_resume'
start_epoch = args.start_epoch
resume = args.resume
wandb_usr = args.wandb_usr
with open(join(args.resume, 'args.pickle'), 'rb') as f:
args = pickle.load(f)
args.resume = resume
args.break_train_epoch = False
args.exp_name = exp_name
args.start_epoch = start_epoch
args.wandb_usr = wandb_usr
print(args)
utils.create_folders(args)
print(args)
# Log all args to wandb
wandb.init(entity='aipp', project='eegVAE', name=args.exp_name, config=args)
wandb.save('*.txt')
# Retrieve QM9 dataloaders
dataloaders, charge_scale = dataset.retrieve_dataloaders(args.batch_size, args.num_workers)
data_dummy = next(iter(dataloaders['train']))
if len(args.conditioning) > 0:
print(f'Conditioning on {args.conditioning}')
context_dummy = prepare_context(args.conditioning, data_dummy)
context_node_nf = context_dummy.size(2)
else:
context_node_nf = 0
args.context_node_nf = context_node_nf
# Create EGNN flow
prior, flow, dequantizer, nodes_dist = get_model(args, device)
flow = flow.to(device)
dequantizer = dequantizer.to(device)
optim = get_optim(args, flow, dequantizer)
print(flow)
gradnorm_queue = utils.Queue()
gradnorm_queue.add(3000) # Add large value that will be flushed.
if __name__ == "__main__":
main()
| 38.977716 | 116 | 0.622811 |
40186bf606d530f7f4ad08aa9b623b5881609f5c | 230 | py | Python | vise/tests/util/test_string.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 16 | 2020-07-14T13:14:05.000Z | 2022-03-04T13:39:30.000Z | vise/tests/util/test_string.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 10 | 2021-03-15T20:47:45.000Z | 2021-08-19T00:47:12.000Z | vise/tests/util/test_string.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 6 | 2020-03-03T00:42:39.000Z | 2022-02-22T02:34:47.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from vise.util.string import numbers_to_lowercases
| 32.857143 | 70 | 0.73913 |
401926cb60c477135712ef8b53eac69d6cf43064 | 421 | py | Python | code/ch_02_foundations/_02_noneness.py | SuppMonkey/write.pythonic.code | 4400b219198c14ea0d7d9453cf6d367123b6ce8c | [
"MIT"
] | 679 | 2016-06-16T22:19:40.000Z | 2022-03-25T19:31:45.000Z | code/ch_02_foundations/_02_noneness.py | SuppMonkey/write.pythonic.code | 4400b219198c14ea0d7d9453cf6d367123b6ce8c | [
"MIT"
] | 11 | 2017-04-17T15:25:42.000Z | 2019-11-30T15:58:28.000Z | code/ch_02_foundations/_02_noneness.py | SuppMonkey/write.pythonic.code | 4400b219198c14ea0d7d9453cf6d367123b6ce8c | [
"MIT"
] | 199 | 2016-06-21T19:13:47.000Z | 2022-03-25T03:36:54.000Z |
def find_accounts(search_text):
    # Stand-in lookup (illustrative): returns None to simulate an
    # unavailable database, or a list of matching accounts otherwise.
    return None

accounts = find_accounts('python')
if accounts is None:
print("Error: DB not available")
else:
print("Accounts found: Would list them here...")
db_is_availble = True
| 10.268293 | 52 | 0.655582 |
401988f94a7b7ebda02b1f821bbce411385f8136 | 3,885 | py | Python | pupa/tests/importers/test_base_importer.py | influence-usa/pupa | 5105c39a535ad401f7babe4eecb3861bed1f8326 | [
"BSD-3-Clause"
] | null | null | null | pupa/tests/importers/test_base_importer.py | influence-usa/pupa | 5105c39a535ad401f7babe4eecb3861bed1f8326 | [
"BSD-3-Clause"
] | 3 | 2015-06-09T19:22:50.000Z | 2015-06-09T21:41:22.000Z | pupa/tests/importers/test_base_importer.py | influence-usa/pupa | 5105c39a535ad401f7babe4eecb3861bed1f8326 | [
"BSD-3-Clause"
] | null | null | null | import os
import json
import shutil
import tempfile
import mock
import pytest
from opencivicdata.models import Person
from pupa.scrape import Person as ScrapePerson
from pupa.scrape import Organization as ScrapeOrganization
from pupa.importers.base import omnihash, BaseImporter
from pupa.importers import PersonImporter, OrganizationImporter
from pupa.exceptions import UnresolvedIdError, DataImportError
# These next few tests exercise only Person, since the same base-importer
# code handles every type; for completeness they could be repeated per type.
| 31.585366 | 94 | 0.686486 |
401b154f2a06b6253bd915fb79af056b04b243aa | 6,008 | py | Python | packaging/bdist_trinoadmin.py | wgzhao/trino-admin | cd2c71e4d0490cf836a7ddf0dbab69b967408ac8 | [
"Apache-2.0"
] | null | null | null | packaging/bdist_trinoadmin.py | wgzhao/trino-admin | cd2c71e4d0490cf836a7ddf0dbab69b967408ac8 | [
"Apache-2.0"
] | 2 | 2021-10-19T05:37:09.000Z | 2022-03-29T22:07:21.000Z | packaging/bdist_trinoadmin.py | wgzhao/trino-admin | cd2c71e4d0490cf836a7ddf0dbab69b967408ac8 | [
"Apache-2.0"
] | 1 | 2021-12-27T02:38:32.000Z | 2021-12-27T02:38:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from distutils import log as logger
from distutils.dir_util import remove_tree
import pip
try:
from setuptools import Command
except ImportError:
from distutils.core import Command
from packaging import package_dir
| 37.55 | 116 | 0.621172 |
401bf5f8c246403323fb3816b89f804ced1d9820 | 547 | py | Python | 2020-05-month-long-challenge/day22.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | 2020-05-month-long-challenge/day22.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | 2020-05-month-long-challenge/day22.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# Day 22: Sort Characters By Frequency
#
# Given a string, sort it in decreasing order based on the frequency of
# characters.
import collections
# A straightforward Counter-based implementation:
class Solution:
    def frequencySort(self, s: str) -> str:
        # Count the characters, then emit the most frequent first; ties can
        # come out in any order, hence the alternatives accepted below.
        counts = collections.Counter(s)
        return "".join(char * n for char, n in counts.most_common())

# Tests
assert Solution().frequencySort("tree") in ["eert", "eetr"]
assert Solution().frequencySort("cccaaa") in ["cccaaa", "aaaccc"]
assert Solution().frequencySort("Aabb") in ["bbAa", "bbaA"]
| 27.35 | 71 | 0.659963 |
401c85c8336927c2f23953dd8bb76eb17a0d8316 | 1,877 | py | Python | loc.py | relax-space/pandas-first | c8aceae09263a9566ef7dc7631e27f25d569aad4 | [
"Apache-2.0"
] | null | null | null | loc.py | relax-space/pandas-first | c8aceae09263a9566ef7dc7631e27f25d569aad4 | [
"Apache-2.0"
] | null | null | null | loc.py | relax-space/pandas-first | c8aceae09263a9566ef7dc7631e27f25d569aad4 | [
"Apache-2.0"
] | null | null | null | '''
Goal: learn the difference between pandas loc and iloc.
1. iloc selects rows and columns by integer position.
2. loc selects rows and columns by index/column labels.
3. Both are ways to slice a DataFrame.
For a DataFrame with an index and columns, use iloc for positions
and loc for labels.
'''
import numpy as np
import pandas as pd
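# Minimal sketch of the difference (constructed data; the DataFrame and
# labels here are illustrative, not from the original script):
demo = pd.DataFrame({'a': [10, 20, 30]}, index=['x', 'y', 'z'])
print(demo.iloc[0])    # first row, selected by integer position
print(demo.loc['y'])   # row labeled 'y', selected by index label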
| 26.069444 | 79 | 0.559403 |
401e8c47a022914e9d9cdffe16372061e6ecc752 | 4,673 | py | Python | checkproject/runner.py | perror/checkproject | 9321470164e010778d32e24dc77c0b28eccd9429 | [
"BSD-3-Clause"
] | null | null | null | checkproject/runner.py | perror/checkproject | 9321470164e010778d32e24dc77c0b28eccd9429 | [
"BSD-3-Clause"
] | null | null | null | checkproject/runner.py | perror/checkproject | 9321470164e010778d32e24dc77c0b28eccd9429 | [
"BSD-3-Clause"
] | null | null | null | """Runner to discover, run and collect the results of all the checks."""
def import_module(module_path):
"""Import a Python file as a module in the current context.
@param module_path: Path to the Python file.
@return: A reference to the module once loaded.
"""
import os
import sys
module_filename = module_path.split(os.sep)[-1]
    if sys.version_info >= (3, 5):
        # Running a Python 3.5+ version (string-slicing sys.version breaks
        # on 3.10+, so compare sys.version_info instead)
        from importlib.util import spec_from_file_location, module_from_spec
        spec = spec_from_file_location(module_filename, module_path)
        module = module_from_spec(spec)
        spec.loader.exec_module(module)
    elif sys.version_info >= (3, 0):
        # Running a Python 3.0-3.4 version
        from importlib.machinery import SourceFileLoader
        module = SourceFileLoader(module_filename, module_path).load_module()
    else:
        # Running a Python 2 version
        import imp
        module = imp.load_source(module_filename, module_path)
return module
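# Illustrative usage (the path below is hypothetical):
# checks = import_module("/project/checks/check_style.py")
# print(checks.__name__)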
| 32.227586 | 85 | 0.596833 |
401fd2803f10b2fab1010a7dfe0776cbe8cc8571 | 11,612 | py | Python | neutron_fwaas/extensions/firewall_v2.py | sapcc/neutron-fwaas | 59bad17387d15f86ea7d08f8675208160a999ffe | [
"Apache-2.0"
] | null | null | null | neutron_fwaas/extensions/firewall_v2.py | sapcc/neutron-fwaas | 59bad17387d15f86ea7d08f8675208160a999ffe | [
"Apache-2.0"
] | null | null | null | neutron_fwaas/extensions/firewall_v2.py | sapcc/neutron-fwaas | 59bad17387d15f86ea7d08f8675208160a999ffe | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from debtcollector import moves
from neutron.api.v2 import resource_helper
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import firewall_v2
from neutron_lib.api import extensions
from neutron_lib.exceptions import firewall_v2 as f_exc
from neutron_lib.services import base as service_base
from oslo_config import cfg
import six
from neutron_fwaas._i18n import _
from neutron_fwaas.common import fwaas_constants
FirewallGroupNotFound = moves.moved_class(
f_exc.FirewallGroupNotFound, 'FirewallGroupNotFound', __name__)
FirewallGroupInUse = moves.moved_class(
f_exc.FirewallGroupInUse, 'FirewallGroupInUse', __name__)
FirewallGroupInPendingState = moves.moved_class(
f_exc.FirewallGroupInPendingState, 'FirewallGroupInPendingState', __name__)
FirewallGroupPortInvalid = moves.moved_class(
f_exc.FirewallGroupPortInvalid, 'FirewallGroupPortInvalid', __name__)
FirewallGroupPortInvalidProject = moves.moved_class(
f_exc.FirewallGroupPortInvalidProject, 'FirewallGroupPortInvalidProject',
__name__)
FirewallGroupPortInUse = moves.moved_class(
f_exc.FirewallGroupPortInUse, 'FirewallGroupPortInUse', __name__)
FirewallPolicyNotFound = moves.moved_class(
f_exc.FirewallPolicyNotFound, 'FirewallPolicyNotFound', __name__)
FirewallPolicyInUse = moves.moved_class(
f_exc.FirewallPolicyInUse, 'FirewallPolicyInUse', __name__)
FirewallPolicyConflict = moves.moved_class(
f_exc.FirewallPolicyConflict, 'FirewallPolicyConflict', __name__)
FirewallRuleSharingConflict = moves.moved_class(
f_exc.FirewallRuleSharingConflict, 'FirewallRuleSharingConflict',
__name__)
FirewallPolicySharingConflict = moves.moved_class(
f_exc.FirewallPolicySharingConflict, 'FirewallPolicySharingConflict',
__name__)
FirewallRuleNotFound = moves.moved_class(
f_exc.FirewallRuleNotFound, 'FirewallRuleNotFound', __name__)
FirewallRuleInUse = moves.moved_class(
f_exc.FirewallRuleInUse, 'FirewallRuleInUse', __name__)
FirewallRuleNotAssociatedWithPolicy = moves.moved_class(
f_exc.FirewallRuleNotAssociatedWithPolicy,
'FirewallRuleNotAssociatedWithPolicy',
__name__)
FirewallRuleInvalidProtocol = moves.moved_class(
f_exc.FirewallRuleInvalidProtocol, 'FirewallRuleInvalidProtocol',
__name__)
FirewallRuleInvalidAction = moves.moved_class(
f_exc.FirewallRuleInvalidAction, 'FirewallRuleInvalidAction',
__name__)
FirewallRuleInvalidICMPParameter = moves.moved_class(
f_exc.FirewallRuleInvalidICMPParameter,
'FirewallRuleInvalidICMPParameter', __name__)
FirewallRuleWithPortWithoutProtocolInvalid = moves.moved_class(
f_exc.FirewallRuleWithPortWithoutProtocolInvalid,
'FirewallRuleWithPortWithoutProtocolInvalid', __name__)
FirewallRuleInvalidPortValue = moves.moved_class(
f_exc.FirewallRuleInvalidPortValue, 'FirewallRuleInvalidPortValue',
__name__)
FirewallRuleInfoMissing = moves.moved_class(
f_exc.FirewallRuleInfoMissing, 'FirewallRuleInfoMissing', __name__)
FirewallIpAddressConflict = moves.moved_class(
f_exc.FirewallIpAddressConflict, 'FirewallIpAddressConflict', __name__)
FirewallInternalDriverError = moves.moved_class(
f_exc.FirewallInternalDriverError, 'FirewallInternalDriverError', __name__)
FirewallRuleConflict = moves.moved_class(
f_exc.FirewallRuleConflict, 'FirewallRuleConflict', __name__)
FirewallRuleAlreadyAssociated = moves.moved_class(
f_exc.FirewallRuleAlreadyAssociated, 'FirewallRuleAlreadyAssociated',
__name__)
default_fwg_rules_opts = [
cfg.StrOpt('ingress_action',
default=api_const.FWAAS_DENY,
help=_('Firewall group rule action allow or '
'deny or reject for ingress. '
'Default is deny.')),
cfg.StrOpt('ingress_source_ipv4_address',
default=None,
help=_('IPv4 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_ipv6_address',
default=None,
help=_('IPv6 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for ingress '
'(address or address/netmask). '
                       'Default is None.')),
cfg.StrOpt('ingress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('egress_action',
default=api_const.FWAAS_ALLOW,
help=_('Firewall group rule action allow or '
'deny or reject for egress. '
'Default is allow.')),
cfg.StrOpt('egress_source_ipv4_address',
default=None,
help=_('IPv4 source address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_source_ipv6_address',
default=None,
help=_('IPv6 source address for egress '
'(address or address/netmask). '
                       'Default is None.')),
cfg.StrOpt('egress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.StrOpt('egress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for egress '
'(address or address/netmask). '
                       'Default is None.')),
cfg.StrOpt('egress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for egress '
'(address or address/netmask). '
                       'Default is None.')),
cfg.StrOpt('egress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.BoolOpt('shared',
default=False,
help=_('Firewall group rule shared. '
'Default is False.')),
cfg.StrOpt('protocol',
default=None,
help=_('Network protocols (tcp, udp, ...). '
'Default is None.')),
cfg.BoolOpt('enabled',
default=True,
help=_('Firewall group rule enabled. '
'Default is True.')),
]
firewall_quota_opts = [
cfg.IntOpt('quota_firewall_group',
default=10,
help=_('Number of firewall groups allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_policy',
default=10,
help=_('Number of firewall policies allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_rule',
default=100,
help=_('Number of firewall rules allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(default_fwg_rules_opts, 'default_fwg_rules')
cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS')
# TODO(Reedip): Remove the convert_to functionality after bug1706061 is fixed.
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'source_port']['convert_to'] = convert_to_string
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'destination_port']['convert_to'] = convert_to_string
| 38.323432 | 79 | 0.673527 |
40202bd57c8aba134557450b58ae36c3239d01dd | 4,345 | py | Python | model_hub/model_hub/mmdetection/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | model_hub/model_hub/mmdetection/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | model_hub/model_hub/mmdetection/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | """
Various utility functions for using mmdetection in Determined that may be useful
even if not using the provided MMDetTrial.
build_fp16_loss_scaler is large derived from the original mmcv code at
https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py
mmcv is covered by the Apache 2.0 License. Copyright (c) OpenMMLab. All rights reserved.
"""
import os
from typing import Any, Dict, Tuple
import mmcv
import torch
import model_hub.utils
def get_config_pretrained_url_mapping() -> Dict[str, str]:
"""
Walks the MMDETECTION_CONFIG_DIR and creates a mapping of configs
to urls for pretrained checkpoints. The url for pretrained checkpoints
are parsed from the README files in each of the mmdetection config folders.
MMDETECTION_CONFIG_DIR is set to /mmdetection/configs in the default
determinedai/model-hub-mmdetection docker image.
"""
models = {}
config_dir = os.getenv("MMDETECTION_CONFIG_DIR")
if config_dir:
for root, _, files in os.walk(config_dir):
for f in files:
if "README" in f:
with open(os.path.join(root, f), "r") as readme:
lines = readme.readlines()
for line in lines:
if "[config]" in line:
start = line.find("[config]")
end = line.find(".py", start)
start = line.rfind("/", start, end)
config_name = line[start + 1 : end + 3]
start = line.find("[model]")
end = line.find(".pth", start)
ckpt_name = line[start + 8 : end + 4]
models[config_name] = ckpt_name
return models
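# Shape of the resulting mapping (the entry below is illustrative, not a
# real URL): {"faster_rcnn_r50_fpn_1x_coco.py": "https://.../model.pth"}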
CONFIG_TO_PRETRAINED = get_config_pretrained_url_mapping()
def get_pretrained_ckpt_path(download_directory: str, config_file: str) -> Tuple[Any, Any]:
"""
If the config_file has an associated pretrained checkpoint,
return path to downloaded checkpoint and preloaded checkpoint
Arguments:
download_directory: path to download checkpoints to
config_file: mmdet config file path for which to find and load pretrained weights
Returns:
checkpoint path, loaded checkpoint
"""
config_file = config_file.split("/")[-1]
if config_file in CONFIG_TO_PRETRAINED:
ckpt_path = model_hub.utils.download_url(
download_directory, CONFIG_TO_PRETRAINED[config_file]
)
return ckpt_path, torch.load(ckpt_path) # type: ignore
return None, None
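# Illustrative call (directory and config name are placeholders):
# ckpt_path, state_dict = get_pretrained_ckpt_path(
#     "/tmp/checkpoints", "faster_rcnn_r50_fpn_1x_coco.py")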
def build_fp16_loss_scaler(loss_scale: mmcv.Config) -> Any:
"""
    This function is derived from mmcv, which is covered by the Apache 2.0 License.
Copyright (c) OpenMMLab. All rights reserved.
Arguments:
loss_scale (float | str | dict): Scale factor configuration.
If loss_scale is a float, static loss scaling will be used with
the specified scale. If loss_scale is a string, it must be
'dynamic', then dynamic loss scaling will be used.
It can also be a dict containing arguments of GradScalar.
Defaults to 512. For PyTorch >= 1.6, mmcv uses official
implementation of GradScaler. If you use a dict version of
loss_scale to create GradScaler, please refer to:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
for the parameters.
Examples:
>>> loss_scale = dict(
... init_scale=65536.0,
... growth_factor=2.0,
... backoff_factor=0.5,
... growth_interval=2000
... )
"""
if loss_scale == "dynamic":
loss_scaler = torch.cuda.amp.GradScaler() # type: ignore
elif isinstance(loss_scale, float):
loss_scaler = torch.cuda.amp.GradScaler(init_scale=loss_scale) # type: ignore
elif isinstance(loss_scale, dict):
loss_scaler = torch.cuda.amp.GradScaler(**loss_scale) # type: ignore
else:
raise Exception(
"Cannot parse fp16 configuration. Expected cfg to be str(dynamic), float or dict."
)
return loss_scaler
| 40.990566 | 95 | 0.61519 |
40203044d0b70862532fc8cce70af574c829a8d8 | 2,465 | py | Python | gcloud/datastores/tests/STUB_test_bigquery.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 2 | 2017-03-01T20:09:06.000Z | 2019-02-08T17:10:16.000Z | gcloud/datastores/tests/STUB_test_bigquery.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 40 | 2015-10-10T15:02:21.000Z | 2020-03-17T22:32:04.000Z | gcloud/datastores/tests/STUB_test_bigquery.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 2 | 2018-11-14T21:50:58.000Z | 2022-03-07T20:59:27.000Z | """tests bigquery client"""
import unittest
from gcloud.datastores.bigquery import BigqueryClient
| 22.409091 | 78 | 0.539959 |
40219219083fe79c8f213a75f899041ef2518cf2 | 354 | py | Python | filter_hash.py | mbougarne/python-algos | f05c491903dfce95ee134852252c55c2cee1b07a | [
"MIT"
] | null | null | null | filter_hash.py | mbougarne/python-algos | f05c491903dfce95ee134852252c55c2cee1b07a | [
"MIT"
] | null | null | null | filter_hash.py | mbougarne/python-algos | f05c491903dfce95ee134852252c55c2cee1b07a | [
"MIT"
] | null | null | null | fruits = ["orange", "banana", "apple", "avocado", "kiwi", "apricot",
"cherry", "grape", "coconut", "lemon", "mango", "peach",
"pear", "strawberry", "pineapple", "apple", "orange", "pear",
"grape", "banana"
]
filters = dict()
for key in fruits:
filters[key] = 1
result = set(filters.keys())
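# Note: the dict pass above deduplicates while keeping first-occurrence
# order of the keys; when order does not matter, `set(fruits)` yields the
# same unique values directly.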
print(result) | 27.230769 | 73 | 0.536723 |
4022d54aeba2badfe2c92ef3c771f491343dff82 | 1,919 | py | Python | teste/knn.py | joandesonandrade/nebulosa | 5bc157322ed0bdb81f6f00f6ed1ea7f7a5cadfe0 | [
"MIT"
] | null | null | null | teste/knn.py | joandesonandrade/nebulosa | 5bc157322ed0bdb81f6f00f6ed1ea7f7a5cadfe0 | [
"MIT"
] | null | null | null | teste/knn.py | joandesonandrade/nebulosa | 5bc157322ed0bdb81f6f00f6ed1ea7f7a5cadfe0 | [
"MIT"
] | null | null | null | from sklearn import preprocessing
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
# Load the data as a DataFrame
dados = pd.read_csv('dados/001.csv')
# Set up the binarizer for the class labels: yes=1; no=0
pre = preprocessing.LabelBinarizer()
#Binazirando a classe jogou, e atribundo a uma matriz n-dimencional
y_binary = pre.fit_transform(dados['jogou'])
y = np.array(y_binary).ravel()
lista_clima = [x for x in dados['clima']]
lista_temperatura = [x for x in dados['temperatura']]
lista_jogou = [x for x in dados['jogou']]
pre = preprocessing.LabelEncoder()
clima_encoding = pre.fit_transform(lista_clima)
temperatura_encoding = pre.fit_transform(lista_temperatura)
jogou_encoding = pre.fit_transform(lista_jogou)
lista = list(zip(clima_encoding, temperatura_encoding, jogou_encoding))
X = np.array(lista, dtype=np.int32)
#colunas = ['A', 'B', 'C']
# print(pd.DataFrame(X, columns=colunas, dtype=np.int32))
# print(pd.DataFrame(y, columns=['Classe'], dtype=np.int32))
#
# xX = []
# for i, x in enumerate(X):
# xX.append([list(x), y[i][0]])
#
# dX = [(x[0][0] + x[0][1] + x[0][2]) for x in xX]
# dY = [x[1] for x in xX]
#
# print('Soma dos rtulos:', dX)
# print('Classe:', dY)
#
# fig, ax = plt.subplots()
# ax.plot(dX)
# ax.plot(dY)
# plt.show()
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
# Split the data: 75% for training and 25% for testing; I always use this split :)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=0)
# Build the model, leaving the parameters at their defaults
knn = KNeighborsClassifier()
# Train the model
knn.fit(X=X_train, y=y_train)
# Evaluate the model's accuracy using the test data
pontuacao = str(accuracy_score(y_test, knn.predict(X_test)) * 100)
print("Preciso: "+pontuacao+"%")
| 28.641791 | 105 | 0.727983 |
40255e51d495409353d842161452761a11a4b039 | 8,940 | py | Python | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | m-mayran/pipelines | 4e89973504980ff89d896fda09fc29a339b2d744 | [
"Apache-2.0"
] | null | null | null | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | m-mayran/pipelines | 4e89973504980ff89d896fda09fc29a339b2d744 | [
"Apache-2.0"
] | null | null | null | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | m-mayran/pipelines | 4e89973504980ff89d896fda09fc29a339b2d744 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Vertex AI Batch Prediction Job Remote Runner Client module."""
import json
import os
import time
import unittest
from unittest import mock
from google.cloud import aiplatform
from google.cloud.aiplatform.compat.types import job_state as gca_job_state
from google.protobuf import json_format
from google_cloud_pipeline_components.proto.gcp_resources_pb2 import GcpResources
from google_cloud_pipeline_components.container.experimental.gcp_launcher import batch_prediction_job_remote_runner
from google_cloud_pipeline_components.container.experimental.gcp_launcher import job_remote_runner
| 45.380711 | 115 | 0.794519 |
4027f13cd3b7d7bc0f1afe366ba2f0949bed351e | 805 | py | Python | rotypes/Windows/Storage/Streams/__init__.py | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | [
"MIT"
] | 18 | 2022-03-18T08:20:28.000Z | 2022-03-31T15:19:15.000Z | rotypes/Windows/Storage/Streams/__init__.py | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | [
"MIT"
] | 4 | 2021-01-23T13:05:39.000Z | 2021-09-11T14:29:56.000Z | rotypes/Windows/Storage/Streams/__init__.py | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | [
"MIT"
] | 1 | 2022-03-20T05:52:38.000Z | 2022-03-20T05:52:38.000Z | from ctypes import c_uint32, c_void_p, string_at
from rotypes.idldsl import define_winrt_com_method, GUID
from rotypes.inspectable import IInspectable, IUnknown
define_winrt_com_method(IBufferByteAccess, 'Buffer', retval=c_void_p)
define_winrt_com_method(IBuffer, 'get_Capacity', propget=c_uint32)
define_winrt_com_method(IBuffer, 'get_Length', propget=c_uint32)
define_winrt_com_method(IBuffer, 'put_Length', propput=c_uint32)
| 27.758621 | 69 | 0.773913 |
40293f7dca9ef672564fb8730fe1d23ecd590f2b | 23,410 | py | Python | simple_playgrounds/playground.py | Asjidkalam/simple-playgrounds | 72ec42987a33175103191fa9722e0e002f889954 | ["MIT"] | null | null | null | simple_playgrounds/playground.py | Asjidkalam/simple-playgrounds | 72ec42987a33175103191fa9722e0e002f889954 | ["MIT"] | null | null | null | simple_playgrounds/playground.py | Asjidkalam/simple-playgrounds | 72ec42987a33175103191fa9722e0e002f889954 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
""" Playground documentation.
Module defining Playground Base Class
"""
import os
from abc import ABC
import yaml
import pymunk
from .utils import PositionAreaSampler
from .utils.definitions import SPACE_DAMPING, CollisionTypes, SceneElementTypes
# pylint: disable=unused-argument
# pylint: disable=line-too-long
def reset(self):
""" Reset the Playground to its initial state.
"""
# remove entities and filter out entities which are temporary
for entity in self.scene_elements.copy():
self.remove_scene_element(entity)
# reset and replace entities that are not temporary
for entity in self._disappeared_scene_elements.copy():
entity.reset()
self.add_scene_element(entity)
# reset fields
for entity in self.fields:
entity.reset()
# reset agents
for agent in self.agents.copy():
agent.reset()
self.remove_agent(agent)
self.add_agent(agent)
self.done = False
def add_agent(self, new_agent, tries=100):
""" Method to add an Agent to the Playground.
If the Agent has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Args:
new_agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the agent
"""
# If already there
if new_agent in self.scene_elements:
raise ValueError('Agent already in Playground')
# Inform agent of the playground size
new_agent.size_playground = self.size
if new_agent.allow_overlapping:
self._add_agent(new_agent)
else:
success = self._add_agent_without_ovelapping(new_agent, tries=tries)
if not success:
raise ValueError("Agent couldn't be placed without overlapping")
def _add_agent(self, agent):
""" Add an agent to the playground.
Args:
agent: Agent.
"""
self.agents.append(agent)
if agent.initial_position is not None:
pass
elif self.initial_agent_position is not None:
agent.initial_position = self.initial_agent_position
else:
raise ValueError("""Agent initial position should be defined in the playground or passed as an argument
to the class agent""")
agent.position = agent.initial_position
for body_part in agent.parts:
self.space.add(*body_part.pm_elements)
def _add_agent_without_ovelapping(self, agent, tries=100):
""" Method to add an Agent to the Playground without overlapping.
Useful when an Agent has a random initial position, to avoid overlapping.
Args:
agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
trial = 0
visible_collide_parts = True
interactive_collide_parts = True
all_shapes = self.space.shapes.copy()
while (interactive_collide_parts or visible_collide_parts) and trial < tries:
self._add_agent(agent)
visible_collide_parts = False
interactive_collide_parts = False
for part in agent.parts:
visible_collide = False
interactive_collide = False
if part.pm_visible_shape is not None:
collisions = [part.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if part.pm_interaction_shape is not None:
collisions = [part.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
visible_collide_parts = visible_collide or visible_collide_parts
interactive_collide_parts = interactive_collide or interactive_collide_parts
if visible_collide_parts or interactive_collide_parts:
self.remove_agent(agent)
trial += 1
if interactive_collide_parts or visible_collide_parts:
return False
return True
def _add_scene_element(self, new_scene_element, new_position):
""" Method to add a SceneElement to the Playground.
"""
if new_scene_element in self.scene_elements:
raise ValueError('Scene element already in Playground')
new_scene_element.size_playground = self.size
if new_position:
new_scene_element.position = new_scene_element.initial_position
self.space.add(*new_scene_element.pm_elements)
self.scene_elements.append(new_scene_element)
if new_scene_element in self._disappeared_scene_elements:
self._disappeared_scene_elements.remove(new_scene_element)
def add_scene_element(self, scene_element, tries=100, new_position=True):
""" Method to add a SceneElement to the Playground.
If the Element has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Useful when a SceneElement has a random initial position, to avoid overlapping.
Args:
scene_element: Scene Element to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
if scene_element.entity_type is SceneElementTypes.FIELD:
# If already there
if scene_element in self.fields:
raise ValueError('Field already in Playground')
self.fields.append(scene_element)
else:
if scene_element in self.scene_elements:
raise ValueError('Scene element already in Playground')
# Else
scene_element.size_playground = self.size
if scene_element.allow_overlapping:
self._add_scene_element(scene_element, new_position)
else:
success = self._add_scene_element_without_ovelapping(scene_element, tries=tries, new_position=new_position)
if not success:
raise ValueError('Entity could not be placed without overlapping')
def get_scene_element_from_shape(self, pm_shape):
"""
Returns: Returns the Scene Element associated with the pymunk shape.
"""
entity = next(iter([e for e in self.scene_elements if pm_shape in e.pm_elements]), None)
return entity
def get_agent_from_shape(self, pm_shape):
"""
Returns: Returns the Agent associated with the pymunk shape.
"""
for agent in self.agents:
if agent.owns_shape(pm_shape):
return agent
return None
def get_entity_from_shape(self, pm_shape):
"""
Returns the element associated with the pymunk shape
Args:
pm_shape: Pymunk shape
Returns:
Single entitiy or None
"""
scene_element = self.get_scene_element_from_shape(pm_shape)
if scene_element is not None: return scene_element
for agent in self.agents:
part = agent.get_bodypart_from_shape(pm_shape)
if part is not None: return part
return None
def add_interaction(self, collision_type_1, collision_type_2, interaction_function):
"""
Args:
collision_type_1: collision type of the first entity
collision_type_2: collision type of the second entity
interaction_function: function that handles the interaction
Returns: None
"""
handler = self.space.add_collision_handler(collision_type_1, collision_type_2)
handler.pre_solve = interaction_function
class PlaygroundRegister:
"""
Class to register Playgrounds.
"""
playgrounds = {}
| 31.893733 | 125 | 0.641948 |
402b9f4345d8a408ad36e88d31b1b6668765cd8b | 2,679 | py | Python | UEManifestReader/classes/FManifestData.py | ryryburge/UEManifestReader | 970b24dd80fc6b5d599d1bd77de78a1b19f4432e | ["MIT"] | null | null | null | UEManifestReader/classes/FManifestData.py | ryryburge/UEManifestReader | 970b24dd80fc6b5d599d1bd77de78a1b19f4432e | ["MIT"] | null | null | null | UEManifestReader/classes/FManifestData.py | ryryburge/UEManifestReader | 970b24dd80fc6b5d599d1bd77de78a1b19f4432e | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import zlib
from UEManifestReader.enums import *
from UEManifestReader.classes.FCustomFields import FCustomFields
from UEManifestReader.classes.FManifestMeta import FManifestMeta
from UEManifestReader.classes.FChunkDataList import FChunkDataList
from UEManifestReader.classes.FManifestHeader import FManifestHeader
from UEManifestReader.classes.stream_reader import ConstBitStreamWrapper
from UEManifestReader.classes.FFileManifestList import FFileManifestList
# FManifestData - The public interface to load/saving manifest files.
| 46.189655 | 117 | 0.711459 |
402ba89b6c4bbf8923f29b3e69bf5634d07e5b15 | 98 | py | Python | Python/module.py | minjibyeongho/KOSA-Pytorch | 80d71a8c579d645bea4c3352c9babdf232a8630e | ["MIT"] | 2 | 2021-05-25T08:52:07.000Z | 2021-08-13T23:49:42.000Z | Python/module.py | minjibyeongho/KOSA-Pytorch | 80d71a8c579d645bea4c3352c9babdf232a8630e | ["MIT"] | null | null | null | Python/module.py | minjibyeongho/KOSA-Pytorch | 80d71a8c579d645bea4c3352c9babdf232a8630e | ["MIT"] | 2 | 2021-05-24T00:49:45.000Z | 2021-06-11T01:30:12.000Z |
#module.py
#if __name__=="__main__":
# print(__name__) | 14 | 26 | 0.581633 |
402c6d1527bb64bf420904254134ab7105236ec8 | 10,690 | py | Python | data_utils.py | algoprog/Quin | c1fd3b8e5e2163217f6c8062620ee0c1dfeed0e8 | ["MIT"] | 47 | 2020-08-02T12:28:07.000Z | 2022-03-30T01:56:57.000Z | data_utils.py | algoprog/Quin | c1fd3b8e5e2163217f6c8062620ee0c1dfeed0e8 | ["MIT"] | 4 | 2020-09-20T17:31:51.000Z | 2021-12-02T17:40:03.000Z | data_utils.py | algoprog/Quin | c1fd3b8e5e2163217f6c8062620ee0c1dfeed0e8 | ["MIT"] | 4 | 2020-11-23T15:47:34.000Z | 2021-03-30T02:02:02.000Z |
import csv
import json
import pickle
import logging
import re
import pandas
import gzip
import os
import numpy as np
from random import randint, random
from tqdm import tqdm
from retriever.dense_retriever import DenseRetriever
from models.tokenization import tokenize
from typing import Union, List
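# NOTE: InputExample is used throughout this module but its import is not
# shown in this fragment; a minimal stand-in follows, assuming it mirrors the
# guid/texts/label container from sentence-transformers.
class InputExample:
    def __init__(self, guid='', texts=None, label=0):
        self.guid = guid
        self.texts = texts
        self.label = label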
def get_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
guid = "%s-%d" % (filename, id)
id += 1
if label == 'entailment':
label = 0
elif label == 'contradiction':
label = 1
else:
label = 2
examples.append(InputExample(guid=guid,
texts=[sample['s1'], sample['s2']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def get_qa_examples(filename, max_examples=0, dev=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['relevant']
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if not dev:
if label == 1:
for _ in range(13):
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def map_label(label):
labels = {"relevant": 0, "irrelevant": 1}
return labels[label.strip().lower()]
def get_qar_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_qar_artificial_examples():
examples = []
id = 0
print('Loading passages...')
passages = []
file = open('data/msmarco/collection.tsv', 'r', encoding='utf8')
while True:
line = file.readline()
if not line:
break
line = line.rstrip('\n').split('\t')
passages.append(line[1])
print('Loaded passages')
with open('data/qar/qar_artificial_queries.csv') as f:
for i, line in enumerate(f):
queries = line.rstrip('\n').split('|')
for query in queries:
guid = "%s-%d" % ('', id)
id += 1
examples.append(InputExample(guid=guid,
texts=[query, passages[i]],
label=1.0))
return examples
def get_single_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['text']],
label=1))
if 0 < max_examples <= len(examples):
break
return examples
def get_qnli_examples(filename, max_examples=0, no_contradictions=False, fever_only=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
if label == 'contradiction' and no_contradictions:
continue
if sample['evidence'] == '':
continue
if fever_only and sample['source'] != 'fever':
continue
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['statement'].strip(), sample['evidence'].strip()],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_retrieval_examples(filename, negative_corpus='data/msmarco/collection.tsv', max_examples=0, no_statements=True,
encoder_model=None, negative_samples_num=4):
examples = []
queries = []
passages = []
negative_passages = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
if 'evidence' in sample and sample['evidence'] == '':
continue
guid = "%s-%d" % (filename, id)
id += 1
if sample['type'] == 'question':
query = sample['question']
passage = sample['answer']
else:
query = sample['statement']
passage = sample['evidence']
query = query.strip()
passage = passage.strip()
if sample['type'] == 'statement' and no_statements:
continue
queries.append(query)
passages.append(passage)
if sample['source'] == 'natural-questions':
negative_passages.append(passage)
if max_examples == len(passages):
break
if encoder_model is not None:
# Load MSMARCO passages
logging.info('Loading MSM passages...')
with open(negative_corpus) as file:
for line in file:
p = line.rstrip('\n').split('\t')[1]
negative_passages.append(p)
logging.info('Building ANN index...')
dense_retriever = DenseRetriever(model=encoder_model, batch_size=1024, use_gpu=True)
dense_retriever.create_index_from_documents(negative_passages)
results = dense_retriever.search(queries=queries, limit=100, probes=256)
negative_samples = [
[negative_passages[p[0]] for p in r if negative_passages[p[0]] != passages[i]][:negative_samples_num]
for i, r in enumerate(results)
]
# print(queries[0])
# print(negative_samples[0][0])
for i in range(len(queries)):
texts = [queries[i], passages[i]] + negative_samples[i]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
else:
for i in range(len(queries)):
texts = [queries[i], passages[i]]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
return examples
def get_pair_input(tokenizer, sent1, sent2, max_len=256):
text = "[CLS] {} [SEP] {} [SEP]".format(sent1, sent2)
tokenized_text = tokenizer.tokenize(text)[:max_len]
indexed_tokens = tokenizer.encode(text)[:max_len]
segments_ids = []
sep_flag = False
for i in range(len(tokenized_text)):
if tokenized_text[i] == '[SEP]' and not sep_flag:
segments_ids.append(0)
sep_flag = True
elif sep_flag:
segments_ids.append(1)
else:
segments_ids.append(0)
return indexed_tokens, segments_ids
def build_batch(tokenizer, text_list, max_len=256):
token_id_list = []
segment_list = []
attention_masks = []
longest = -1
for pair in text_list:
sent1, sent2 = pair
ids, segs = get_pair_input(tokenizer, sent1, sent2, max_len=max_len)
if ids is None or segs is None:
continue
token_id_list.append(ids)
segment_list.append(segs)
attention_masks.append([1] * len(ids))
if len(ids) > longest:
longest = len(ids)
if len(token_id_list) == 0:
return None, None, None
# padding
assert (len(token_id_list) == len(segment_list))
for ii in range(len(token_id_list)):
token_id_list[ii] += [0] * (longest - len(token_id_list[ii]))
# pad positions must be masked out, so extend the attention mask with 0s
attention_masks[ii] += [0] * (longest - len(attention_masks[ii]))
segment_list[ii] += [1] * (longest - len(segment_list[ii]))
return token_id_list, segment_list, attention_masks
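# Example usage sketch (assumes a HuggingFace-style BERT tokenizer exposing
# `tokenize` and `encode`, as get_pair_input above requires):
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   ids, segments, masks = build_batch(tokenizer, [('a question', 'an answer')])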
def load_unsupervised_dataset(dataset_file):
print('Loading dataset...')
x = pickle.load(open(dataset_file, "rb"))
print('Done')
return x, len(x[0])
def load_supervised_dataset(dataset_file):
print('Loading dataset...')
d = pickle.load(open(dataset_file, "rb"))
print('Done')
return d[0], d[1]
| 31.627219 | 119 | 0.528718 |
402d9bbc776d0b10c128c8af7e8de8955e864e57 | 327 | py | Python | hc/accounts/migrations/0025_remove_member_team.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | ["BSD-3-Clause"] | null | null | null | hc/accounts/migrations/0025_remove_member_team.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | ["BSD-3-Clause"] | 1 | 2021-06-10T23:14:00.000Z | 2021-06-10T23:14:00.000Z | hc/accounts/migrations/0025_remove_member_team.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | ["BSD-3-Clause"] | null | null | null |
# Generated by Django 2.1.5 on 2019-01-22 08:33
from django.db import migrations
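# The Migration class body is not shown in this fragment; the sketch below is
# reconstructed from the migration's file name (0025_remove_member_team):
# it removes the `team` field from the `member` model. The dependency name is
# hypothetical.
class Migration(migrations.Migration):
    dependencies = [
        ('accounts', '0024_previous_migration'),  # hypothetical predecessor
    ]
    operations = [
        migrations.RemoveField(
            model_name='member',
            name='team',
        ),
    ]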
| 18.166667 | 48 | 0.590214 |
402da872d024b72e61193b2048d5c5fe8a54f2e6 | 6,671 | py | Python | openstack-dashboard/openstack_dashboard/api/proxy.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | ["Apache-2.0"] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | openstack-dashboard/openstack_dashboard/api/proxy.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | ["Apache-2.0"] | null | null | null | openstack-dashboard/openstack_dashboard/api/proxy.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | ["Apache-2.0"] | null | null | null |
from django.conf import settings
#from proxyclient.v2 import client as proxy_client
from openstack_dashboard.utils import proxy_client
#
#def logout(request):
# _proxy(request).logout(request.user.id)
#
| 35.296296 | 86 | 0.746665 |
402e10f9fc439179bd0a8ffc8b3cd706de061b93 | 251 | py | Python | tfl_data.py | dongyan1024/overtime | 4f722a823585890026fe9584ba5985963b2a586c | ["MIT"] | 9 | 2020-10-15T13:53:36.000Z | 2022-03-08T12:08:09.000Z | tfl_data.py | dongyan1024/overtime | 4f722a823585890026fe9584ba5985963b2a586c | ["MIT"] | 6 | 2021-02-07T15:43:12.000Z | 2021-04-24T04:03:39.000Z | tfl_data.py | dongyan1024/overtime | 4f722a823585890026fe9584ba5985963b2a586c | ["MIT"] | 7 | 2020-10-15T13:55:12.000Z | 2022-03-12T03:54:02.000Z |
import overtime as ot
times = ['14:00','14:05', '14:10', '14:15', '14:20', '14:25', '14:30', '14:35', '14:40', '14:45', '14:50', '14:55']
tfl_data = ot.TflInput(['victoria', 'central', 'bakerloo', 'piccadilly'], ['inbound', 'outbound'], times)
| 41.833333 | 116 | 0.565737 |
402eafa1a88db63bd7cacd91e03e8377d8b8d5d8 | 2,375 | py | Python | apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py | opendatacube/odc-tools | 42950e93305846b640a1c6135c9da16ba76c1b3a | ["Apache-2.0"] | 29 | 2019-09-18T10:21:07.000Z | 2022-03-10T07:46:57.000Z | apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py | opendatacube/odc-tools | 42950e93305846b640a1c6135c9da16ba76c1b3a | ["Apache-2.0"] | 259 | 2019-12-11T03:19:01.000Z | 2022-03-31T22:46:11.000Z | apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py | opendatacube/odc-tools | 42950e93305846b640a1c6135c9da16ba76c1b3a | ["Apache-2.0"] | 18 | 2020-01-22T14:50:27.000Z | 2022-03-01T14:48:12.000Z |
import json
from pathlib import Path
import click
import datacube
from datacube.index.hl import Doc2Dataset
from odc.apps.dc_tools.utils import (
index_update_dataset,
update_if_exists,
allow_unsafe,
transform_stac,
)
from ._stac import stac_transform
from typing import Generator, Optional
import logging
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
logging.basicConfig(
level=logging.WARNING,
format="%(asctime)s: %(levelname)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S",
)
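# The click command body is not shown in this fragment; a minimal sketch
# follows. It assumes `update_if_exists`, `allow_unsafe` and `transform_stac`
# (imported above) are pre-built click option decorators, and that
# `index_update_dataset` accepts the keyword arguments used here - all of this
# is reconstruction, not the original CLI.
@click.command("fs-to-dc")
@click.argument("input_directory", type=str, nargs=1)
@update_if_exists
@allow_unsafe
@transform_stac
def cli(input_directory, update_if_exists, allow_unsafe, stac):
    dc = datacube.Datacube()
    doc2ds = Doc2Dataset(dc.index)
    for path in Path(input_directory).glob("**/*.yaml"):
        with path.open() as f:
            metadata = yaml.load(f, Loader=Loader)
        if stac:
            metadata = stac_transform(metadata)
        index_update_dataset(metadata, path.absolute().as_uri(), dc, doc2ds,
                             update_if_exists=update_if_exists,
                             allow_unsafe=allow_unsafe)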
if __name__ == "__main__":
cli()
| 26.388889 | 84 | 0.596211 |
4030d959e7cf60e57a2223602eae1667433715a2 | 651 | py | Python | scripts/fullizer.py | stijm/jazzjackrabbit2 | e47f1c42fd7c450c2e12bcb7dcaae0f695a0dc12 | ["MIT"] | 5 | 2021-08-03T20:02:00.000Z | 2021-11-19T20:29:36.000Z | scripts/fullizer.py | stijm/jj2 | e47f1c42fd7c450c2e12bcb7dcaae0f695a0dc12 | ["MIT"] | null | null | null | scripts/fullizer.py | stijm/jj2 | e47f1c42fd7c450c2e12bcb7dcaae0f695a0dc12 | ["MIT"] | null | null | null |
"""
WARNING:
Using this script outside any server except one with IP 127.0.0.1 means risking getting
an instant and permanent ban, anywhere you use it.
The script was created *ONLY FOR LOCAL* testing purposes.
NEVER, NEVER, *NEVER* run it in an online multiplayer server.
At least unless you're a dumb freak.
"""
import multiprocessing
import time
from scripts import play
if __name__ == '__main__':
for i in range(1, 33):
process = multiprocessing.Process(
target=play,
kwargs=dict(nick=f'Player {i}', connect=['127.0.0.1'], new_sgip=False),
)
process.start()
time.sleep(0.09)
| 26.04 | 91 | 0.666667 |
403251bad5543a2ea9b5b81f85773876a2b6f3ba | 1,458 | py | Python | setup.py | pranithk/gluster-georep-tools | 3c8c7dcf63042613b002385edcead7c1ec079e61 | ["MIT"] | null | null | null | setup.py | pranithk/gluster-georep-tools | 3c8c7dcf63042613b002385edcead7c1ec079e61 | ["MIT"] | null | null | null | setup.py | pranithk/gluster-georep-tools | 3c8c7dcf63042613b002385edcead7c1ec079e61 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
gluster-georep-tools.setup.py
:copyright: (c) 2016 by Aravinda VK
:license: MIT, see LICENSE for more details.
"""
from setuptools import setup
setup(
name="gluster-georep-tools",
version="0.2",
packages=["gluster_georep_tools",
"gluster_georep_tools.status",
"gluster_georep_tools.setup"],
include_package_data=True,
install_requires=['argparse', 'paramiko', 'glustercli'],
entry_points={
"console_scripts": [
"gluster-georep-setup = gluster_georep_tools.setup.cli:main",
"gluster-georep-status = gluster_georep_tools.status.cli:main",
]
},
platforms="linux",
zip_safe=False,
author="Aravinda VK",
author_email="mail@aravindavk.in",
description="Gluster Geo-replication tools",
license="MIT",
keywords="gluster, tool, geo-replication",
url="https://github.com/aravindavk/gluster-georep-tools",
long_description="""
Gluster Geo-replication Tools
""",
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only"
],
)
| 30.375 | 75 | 0.61454 |
403346598a2baf176ef8cdcf1186f9c5ce45137d | 14,184 | py | Python | docs/_downloads/dbc5873471dad3c21022112121cbd008/tensorboard_profiler_tutorial.py | woojinsong/PyTorch-tutorials-kr | 36fefd556f45c2b1f5db912793172c0369430fd4 | ["BSD-3-Clause"] | 221 | 2018-04-06T01:42:58.000Z | 2021-11-28T10:12:45.000Z | intermediate_source/tensorboard_profiler_tutorial.py | konlidoo/tutorials | 75b1c673a73ca285a16f52a62fc8ffcc6d069936 | ["BSD-3-Clause"] | 280 | 2018-05-25T08:53:21.000Z | 2021-12-02T05:37:25.000Z | intermediate_source/tensorboard_profiler_tutorial.py | konlidoo/tutorials | 75b1c673a73ca285a16f52a62fc8ffcc6d069936 | ["BSD-3-Clause"] | 181 | 2018-05-25T02:00:28.000Z | 2021-11-19T11:56:39.000Z |
"""
PyTorch Profiler With TensorBoard
====================================
This tutorial demonstrates how to use TensorBoard plugin with PyTorch Profiler
to detect performance bottlenecks of the model.
Introduction
------------
PyTorch 1.8 includes an updated profiler API capable of
recording the CPU side operations as well as the CUDA kernel launches on the GPU side.
The profiler can visualize this information
in TensorBoard Plugin and provide analysis of the performance bottlenecks.
In this tutorial, we will use a simple Resnet model to demonstrate how to
use TensorBoard plugin to analyze model performance.
Setup
-----
To install ``torch`` and ``torchvision`` use the following command:
::
pip install torch torchvision
"""
######################################################################
# Steps
# -----
#
# 1. Prepare the data and model
# 2. Use profiler to record execution events
# 3. Run the profiler
# 4. Use TensorBoard to view results and analyze model performance
# 5. Improve performance with the help of profiler
# 6. Analyze performance with other advanced features
#
# 1. Prepare the data and model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# First, import all necessary libraries:
#
import torch
import torch.nn
import torch.optim
import torch.profiler
import torch.utils.data
import torchvision.datasets
import torchvision.models
import torchvision.transforms as T
######################################################################
# Then prepare the input data. For this tutorial, we use the CIFAR10 dataset.
# Transform it to the desired format and use DataLoader to load each batch.
transform = T.Compose(
[T.Resize(224),
T.ToTensor(),
T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
######################################################################
# Next, create Resnet model, loss function, and optimizer objects.
# To run on GPU, move model and loss to GPU device.
device = torch.device("cuda:0")
model = torchvision.models.resnet18(pretrained=True).cuda(device)
criterion = torch.nn.CrossEntropyLoss().cuda(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
model.train()
######################################################################
# Define the training step for each batch of input data.
######################################################################
# 2. Use profiler to record execution events
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The profiler is enabled through the context manager and accepts several parameters,
# some of the most useful are:
#
# - ``schedule`` - callable that takes step (int) as a single parameter
# and returns the profiler action to perform at each step.
#
# In this example with ``wait=1, warmup=1, active=3, repeat=2``,
# profiler will skip the first step/iteration,
# start warming up on the second,
# record the following three iterations,
# after which the trace will become available and on_trace_ready (when set) is called.
# In total, the cycle repeats twice. Each cycle is called a "span" in TensorBoard plugin.
#
# During ``wait`` steps, the profiler is disabled.
# During ``warmup`` steps, the profiler starts tracing but the results are discarded.
# This is for reducing the profiling overhead.
# The overhead at the beginning of profiling is high and easy to bring skew to the profiling result.
# During ``active`` steps, the profiler works and records events.
# - ``on_trace_ready`` - callable that is called at the end of each cycle;
# In this example we use ``torch.profiler.tensorboard_trace_handler`` to generate result files for TensorBoard.
# After profiling, result files will be saved into the ``./log/resnet18`` directory.
# Specify this directory as a ``logdir`` parameter to analyze profile in TensorBoard.
# - ``record_shapes`` - whether to record shapes of the operator inputs.
# - ``profile_memory`` - Track tensor memory allocation/deallocation.
# - ``with_stack`` - Record source information (file and line number) for the ops.
# If the TensorBoard is launched in VSCode (`reference <https://code.visualstudio.com/docs/datascience/pytorch-support#_tensorboard-integration>`_),
# clicking a stack frame will navigate to the specific code line.
with torch.profiler.profile(
schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
record_shapes=True,
with_stack=True
) as prof:
for step, batch_data in enumerate(train_loader):
if step >= (1 + 1 + 3) * 2:
break
train(batch_data)
prof.step() # Need to call this at the end of each step to notify profiler of steps' boundary.
######################################################################
# 3. Run the profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Run the above code. The profiling result will be saved under ``./log/resnet18`` directory.
######################################################################
# 4. Use TensorBoard to view results and analyze model performance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Install PyTorch Profiler TensorBoard Plugin.
#
# ::
#
# pip install torch_tb_profiler
#
######################################################################
# Launch the TensorBoard.
#
# ::
#
# tensorboard --logdir=./log
#
######################################################################
# Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser.
#
# ::
#
# http://localhost:6006/#pytorch_profiler
#
######################################################################
# You could see Profiler plugin page as shown below.
#
# - Overview
# .. image:: ../../_static/img/profiler_overview1.png
# :scale: 25 %
#
# The overview shows a high-level summary of model performance.
#
# The "GPU Summary" panel shows the GPU configuration and the GPU usage.
# In this example, the GPU Utilization is low.
# The details of these metrics are `here <https://github.com/guyang3532/kineto/blob/readme/tb_plugin/docs/gpu_utilization.md>`_.
#
# The "Step Time Breakdown" shows distribution of time spent in each step over different categories of execution.
# In this example, you can see the ``DataLoader`` overhead is significant.
#
# The bottom "Performance Recommendation" uses the profiling data
# to automatically highlight likely bottlenecks,
# and gives you actionable optimization suggestions.
#
# You can change the view page in the left "Views" dropdown list.
#
# .. image:: ../../_static/img/profiler_views_list.png
# :alt:
#
#
# - Operator view
# The operator view displays the performance of every PyTorch operator
# that is executed either on the host or device.
#
# .. image:: ../../_static/img/profiler_operator_view.png
# :scale: 25 %
# The "Self" duration does not include its child operators time.
# The "Total" duration includes its child operators time.
#
# - View call stack
# Click the "View Callstack" of an operator, the operators with same name but different call stacks will be shown.
# Then click a "View Callstack" in this sub-table, the call stack frames will be shown.
#
# .. image:: ../../_static/img/profiler_callstack.png
# :scale: 25 %
#
# If the TensorBoard is launched inside VSCode
# (`Launch Guide <https://devblogs.microsoft.com/python/python-in-visual-studio-code-february-2021-release/#tensorboard-integration>`_),
# clicking a call stack frame will navigate to the specific code line.
#
# .. image:: ../../_static/img/profiler_vscode.png
# :scale: 25 %
#
#
# - Kernel view
# The GPU kernel view shows all kernels time spent on GPU.
#
# .. image:: ../../_static/img/profiler_kernel_view.png
# :scale: 25 %
# Mean Blocks per SM:
# Blocks per SM = Blocks of this kernel / SM number of this GPU.
# If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized.
#      "Mean Blocks per SM" is the weighted average of all runs of this kernel name, using each run's duration as the weight.
#
# Mean Est. Achieved Occupancy:
#      Est. Achieved Occupancy is defined in this column's tooltip.
# For most cases such as memory bandwidth bounded kernels, the higher the better.
# "Mean Est. Achieved Occupancy" is weighted average of all runs of this kernel name,
#      using each run's duration as the weight.
#
# - Trace view
# The trace view shows timeline of profiled operators and GPU kernels.
# You can select it to see details as below.
#
# .. image:: ../../_static/img/profiler_trace_view1.png
# :scale: 25 %
#
# You can move the graph and zoom in/out with the help of right side toolbar.
# And keyboard can also be used to zoom and move around inside the timeline.
# The w and s keys zoom in centered around the mouse,
# and the a and d keys move the timeline left and right.
# You can hit these keys multiple times until you see a readable representation.
#
# In this example, we can see the event prefixed with ``enumerate(DataLoader)`` costs a lot of time.
# And during most of this period, the GPU is idle.
# Because this function is loading data and transforming data on host side,
# during which the GPU resource is wasted.
######################################################################
# 5. Improve performance with the help of profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# At the bottom of "Overview" page, the suggestion in "Performance Recommendation" hints the bottleneck is DataLoader.
# The PyTorch DataLoader uses single process by default.
# User could enable multi-process data loading by setting the parameter ``num_workers``.
# `Here <https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading>`_ is more details.
#
# In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
# pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
#
# ::
#
# train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
#
######################################################################
# Then let's choose the recently profiled run in the left "Runs" dropdown list.
#
# .. image:: ../../_static/img/profiler_overview2.png
# :scale: 25 %
#
# From the above view, we can see the step time is reduced to about 58ms compared with the previous run's 121ms,
# with the reduction in ``DataLoader`` time contributing most.
#
# .. image:: ../../_static/img/profiler_trace_view2.png
# :scale: 25 %
#
# From the above view, we can see that the runtime of ``enumerate(DataLoader)`` is reduced,
# and the GPU utilization is increased.
######################################################################
# 6. Analyze performance with other advanced features
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - Memory view
# To profile memory, please add ``profile_memory=True`` in arguments of ``torch.profiler.profile``.
#
# Note: Because of the current non-optimized implementation of PyTorch profiler,
# enabling ``profile_memory=True`` will take about several minutes to finish.
# To save time, you can try our existing examples first by running:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo
#
# The profiler records all memory allocation/release events during profiling.
# For every specific operator, the plugin aggregates all these memory events inside its life span.
#
# .. image:: ../../_static/img/profiler_memory_view.png
# :scale: 25 %
#
# The memory type can be selected in the "Device" selection box.
# For example, "GPU0" means the following table only shows each operator's memory usage on GPU 0, not including CPU or other GPUs.
#
# The "Size Increase" sums up all allocation bytes and minus all the memory release bytes.
#
# The "Allocation Size" sums up all allocation bytes without considering the memory release.
#
# - Distributed view
# The plugin now supports distributed view on profiling DDP with NCCL as backend.
#
# You can try it by using existing example on Azure:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
#
# .. image:: ../../_static/img/profiler_distributed_view.png
# :scale: 25 %
#
# The "Computation/Communication Overview" shows computation/communication ratio and their overlapping degree.
# From this view, users can identify load-balancing issues among workers.
# For example, if the computation + overlapping time of one worker is much larger than that of others,
# there may be a load-balancing problem, or this worker may be a straggler.
#
# The "Synchronizing/Communication Overview" shows the efficiency of communication.
# "Data Transfer Time" is the time for actual data exchanging.
# "Synchronizing Time" is the time for waiting and synchronizing with other workers.
#
# If one worker's "Synchronizing Time" is much shorter than that of other workers,
# this worker may be a straggler which may have more computation workload than other workers.
#
# The "Communication Operations Stats" summarizes the detailed statistics of all communication ops in each worker.
######################################################################
# Learn More
# ----------
#
# Take a look at the following documents to continue your learning,
# and feel free to open an issue `here <https://github.com/pytorch/kineto/issues>`_.
#
# - `Pytorch TensorBoard Profiler github <https://github.com/pytorch/kineto/tree/master/tb_plugin>`_
# - `torch.profiler API <https://pytorch.org/docs/master/profiler.html>`_
| 40.758621 | 150 | 0.666314 |
403352816f5874a59e3b9fffa9b383a34c03d749 | 311 | py | Python | imgtoch/__init__.py | hrpzcf/imgtoch | 13b59dd4c6b65b8ee17bbd22ac1133a86d34d5fb | ["MIT"] | null | null | null | imgtoch/__init__.py | hrpzcf/imgtoch | 13b59dd4c6b65b8ee17bbd22ac1133a86d34d5fb | ["MIT"] | null | null | null | imgtoch/__init__.py | hrpzcf/imgtoch | 13b59dd4c6b65b8ee17bbd22ac1133a86d34d5fb | ["MIT"] | null | null | null |
# coding: utf-8
from .__utils__ import grayscaleOf, makeImage, sortByGrayscale
NAME = "imgtoch"
VERSIONNUM = 0, 2, 3
VERSION = ".".join(map(str, VERSIONNUM))
AUTHOR = "hrpzcf"
EMAIL = "hrpzcf@foxmail.com"
WEBSITE = "https://gitee.com/hrpzcf/imgtoch"
__all__ = ["grayscaleOf", "makeImage", "sortByGrayscale"]
| 23.923077 | 62 | 0.717042 |
40339ee3fc200a5b40a0b837adca77cf33b0c95c | 4,298 | py | Python | packages/gradient_boosting_model/gradient_boosting_model/processing/validation.py | g-nightingale/testing-and-monitoring-ml-deployments | 770d2889968e7195dba1697c164b3344cff3c5ee | ["BSD-3-Clause"] | 99 | 2019-11-14T11:58:51.000Z | 2022-03-19T14:23:17.000Z | packages/gradient_boosting_model/gradient_boosting_model/processing/validation.py | hoai-nguyen/testing-and-monitoring-ml-deployments | c4c0bc8d857326cc10899be6fe7c5bb03586347c | ["BSD-3-Clause"] | 1 | 2020-03-05T04:08:26.000Z | 2020-03-05T04:08:26.000Z | packages/gradient_boosting_model/gradient_boosting_model/processing/validation.py | hoai-nguyen/testing-and-monitoring-ml-deployments | c4c0bc8d857326cc10899be6fe7c5bb03586347c | ["BSD-3-Clause"] | 188 | 2019-12-13T16:48:23.000Z | 2022-03-29T09:25:12.000Z |
import typing as t
from gradient_boosting_model.config.core import config
import numpy as np
import pandas as pd
from marshmallow import fields, Schema, ValidationError
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
"""Check model inputs for na values and filter."""
validated_data = input_data.copy()
if input_data[config.model_config.numerical_na_not_allowed].isnull().any().any():
validated_data = validated_data.dropna(
axis=0, subset=config.model_config.numerical_na_not_allowed
)
return validated_data
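# The marshmallow schema declaration is not shown in this fragment; the
# stand-in below is an assumption - the original declares one field per model
# input, and only a few representative (hypothetical) fields are sketched here.
class HouseDataInputSchema(Schema):
    MSSubClass = fields.Integer(allow_none=True)
    LotArea = fields.Integer(allow_none=True)
    YrSold = fields.Integer(allow_none=True)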
def validate_inputs(
*, input_data: pd.DataFrame
) -> t.Tuple[pd.DataFrame, t.Optional[dict]]:
"""Check model inputs for unprocessable values."""
# convert syntax error field names (beginning with numbers)
input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
validated_data = drop_na_inputs(input_data=input_data)
# set many=True to allow passing in a list
schema = HouseDataInputSchema(many=True)
errors = None
try:
# replace numpy nans so that Marshmallow can validate
schema.load(validated_data.replace({np.nan: None}).to_dict(orient="records"))
except ValidationError as exc:
errors = exc.messages
return validated_data, errors
| 34.66129 | 85 | 0.704281 |
4035dbde81734e9262f7a5d9f7fcf21b0a2fc083 | 1,006 | py | Python | RLBotPack/JoeyBot/CSharpPythonAgent/CSharpPythonAgent.py | RLMarvin/RLBotPack | c88c4111bf67d324b471ad87ad962e7bc8c2a202 | ["MIT"] | 13 | 2019-05-25T20:25:51.000Z | 2022-03-19T13:36:23.000Z | RLBotPack/JoeyBot/CSharpPythonAgent/CSharpPythonAgent.py | RLMarvin/RLBotPack | c88c4111bf67d324b471ad87ad962e7bc8c2a202 | ["MIT"] | 53 | 2019-06-07T13:31:59.000Z | 2022-03-28T22:53:47.000Z | RLBotPack/JoeyBot/CSharpPythonAgent/CSharpPythonAgent.py | RLMarvin/RLBotPack | c88c4111bf67d324b471ad87ad962e7bc8c2a202 | ["MIT"] | 78 | 2019-06-30T08:42:13.000Z | 2022-03-23T20:11:42.000Z |
import os
from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.base_dotnet_agent import BaseDotNetAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
| 43.73913 | 102 | 0.744533 |
4036ce0b3a0763152669516459e91450d4954edb | 2,640 | py | Python | v3_experiments.py | runekaagaard/workflows | 7bb7fe3821bc33b5e82c65dda3ca61f69ee8bcfa | ["Unlicense"] | null | null | null | v3_experiments.py | runekaagaard/workflows | 7bb7fe3821bc33b5e82c65dda3ca61f69ee8bcfa | ["Unlicense"] | null | null | null | v3_experiments.py | runekaagaard/workflows | 7bb7fe3821bc33b5e82c65dda3ca61f69ee8bcfa | ["Unlicense"] | null | null | null |
# coding=utf-8
import inspect
from functools import wraps
def contract(pre_conditions, post_conditions):
"""
Pre is before. Post is after.
"""
return _
def processing(pre_process, post_process):
"Procemanns"
return _
someworkflow = compose(contract, processing, add_one)
print someworkflow
print somefunc(2)
help(somefunc)
| 22.372881 | 75 | 0.576894 |
4037b08c119c1be84f8a39d7cd954a0ebc06a052 | 1,198 | py | Python | externals/binaryen/test/emscripten/tools/distill_asm.py | caokun8008/ckeos | 889093599eb59c90e4cbcff2817f4421302fada1 | ["MIT"] | 40 | 2018-05-14T11:05:03.000Z | 2020-10-20T03:03:06.000Z | externals/binaryen/test/emscripten/tools/distill_asm.py | caokun8008/ckeos | 889093599eb59c90e4cbcff2817f4421302fada1 | ["MIT"] | 4 | 2019-08-19T13:07:10.000Z | 2020-10-17T02:45:04.000Z | externals/binaryen/test/emscripten/tools/distill_asm.py | caokun8008/ckeos | 889093599eb59c90e4cbcff2817f4421302fada1 | ["MIT"] | 14 | 2018-05-28T09:45:02.000Z | 2018-12-18T10:54:26.000Z |
'''
Gets the core asm module out of an emscripten output file.
By default it adds a ';' to end the
var asm = ...
statement. You can add a third param to customize that. If the third param is 'swap-in', it will emit code to swap this asm module in, instead of the default one.
XXX this probably doesn't work with closure compiler advanced yet XXX
'''
import os, sys
import asm_module
infile = sys.argv[1]
outfile = sys.argv[2]
extra = sys.argv[3] if len(sys.argv) >= 4 else ';'
module = asm_module.AsmModule(infile).asm_js
if extra == 'swap-in':
# we do |var asm = | just like the original codebase, so that gets overridden anyhow (assuming global scripts).
extra = r''' (Module.asmGlobalArg, Module.asmLibraryArg, Module['buffer']);
// special fixups
asm.stackRestore(Module['asm'].stackSave()); // if this fails, make sure the original was built to be swappable (-s SWAPPABLE_ASM_MODULE=1)
// Finish swap
Module['asm'] = asm;
if (Module['onAsmSwap']) Module['onAsmSwap']();
'''
elif extra == 'just-func':
module = module[module.find('=')+1:] # strip the initial "var asm =" bit, leave just the raw module as a function
extra = ';'
open(outfile, 'w').write(module + extra)
| 32.378378 | 162 | 0.69783 |
4037b4c2546a2c9d2335471a4c5869528e8d4f28 | 2,399 | py | Python | apex/contrib/conv_bias_relu/conv_bias_relu.py | XL-Kong/Painter_GAN | 23cfb57638497fdd1f2d8c09728b439b0e83efde | ["BSD-3-Clause"] | null | null | null | apex/contrib/conv_bias_relu/conv_bias_relu.py | XL-Kong/Painter_GAN | 23cfb57638497fdd1f2d8c09728b439b0e83efde | ["BSD-3-Clause"] | null | null | null | apex/contrib/conv_bias_relu/conv_bias_relu.py | XL-Kong/Painter_GAN | 23cfb57638497fdd1f2d8c09728b439b0e83efde | ["BSD-3-Clause"] | null | null | null |
import torch
import pdb
from torch.autograd import gradcheck
import fused_conv_bias_relu
ConvBiasReLU = ConvBiasReLU_.apply
ConvBiasMaskReLU = ConvBiasMaskReLU_.apply
ConvBias = ConvBias_.apply
| 31.155844 | 93 | 0.681951 |
403ab8cc728f6138166c183502ef116ca738da28 | 3,037 | py | Python | ironic_inspector/cmd/dbsync.py | namnx228/ironic-inspector | fb5955bccef367af58c972718643fe5fdb18ffa5 | ["Apache-2.0"] | 31 | 2015-06-23T08:06:05.000Z | 2021-11-20T05:34:32.000Z | ironic_inspector/cmd/dbsync.py | sapcc/ironic-inspector | dee8734f8ca2b0fb0acc4c56f1806237234bf55d | ["Apache-2.0"] | 1 | 2019-11-22T12:07:56.000Z | 2019-11-22T12:07:59.000Z | ironic_inspector/cmd/dbsync.py | sapcc/ironic-inspector | dee8734f8ca2b0fb0acc4c56f1806237234bf55d | ["Apache-2.0"] | 33 | 2015-12-02T05:27:56.000Z | 2022-02-28T07:57:43.000Z |
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo_config import cfg
from oslo_log import log
from ironic_inspector import conf # noqa
CONF = cfg.CONF
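def add_command_parsers(subparsers):
    """Reconstructed stub: register one sub-parser per migration command.
    The real module also wires each parser to a handler (do_upgrade,
    do_revision, ...); only the parser registration is assumed here.
    """
    for name in ('upgrade', 'revision', 'stamp'):
        subparsers.add_parser(name)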
command_opt = cfg.SubCommandOpt('command',
title='Command',
help='Available commands',
handler=add_command_parsers)
| 33.01087 | 76 | 0.709582 |
403ac1f41e289fbd9825b8c92a8b0c154ef6090e | 826 | py | Python | scripts/ip2hex.py | Kidlike/dotfiles | b9c4daa4da1f416662b708338a497b5a620ddcbf | ["Apache-2.0"] | null | null | null | scripts/ip2hex.py | Kidlike/dotfiles | b9c4daa4da1f416662b708338a497b5a620ddcbf | ["Apache-2.0"] | null | null | null | scripts/ip2hex.py | Kidlike/dotfiles | b9c4daa4da1f416662b708338a497b5a620ddcbf | ["Apache-2.0"] | 1 | 2018-05-28T08:08:25.000Z | 2018-05-28T08:08:25.000Z |
#!/usr/bin/python
from random import randint
import sys
| 23.214286 | 127 | 0.412308 |
403b3bdafa5f824c48528757629f8e664b7cbcd3 | 1,329 | py | Python | src/models/CVX_weighted.py | DanqingZ/social-DCM | 3c2541a7ed0e7f4519d97783b5b673fa6c06ae94 | ["MIT"] | 14 | 2017-08-10T17:00:20.000Z | 2021-12-23T09:00:50.000Z | src/models/CVX_weighted.py | DanqingZ/social-DCM | 3c2541a7ed0e7f4519d97783b5b673fa6c06ae94 | ["MIT"] | null | null | null | src/models/CVX_weighted.py | DanqingZ/social-DCM | 3c2541a7ed0e7f4519d97783b5b673fa6c06ae94 | ["MIT"] | 1 | 2019-08-13T08:47:43.000Z | 2019-08-13T08:47:43.000Z |
import random
from DesksReminder.Basics.dataFinder import Data
from DesksReminder.Basics.nickNames import ContactBook
from Config.settings import JIRA_URL
__author__ = 'Manuel Escriche'
if __name__ == "__main__":
pass
| 49.01087 | 120 | 0.555001 |
403c902e2dd03cc231fcbd2349b64917b93e7dde | 9,018 | py | Python | DesksReminder/Desks/accounts_desk.py | flopezag/fiware-management-scripts | 3e9ccdb62a11ec0ffd0747511f5512bcdb0df729 | ["Apache-2.0"] | null | null | null | DesksReminder/Desks/accounts_desk.py | flopezag/fiware-management-scripts | 3e9ccdb62a11ec0ffd0747511f5512bcdb0df729 | ["Apache-2.0"] | 21 | 2017-01-17T12:19:47.000Z | 2021-06-03T07:56:56.000Z | DesksReminder/Desks/accounts_desk.py | flopezag/fiware-management-scripts | 3e9ccdb62a11ec0ffd0747511f5512bcdb0df729 | ["Apache-2.0"] | 1 | 2017-05-03T21:42:49.000Z | 2017-05-03T21:42:49.000Z |
from datetime import date, datetime
import sys
import re
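# main() is not shown in this fragment; the minimal sketch below assumes the
# behaviour implied by the file name: print the hex form of an IPv4 address
# passed as the first argument.
def main():
    if len(sys.argv) != 2 or not re.match(r'^\d{1,3}(\.\d{1,3}){3}$', sys.argv[1]):
        sys.exit('usage: ip2hex.py <ipv4-address>')
    print(''.join('%02x' % int(octet) for octet in sys.argv[1].split('.')))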
if __name__ == '__main__':
main()
| 26.645161 | 68 | 0.468523 |
403cacc3c31596cf185f47bf3504df89608d6f14 | 1,300 | py | Python | trabalhoaqui/comp_perguntas/valida.py | EmanoelG/jogodaforca | 06baf78b31e4b40d8db9fc5be67700be32c66cba | ["MIT"] | 1 | 2020-06-06T17:09:55.000Z | 2020-06-06T17:09:55.000Z | trabalhoaqui/comp_perguntas/valida.py | EmanoelG/jogodaforca | 06baf78b31e4b40d8db9fc5be67700be32c66cba | ["MIT"] | null | null | null | trabalhoaqui/comp_perguntas/valida.py | EmanoelG/jogodaforca | 06baf78b31e4b40d8db9fc5be67700be32c66cba | ["MIT"] | null | null | null |
from jogo import desenha_jogo
import numpy as np
import numpy.linalg as LA
import scipy as spy
import time
from itertools import *
import sys
import cvxpy as cvx
from random import randint
import numpy as np
import random
from scipy.sparse import csc_matrix
from scipy import sparse as sp
import networkx as nx | 26.58 | 157 | 0.699774 |
403ceb47a5257374ece3af5ee6603178afb5bfd2 | 5,704 | py | Python | experiments/colorization_cINN/data.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | ["MIT"] | null | null | null | experiments/colorization_cINN/data.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | ["MIT"] | null | null | null | experiments/colorization_cINN/data.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | ["MIT"] | null | null | null |
import sys
import glob
from os.path import join
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, color
from PIL import Image, ImageEnhance
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn.functional as F
import torchvision.transforms as T
from tqdm import tqdm
import joint_bilateral_filter as jbf
import config as c
offsets = (47.5, 2.4, 7.4)
scales = (25.6, 11.2, 16.8)
def apply_filt(args):
'''multiprocessing wrapper for applying the joint bilateral filter'''
L_i, ab_i = args
return jbf.upsample(L_i[0], ab_i, s_x=6, s_l=0.10)
def norm_lab_to_rgb(L, ab, norm=True, filt=False, bw=False):
'''given an Nx1xWxH Tensor L and an Nx2xwxh Tensor ab, normalized accoring to offsets and
scales above, upsample the ab channels and combine with L, and form an RGB image.
norm: If false, assume that L, ab are not normalized and already in the correct range
filt: Use joint bilateral upsamling to do the upsampling. Slow, but improves image quality.
bw: Simply produce a grayscale RGB, ignoring the ab channels'''
if bw:
filt=False
if filt:
with Pool(12) as p:
ab_up_list = p.map(apply_filt, [(L[i], ab[i]) for i in range(len(L))])
ab = np.stack(ab_up_list, axis=0)
ab = torch.Tensor(ab)
else:
ab = F.interpolate(ab, size=L.shape[2], mode='bilinear')
lab = torch.cat([L, ab], dim=1)
for i in range(1 + 2*norm):
lab[:, i] = lab[:, i] * scales[i] + offsets[i]
lab[:, 0].clamp_(0., 100.)
lab[:, 1:].clamp_(-128, 128)
if bw:
lab[:, 1:].zero_()
lab = lab.cpu().data.numpy()
rgb = [color.lab2rgb(np.transpose(l, (1, 2, 0))).transpose(2, 0, 1) for l in lab]
return np.array(rgb)
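# The LabColorDataset class itself is not shown in this fragment; the sketch
# below is a reconstruction under the assumptions implied by its usage:
# load an RGB image from the file list, apply the transform, convert to Lab,
# and normalize with the offsets/scales defined above.
class LabColorDataset(Dataset):
    def __init__(self, file_list, transform=None):
        self.files = file_list
        self.transform = transform
        self.to_tensor = T.ToTensor()
    def __len__(self):
        return len(self.files)
    def __getitem__(self, idx):
        im = Image.open(self.files[idx]).convert('RGB')
        if self.transform:
            im = self.transform(im)
        im = np.transpose(self.to_tensor(im).numpy(), (1, 2, 0))
        im = color.rgb2lab(im).transpose((2, 0, 1))
        for i in range(3):
            im[i] = (im[i] - offsets[i]) / scales[i]
        return torch.Tensor(im)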
# Data transforms for training and test/validation set
transf = T.Compose([T.RandomHorizontalFlip(),
T.RandomResizedCrop(c.img_dims_orig[0], scale=(0.2, 1.))])
transf_test = T.Compose([T.Resize(c.img_dims_orig[0]),
T.CenterCrop(c.img_dims_orig[0])])
if c.dataset == 'imagenet':
with open('./imagenet/training_images.txt') as f:
train_list = [join('./imagenet', fname[2:]) for fname in f.read().splitlines()]
with open(c.validation_images) as f:
test_list = [ t for t in f.read().splitlines()if t[0] != '#']
test_list = [join('./imagenet', fname) for fname in test_list]
if c.val_start is not None:
test_list = test_list[c.val_start:c.val_stop]
else:
data_dir = '/home/diz/data/coco17'
complete_list = sorted(glob.glob(join(data_dir, '*.jpg')))
train_list = complete_list[64:]
test_list = complete_list[64:]
train_data = LabColorDataset(train_list,transf)
test_data = LabColorDataset(test_list, transf_test)
train_loader = DataLoader(train_data, batch_size=c.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
test_loader = DataLoader(test_data, batch_size=min(64, len(test_list)), shuffle=c.shuffle_val, num_workers=4, pin_memory=True, drop_last=False)
if __name__ == '__main__':
# Determine mean and standard deviation of RGB channels
# (i.e. set global variables scale and offsets to 1., then use the results as new scale and offset)
for x in test_loader:
x_l, x_ab, _, x_ab_pred = model.prepare_batch(x)
#continue
img_gt = norm_lab_to_rgb(x_l, x_ab)
img_pred = norm_lab_to_rgb(x_l, x_ab_pred)
for i in range(c.batch_size):
plt.figure()
plt.subplot(2,2,1)
plt.imshow(img_gt[i].transpose(1,2,0))
plt.subplot(2,2,2)
plt.scatter(x_ab[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='gt')
plt.scatter(x_ab_pred[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab_pred[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='pred')
plt.legend()
plt.subplot(2,2,3)
plt.imshow(img_pred[i].transpose(1,2,0))
plt.show()
sys.exit()
means = []
stds = []
for i, x in enumerate(train_loader):
print('\r', '%i / %i' % (i, len(train_loader)), end='')
mean = []
std = []
for i in range(3):
mean.append(x[:, i].mean().item())
std.append(x[:, i].std().item())
means.append(mean)
stds.append(std)
if i >= 1000:
break
means, stds = np.array(means), np.array(stds)
print()
print('Mean ', means.mean(axis=0))
print('Std dev', stds.mean(axis=0))
#[-0.04959071 0.03768991 0.11539354]
#[0.51175581 0.17507738 0.26179135]
| 32.971098 | 144 | 0.603086 |
403d3f7c3cad2d68df2456deb94e9f014798faf1 | 16,215 | py | Python | utils/editor.py | tien1504/idinvert_pytorch | 19999e9945aef4843a464930426a565256863ded | ["MIT"] | 415 | 2020-04-02T03:06:47.000Z | 2022-03-28T09:32:13.000Z | utils/editor.py | tien1504/idinvert_pytorch | 19999e9945aef4843a464930426a565256863ded | ["MIT"] | 52 | 2020-04-03T04:13:57.000Z | 2021-11-23T16:52:31.000Z | utils/editor.py | tien1504/idinvert_pytorch | 19999e9945aef4843a464930426a565256863ded | ["MIT"] | 68 | 2020-04-03T10:08:30.000Z | 2021-10-29T20:13:45.000Z |
# python 3.7
"""Utility functions for image editing from latent space."""
import os.path
import numpy as np
__all__ = [
'parse_indices', 'interpolate', 'mix_style',
'get_layerwise_manipulation_strength', 'manipulate', 'parse_boundary_list'
]
def parse_indices(obj, min_val=None, max_val=None):
"""Parses indices.
If the input is a list or tuple, this function has no effect.
The input can also be a string, which is either a comma separated list of
numbers 'a, b, c', or a dash separated range 'a - c'. Space in the string will
be ignored.
Args:
obj: The input object to parse indices from.
min_val: If not `None`, this function will check that all indices are equal
to or larger than this value. (default: None)
max_val: If not `None`, this function will check that all indices are equal
to or smaller than this field. (default: None)
Returns:
A list of integers.
Raises:
If the input is invalid, i.e., neither a list or tuple, nor a string.
"""
if obj is None or obj == '':
indices = []
elif isinstance(obj, int):
indices = [obj]
elif isinstance(obj, (list, tuple, np.ndarray)):
indices = list(obj)
elif isinstance(obj, str):
indices = []
splits = obj.replace(' ', '').split(',')
for split in splits:
numbers = list(map(int, split.split('-')))
if len(numbers) == 1:
indices.append(numbers[0])
elif len(numbers) == 2:
indices.extend(list(range(numbers[0], numbers[1] + 1)))
else:
raise ValueError(f'Invalid type of input: {type(obj)}!')
assert isinstance(indices, list)
indices = sorted(list(set(indices)))
for idx in indices:
assert isinstance(idx, int)
if min_val is not None:
assert idx >= min_val, f'{idx} is smaller than min val `{min_val}`!'
if max_val is not None:
assert idx <= max_val, f'{idx} is larger than max val `{max_val}`!'
return indices
def interpolate(src_codes, dst_codes, step=5):
"""Interpolates two sets of latent codes linearly.
Args:
src_codes: Source codes, with shape [num, *code_shape].
dst_codes: Target codes, with shape [num, *code_shape].
step: Number of interplolation steps, with source and target included. For
example, if `step = 5`, three more samples will be inserted. (default: 5)
Returns:
Interpolated codes, with shape [num, step, *code_shape].
Raises:
ValueError: If the input two sets of latent codes are with different shapes.
"""
if not (src_codes.ndim >= 2 and src_codes.shape == dst_codes.shape):
raise ValueError(f'Shapes of source codes and target codes should both be '
f'[num, *code_shape], but {src_codes.shape} and '
f'{dst_codes.shape} are received!')
num = src_codes.shape[0]
code_shape = src_codes.shape[1:]
a = src_codes[:, np.newaxis]
b = dst_codes[:, np.newaxis]
l = np.linspace(0.0, 1.0, step).reshape(
[step if axis == 1 else 1 for axis in range(a.ndim)])
results = a + l * (b - a)
assert results.shape == (num, step, *code_shape)
return results
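# Example (sketch): interpolating two batches of 512-D codes in 5 steps.
#   src = np.random.randn(4, 512)
#   dst = np.random.randn(4, 512)
#   interpolate(src, dst, step=5).shape  # -> (4, 5, 512)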
def mix_style(style_codes,
content_codes,
num_layers=1,
mix_layers=None,
is_style_layerwise=True,
is_content_layerwise=True):
"""Mixes styles from style codes to those of content codes.
Each style code or content code consists of `num_layers` codes, each of which
is typically fed into a particular layer of the generator. This function mixes
styles by partially replacing the codes of `content_codes` from some certain
layers with those of `style_codes`.
  For example, if both the style code and the content code have shape [10, 512],
  i.e., 10 layers each employing a 512-dimensional latent code, and the 1st,
  2nd, and 3rd layers are the targets for style mixing, then the first three
  rows of the content code (with shape [3, 512]) will be replaced by the first
  three rows of the style code (also with shape [3, 512]).
NOTE: This function also supports taking single-layer latent codes as inputs,
i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this
case, the corresponding code will be first repeated for `num_layers` before
performing style mixing.
Args:
style_codes: Style codes, with shape [num_styles, *code_shape] or
[num_styles, num_layers, *code_shape].
content_codes: Content codes, with shape [num_contents, *code_shape] or
[num_contents, num_layers, *code_shape].
num_layers: Total number of layers in the generative model. (default: 1)
mix_layers: Indices of the layers to perform style mixing. `None` means to
replace all layers, in which case the content code will be completely
replaced by style code. (default: None)
is_style_layerwise: Indicating whether the input `style_codes` are
layer-wise codes. (default: True)
is_content_layerwise: Indicating whether the input `content_codes` are
layer-wise codes. (default: True)
Returns:
Codes after style mixing, with shape [num_styles, num_contents, num_layers,
*code_shape].
Raises:
ValueError: If input `content_codes` or `style_codes` is with invalid shape.
"""
if not is_style_layerwise:
style_codes = style_codes[:, np.newaxis]
style_codes = np.tile(
style_codes,
[num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
if not is_content_layerwise:
content_codes = content_codes[:, np.newaxis]
content_codes = np.tile(
content_codes,
[num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])
if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
style_codes.shape[1:] == content_codes.shape[1:]):
raise ValueError(f'Shapes of style codes and content codes should be '
f'[num_styles, num_layers, *code_shape] and '
f'[num_contents, num_layers, *code_shape] respectively, '
f'but {style_codes.shape} and {content_codes.shape} are '
f'received!')
layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
num_styles = style_codes.shape[0]
num_contents = content_codes.shape[0]
code_shape = content_codes.shape[2:]
s = style_codes[:, np.newaxis]
s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
c = content_codes[np.newaxis]
c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])
from_style = np.zeros(s.shape, dtype=bool)
from_style[:, :, layer_indices] = True
results = np.where(from_style, s, c)
assert results.shape == (num_styles, num_contents, num_layers, *code_shape)
return results
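# A minimal sketch of `mix_style` with hypothetical shapes: 4 style codes and
# 8 content codes for a 14-layer generator, mixing only the first four layers.
def _example_mix_style():
  styles = np.random.randn(4, 14, 512)
  contents = np.random.randn(8, 14, 512)
  mixed = mix_style(styles, contents, num_layers=14, mix_layers='0-3')
  assert mixed.shape == (4, 8, 14, 512)
  # Layers 4..13 should remain untouched content codes.
  assert np.allclose(mixed[0, 0, 4:], contents[0, 4:])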
def get_layerwise_manipulation_strength(num_layers,
truncation_psi,
truncation_layers):
"""Gets layer-wise strength for manipulation.
Recall the truncation trick played on layer [0, truncation_layers):
w = truncation_psi * w + (1 - truncation_psi) * w_avg
So, when using the same boundary to manipulate different layers, layer
[0, truncation_layers) and layer [truncation_layers, num_layers) should use
different strength to eliminate the effect from the truncation trick. More
concretely, the strength for layer [0, truncation_layers) is set as
  `truncation_psi`, while that for other layers is set as 1.
"""
strength = [1.0 for _ in range(num_layers)]
if truncation_layers > 0:
for layer_idx in range(0, truncation_layers):
strength[layer_idx] = truncation_psi
return strength
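# A minimal sketch: for an 18-layer model truncated on the first 8 layers with
# psi = 0.7 (hypothetical numbers), the per-layer strengths come out as eight
# 0.7 entries followed by ten 1.0 entries.
def _example_strength():
  s = get_layerwise_manipulation_strength(18, 0.7, 8)
  assert s == [0.7] * 8 + [1.0] * 10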
def manipulate(latent_codes,
boundary,
start_distance=-5.0,
end_distance=5.0,
step=21,
layerwise_manipulation=False,
num_layers=1,
manipulate_layers=None,
is_code_layerwise=False,
is_boundary_layerwise=False,
layerwise_manipulation_strength=1.0):
"""Manipulates the given latent codes with respect to a particular boundary.
Basically, this function takes a set of latent codes and a boundary as inputs,
and outputs a collection of manipulated latent codes.
  For example, let `step` be 10, `latent_codes` have shape [num, *code_shape],
  and `boundary` have shape [1, *code_shape] with unit norm.
Then the output will be with shape [num, 10, *code_shape]. For each 10-element
manipulated codes, the first code is `start_distance` away from the original
code (i.e., the input) along the `boundary` direction, while the last code is
`end_distance` away. Remaining codes are linearly interpolated. Here,
`distance` is sign sensitive.
NOTE: This function also supports layer-wise manipulation, in which case the
generator should be able to take layer-wise latent codes as inputs. For
  example, if the generator has 18 convolutional layers in total, each of which
  takes an independent latent code as input, it is possible, sometimes with
  even better performance, to manipulate only the latent codes corresponding
  to certain layers while keeping the others untouched.
NOTE: Boundary is assumed to be normalized to unit norm already.
Args:
latent_codes: The input latent codes for manipulation, with shape
[num, *code_shape] or [num, num_layers, *code_shape].
boundary: The semantic boundary as reference, with shape [1, *code_shape] or
[1, num_layers, *code_shape].
start_distance: Start point for manipulation. (default: -5.0)
end_distance: End point for manipulation. (default: 5.0)
step: Number of manipulation steps. (default: 21)
layerwise_manipulation: Whether to perform layer-wise manipulation.
(default: False)
num_layers: Number of layers. Only active when `layerwise_manipulation` is
set as `True`. Should be a positive integer. (default: 1)
manipulate_layers: Indices of the layers to perform manipulation. `None`
means to manipulate latent codes from all layers. (default: None)
is_code_layerwise: Whether the input latent codes are layer-wise. If set as
`False`, the function will first repeat the input codes for `num_layers`
      times before performing manipulation. (default: False)
is_boundary_layerwise: Whether the input boundary is layer-wise. If set as
`False`, the function will first repeat boundary for `num_layers` times
      before performing manipulation. (default: False)
layerwise_manipulation_strength: Manipulation strength for each layer. Only
active when `layerwise_manipulation` is set as `True`. This field can be
used to resolve the strength discrepancy across layers when truncation
trick is on. See function `get_layerwise_manipulation_strength()` for
details. A tuple, list, or `numpy.ndarray` is expected. If set as a single
number, this strength will be used for all layers. (default: 1.0)
Returns:
Manipulated codes, with shape [num, step, *code_shape] if
`layerwise_manipulation` is set as `False`, or shape [num, step,
num_layers, *code_shape] if `layerwise_manipulation` is set as `True`.
Raises:
ValueError: If the input latent codes, boundary, or strength are with
invalid shape.
"""
if not (boundary.ndim >= 2 and boundary.shape[0] == 1):
raise ValueError(f'Boundary should be with shape [1, *code_shape] or '
f'[1, num_layers, *code_shape], but '
f'{boundary.shape} is received!')
if not layerwise_manipulation:
assert not is_code_layerwise
assert not is_boundary_layerwise
num_layers = 1
manipulate_layers = None
layerwise_manipulation_strength = 1.0
# Preprocessing for layer-wise manipulation.
# Parse indices of manipulation layers.
layer_indices = parse_indices(
manipulate_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
# Make latent codes layer-wise if needed.
assert num_layers > 0
if not is_code_layerwise:
x = latent_codes[:, np.newaxis]
x = np.tile(x, [num_layers if axis == 1 else 1 for axis in range(x.ndim)])
else:
x = latent_codes
if x.shape[1] != num_layers:
raise ValueError(f'Latent codes should be with shape [num, num_layers, '
f'*code_shape], where `num_layers` equals to '
f'{num_layers}, but {x.shape} is received!')
# Make boundary layer-wise if needed.
if not is_boundary_layerwise:
b = boundary
b = np.tile(b, [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
else:
b = boundary[0]
if b.shape[0] != num_layers:
raise ValueError(f'Boundary should be with shape [num_layers, '
f'*code_shape], where `num_layers` equals to '
f'{num_layers}, but {b.shape} is received!')
# Get layer-wise manipulation strength.
if isinstance(layerwise_manipulation_strength, (int, float)):
s = [float(layerwise_manipulation_strength) for _ in range(num_layers)]
elif isinstance(layerwise_manipulation_strength, (list, tuple)):
s = layerwise_manipulation_strength
if len(s) != num_layers:
raise ValueError(f'Shape of layer-wise manipulation strength `{len(s)}` '
f'mismatches number of layers `{num_layers}`!')
elif isinstance(layerwise_manipulation_strength, np.ndarray):
s = layerwise_manipulation_strength
if s.size != num_layers:
raise ValueError(f'Shape of layer-wise manipulation strength `{s.size}` '
f'mismatches number of layers `{num_layers}`!')
else:
    raise ValueError(f'Unsupported type of `layerwise_manipulation_strength`: '
                     f'{type(layerwise_manipulation_strength)}!')
s = np.array(s).reshape(
[num_layers if axis == 0 else 1 for axis in range(b.ndim)])
b = b * s
if x.shape[1:] != b.shape:
raise ValueError(f'Latent code shape {x.shape} and boundary shape '
f'{b.shape} mismatch!')
num = x.shape[0]
code_shape = x.shape[2:]
x = x[:, np.newaxis]
b = b[np.newaxis, np.newaxis, :]
l = np.linspace(start_distance, end_distance, step).reshape(
[step if axis == 1 else 1 for axis in range(x.ndim)])
results = np.tile(x, [step if axis == 1 else 1 for axis in range(x.ndim)])
is_manipulatable = np.zeros(results.shape, dtype=bool)
is_manipulatable[:, :, layer_indices] = True
results = np.where(is_manipulatable, x + l * b, results)
assert results.shape == (num, step, num_layers, *code_shape)
return results if layerwise_manipulation else results[:, :, 0]
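# A minimal sketch of `manipulate` with hypothetical codes and a random unit
# boundary, in both the plain and the layer-wise settings.
def _example_manipulate():
  codes = np.random.randn(3, 512)
  boundary = np.random.randn(1, 512)
  boundary /= np.linalg.norm(boundary)
  assert manipulate(codes, boundary, step=7).shape == (3, 7, 512)
  # Layer-wise on a 14-layer model, editing layers 4-7 only.
  results = manipulate(codes, boundary, step=7, layerwise_manipulation=True,
                       num_layers=14, manipulate_layers='4-7')
  assert results.shape == (3, 7, 14, 512)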
def parse_boundary_list(boundary_list_path):
"""Parses boundary list.
Sometimes, a text file containing a list of boundaries will significantly
  simplify image manipulation with a large number of boundaries. This function
is used to parse boundary information from such list file.
Basically, each item in the list should be with format
`($NAME, $SPACE_TYPE): $PATH`. `DISABLE` at the beginning of the line can
disable a particular boundary.
Sample:
(age, z): $AGE_BOUNDARY_PATH
(gender, w): $GENDER_BOUNDARY_PATH
DISABLE(pose, wp): $POSE_BOUNDARY_PATH
Args:
boundary_list_path: Path to the boundary list.
Returns:
A dictionary, whose key is a two-element tuple (boundary_name, space_type)
and value is the corresponding boundary path.
  Raises:
ValueError: If the given boundary list does not exist.
"""
if not os.path.isfile(boundary_list_path):
    raise ValueError(f'Boundary list `{boundary_list_path}` does not exist!')
boundaries = {}
with open(boundary_list_path, 'r') as f:
for line in f:
if line[:len('DISABLE')] == 'DISABLE':
continue
boundary_info, boundary_path = line.strip().split(':')
boundary_name, space_type = boundary_info.strip()[1:-1].split(',')
boundary_name = boundary_name.strip()
space_type = space_type.strip().lower()
boundary_path = boundary_path.strip()
boundaries[(boundary_name, space_type)] = boundary_path
return boundaries
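# A minimal sketch of the expected list-file format and the parsed result
# (paths below are hypothetical placeholders):
#
#   (age, z): boundaries/age_boundary.npy
#   DISABLE(pose, wp): boundaries/pose_boundary.npy
#
# Parsing such a file returns {('age', 'z'): 'boundaries/age_boundary.npy'},
# since the `DISABLE`d entry is skipped.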
| 41.259542 | 80 | 0.687882 |
403d7ac07f1f092095ae4d7caf15898f47658901 | 96 | py | Python | venv/lib/python3.8/site-packages/pyls/_version.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pyls/_version.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pyls/_version.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/24/e8/39/183700a0b2d2a9545f3da2571d82b53df290aab3a51dc229b113d16e6c | 96 | 96 | 0.895833 |
403e17c5ec985065a02c6baa32d0dcd4699f18d1 | 1,277 | py | Python | pymoo/util/normalization.py | Electr0phile/pymoo | 652428473cc68b6d9deada3792635bc8a831b255 | [
"Apache-2.0"
] | 1 | 2020-08-27T09:51:27.000Z | 2020-08-27T09:51:27.000Z | pymoo/util/normalization.py | Asurada2015/pymoo | 023a787d0b78813e789f170a3e94b2de85605aff | [
"Apache-2.0"
] | null | null | null | pymoo/util/normalization.py | Asurada2015/pymoo | 023a787d0b78813e789f170a3e94b2de85605aff | [
"Apache-2.0"
] | null | null | null | import numpy as np
| 21.644068 | 92 | 0.617071 |
4040a877bb3e28b9851ff90970e6bf5e768e303c | 31,211 | py | Python | alembic/versions/92235b77ea53_check_new.py | go-lab/appcomposer | c2468f11b8398edc9b16e1552ac8d609d8347677 | [
"BSD-2-Clause"
] | 1 | 2018-01-20T14:56:01.000Z | 2018-01-20T14:56:01.000Z | alembic/versions/92235b77ea53_check_new.py | go-lab/appcomposer | c2468f11b8398edc9b16e1552ac8d609d8347677 | [
"BSD-2-Clause"
] | 25 | 2015-01-21T09:16:26.000Z | 2021-12-13T20:01:21.000Z | alembic/versions/92235b77ea53_check_new.py | go-lab/appcomposer | c2468f11b8398edc9b16e1552ac8d609d8347677 | [
"BSD-2-Clause"
] | 3 | 2015-07-28T18:40:05.000Z | 2017-03-28T08:14:37.000Z | """Check new
Revision ID: 92235b77ea53
Revises: 381fdb66ec27
Create Date: 2017-10-14 02:38:51.007307
"""
# revision identifiers, used by Alembic.
revision = '92235b77ea53'
down_revision = '381fdb66ec27'
from alembic import op
import sqlalchemy as sa
# op.create_unique_constraint(None, 'ActiveTranslationMessages', ['bundle_id', 'key'])
# op.create_unique_constraint(None, 'RepositoryApp2languages', ['repository_app_id', 'language_id'])
# op.create_unique_constraint(None, 'TranslationBundles', ['translation_url_id', 'language', 'target'])
# ### end Alembic commands ###
| 90.205202 | 149 | 0.79834 |
4040e2297e78d48d586c2e4b34ffa775eb46c92e | 5,633 | py | Python | build/lib/adb_utils/adb_utils.py | christopherferreira3/Python-ADB-Tools | 94e39cfe4b285517ee2502f658ab23af4ff18643 | [
"MIT"
] | null | null | null | build/lib/adb_utils/adb_utils.py | christopherferreira3/Python-ADB-Tools | 94e39cfe4b285517ee2502f658ab23af4ff18643 | [
"MIT"
] | null | null | null | build/lib/adb_utils/adb_utils.py | christopherferreira3/Python-ADB-Tools | 94e39cfe4b285517ee2502f658ab23af4ff18643 | [
"MIT"
] | null | null | null | import subprocess
import os
def get_connected_devices() -> list:
"""
Returns a list of tuples containing the Device name and the android Version
:return:
"""
devices = []
    # Drop the "List of devices attached" header, then parse "<serial>\tdevice" rows.
    devices_output = subprocess.check_output(["adb", "devices"]).decode("utf-8").splitlines()[1:]
    for device in devices_output:
        if not device.strip():
            continue
        device_name = device.split("\t")[0]
        android_version = subprocess.check_output(["adb", "-s", device_name, "shell", "getprop", "ro.build.version.release"])
        devices.append((device_name, android_version.decode("utf-8").strip("\r\n")))
    return devices
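# A minimal usage sketch (serial numbers and versions below are hypothetical):
#
#   >>> get_connected_devices()
#   [('emulator-5554', '11'), ('0123456789ABCDEF', '9')]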
def install_app(apk_path=None, device=None) -> bool:
"""
Installs an APK file into a device.
The app installed with the -r option so the apk gets replaced it exists or installed if it doenst
:param apk_path: Path for the APK
:param device: Device name
:return: True if success , False if fail
"""
    # Absolute paths are used as-is; relative paths are resolved against the cwd.
    path = str(apk_path) if os.path.isabs(str(apk_path)) else os.path.join(os.getcwd(), str(apk_path))
if apk_path is not None and device is not None:
if os.path.isfile(path):
command = ["adb", "-s" , device, "install", "-r", path]
p = subprocess.Popen(command, stdout=None)
p.wait()
p.terminate()
print("APK {0} was installed in {1}".format(apk_path, device))
return True
else:
print("File {0} not found!".format(path))
else:
print("Device and/or apk not found or not specified")
return False
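# A minimal usage sketch, assuming `app.apk` exists in the working directory
# and `emulator-5554` is a connected device (both hypothetical):
#
#   >>> install_app(apk_path="app.apk", device="emulator-5554")
#   APK app.apk was installed in emulator-5554
#   True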
def uninstall_app(package=None, device=None) -> None:
"""
Uninstall an app from the device
:return:
"""
command = ["adb", "-s", device, "uninstall", package]
if package is not None:
if device is None:
command.pop(1)
command.pop(1)
p = subprocess.Popen(command, stdout=None)
p.wait()
p.terminate()
else:
print("App package was not specified.")
def is_app_installed(package=None, device=None) -> bool:
"""
Returns True if the package is installed or False if it is not
:param package:
:return:
"""
command = ["adb", "-s", device, "shell", "pm", "list", "packages |", "grep", package]
if device is None:
command.pop(1)
command.pop(1)
    try:
        out = subprocess.check_output(command, stderr=None)
    except subprocess.CalledProcessError:
        # grep exits with a non-zero status when nothing matches.
        return False
    # grep can match substrings of other package names, so compare whole lines.
    lines = out.decode("utf-8").splitlines()
    return "package:{0}".format(package) in (line.strip() for line in lines)
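# A minimal usage sketch with a hypothetical package name and device serial:
#
#   >>> is_app_installed(package="com.example.app", device="emulator-5554")
#   True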
def run_command(arg_string=None, arg_list=None) -> None:
"""
Run a general ABD command
:return:
"""
command = arg_list if arg_list else str(arg_string).split(" ")
p = subprocess.check_output(command, stderr=None)
print(p.decode('utf-8'))
def kill_server() -> None:
"""
Kills the ADB server
:return: None
"""
command = ["adb", "kill-server"]
p = subprocess.Popen(command, stdout=None, stderr=None)
p.wait(timeout=10)
print("ADB server has been killed.")
def start_server() -> None:
"""
Starts the ADB server
:return: None
"""
command = ["adb", "start-server"]
p = subprocess.Popen(command, stderr=None, stdout=None)
p.wait(timeout=10)
print("ADB server has been started.")
def get_apk_from_device(package=None, device=None) -> bool:
"""
Retrieves the APK of an application if it exists
:param package:
:param device:
:return: bool
"""
# adb shell pm path com.example.someapp
# adb pull /data/app/com.example.someapp-2.apk path/to/desired/destination
    command_apk_path = ["adb", "-s", device, "shell", "pm", "path", package]
if package is None:
print("Package is required but it was not specified.")
return False
if device is None and len(get_connected_devices()) != 1:
print("There are multiple devices connected, please specify a device to get the APK from")
return False
elif device is None:
command_apk_path.pop(1)
command_apk_path.pop(1)
apk_path = subprocess.check_output(command_apk_path, stderr=None)
# TODO: Rest of the stuff
def push_file_to_device() -> None: # For now...
"""
Pushes a file to the device
:param device:
:return: None
"""
pass
def list_files_in_device() -> None:
"""
Gets a list of files in a specific folder
:param device:
:param path:
:return: list of files
"""
pass
def unlock_device(password=None, device=None) -> bool:
"""
Unlocks a device given a device name and the password
:param password:
:param device:
    :return: True if success, False if error
"""
command_input = ["adb", "-s", device, "shell", "input", "text", password]
    command_submit = ["adb", "-s", device, "shell", "input", "keyevent", "66"]  # 66 = KEYCODE_ENTER
if device is None and len(get_connected_devices()) != 1:
print("No device was specified and/or multiple devices are connected")
return False
if device is None:
command_input.pop(1)
command_input.pop(1)
command_submit.pop(1)
command_submit.pop(1)
p = subprocess.Popen(command_input, stdout=None)
p.wait()
p.terminate()
p1 = subprocess.Popen(command_submit, stdout=None)
p1.wait()
p1.terminate()
return True
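# A minimal usage sketch (PIN and serial are hypothetical); the trailing
# KEYCODE_ENTER keyevent submits the typed password:
#
#   >>> unlock_device(password="1234", device="emulator-5554")
#   True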
| 27.081731 | 129 | 0.617788 |
4041a6092503143d16664ce5f9772df9bdedc920 | 2,664 | py | Python | tests/unit/test_cl61d.py | griesche/cloudnetpy-1 | 0675677d1cb8dc4b09dfe5d76129df4483725fce | [
"MIT"
] | 1 | 2021-11-16T15:23:24.000Z | 2021-11-16T15:23:24.000Z | tests/unit/test_cl61d.py | griesche/cloudnetpy-1 | 0675677d1cb8dc4b09dfe5d76129df4483725fce | [
"MIT"
] | null | null | null | tests/unit/test_cl61d.py | griesche/cloudnetpy-1 | 0675677d1cb8dc4b09dfe5d76129df4483725fce | [
"MIT"
] | null | null | null | import glob
import os
import sys
from tempfile import TemporaryDirectory
import netCDF4
import numpy as np
import numpy.ma as ma
from all_products_fun import Check
from lidar_fun import LidarFun
from cloudnetpy import concat_lib
from cloudnetpy.instruments import ceilo2nc
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_PATH)
FILES = glob.glob(f"{SCRIPT_PATH}/data/cl61d/*.nc")
FILES.sort()
SITE_META = {
"name": "Hyytil",
"altitude": 123,
"calibration_factor": 2.0,
"latitude": 45.0,
"longitude": 22.0,
}
| 30.62069 | 81 | 0.634384 |
4043eb802b57171a6cc605056ffc3abeca7f2a68 | 1,343 | py | Python | tests/functions/test_count.py | athre0z/clickhouse-sqlalchemy | d4be4a818c2fadef8eeb76a59d11ff82fc2c433a | [
"MIT"
] | 1 | 2021-07-07T09:06:00.000Z | 2021-07-07T09:06:00.000Z | tests/functions/test_count.py | athre0z/clickhouse-sqlalchemy | d4be4a818c2fadef8eeb76a59d11ff82fc2c433a | [
"MIT"
] | null | null | null | tests/functions/test_count.py | athre0z/clickhouse-sqlalchemy | d4be4a818c2fadef8eeb76a59d11ff82fc2c433a | [
"MIT"
] | null | null | null | from sqlalchemy import Column, func
from clickhouse_sqlalchemy import types, Table
from tests.testcase import (
BaseAbstractTestCase, HttpSessionTestCase, NativeSessionTestCase,
)
| 27.408163 | 72 | 0.63589 |