hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
426e4afa33488c3f61e9819e1e0e8ab285e730fe | 902 | py | Python | config.py | rajatomar788/pyblog | d450dc1ceb3a6b3aeb747648a0fb1b4334e4b3ae | [
"MIT"
] | null | null | null | config.py | rajatomar788/pyblog | d450dc1ceb3a6b3aeb747648a0fb1b4334e4b3ae | [
"MIT"
] | null | null | null | config.py | rajatomar788/pyblog | d450dc1ceb3a6b3aeb747648a0fb1b4334e4b3ae | [
"MIT"
] | null | null | null | import os
basedir = os.path.abspath(os.path.dirname(__file__))
| 29.096774 | 80 | 0.674058 |
426e9a71b5a0425ef77735be32bb8398f28a2e1e | 45 | py | Python | ceefax/fonts/size7extracondensed/__init__.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | 1 | 2020-03-28T15:53:22.000Z | 2020-03-28T15:53:22.000Z | ceefax/fonts/size7extracondensed/__init__.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | 1 | 2021-02-05T13:43:52.000Z | 2021-02-05T13:43:52.000Z | ceefax/fonts/size7extracondensed/__init__.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | null | null | null | from .default import size7extracondensedfont
| 22.5 | 44 | 0.888889 |
426e9a7b517f49d7e52664e4ad563ce95c7c8446 | 3,433 | py | Python | bom/helpers.py | gxyp/indabom | 114991be2471eda2cf658c68706ab7bb05b06959 | [
"MIT"
] | null | null | null | bom/helpers.py | gxyp/indabom | 114991be2471eda2cf658c68706ab7bb05b06959 | [
"MIT"
] | null | null | null | bom/helpers.py | gxyp/indabom | 114991be2471eda2cf658c68706ab7bb05b06959 | [
"MIT"
] | null | null | null | from bom.octopart_parts_match import match_part
from bom.models import Part, PartClass, Seller, SellerPart, Subpart, \
Manufacturer, Organization, PartFile
| 23.040268 | 77 | 0.642587 |
426f6bd9b353f10dd5dac6c8afa818c7319f5d74 | 8,612 | py | Python | keycodes/key/codes/win.py | jonchun/ptoys-mapper | a2dde413d37e897ec41b69ac979e538afb7435f0 | [
"MIT"
] | null | null | null | keycodes/key/codes/win.py | jonchun/ptoys-mapper | a2dde413d37e897ec41b69ac979e538afb7435f0 | [
"MIT"
] | null | null | null | keycodes/key/codes/win.py | jonchun/ptoys-mapper | a2dde413d37e897ec41b69ac979e538afb7435f0 | [
"MIT"
] | null | null | null | # Source:
# https://github.com/tpn/winsdk-10/blob/46c66795f49679eb4783377968ce25f6c778285a/Include/10.0.10240.0/um/WinUser.h
# # convert all C-style comments to python multi-line string comment
# find: (^/\*[\s\S\r]+?\*/)
# replace: """\n$1\n"""
# # convert all keycode #defines to be python constants
# find: #define\s(.+_.+?)\s+([\w]+)(\s*)(/[/*].+)?
# replace: $1 = $2$3# $4\n
# # clean up results by removing lines with only a single # caused by previous regex
# find: ^# $\n
# replace:
# # clean up duplicate newlines
# find: (\s#.+\n)\n
# replace: $1
# # clean up multi-line comments.
# find: ^(\s{3,})(\S.+)
# replace: $1 # $2
from enum import IntEnum
| 20.407583 | 114 | 0.576637 |
426fdd67326d3cc89802cd8abeba99af022807c1 | 117 | py | Python | application/flicket_errors/__init__.py | abbas0001/flicket | 547a5e783cccf157d10df88608440aa2919d7e7b | [
"MIT"
] | null | null | null | application/flicket_errors/__init__.py | abbas0001/flicket | 547a5e783cccf157d10df88608440aa2919d7e7b | [
"MIT"
] | null | null | null | application/flicket_errors/__init__.py | abbas0001/flicket | 547a5e783cccf157d10df88608440aa2919d7e7b | [
"MIT"
] | null | null | null | #! python3
# -*- coding: utf-8 -*-
#
from flask import Blueprint
bp_errors = Blueprint('flicket-errors', __name__)
| 14.625 | 49 | 0.683761 |
4270b9f3f72e8e2eb3176ce2f540c1dc258f357c | 399 | py | Python | SmartBuild/modules/migrations/0003_module_shortcut.py | ampamo/smart-build | a0c9a3871eb7ca06a6cd6d4a15aba70e2291f4fb | [
"CC0-1.0"
] | null | null | null | SmartBuild/modules/migrations/0003_module_shortcut.py | ampamo/smart-build | a0c9a3871eb7ca06a6cd6d4a15aba70e2291f4fb | [
"CC0-1.0"
] | null | null | null | SmartBuild/modules/migrations/0003_module_shortcut.py | ampamo/smart-build | a0c9a3871eb7ca06a6cd6d4a15aba70e2291f4fb | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 19.95 | 53 | 0.601504 |
4273f06b359f41b03bbac8b02773bc579762c6fd | 6,789 | py | Python | app/common.py | yxonic/dl-boilerplate | d503716ad514929ddfdc41341e37b0e3f1a1f0f5 | [
"MIT"
] | 1 | 2017-09-26T05:13:19.000Z | 2017-09-26T05:13:19.000Z | app/common.py | yxonic/dl-boilerplate | d503716ad514929ddfdc41341e37b0e3f1a1f0f5 | [
"MIT"
] | null | null | null | app/common.py | yxonic/dl-boilerplate | d503716ad514929ddfdc41341e37b0e3f1a1f0f5 | [
"MIT"
] | null | null | null | import abc
import argparse
import logging
import pathlib
from collections import namedtuple
from operator import itemgetter
import toml
def __str__(self):
return str(self.config)
def build_model(self):
"""Build model according to the configurations in current
workspace."""
return self.model_cls.build(**self.config)
def logger(self, name: str):
"""Get a logger that logs to a file.
Notice that same logger instance is returned for same names.
Args:
name(str): logger name
"""
logger = logging.getLogger(name)
if logger.handlers:
# previously configured, remain unchanged
return logger
fileFormatter = logging.Formatter('%(levelname)s [%(name)s] '
'%(asctime)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
fileHandler = logging.FileHandler(
str(self.log_path / (name + '.log')))
fileHandler.setFormatter(fileFormatter)
logger.addHandler(fileHandler)
return logger
def _load(self):
"""Load configuration."""
try:
cfg = toml.load((self.path / 'config.toml').open())
self._set_model(cfg['model_name'], cfg[cfg['model_name'].lower()])
except (FileNotFoundError, KeyError):
raise NotConfiguredError('config.toml doesn\'t exist or '
'is incomplete')
def _save(self):
"""Save configuration."""
f = (self.path / 'config.toml').open('w')
toml.dump({'model_name': self.model_name,
self.model_name.lower(): self.config}, f)
f.close()
class Command(abc.ABC):
"""Command interface."""
| 28.405858 | 78 | 0.577405 |
4274e96e6ce5245f31a18bf5087d02fdafd53341 | 2,737 | py | Python | training/train_pos_dep.py | ex00/spacy-ru | 7284d8127dca322fcc2aa9ce0267699cfc9baf38 | [
"MIT"
] | null | null | null | training/train_pos_dep.py | ex00/spacy-ru | 7284d8127dca322fcc2aa9ce0267699cfc9baf38 | [
"MIT"
] | null | null | null | training/train_pos_dep.py | ex00/spacy-ru | 7284d8127dca322fcc2aa9ce0267699cfc9baf38 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, print_function
import sys
from pathlib import Path
import spacy
from spacy.lang.ru import Russian
from spacy.pipeline import Tagger, DependencyParser
from spacy.util import fix_random_seed, set_lang_class
from models.dep import MyDEP
from models.loadvec import get_ft_vec
from models.pos import MyPOS
from models.t2v import build_tok2vec
from training.corpora.syntagrus import get_syntagrus_example, get_syntagrus
from training.trainer import Trainer, Extractor
from utils.corpus import tag_morphology
CFG = {"device": 0, 'verbose': 1}
GPU_1 = "-g1" in sys.argv[1:]
if GPU_1:
CFG["device"] = 1
TESTS = False
spacy.require_gpu(CFG['device'])
TEST_MODE = "--test" in sys.argv[1:]
if TEST_MODE:
SynTagRus = get_syntagrus_example(Path("data/syntagrus/"))
else:
SynTagRus = get_syntagrus(Path("data/syntagrus/"))
ft_vectors = get_ft_vec()
tok2vec = build_tok2vec(embed_size=2000, vectors={"word_vectors": ft_vectors})
if __name__ == "__main__":
main()
| 27.646465 | 111 | 0.700037 |
4275177baedf41f1ab31ef0704dfda58eb058f5e | 1,512 | py | Python | tests/test_peephole_optimizations.py | capuanob/angr | 4e5bb119965cb282f5bcb3dea5b598e88097f715 | [
"BSD-2-Clause"
] | null | null | null | tests/test_peephole_optimizations.py | capuanob/angr | 4e5bb119965cb282f5bcb3dea5b598e88097f715 | [
"BSD-2-Clause"
] | null | null | null | tests/test_peephole_optimizations.py | capuanob/angr | 4e5bb119965cb282f5bcb3dea5b598e88097f715 | [
"BSD-2-Clause"
] | null | null | null | # pylint:disable=missing-class-docstring,no-self-use
import os
import unittest
import archinfo
import ailment
import angr
from angr.analyses.decompiler.peephole_optimizations import ConstantDereferences
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
if __name__ == "__main__":
unittest.main()
| 38.769231 | 119 | 0.683201 |
42755fd81a38eefae8f526ffb8db205e1141d33b | 604 | py | Python | PySpace/mysql/mysql_insertdata.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | PySpace/mysql/mysql_insertdata.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | PySpace/mysql/mysql_insertdata.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# mysql_createtable.py
import pymysql
#
db = pymysql.connect('localhost','root','1234','fdtest')
# cursor()cursor
cursor = db.cursor()
# SQL
sql = """INSERT INTO EMPLOYEE(
FIRST_NAME,LAST_NAME,AGE,SEX,INCOME)
VALUES('Mac2','Mohan2',20,'M',6000)"""
"""
sql = "INSERT INTO EMPLOYEE(FIRST_NAME, \
LAST_NAME, AGE, SEX, INCOME) \
VALUES ('%s', '%s', '%d', '%c', '%d' )" % \
('Mac', 'Mohan', 20, 'M', 2000)
"""
try:
# sql
cursor.execute(sql)
#
db.commit()
except:
#
db.rollback()
#
db.close()
| 16.777778 | 56 | 0.61755 |
42787867fa3db8b6721924f36a9b0de8973ee0ae | 1,572 | py | Python | appleseed_python.py | AllegorithmicSAS/sat-scons | 3eea609385fb9bdd93562097d302a3707c3c6ebe | [
"BSD-3-Clause"
] | 25 | 2018-09-17T00:10:30.000Z | 2021-11-07T06:41:35.000Z | appleseed_python.py | AllegorithmicSAS/sat-scons | 3eea609385fb9bdd93562097d302a3707c3c6ebe | [
"BSD-3-Clause"
] | null | null | null | appleseed_python.py | AllegorithmicSAS/sat-scons | 3eea609385fb9bdd93562097d302a3707c3c6ebe | [
"BSD-3-Clause"
] | 3 | 2018-08-28T15:01:43.000Z | 2021-05-04T16:54:51.000Z | import os
import subprocess
import threading
mutex = threading.Lock()
| 40.307692 | 125 | 0.688295 |
42793637f0ad1d6b8bdb63c8ad74420df516a382 | 1,327 | py | Python | conjureup/ui/views/credentials.py | iMichka/conjure-up | 8e4599e6f58b52163384150d8d71e7802462d126 | [
"MIT"
] | 1 | 2019-06-26T23:39:13.000Z | 2019-06-26T23:39:13.000Z | conjureup/ui/views/credentials.py | iMichka/conjure-up | 8e4599e6f58b52163384150d8d71e7802462d126 | [
"MIT"
] | null | null | null | conjureup/ui/views/credentials.py | iMichka/conjure-up | 8e4599e6f58b52163384150d8d71e7802462d126 | [
"MIT"
] | 1 | 2020-10-05T14:42:31.000Z | 2020-10-05T14:42:31.000Z | from ubuntui.utils import Padding
from ubuntui.widgets.hr import HR
from conjureup.app_config import app
from conjureup.ui.views.base import BaseView, SchemaFormView
from conjureup.ui.widgets.selectors import MenuSelectButtonList
| 33.175 | 73 | 0.694047 |
427ab04e73a73ae528a76eac0fdda4742addfcf9 | 355 | py | Python | sleekxmpp/plugins/__init__.py | aristanetworks/SleekXMPP | 91f53bf1964a564f6f12477a31884e9ec38cef75 | [
"MIT"
] | null | null | null | sleekxmpp/plugins/__init__.py | aristanetworks/SleekXMPP | 91f53bf1964a564f6f12477a31884e9ec38cef75 | [
"MIT"
] | 1 | 2020-04-10T22:09:06.000Z | 2020-04-10T22:09:06.000Z | sleekxmpp/plugins/__init__.py | aristanetworks/SleekXMPP | 91f53bf1964a564f6f12477a31884e9ec38cef75 | [
"MIT"
] | 1 | 2019-12-05T12:10:16.000Z | 2019-12-05T12:10:16.000Z | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
__all__ = ['xep_0004', 'xep_0012', 'xep_0030', 'xep_0033', 'xep_0045',
'xep_0050', 'xep_0085', 'xep_0092', 'xep_0199', 'gmail_notify',
'xep_0060', 'xep_0202']
| 32.272727 | 74 | 0.63662 |
427af261bc83da6fc8ac5c1ea1e2a2473e51e220 | 5,276 | py | Python | main.py | pwillworth/dfkreport | ae10226430a3a74ac3c07ae888cab14dde778db8 | [
"Apache-2.0"
] | 11 | 2022-01-18T17:36:12.000Z | 2022-03-21T21:09:17.000Z | main.py | pwillworth/dfkreport | ae10226430a3a74ac3c07ae888cab14dde778db8 | [
"Apache-2.0"
] | null | null | null | main.py | pwillworth/dfkreport | ae10226430a3a74ac3c07ae888cab14dde778db8 | [
"Apache-2.0"
] | 4 | 2022-01-18T18:37:48.000Z | 2022-01-22T02:14:48.000Z | #!/usr/bin/env python3
import transactions
import taxmap
import db
import settings
import datetime
import argparse
import uuid
import pickle
import jsonpickle
import logging
import logging.handlers
import traceback
if __name__ == "__main__":
main()
| 47.531532 | 224 | 0.666035 |
427b0f2bb086452498a9bfd3a4dc95c14c7879d6 | 127 | py | Python | src/tarski/fstrips/contingent/__init__.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 29 | 2018-11-26T20:31:04.000Z | 2021-12-29T11:08:40.000Z | src/tarski/fstrips/contingent/__init__.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 101 | 2018-06-07T13:10:01.000Z | 2022-03-11T11:54:00.000Z | src/tarski/fstrips/contingent/__init__.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 18 | 2018-11-01T22:44:39.000Z | 2022-02-28T04:57:15.000Z | from .problem import ContingentProblem as Problem
from .. action import Action
from .sensor import Sensor
from . import errors
| 25.4 | 49 | 0.811024 |
427bd9dc45f6695e499240fef94ddec3e4b3fe88 | 80 | py | Python | p40-49/p48.py | kbrose/project_euler | f582ef1887f44628997e05d88253adad0822d6b9 | [
"Unlicense"
] | 1 | 2015-10-11T15:53:00.000Z | 2015-10-11T15:53:00.000Z | p40-49/p48.py | kbrose/project_euler | f582ef1887f44628997e05d88253adad0822d6b9 | [
"Unlicense"
] | null | null | null | p40-49/p48.py | kbrose/project_euler | f582ef1887f44628997e05d88253adad0822d6b9 | [
"Unlicense"
] | null | null | null | sum = 0
for i in xrange(1,1001):
sum = sum + i**i
print sum % 10000000000
| 11.428571 | 24 | 0.6 |
427dedadfbbcbe3c95d00fdafba41ac3a4018d6f | 2,121 | py | Python | property_proteome/length/run.py | rrazban/proteomevis_scripts | 2b6309a78287ffab4ee745383c21b9f474b93b60 | [
"MIT"
] | 1 | 2020-11-11T06:14:10.000Z | 2020-11-11T06:14:10.000Z | property_proteome/length/run.py | rrazban/proteomevis_scripts | 2b6309a78287ffab4ee745383c21b9f474b93b60 | [
"MIT"
] | null | null | null | property_proteome/length/run.py | rrazban/proteomevis_scripts | 2b6309a78287ffab4ee745383c21b9f474b93b60 | [
"MIT"
] | 1 | 2019-05-28T19:13:24.000Z | 2019-05-28T19:13:24.000Z | #!/usr/bin/python
help_msg = 'get uniprot length of entire proteome'
import os, sys
CWD = os.getcwd()
UTLTS_DIR = CWD[:CWD.index('proteomevis_scripts')]+'/proteomevis_scripts/utlts'
sys.path.append(UTLTS_DIR)
from parse_user_input import help_message
from read_in_file import read_in
from parse_data import organism
from uniprot_api import UniProtAPI
from output import writeout
if __name__ == "__main__":
args = help_message(help_msg, bool_add_verbose = True)
d_ref = read_in('Entry', 'Gene names (ordered locus )', filename = 'proteome')
uniprot_length = UniProtLength(args.verbose, d_ref)
d_output = uniprot_length.run()
if organism!='protherm':
d_output = {d_ref[uniprot]: res for uniprot, res in d_output.iteritems()}
xlabel = 'oln'
else: #not supported for ProTherm
xlabel = 'uniprot'
writeout([xlabel, 'length'], d_output, filename = 'UniProt')
| 29.054795 | 87 | 0.705799 |
427e1e9e41044ab46aedd645fb3078c3369fa522 | 2,086 | py | Python | machine_learning/torch_time_series_forecasting/src/data/dataset.py | iimuz/til | b100438e8ce2f369331b3be215a4b9cdce9ffda5 | [
"MIT"
] | 4 | 2020-07-25T01:20:08.000Z | 2020-10-03T12:58:15.000Z | machine_learning/torch_time_series_forecasting/src/data/dataset.py | iimuz/til | b100438e8ce2f369331b3be215a4b9cdce9ffda5 | [
"MIT"
] | 29 | 2019-09-30T08:04:14.000Z | 2022-03-12T13:51:08.000Z | machine_learning/torch_time_series_forecasting/src/data/dataset.py | iimuz/til | b100438e8ce2f369331b3be215a4b9cdce9ffda5 | [
"MIT"
] | 1 | 2020-08-14T05:15:51.000Z | 2020-08-14T05:15:51.000Z | """."""
# default packages
import logging
import pathlib
import traceback
import urllib.request as request
# third party
import pandas as pd
import tqdm as tqdm_std
# my packages
import src.data.directory as directory
# logger
logger = logging.getLogger(__name__)
def _main() -> None:
"""."""
logging.basicConfig(level=logging.INFO)
filepath = get_raw_filepath()
if filepath.exists() is False:
url = get_raw_url()
filepath.parent.mkdir(exist_ok=True, parents=True)
with TqdmUpTo(
unit="B", unit_scale=True, miniters=1, desc=filepath.name
) as pbar:
request.urlretrieve(
url, filename=filepath, reporthook=pbar.update_to, data=None
)
else:
logger.info(f"data already exists: {filepath}")
# show dataset description.
df = pd.read_csv(filepath)
logger.info(df.info())
logger.info(df.head())
logger.info(df.tail())
if __name__ == "__main__":
try:
_main()
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
| 25.13253 | 85 | 0.622244 |
427f14f88f6ffd13fff4e9351ec1d15fe8db0b86 | 3,320 | py | Python | app.py | aracnid/i-xero | 2fb2f093a8a92e0ba2f4cdbe440e962a38c09f7e | [
"MIT"
] | null | null | null | app.py | aracnid/i-xero | 2fb2f093a8a92e0ba2f4cdbe440e962a38c09f7e | [
"MIT"
] | null | null | null | app.py | aracnid/i-xero | 2fb2f093a8a92e0ba2f4cdbe440e962a38c09f7e | [
"MIT"
] | null | null | null | """Primary application.
"""
import json
import logging
import logging.config
import os
import sys
from flask import url_for, render_template, redirect, request
from i_xero import Xero2
from i_xero.i_flask import FlaskInterface
from utils import jsonify, serialize_model
# initialize logging
# The SlackBot app doesn't handle logging in the same way.
# I tried to pass in a logger object from aracnid_logger,
# but it seems to disable all loggers
logging_filename = os.environ.get('LOGGING_CONFIG_FILE')
command_dir = os.path.dirname(sys.argv[0])
logging_dir = os.path.join(os.getcwd(), command_dir)
logging_path = os.path.join(os.getcwd(), logging_filename)
with open(logging_path, 'rt') as file:
logging_config = json.load(file)
formatter = os.environ.get('LOGGING_FORMATTER')
logging_config['handlers']['console']['formatter'] = formatter
logging.config.dictConfig(logging_config)
env_str = os.environ.get('LOG_UNHANDLED_EXCEPTIONS')
LOG_UNHANDLED_EXCEPTIONS = env_str.lower() in ('true', 'yes') if env_str else False
# configure flask application
flask_app = FlaskInterface(__name__).get_app()
# configure xero application
xero_app = Xero2(flask_app)
# start the app locally
if __name__ == '__main__':
flask_app.run(host='localhost', port=5000)
| 28.376068 | 83 | 0.71506 |
427fcbdb91cef4c0c0751c48d3eb5d865ef45367 | 8,023 | py | Python | ui/Ui_main.py | realm520/aimless | 772e87f5b5a00eeac88be948e424310128fcec1a | [
"MIT"
] | null | null | null | ui/Ui_main.py | realm520/aimless | 772e87f5b5a00eeac88be948e424310128fcec1a | [
"MIT"
] | null | null | null | ui/Ui_main.py | realm520/aimless | 772e87f5b5a00eeac88be948e424310128fcec1a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'F:\work\code\pyqt5\ui\main.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 52.782895 | 108 | 0.720429 |
4283b88a83b93254b8e97d4642f0ca0d5d69279d | 68 | py | Python | examples/02_pybind/01_basic/example.py | BlockResearchGroup/WS_interoperability | 604ab29c242b30b2ee9125a589afe69010ba1844 | [
"MIT"
] | 1 | 2019-07-26T22:25:25.000Z | 2019-07-26T22:25:25.000Z | examples/02_pybind/01_basic/example.py | BlockResearchGroup/WS_interoperability | 604ab29c242b30b2ee9125a589afe69010ba1844 | [
"MIT"
] | 5 | 2019-04-14T21:07:03.000Z | 2019-05-27T21:46:37.000Z | examples/02_pybind/01_basic/example.py | BlockResearchGroup/WS_interoperability | 604ab29c242b30b2ee9125a589afe69010ba1844 | [
"MIT"
] | null | null | null | # example.py
import basic
result = basic.add(1, 5)
print(result)
| 8.5 | 24 | 0.691176 |
4284396ea1fd88ed33820f0870333abd8149c2de | 2,213 | py | Python | cocotbext/spi/devices/TI/DRV8304.py | eoshea/cocotbext-spi | 9b610ca27945e22e168da5774cab8051304ea90f | [
"MIT"
] | 2 | 2021-08-13T20:10:41.000Z | 2022-03-09T19:24:24.000Z | cocotbext/spi/devices/TI/DRV8304.py | eoshea/cocotbext-spi | 9b610ca27945e22e168da5774cab8051304ea90f | [
"MIT"
] | 3 | 2021-08-23T15:34:00.000Z | 2022-01-18T19:27:26.000Z | cocotbext/spi/devices/TI/DRV8304.py | eoshea/cocotbext-spi | 9b610ca27945e22e168da5774cab8051304ea90f | [
"MIT"
] | 2 | 2021-11-12T12:47:45.000Z | 2021-11-18T10:35:43.000Z |
import cocotb
from cocotb.triggers import FallingEdge, RisingEdge, First, Timer, Event
from ... import SpiSlaveBase, SpiConfig, SpiFrameError, SpiFrameTimeout
| 30.315068 | 89 | 0.589245 |
4289d7f6e86034585cd9c9cf37666cc58aab806e | 540 | py | Python | manage.py | Kenneth-joseph/Blogs | b6c508d36cdf2f874c233485003021d10567de7b | [
"Unlicense"
] | null | null | null | manage.py | Kenneth-joseph/Blogs | b6c508d36cdf2f874c233485003021d10567de7b | [
"Unlicense"
] | null | null | null | manage.py | Kenneth-joseph/Blogs | b6c508d36cdf2f874c233485003021d10567de7b | [
"Unlicense"
] | 1 | 2021-11-17T11:03:08.000Z | 2021-11-17T11:03:08.000Z | from app import create_app,db
from flask_script import Manager,Server
from app.models import User,Comment,Blog
from flask_migrate import Migrate, MigrateCommand
#manage.shell
# Creating app instance
app = create_app('production')
migrate = Migrate(app,db)
manager = Manager(app)
manager.add_command('db',MigrateCommand)
manager.add_command('server',Server)
if __name__== '__main__':
manager.run()
db.create_all() | 24.545455 | 72 | 0.766667 |
428a08abf8ca4b32d91aa59e5ac79f8b3eb02d8f | 901 | py | Python | src/apps/core/migrations/0005_auto_20180417_1219.py | zhiyuli/HydroLearn | b2c2b44e49d37391149d0896ce5124e882f22ee3 | [
"BSD-3-Clause"
] | null | null | null | src/apps/core/migrations/0005_auto_20180417_1219.py | zhiyuli/HydroLearn | b2c2b44e49d37391149d0896ce5124e882f22ee3 | [
"BSD-3-Clause"
] | null | null | null | src/apps/core/migrations/0005_auto_20180417_1219.py | zhiyuli/HydroLearn | b2c2b44e49d37391149d0896ce5124e882f22ee3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-17 17:19
from __future__ import unicode_literals
from django.db import migrations
import django_extensions.db.fields
| 33.37037 | 266 | 0.657048 |
428b1f1d92a691f7e032bddbf0f11e16a416cdf3 | 186 | py | Python | syncgateway/__init__.py | ecordell/syncgateway-admin-client | 78a8d45ff290b42b5c771b901fb92edcde126ff4 | [
"MIT"
] | null | null | null | syncgateway/__init__.py | ecordell/syncgateway-admin-client | 78a8d45ff290b42b5c771b901fb92edcde126ff4 | [
"MIT"
] | 1 | 2015-12-10T20:42:12.000Z | 2015-12-10T20:42:12.000Z | syncgateway/__init__.py | ecordell/syncgateway-admin-client | 78a8d45ff290b42b5c771b901fb92edcde126ff4 | [
"MIT"
] | null | null | null | __author__ = 'Evan Cordell'
__copyright__ = 'Copyright 2012-2015 Localmed, Inc.'
__version__ = "0.1.6"
__version_info__ = tuple(__version__.split('.'))
__short_version__ = __version__
| 23.25 | 52 | 0.758065 |
428b51da1bd2717103e2c7bb03266fb5b3a3af22 | 7,452 | py | Python | members/views.py | leonrenkema/makerspaceleiden-crm | 36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c | [
"Apache-2.0"
] | 5 | 2019-03-12T21:38:32.000Z | 2021-11-06T15:26:56.000Z | members/views.py | leonrenkema/makerspaceleiden-crm | 36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c | [
"Apache-2.0"
] | 33 | 2019-01-21T15:54:50.000Z | 2021-05-18T17:54:52.000Z | members/views.py | leonrenkema/makerspaceleiden-crm | 36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c | [
"Apache-2.0"
] | 5 | 2019-01-21T15:47:26.000Z | 2021-09-22T07:14:34.000Z | from django.shortcuts import render, redirect
from django.contrib.auth.forms import PasswordResetForm
from django.core.mail import EmailMessage
from django.template import loader
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.db.utils import IntegrityError
from django.urls import reverse
from django.template.loader import render_to_string, get_template
from .forms import NewUserForm, NewAuditRecordForm
from acl.models import Entitlement, PermitType
from members.models import Tag, User, clean_tag_string, AuditRecord
from mailinglists.models import Mailinglist, Subscription
import logging
import datetime
import sys
import re
logger = logging.getLogger(__name__)
def drop(request):
if not request.user.can_escalate_to_priveleged:
return HttpResponse("XS denied", status=403, content_type="text/plain")
record = AuditRecord(
user=request.user, final=True, action="Drop privs from webinterface"
)
if request.user.is_privileged:
record.changereason = f"DROP in webinterface by {request.user}"
else:
record.changereason = f"DROP in webinterface by {request.user} - but actual permission had already timed out."
record.save()
return redirect(request.META["HTTP_REFERER"])
| 35.485714 | 118 | 0.554616 |
428b5eca1188b78557324447b1ddda687b1af59c | 3,911 | py | Python | test_scripts/ns_instance/duan/service/vfc/nfvo/lcm/lcm/ns/views/deprecated/create_ns_view.py | lremember/VFC | 837559db1396091811382359100bfc60e1aab5b2 | [
"MIT"
] | 1 | 2019-10-10T00:52:18.000Z | 2019-10-10T00:52:18.000Z | test_scripts/ns_instance/duan/service/vfc/nfvo/lcm/lcm/ns/views/deprecated/create_ns_view.py | lremember/VFC-Files | 837559db1396091811382359100bfc60e1aab5b2 | [
"MIT"
] | null | null | null | test_scripts/ns_instance/duan/service/vfc/nfvo/lcm/lcm/ns/views/deprecated/create_ns_view.py | lremember/VFC-Files | 837559db1396091811382359100bfc60e1aab5b2 | [
"MIT"
] | null | null | null | # Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from lcm.ns.biz.ns_create import CreateNSService
from lcm.ns.biz.ns_get import GetNSInfoService
from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsReqSerializer
from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsRespSerializer
from lcm.ns.serializers.deprecated.ns_serializers import _QueryNsRespSerializer
from lcm.pub.exceptions import NSLCMException
from lcm.pub.exceptions import BadRequestException
from lcm.pub.utils.values import ignore_case_get
from .common import view_safe_call_with_log
logger = logging.getLogger(__name__)
| 41.168421 | 92 | 0.692662 |
428be7b7fc4fa9ed70e9c54b4441f37388d4cbd4 | 3,304 | py | Python | parse_training_input.py | alexpotter1/vulndetect-ml | 338fbf919b24520f9107a1604d1c8af48aadff76 | [
"MIT"
] | 1 | 2020-02-25T01:53:23.000Z | 2020-02-25T01:53:23.000Z | parse_training_input.py | alexpotter1/vulndetect-ml | 338fbf919b24520f9107a1604d1c8af48aadff76 | [
"MIT"
] | null | null | null | parse_training_input.py | alexpotter1/vulndetect-ml | 338fbf919b24520f9107a1604d1c8af48aadff76 | [
"MIT"
] | 1 | 2020-10-24T15:30:38.000Z | 2020-10-24T15:30:38.000Z | #!/usr/bin/env python3
import javalang
| 33.714286 | 121 | 0.59776 |
428caa0f2af4107e3b019feaf07304cc2bf7796d | 17,226 | py | Python | src/mist/api/rules/models/main.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | 6 | 2017-08-24T00:34:30.000Z | 2022-01-16T21:29:22.000Z | src/mist/api/rules/models/main.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | 9 | 2021-03-31T18:50:47.000Z | 2022-01-09T23:20:02.000Z | src/mist/api/rules/models/main.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | 13 | 2017-09-21T18:17:02.000Z | 2022-02-21T04:29:25.000Z | import uuid
import mongoengine as me
from mist.api import config
from mist.api.exceptions import BadRequestError
from mist.api.users.models import Organization
from mist.api.selectors.models import SelectorClassMixin
from mist.api.rules.base import NoDataRuleController
from mist.api.rules.base import ResourceRuleController
from mist.api.rules.base import ArbitraryRuleController
from mist.api.rules.models import RuleState
from mist.api.rules.models import Window
from mist.api.rules.models import Frequency
from mist.api.rules.models import TriggerOffset
from mist.api.rules.models import QueryCondition
from mist.api.rules.models import BaseAlertAction
from mist.api.rules.models import NotificationAction
from mist.api.rules.plugins import GraphiteNoDataPlugin
from mist.api.rules.plugins import GraphiteBackendPlugin
from mist.api.rules.plugins import InfluxDBNoDataPlugin
from mist.api.rules.plugins import InfluxDBBackendPlugin
from mist.api.rules.plugins import ElasticSearchBackendPlugin
from mist.api.rules.plugins import FoundationDBNoDataPlugin
from mist.api.rules.plugins import FoundationDBBackendPlugin
from mist.api.rules.plugins import VictoriaMetricsNoDataPlugin
from mist.api.rules.plugins import VictoriaMetricsBackendPlugin
class ArbitraryRule(Rule):
"""A rule defined by a single, arbitrary query string.
Arbitrary rules permit the definition of complex query expressions by
allowing users to define fully qualified queries in "raw mode" as a
single string. In such case, a query expression may be a composite query
that includes nested aggregations and/or additional queries.
An `ArbitraryRule` must define a single `QueryCondition`, whose `target`
defines the entire query expression as a single string.
"""
_controller_cls = ArbitraryRuleController
class ResourceRule(Rule, SelectorClassMixin):
"""A rule bound to a specific resource type.
Resource-bound rules are less elastic than arbitrary rules, but allow
users to perform quick, more dynamic filtering given a resource object's
UUID, tags, or model fields.
Every subclass of `ResourceRule` MUST define its `selector_resource_cls`
class attribute in order for queries to be executed against the intended
mongodb collection.
A `ResourceRule` may also apply to multiple resources, which depends on
the rule's list of `selectors`. By default such a rule will trigger an
alert if just one of its queries evaluates to True.
"""
_controller_cls = ResourceRuleController
# FIXME All following properties are for backwards compatibility.
def _populate_rules():
    """Build the mapping from public rule type to concrete Rule subclass.

    Scans this module's globals for classes whose name ends in 'Rule',
    skipping the abstract bases listed in `hidden_bases`.  Each remaining
    subclass is keyed by the string '<kind>-<data type>', where <kind> is
    'resource' for ResourceRule subclasses and 'arbitrary' otherwise, and
    <data type> is the class's `_data_type_str` attribute (the type of the
    requested data, e.g. logs or monitoring metrics).
    """
    hidden_bases = (ArbitraryRule, ResourceRule, NoDataRule, )
    rule_map = {}
    for name, candidate in list(globals().items()):
        # Only consider public, concrete rule classes.
        if not name.endswith('Rule') or candidate in hidden_bases:
            continue
        if not issubclass(candidate, (ArbitraryRule, ResourceRule, )):
            continue
        kind = 'resource' if issubclass(candidate, ResourceRule) else 'arbitrary'
        rule_map['%s-%s' % (kind, candidate._data_type_str)] = candidate
    return rule_map
RULES = _populate_rules()
| 35.155102 | 79 | 0.673981 |
428d613a4c439197af5e225dec64ebdd98da7d00 | 1,685 | py | Python | setup.py | andrewwhitehead/django-oidc-rp | 233f1daeef96dbe84ecbb37fa31393c84f9c2805 | [
"MIT"
] | 20 | 2018-04-16T13:17:35.000Z | 2021-06-05T00:08:33.000Z | setup.py | andrewwhitehead/django-oidc-rp | 233f1daeef96dbe84ecbb37fa31393c84f9c2805 | [
"MIT"
] | 9 | 2018-07-20T18:19:13.000Z | 2021-12-22T08:57:18.000Z | setup.py | bcgov/django-oidc-rp | 50e6fa143e61b04849b4c66beef078be0d7669de | [
"MIT"
] | 21 | 2018-07-10T16:05:44.000Z | 2022-01-24T05:57:09.000Z | # -*- coding: utf-8 -*-
import codecs
from os.path import abspath
from os.path import dirname
from os.path import join
from setuptools import find_packages
from setuptools import setup
import oidc_rp
def read_relative_file(filename):
    """Return the contents of the given file, whose path is supposed relative
    to this module.

    Parameters
    ----------
    filename : str
        Path relative to the directory containing this module.

    Returns
    -------
    str
        The file contents decoded as UTF-8.
    """
    # Built-in open() with an encoding argument supersedes codecs.open();
    # this package only supports Python 3 (see the classifiers below).
    here = dirname(abspath(__file__))
    with open(join(here, filename), encoding='utf-8') as f:
        return f.read()
# Package metadata and build configuration consumed by setuptools.
# The long description is read from README.rst so PyPI renders the same
# content as the repository front page.
setup(
    name='django-oidc-rp',
    version=oidc_rp.__version__,
    author='impak Finance',
    author_email='tech@impakfinance.com',
    packages=find_packages(exclude=['tests.*', 'tests']),
    include_package_data=True,
    url='https://github.com/impak-finance/django-oidc-rp',
    license='MIT',
    description='A server side OpenID Connect Relying Party (RP/Client) implementation for Django.',
    long_description=read_relative_file('README.rst'),
    keywords='django openidconnect oidc client rp authentication auth',
    zip_safe=False,
    install_requires=[
        'django>=1.11',
        'jsonfield2',
        'pyjwkest>=1.4',
        'requests>2.0',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| 31.792453 | 100 | 0.648071 |
428e0c3390f490eb7e09d675c22baad9bedb5ba6 | 171 | py | Python | nndet/evaluator/detection/__init__.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | [
"BSD-3-Clause"
] | 242 | 2021-05-17T12:31:39.000Z | 2022-03-31T11:51:29.000Z | nndet/evaluator/detection/__init__.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | [
"BSD-3-Clause"
] | 59 | 2021-06-02T07:32:10.000Z | 2022-03-31T18:45:52.000Z | nndet/evaluator/detection/__init__.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | [
"BSD-3-Clause"
] | 38 | 2021-05-31T14:01:37.000Z | 2022-03-21T08:24:40.000Z | from nndet.evaluator.detection.froc import FROCMetric
from nndet.evaluator.detection.coco import COCOMetric
from nndet.evaluator.detection.hist import PredictionHistogram
| 42.75 | 62 | 0.877193 |
428e40b791a018156767a64f9f6283399ebd2b1c | 289 | py | Python | tests/test_update.py | sosie-js/ankisync2 | a41580197eab7f180f02a38a4aa912eb54cfaa93 | [
"MIT"
] | 39 | 2020-02-12T23:41:24.000Z | 2022-02-28T15:46:23.000Z | tests/test_update.py | sosie-js/ankisync2 | a41580197eab7f180f02a38a4aa912eb54cfaa93 | [
"MIT"
] | 9 | 2019-08-02T18:25:07.000Z | 2022-02-07T23:14:43.000Z | tests/test_update.py | sosie-js/ankisync2 | a41580197eab7f180f02a38a4aa912eb54cfaa93 | [
"MIT"
] | 6 | 2019-09-09T14:27:48.000Z | 2021-08-31T08:13:00.000Z | # from ankisync2.apkg import Apkg, db
# Has to be done through normal database methods
# def test_update():
# apkg = Apkg("example1.apkg")
# for n in db.Notes.filter(db.Notes.data["field1"] == "data1"):
# n.data["field3"] = "data2"
# n.save()
# apkg.close()
| 24.083333 | 67 | 0.598616 |
428eac96b1905cf94fc5b1f167e60c8c46762f48 | 16,931 | py | Python | lib/place_model.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | [
"MIT"
] | 3 | 2021-04-28T07:19:39.000Z | 2022-03-07T09:34:19.000Z | lib/place_model.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | [
"MIT"
] | 18 | 2020-08-24T12:40:38.000Z | 2022-03-12T00:47:14.000Z | lib/place_model.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | [
"MIT"
] | 1 | 2020-10-15T10:09:20.000Z | 2020-10-15T10:09:20.000Z | import torch
import torch.nn as nn
from torchvision.datasets.vision import VisionDataset
from PIL import Image
import os, sys, math
import os.path
import torch
import json
import torch.utils.model_zoo as model_zoo
from Yolo_v2_pytorch.src.utils import *
from Yolo_v2_pytorch.src.yolo_net import Yolo
from Yolo_v2_pytorch.src.yolo_tunning import YoloD
import numpy as np
import torch.nn.functional as F
from Yolo_v2_pytorch.src.rois_utils import anchorboxes
from Yolo_v2_pytorch.src.anotherMissOh_dataset import FaceCLS
from lib.person_model import person_model
# Mapping from place-name annotation to class index (22 place classes).
# NOTE(review): the empty-string key '' is aliased to index 9 (the same
# index as 'none'), presumably to absorb unlabeled frames -- confirm
# against the dataset annotations.
label_dict = {'' : 9, 'beach':0, 'cafe':1, 'car':2, 'convenience store':3, 'garden':4, 'home':5, 'hospital':6, 'kitchen':7,
              'livingroom':8, 'none':9, 'office':10, 'park':11, 'playground':12, 'pub':13, 'restaurant':14, 'riverside':15, 'road':16,
              'rooftop':17, 'room':18, 'studio':19, 'toilet':20, 'wedding hall':21
}
# NOTE(review): despite the "_wo_none" suffix this dict still contains the
# 'none':9 entry; only the '' alias is dropped -- verify the intent.
label_dict_wo_none = {'beach':0, 'cafe':1, 'car':2, 'convenience store':3, 'garden':4, 'home':5, 'hospital':6, 'kitchen':7,
              'livingroom':8, 'none':9, 'office':10, 'park':11, 'playground':12, 'pub':13, 'restaurant':14, 'riverside':15, 'road':16,
              'rooftop':17, 'room':18, 'studio':19, 'toilet':20, 'wedding hall':21
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build an unbiased 3x3 convolution whose padding of 1 preserves the
    spatial size (for stride 1)."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values; one accuracy (in percent) is returned per k.

    Returns:
        list of 1-element tensors holding the top-k accuracies in percent,
        in the same order as ``topk``.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # (batch, maxk) indices of the top predictions, transposed to
        # (maxk, batch) so row k-1 holds each sample's k-th best guess.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUG FIX: use reshape(-1) instead of view(-1) -- the row slice
            # correct[:k] is non-contiguous for k > 1, and view() raises a
            # RuntimeError on non-contiguous tensors in modern PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# Default per-class sample counts for the 22 place classes (indices follow
# label_dict above).  NOTE(review): two entries are 0 (indices 17 and 21),
# which makes the effective number below 0 and the resulting class weight
# infinite -- confirm those classes never appear in the training labels.
sample_default = [105, 462, 953, 144, 108, 13, 123, 510, 1690, 19914, 1541, 126, 67, 592, 1010, 53, 2087, 0, 1547, 576, 74, 0]
def CB_loss(labels, logits, beta=0.99, gamma=0.5, samples_per_cls=sample_default, no_of_classes=22, loss_type='softmax'):
    """Compute the Class Balanced Loss between `logits` and the ground truth `labels`.
    Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
    where Loss is one of the standard losses used for Neural Networks.
    Args:
      labels: A int tensor of size [batch].
      logits: A float tensor of size [batch, no_of_classes].
      samples_per_cls: A python list of size [no_of_classes].
      no_of_classes: total number of classes. int
      loss_type: string. One of "sigmoid", "focal", "softmax".
      beta: float. Hyperparameter for Class balanced loss.
      gamma: float. Hyperparameter for Focal loss.
    Returns:
      cb_loss: A float tensor representing class balanced loss
    Raises:
      ValueError: if `loss_type` is not one of the supported names.
    """
    # Class-balancing weights w_c = (1-beta)/(1-beta^n_c), normalized so
    # they sum to no_of_classes.
    effective_num = 1.0 - np.power(beta, samples_per_cls)
    weights = (1.0 - beta) / np.array(effective_num)
    weights = weights / np.sum(weights) * no_of_classes
    # Keep every tensor on the device of `logits` instead of hard-coding
    # .cuda() as before, so the loss also works on CPU-only setups.
    device = logits.device
    labels_one_hot = F.one_hot(labels, no_of_classes).float().to(device)
    # Per-sample weight: pick the weight of the sample's ground-truth class,
    # then broadcast it across all classes for the element-wise BCE below.
    weights = torch.tensor(weights, dtype=torch.float, device=device)
    weights = weights.unsqueeze(0)
    weights = weights.repeat(labels_one_hot.shape[0], 1) * labels_one_hot
    weights = weights.sum(1)
    weights = weights.unsqueeze(1)
    weights = weights.repeat(1, no_of_classes)
    if loss_type == "focal":
        cb_loss = focal_loss(labels_one_hot, logits, weights, gamma)
    elif loss_type == "sigmoid":
        # BUG FIX: F.binary_cross_entropy_with_logits takes `weight`, not
        # `weights`; the old keyword raised a TypeError at call time.
        cb_loss = F.binary_cross_entropy_with_logits(
            input=logits, target=labels_one_hot, weight=weights)
    elif loss_type == "softmax":
        pred = logits.softmax(dim=1)
        cb_loss = F.binary_cross_entropy(
            input=pred, target=labels_one_hot, weight=weights)
    else:
        # Previously an unknown loss_type fell through and raised
        # UnboundLocalError on the return line; fail loudly instead.
        raise ValueError("Unknown loss_type: {}".format(loss_type))
    return cb_loss
def focal_loss(labels, logits, alpha, gamma):
    """Compute the focal loss between `logits` and the ground truth `labels`.
    Focal loss = -alpha_t * (1-pt)^gamma * log(pt), where pt is the model's
    probability for the true class: pt = p if the label is 1, else 1 - p,
    with p = sigmoid(logit).
    Args:
      labels: A float tensor of size [batch, num_classes] (one-hot).
      logits: A float tensor of size [batch, num_classes].
      alpha: A float tensor of size [batch_size] specifying per-example
        weights for balanced cross entropy.
      gamma: A float scalar modulating loss from hard and easy examples.
    Returns:
      A float32 scalar: the total weighted loss normalized by the number
      of positive labels.
    """
    per_elem_bce = F.binary_cross_entropy_with_logits(
        input=logits, target=labels, reduction="none")
    if gamma == 0.0:
        # No focusing: plain (alpha-weighted) BCE.
        modulator = 1.0
    else:
        # exp(-gamma*y*z - gamma*log(1+exp(-z))) is a numerically stable
        # form of the focal modulator (1 - p_t)^gamma.
        softplus_neg = torch.log(1 + torch.exp(-1.0 * logits))
        modulator = torch.exp(-gamma * (labels * logits + softplus_neg))
    total = torch.sum(alpha * modulator * per_elem_bce)
    return total / torch.sum(labels)
| 36.647186 | 126 | 0.631268 |
428f4631d1d991fd823deb6aae84c7555b191363 | 9,127 | py | Python | ch01/challenge.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
] | null | null | null | ch01/challenge.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
] | null | null | null | ch01/challenge.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
] | null | null | null | """
Challenge Exercises for Chapter 1.
"""
import random
import timeit
from algs.table import DataTable, ExerciseNum, caption
from algs.counting import RecordedItem
def partition(A, lo, hi, idx):
    """
    Partition using A[idx] as value. Note lo and hi are INCLUSIVE on both
    ends and idx must be valid index. Count the number of comparisons
    by populating A with RecordedItem instances.
    Hoare-style partition: returns the final index of the pivot value;
    everything left of it is <= pivot and everything right is >= pivot.
    """
    # A single-element range is already partitioned.
    if lo == hi:
        return lo
    A[idx],A[lo] = A[lo],A[idx]    # swap pivot into the first position
    i = lo
    j = hi + 1
    while True:
        # Scan right for the first value greater than the pivot A[lo],
        # stopping at hi (emulates a do-while loop).
        while True:
            i += 1
            if i == hi: break
            if A[lo] < A[i]: break
        # Scan left for the first value smaller than the pivot, stopping at lo.
        while True:
            j -= 1
            if j == lo: break
            if A[j] < A[lo]: break
        # doesn't count as comparing two values
        if i >= j: break
        A[i],A[j] = A[j],A[i]    # exchange the out-of-place pair
    # Place the pivot at its final sorted position j.
    A[lo],A[j] = A[j],A[lo]
    return j
def linear_median(A):
    """
    Efficient implementation that returns median value in arbitrary list,
    assuming A has an odd number of values. Note this algorithm will
    rearrange values in A.
    Expected linear-time selection (quickselect) with randomized pivots.
    For an even-length list it returns the element at sorted index
    (len(A)-1)//2, i.e. the lower median.
    """
    # if len(A) % 2 == 0:
    #     raise ValueError('linear_median() only coded to work with odd number of values.')
    lo = 0
    hi = len(A) - 1
    mid = hi // 2    # index of the median in sorted order
    while lo < hi:
        idx = random.randint(lo, hi)     # select valid index randomly
        # partition() places the pivot at its sorted index j.
        j = partition(A, lo, hi, idx)
        if j == mid:
            return A[j]
        if j < mid:
            # Median lies strictly to the right of the pivot.
            lo = j+1
        else:
            # Median lies strictly to the left of the pivot.
            hi = j-1
    return A[lo]
def counting_sort(A, M):
    """Sort A in place into ascending order, assuming every element is an
    integer in the half-open range [0, M).

    Classic counting sort: tally each value's occurrences, then rewrite A
    by emitting each value `count` times, one element at a time.
    """
    occurrences = [0] * M
    for value in A:
        occurrences[value] += 1
    write_at = 0
    for value, count in enumerate(occurrences):
        for offset in range(count):
            A[write_at + offset] = value
        write_at += count
def counting_sort_improved(A, M):
    """Sort A in place into ascending order, assuming every element is an
    integer in the half-open range [0, M).

    Variant of counting sort that writes each run of equal values with a
    single slice assignment instead of an element-by-element loop.
    """
    tally = [0] * M
    for item in A:
        tally[item] += 1
    cursor = 0
    for value in range(M):
        run = tally[value]
        if run:
            A[cursor:cursor + run] = [value] * run
            cursor += run
def run_counting_sort_trials(max_k=15, output=True):
    """Generate table for counting sort up to (but not including) max_k=15.
    Benchmarks counting_sort against counting_sort_improved on shuffled
    lists of n values drawn from range(M), for n = 2**8 .. 2**(max_k-1),
    and returns the populated DataTable.
    """
    tbl = DataTable([8,15,15],
                    ['N', 'counting_sort', 'counting_sort_improved'], output=output)
    M = 20 # arbitrary value, and results are dependent on this value.
    trials = [2**k for k in range(8, max_k)]
    for n in trials:
        # Best of 100 single-run timings of sort + sortedness check.
        # NOTE(review): the `w` and `b` lists in the setup code appear
        # unused -- presumably leftovers; confirm before removing.
        t_cs = min(timeit.repeat(stmt='counting_sort(a,{})\nis_sorted(a)'.format(M),
                                 setup='''
import random
from ch01.challenge import counting_sort
from algs.sorting import is_sorted
w = [{0}-1] * {1}
b = [0] * {1}
a = list(range({0})) * {1}
random.shuffle(a)'''.format(M,n), repeat=100, number=1))
        t_csi = min(timeit.repeat(stmt='counting_sort_improved(a,{})\nis_sorted(a)'.format(M),
                                  setup='''
import random
from ch01.challenge import counting_sort_improved
from algs.sorting import is_sorted
w = [{0}-1] * {1}
b = [0] * {1}
a = list(range({0})) * {1}
random.shuffle(a)'''.format(M,n), repeat=100, number=1))
        tbl.row([n, t_cs, t_csi])
    return tbl
def run_median_trial():
    """Generate table for Median Trial.
    Compares wall-clock time (milliseconds) of the quickselect-based
    linear_median against the sort-based median_from_sorted_list on
    shuffled lists of n = 2**k + 1 values, and returns the DataTable.
    """
    tbl = DataTable([10,15,15],['N', 'median_time', 'sort_median'])
    trials = [2**k+1 for k in range(8,20)]
    for n in trials:
        # Each timing runs 5 iterations; take the best of 10 repeats and
        # scale to milliseconds per iteration.
        t_med = 1000*min(timeit.repeat(stmt='assert(linear_median(a) == {}//2)'.format(n),
                setup='''
import random
from ch01.challenge import linear_median
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
        t_sort = 1000*min(timeit.repeat(stmt='assert(median_from_sorted_list(a) == {0}//2)'.format(n),
                setup='''
import random
from ch01.challenge import median_from_sorted_list
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
        tbl.row([n, t_med, t_sort])
    return tbl
def run_median_less_than_trial(max_k=20, output=True):
    """Use RecordedItem to count # of times Less-than invoked up to (but not including) max_k=20.
    Wraps each value in a RecordedItem so every < comparison is tallied,
    then compares linear_median against median_from_sorted_list on the
    same shuffled list.  Returns the populated DataTable.
    """
    tbl = DataTable([10,15,15],['N', 'median_count', 'sort_median_count'], output=output)
    tbl.format('median_count', ',d')
    tbl.format('sort_median_count', ',d')
    trials = [2**k+1 for k in range(8, max_k)]
    for n in trials:
        A = list([RecordedItem(i) for i in range(n)])
        random.shuffle(A)
        # Generated external sorted to reuse list
        RecordedItem.clear()
        med2 = median_from_sorted_list(A)
        sort_lt = RecordedItem.report()[1]    # index 1 holds the < count
        RecordedItem.clear()
        med1 = linear_median(A)
        lin_lt = RecordedItem.report()[1]
        # Both strategies must agree on the median value.
        assert med1 == med2
        tbl.row([n, lin_lt, sort_lt])
    return tbl
def is_palindrome1(w):
    """Return True iff the sequence equals its own reverse (slice-based)."""
    reversed_copy = w[::-1]
    return reversed_copy == w
def is_palindrome2(w):
    """Recursively strip matching outer characters; any mismatch means the
    input is not a palindrome.  Empty and single-character inputs are
    palindromes by definition."""
    if len(w) <= 1:
        return True
    if w[0] != w[-1]:
        return False
    return is_palindrome2(w[1:-1])
def is_palindrome3(w):
    """Compare characters pairwise from both ends without copying the input.

    Returns True for empty and single-character sequences.
    """
    # len(w) // 2 (integer floor) suffices: the middle element of an
    # odd-length sequence never needs checking.  The original used
    # round(len(w)/2), which relied on float division and banker's rounding
    # and performed a redundant self-comparison for some odd lengths.
    for i in range(len(w) // 2):
        if w[i] != w[-(i + 1)]:
            return False
    return True    # must have been a Palindrome
def is_palindrome_letters_only(s):
    """
    Confirm Palindrome, even when string contains non-alphabet letters
    and ignore capitalization.
    casefold() method, which was introduced in Python 3.3, could be
    used instead of this older method, which converts to lower().
    NOTE(review): the inner scans stop at the boundaries (i == hi, j == 0)
    without re-checking whether the boundary character is a letter, so
    inputs made entirely of non-letters end up comparing those boundary
    symbols directly -- confirm this matches the intended contract.
    """
    i = 0
    j = hi = len(s) - 1
    # Two-pointer scan from both ends, skipping non-letter characters.
    while i < j:
        # This type of logic appears in partition.
        # Find alpha characters and compare
        while not s[i].isalpha():
            i += 1
            if i == hi: break
        while not s[j].isalpha():
            j -= 1
            if j == 0: break
        if s[i].lower() != s[j].lower(): return False
        i += 1
        j -= 1
    return True
def tournament_allows_odd(A):
    """Return the two largest values in A via a single-elimination tournament.

    Unlike the basic tournament algorithm, this variant also accepts lists
    of odd length: the unpaired final element sits out the bracket and is
    merged into the (largest, second) answer at the end.

    Raises:
        ValueError: if A contains fewer than two values.
    """
    from ch01.largest_two import Match
    if len(A) < 2:
        raise ValueError('Must have at least two values')
    # First round: pair up adjacent elements.
    tourn = [Match(A[i], A[i + 1]) for i in range(0, len(A) - 1, 2)]
    # With an odd count, the last element has no first-round opponent.
    odd_one_out = None
    if len(A) % 2 == 1:
        odd_one_out = A[-1]
    # Play out the bracket: repeatedly advance the winners of the two
    # front matches until a single championship match remains.
    while len(tourn) > 1:
        tourn.append(Match.advance(tourn[0], tourn[1]))
        del tourn[0:2]
    # Find where second is hiding!
    m = tourn[0]
    largest = m.larger
    second = m.smaller
    # Merge the bye element.  BUG FIX: the original tested `if odd_one_out:`
    # which silently skipped a legitimate value of 0 (falsy); test against
    # None so that 0 participates in the comparison.
    if odd_one_out is not None:
        if odd_one_out > largest:
            largest,second = odd_one_out,largest
        elif odd_one_out > second:
            second = odd_one_out
    # The true runner-up may have lost directly to the champion in an
    # earlier round; walk the champion's match history to find it.
    while m.prior:
        m = m.prior
        if second < m.smaller:
            second = m.smaller
    return (largest,second)
def two_largest_attempt(A):
    """Intentionally flawed attempt at finding the two largest values.

    Takes the maximum of each half and orders them, which misses the case
    where both of the two largest values lie in the same half (demonstrated
    by Exercise 5 in the __main__ block below).
    """
    midpoint = len(A) // 2
    left_max = max(A[:midpoint])
    right_max = max(A[midpoint:])
    if left_max < right_max:
        return (right_max, left_max)
    return (left_max, right_max)
#######################################################################
# Run the chapter's challenge exercises in order when executed as a script.
if __name__ == '__main__':
    chapter = 1
    with ExerciseNum(1) as exercise_number:
        sample = 'A man, a plan, a canal. Panama!'
        print(sample,'is a palindrome:', is_palindrome_letters_only(sample))
        print(caption(chapter, exercise_number),
              'Palindrome Detector')
    with ExerciseNum(2) as exercise_number:
        run_median_less_than_trial()
        print()
        run_median_trial()
        print(caption(chapter, exercise_number),
              'Median Counting')
    with ExerciseNum(3) as exercise_number:
        run_counting_sort_trials()
        print(caption(chapter, exercise_number),
              'Counting Sort Trials')
    with ExerciseNum(4) as exercise_number:
        print('see tournament_allows_odd in ch01.challenge')
        print(caption(chapter, exercise_number),
              'Odd tournament')
    with ExerciseNum(5) as exercise_number:
        # Deliberately demonstrates the flaw in two_largest_attempt.
        print('Should print (9, 8)', two_largest_attempt([9, 3, 5, 7, 8, 1]))
        print('Fails to print (9, 8)', two_largest_attempt([9, 8, 5, 7, 3, 1]))
        print(caption(chapter, exercise_number),
              'Failed Two largest')
428f6c9308ecfc2aebd2c05427a3eb4c4bcb191b | 522 | py | Python | exaslct_src/lib/data/dependency_collector/dependency_image_info_collector.py | mace84/script-languages | d586cbe212bbb4efbfb39e095183729c65489360 | [
"MIT"
] | null | null | null | exaslct_src/lib/data/dependency_collector/dependency_image_info_collector.py | mace84/script-languages | d586cbe212bbb4efbfb39e095183729c65489360 | [
"MIT"
] | 1 | 2019-05-06T07:36:11.000Z | 2019-05-06T07:36:11.000Z | exaslct_src/lib/data/dependency_collector/dependency_image_info_collector.py | mace84/script-languages | d586cbe212bbb4efbfb39e095183729c65489360 | [
"MIT"
] | 1 | 2019-05-03T08:49:29.000Z | 2019-05-03T08:49:29.000Z | from typing import Dict
from exaslct_src.lib.data.image_info import ImageInfo
from exaslct_src.lib.data.dependency_collector.dependency_collector import DependencyInfoCollector
IMAGE_INFO = "image_info"
| 29 | 98 | 0.764368 |
42914f6fbdf21a73ae8be4659f5689614360b711 | 3,131 | py | Python | tensorflow_transform/test_case_test.py | LaudateCorpus1/transform | afee306046b8f656355b0170793ee64423f30e23 | [
"Apache-2.0"
] | 970 | 2017-02-10T04:33:46.000Z | 2022-03-26T08:11:20.000Z | tensorflow_transform/test_case_test.py | LaudateCorpus1/transform | afee306046b8f656355b0170793ee64423f30e23 | [
"Apache-2.0"
] | 216 | 2017-02-23T04:50:59.000Z | 2022-03-31T13:52:57.000Z | tensorflow_transform/test_case_test.py | LaudateCorpus1/transform | afee306046b8f656355b0170793ee64423f30e23 | [
"Apache-2.0"
] | 238 | 2017-02-17T16:30:55.000Z | 2022-03-03T20:10:25.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_transform.test_case."""
import re
from tensorflow_transform import test_case
import unittest
if __name__ == '__main__':
unittest.main()
| 36.835294 | 79 | 0.544874 |
4293119f4fbe0691576ba0bf3959decad7140860 | 6,388 | py | Python | metageta/icons.py | ssutee/metageta | 70b7e572acefcce7a8f4d8de719f936934319064 | [
"MIT"
] | null | null | null | metageta/icons.py | ssutee/metageta | 70b7e572acefcce7a8f4d8de719f936934319064 | [
"MIT"
] | null | null | null | metageta/icons.py | ssutee/metageta | 70b7e572acefcce7a8f4d8de719f936934319064 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Australian Government, Department of the Environment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
base 64 encoded gif images for the GUI buttons
'''
| 63.88 | 89 | 0.688009 |
42934dc5c7b47e76f47a4a49a47981e068b48692 | 1,417 | py | Python | chmp/src/chmp/torch_utils/_test_bayes.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 6 | 2017-10-31T20:54:37.000Z | 2020-10-23T19:03:00.000Z | chmp/src/chmp/torch_utils/_test_bayes.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 7 | 2020-03-24T16:14:34.000Z | 2021-03-18T20:51:37.000Z | chmp/src/chmp/torch_utils/_test_bayes.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 1 | 2019-07-29T07:55:49.000Z | 2019-07-29T07:55:49.000Z | import torch
import pytest
# NOTE: also registers the KL divergence
from chmp.torch_utils import NormalModule, WeightsHS, fixed
| 28.918367 | 73 | 0.666902 |
4293fa719a880b9bfe3a700da09a0f285fc6495b | 867 | py | Python | test/hummingbot/core/utils/test_fixed_rate_source.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | test/hummingbot/core/utils/test_fixed_rate_source.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | test/hummingbot/core/utils/test_fixed_rate_source.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | from decimal import Decimal
from unittest import TestCase
from hummingbot.core.utils.fixed_rate_source import FixedRateSource
| 32.111111 | 83 | 0.731257 |
4297be6e9ea671a123810cad1577476fb18a42d0 | 5,293 | py | Python | src/graphnet/models/detector/icecube.py | kaareendrup/gnn-reco | 21f4e36ef17c765a04cde0b2e34d5f802a988055 | [
"Apache-2.0"
] | null | null | null | src/graphnet/models/detector/icecube.py | kaareendrup/gnn-reco | 21f4e36ef17c765a04cde0b2e34d5f802a988055 | [
"Apache-2.0"
] | null | null | null | src/graphnet/models/detector/icecube.py | kaareendrup/gnn-reco | 21f4e36ef17c765a04cde0b2e34d5f802a988055 | [
"Apache-2.0"
] | null | null | null | import torch
from torch_geometric.data import Data
from graphnet.components.pool import group_pulses_to_dom, group_pulses_to_pmt, sum_pool_and_distribute
from graphnet.data.constants import FEATURES
from graphnet.models.detector.detector import Detector
| 33.713376 | 128 | 0.575477 |
429945dde445e0205f0ceeefa695def22a8e1795 | 450 | py | Python | tests/routes_parsing/test1.py | hellojoechip/bambleweeny | ef65f574081eb169aef5a2f7363c3f8ba9ebf028 | [
"MIT"
] | 22 | 2018-09-30T12:08:09.000Z | 2020-11-18T06:32:01.000Z | tests/routes_parsing/test1.py | hellojoechip/bambleweeny | ef65f574081eb169aef5a2f7363c3f8ba9ebf028 | [
"MIT"
] | 34 | 2018-09-13T14:54:21.000Z | 2020-03-26T18:26:26.000Z | tests/routes_parsing/test1.py | hellojoechip/bambleweeny | ef65f574081eb169aef5a2f7363c3f8ba9ebf028 | [
"MIT"
] | 17 | 2018-10-07T15:41:50.000Z | 2021-12-10T10:29:02.000Z | import re
t1 = 'Data !@[value1] and also !@[system:uptime] testing.'
print("Content: " + t1)
if re.search('!@\[[_a-zA-Z0-9:]*\]', t1):
print("YES")
else:
print("NO")
o = re.sub('!@\[[_a-zA-Z0-9:]*\]', '_B9yPrsE_\\g<0>_B9yPrsE_', t1)
o2 = o.split("_B9yPrsE_")
for i in o2:
if i.startswith("!@["):
i2 = re.sub('[^\w:]', "", i)
print("Parse: " + str(i) + " " +str(i2))
else:
print("Plain: '" + str(i) + "'")
| 21.428571 | 66 | 0.482222 |
429a7f070688a75a8c4a4c449d4d3474a9a7088a | 5,430 | py | Python | internal/notes/builtin-SAVE/packages/suite-sparse/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 1 | 2019-01-17T20:07:19.000Z | 2019-01-17T20:07:19.000Z | internal/notes/builtin-SAVE/packages/suite-sparse/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | null | null | null | internal/notes/builtin-SAVE/packages/suite-sparse/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 2 | 2019-08-06T18:13:57.000Z | 2021-11-05T18:19:49.000Z | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
| 44.876033 | 122 | 0.618416 |
429b606cd5d96a46f963693074a289b595badea4 | 4,580 | py | Python | tick/array/serialize.py | andro2157/tick | d22d0e70c8bb2d5b232ffa7b97426010c2328edc | [
"BSD-3-Clause"
] | null | null | null | tick/array/serialize.py | andro2157/tick | d22d0e70c8bb2d5b232ffa7b97426010c2328edc | [
"BSD-3-Clause"
] | null | null | null | tick/array/serialize.py | andro2157/tick | d22d0e70c8bb2d5b232ffa7b97426010c2328edc | [
"BSD-3-Clause"
] | null | null | null | # License: BSD 3 clause
import os
import numpy as np
import scipy
from tick.array.build.array import (
tick_float_array_to_file,
tick_float_array2d_to_file,
tick_float_sparse2d_to_file,
tick_double_array_to_file,
tick_double_array2d_to_file,
tick_double_sparse2d_to_file,
tick_float_array_from_file,
tick_float_array2d_from_file,
tick_float_sparse2d_from_file,
tick_double_array_from_file,
tick_double_array2d_from_file,
tick_double_sparse2d_from_file,
)
def serialize_array(array, filepath):
    """Save an array on disk in a format that tick C++ modules can read.

    This method is intended to be used by developers only, mostly for
    benchmarking in C++ on real datasets imported from Python.

    Parameters
    ----------
    array : `np.ndarray` or `scipy.sparse.csr_matrix`
        1d or 2d dense array, or 2d sparse array, of dtype float32/float64

    filepath : `str`
        Path where the array will be stored

    Returns
    -------
    path : `str`
        Global path of the serialized array

    Raises
    ------
    ValueError
        If the dtype is not float32/float64 or the dimensionality is not
        supported for the given array class.
    """
    # Typos fixed throughout: "serrialized" -> "serialized".
    if array.dtype not in [np.float32, np.float64]:
        raise ValueError('Only float32/64 arrays can be serialized')
    if array.dtype == "float32":
        if isinstance(array, np.ndarray):
            if len(array.shape) == 1:
                serializer = tick_float_array_to_file
            elif len(array.shape) == 2:
                serializer = tick_float_array2d_to_file
            else:
                raise ValueError('Only 1d and 2d arrays can be serialized')
        else:
            # Non-ndarray inputs are expected to be scipy sparse matrices.
            if len(array.shape) == 2:
                serializer = tick_float_sparse2d_to_file
            else:
                raise ValueError('Only 2d sparse arrays can be serialized')
    elif array.dtype == "float64" or array.dtype == "double":
        if isinstance(array, np.ndarray):
            if len(array.shape) == 1:
                serializer = tick_double_array_to_file
            elif len(array.shape) == 2:
                serializer = tick_double_array2d_to_file
            else:
                raise ValueError('Only 1d and 2d arrays can be serialized')
        else:
            if len(array.shape) == 2:
                serializer = tick_double_sparse2d_to_file
            else:
                raise ValueError('Only 2d sparse arrays can be serialized')
    else:
        # Unreachable given the dtype guard above; kept as a safety net.
        raise ValueError('Unhandled serialization type')
    serializer(filepath, array)
    return os.path.abspath(filepath)
def load_array(filepath, array_type='dense', array_dim=1, dtype="float64"):
    """Load an array from disk from a format that tick C++ modules can read.

    This method is intended to be used by developers only, mostly for
    benchmarking in C++ on real datasets imported from Python.

    Parameters
    ----------
    filepath : `str`
        Path where the array was stored

    array_type : {'dense', 'sparse'}, default='dense'
        Expected type of the array

    array_dim : `int`, default=1
        Expected dimension of the array (1 or 2; sparse supports only 2)

    dtype : {'float32', 'float64', 'double'}, default='float64'
        Expected dtype of the stored array

    Returns
    -------
    array : `np.ndarray` or `scipy.sparse.csr_matrix`
        1d or 2d array

    Raises
    ------
    FileNotFoundError
        If `filepath` does not exist.
    ValueError
        If the type/dimension/dtype combination is not supported.
    """
    abspath = os.path.abspath(filepath)
    if not os.path.exists(filepath):
        raise FileNotFoundError('File {} does not exist'.format(abspath))
    if dtype == "float32":
        if array_type == 'dense':
            if array_dim == 1:
                reader = tick_float_array_from_file
            elif array_dim == 2:
                reader = tick_float_array2d_from_file
            else:
                raise ValueError('Only 1d and 2d arrays can be loaded')
        elif array_type == 'sparse':
            if array_dim == 2:
                reader = tick_float_sparse2d_from_file
            else:
                raise ValueError('Only 2d sparse arrays can be loaded')
        else:
            raise ValueError('Cannot load this class of array')
    elif dtype == "float64" or dtype == "double":
        if array_type == 'dense':
            if array_dim == 1:
                reader = tick_double_array_from_file
            elif array_dim == 2:
                reader = tick_double_array2d_from_file
            else:
                raise ValueError('Only 1d and 2d arrays can be loaded')
        elif array_type == 'sparse':
            if array_dim == 2:
                reader = tick_double_sparse2d_from_file
            else:
                raise ValueError('Only 2d sparse arrays can be loaded')
        else:
            raise ValueError('Cannot load this class of array')
    else:
        # Typo fixed: "serrialization" -> "serialization".
        raise ValueError('Unhandled serialization type')
    return reader(filepath)
429b9b03d73a5f7f9bbccc750f09ea936a25f8a0 | 78 | py | Python | __init__.py | bbockelm/glideinWMS | a2b39e3d4ff6c4527efad54b1eefe728a4ec9d18 | [
"BSD-3-Clause"
] | null | null | null | __init__.py | bbockelm/glideinWMS | a2b39e3d4ff6c4527efad54b1eefe728a4ec9d18 | [
"BSD-3-Clause"
] | 3 | 2015-12-02T19:37:45.000Z | 2016-01-20T03:21:48.000Z | __init__.py | bbockelm/glideinWMS | a2b39e3d4ff6c4527efad54b1eefe728a4ec9d18 | [
"BSD-3-Clause"
] | 1 | 2015-12-01T23:02:41.000Z | 2015-12-01T23:02:41.000Z | __all__=["factory","frontend","lib","tools","creation","install","unittests"]
| 39 | 77 | 0.692308 |
429beefc88e6c9cf72106405ad5b6e321025f9d6 | 20,658 | py | Python | views/menuVisualizacaoGeral.py | iOsnaaente/Tracker-solar-Supervisorio | 9790c34f0d9df283bc1b92f79b2807875dbcfe3e | [
"MIT"
] | 2 | 2022-01-29T21:33:12.000Z | 2022-02-01T12:41:35.000Z | views/menuVisualizacaoGeral.py | iOsnaaente/Tracker-solar-Supervisorio | 9790c34f0d9df283bc1b92f79b2807875dbcfe3e | [
"MIT"
] | null | null | null | views/menuVisualizacaoGeral.py | iOsnaaente/Tracker-solar-Supervisorio | 9790c34f0d9df283bc1b92f79b2807875dbcfe3e | [
"MIT"
] | null | null | null | import dearpygui.dearpygui as dpg
import datetime as dt
import math
from registry import *
SUN_DATA.update_date()
# FUNCTIONS
# MAIN FUNCTIONS | 70.989691 | 267 | 0.554265 |
429cb5fb216dbdf5ec9ff71a33c2d298dd2c8210 | 4,071 | py | Python | python/jwt.py | angelbarranco/passes-rest-samples | 93f54e3e7b651bcfd1b269e2bcd5d9bf9d50ad8c | [
"Apache-2.0"
] | 95 | 2019-06-05T12:45:15.000Z | 2022-03-30T14:02:27.000Z | python/jwt.py | angelbarranco/passes-rest-samples | 93f54e3e7b651bcfd1b269e2bcd5d9bf9d50ad8c | [
"Apache-2.0"
] | 21 | 2019-06-18T15:41:41.000Z | 2022-03-04T15:29:57.000Z | python/jwt.py | angelbarranco/passes-rest-samples | 93f54e3e7b651bcfd1b269e2bcd5d9bf9d50ad8c | [
"Apache-2.0"
] | 45 | 2019-06-13T20:57:11.000Z | 2022-03-21T13:43:31.000Z | """
Copyright 2019 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import config
import time
# for jwt signing. see https://google-auth.readthedocs.io/en/latest/reference/google.auth.jwt.html#module-google.auth.jwt
from google.auth import crypt as cryptGoogle
from google.auth import jwt as jwtGoogle
#############################
#
# class that defines JWT format for a Google Pay Pass.
#
# to check the JWT protocol for Google Pay Passes, check:
# https://developers.google.com/pay/passes/reference/s2w-reference#google-pay-api-for-passes-jwt
#
# also demonstrates RSA-SHA256 signing implementation to make the signed JWT used
# in links and buttons. Learn more:
# https://developers.google.com/pay/passes/guides/get-started/implementing-the-api/save-to-google-pay
#
#############################
| 35.4 | 121 | 0.747237 |
429ce61086d20c4c1d15d20e5249184bf0cc61e3 | 4,714 | py | Python | janus.py | caxmd/januus | 79208e2450b4c5b1c81346b99814462f6d083b66 | [
"MIT"
] | 83 | 2017-12-11T03:33:10.000Z | 2022-02-17T15:13:54.000Z | janus.py | caxmd/januus | 79208e2450b4c5b1c81346b99814462f6d083b66 | [
"MIT"
] | 3 | 2017-12-25T16:15:44.000Z | 2018-06-17T11:06:08.000Z | janus.py | caxmd/januus | 79208e2450b4c5b1c81346b99814462f6d083b66 | [
"MIT"
] | 25 | 2017-12-11T03:51:12.000Z | 2022-02-17T15:13:57.000Z | # Includes some code derived from the cpython project.
# Source: https://github.com/python/cpython/blob/master/Lib/zipfile.py
# Excuse the mess.
import argparse
from hashlib import sha1
import os
import struct
from zipfile import _EndRecData, ZipFile
from zlib import adler32
# Index constants for the fields of a parsed ZIP "End of Central Directory"
# (EOCD) record, matching the tuple layout used by zipfile's private
# _EndRecData helper (imported above).
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6  # offset of the start of the central directory
_ECD_COMMENT_SIZE = 7
# struct formats and magic signatures for the EOCD record and for
# central-directory entries (mirrors the constants in CPython's zipfile).
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
# Field indices for the DEX header slice unpacked with structDexHeader:
# magic, Adler-32 checksum, SHA-1 signature, total file size.
# "<8sI20sI" is 8+4+20+4 = 36 = 0x24 bytes -- the slice skipped below.
_DEX_MAGIC = 0
_DEX_CHECKSUM = 1
_DEX_SIGNATURE = 2
_DEX_FILE_SIZE = 3
structDexHeader = "<8sI20sI"
parser = argparse.ArgumentParser(description="Creates an APK exploiting the Janus vulnerability.")
parser.add_argument("apk_in", metavar="original-apk", type=str,
                    help="the source apk to use")
parser.add_argument("dex_in", metavar="dex-file", type=str,
                    help="the dex file to prepend")
parser.add_argument("apk_out", metavar="output-apk", type=str,
                    help="the file to output to")
args = parser.parse_args()
# Build the hybrid DEX/APK: the DEX payload is prepended to the original APK
# and every ZIP offset is shifted by the DEX size so the result still parses
# as a valid ZIP archive (the "Janus" trick).
with ZipFile(args.apk_in, "r") as apk_in_zip, open(args.apk_in, "rb") as apk_in, open(args.dex_in, "rb") as dex_in, open(args.apk_out, "wb") as apk_out:
    dex_data = dex_in.read()
    dex_header = get_dex_header(dex_data)
    dex_size = os.path.getsize(args.dex_in)
    # Two copies of the EOCD: one keeps the original central-directory offset
    # (used to slice the input below), the other gets the shifted offset.
    # NOTE(review): assumes get_endrec() returns a fresh mutable sequence on
    # each call and leaves the file position usable for apk_in.read() -- confirm.
    orig_endrec = get_endrec(apk_in)
    new_endrec = get_endrec(apk_in)
    new_endrec[_ECD_OFFSET] = new_endrec[_ECD_OFFSET] + dex_size
    final_size = os.path.getsize(args.apk_in) + dex_size
    apk_in_zip.filelist = sorted(apk_in_zip.filelist, key=sort_info)
    infolist = apk_in_zip.infolist()
    for info in infolist:
        # Deliberately out-of-range timestamp; header offsets shift by the
        # size of the prepended DEX.
        info.date_time = (2042, 14, 3, 0, 62, 18)
        info.header_offset = info.header_offset + dex_size
    # Assemble: DEX body minus its 0x24-byte header, original archive data up
    # to the old central directory, rebuilt central directory, patched EOCD.
    out_bytes = b""
    out_bytes += dex_data[0x24:]
    out_bytes += apk_in.read()[:orig_endrec[_ECD_OFFSET]]
    out_bytes += get_centdirs(infolist)
    out_bytes += pack_endrec(new_endrec)
    # Regenerate the DEX header (checksum/signature/size) over the final blob.
    out_bytes = make_dex_header(dex_header, out_bytes, final_size) + out_bytes
apk_out.write(out_bytes) | 31.218543 | 152 | 0.655282 |
429cf2c16bb83449ca0bd5d3338a9cac6d753159 | 74 | py | Python | constants.py | phy1um/tmtc-discord-bot | 7d01cd4c1a78dc0b8aa2bb703c8970ff7bb27f92 | [
"MIT"
] | null | null | null | constants.py | phy1um/tmtc-discord-bot | 7d01cd4c1a78dc0b8aa2bb703c8970ff7bb27f92 | [
"MIT"
] | null | null | null | constants.py | phy1um/tmtc-discord-bot | 7d01cd4c1a78dc0b8aa2bb703c8970ff7bb27f92 | [
"MIT"
] | null | null | null |
# Discord snowflake IDs kept as strings -- presumably matched against string
# IDs coming from the Discord API; confirm at call sites.
ANNOUNCEMENT_ROLE = "941805571915513857"
GUILD_ID = "878926572235665418"
| 18.5 | 40 | 0.824324 |
429eedb68c601680755c430f3d242a23508963a5 | 3,352 | py | Python | test/gst-msdk/transcode/mpeg2.py | haribommi/vaapi-fits | cbf2a463bd3b2c9af5c45a1376b0bde2b703ed23 | [
"BSD-3-Clause"
] | null | null | null | test/gst-msdk/transcode/mpeg2.py | haribommi/vaapi-fits | cbf2a463bd3b2c9af5c45a1376b0bde2b703ed23 | [
"BSD-3-Clause"
] | null | null | null | test/gst-msdk/transcode/mpeg2.py | haribommi/vaapi-fits | cbf2a463bd3b2c9af5c45a1376b0bde2b703ed23 | [
"BSD-3-Clause"
] | null | null | null | ##
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
from .transcoder import TranscoderTest
spec = load_test_spec("mpeg2", "transcode")
| 38.976744 | 107 | 0.683174 |
42a05049df648190833a6dde333b459a1ed6a363 | 10,220 | py | Python | rusel/base/context.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | rusel/base/context.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | rusel/base/context.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | import os, time, mimetypes, glob
from django.utils.translation import gettext_lazy as _
from django.urls import reverse
from task.const import *
from task.models import Task, detect_group
from rusel.base.config import Config
from rusel.base.forms import CreateGroupForm
from rusel.context import get_base_context
from rusel.utils import extract_get_params
| 40.078431 | 196 | 0.531409 |
42a0a34d1333c63396ab8f94b968a15d8d78c49d | 2,046 | py | Python | deepdiy/plugins/system/debugger/debugger.py | IEWbgfnYDwHRoRRSKtkdyMDUzgdwuBYgDKtDJWd/diy | 080ddece4f982f22f3d5cff8d9d82e12fcd946a1 | [
"MIT"
] | 57 | 2019-05-01T05:27:19.000Z | 2022-03-06T12:11:55.000Z | deepdiy/plugins/system/debugger/debugger.py | markusj1201/deepdiy | 080ddece4f982f22f3d5cff8d9d82e12fcd946a1 | [
"MIT"
] | 6 | 2020-01-28T22:42:22.000Z | 2022-02-10T00:13:11.000Z | deepdiy/plugins/system/debugger/debugger.py | markusj1201/deepdiy | 080ddece4f982f22f3d5cff8d9d82e12fcd946a1 | [
"MIT"
] | 13 | 2019-05-08T03:19:58.000Z | 2021-08-02T04:24:15.000Z | import os,rootpath
rootpath.append(pattern='main.py') # add the directory of main.py to PATH
import glob
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import ObjectProperty,DictProperty,ListProperty
from kivy.uix.boxlayout import BoxLayout
import logging,importlib,pkgutil
if __name__ == '__main__':
Test().run()
| 31 | 106 | 0.711632 |
42a141b9ed0d23fd4819a5a6563c8f54190ea8c2 | 1,885 | py | Python | supervised_learning/classification/perceptron/perceptron.py | Ambitious-idiot/python-machine-learning | 6c057dd64fb47de3e822b825135d24896ce13a4a | [
"MIT"
] | 3 | 2021-04-15T06:20:31.000Z | 2021-05-28T05:26:06.000Z | supervised_learning/classification/perceptron/perceptron.py | Ambitious-idiot/python-machine-learning | 6c057dd64fb47de3e822b825135d24896ce13a4a | [
"MIT"
] | null | null | null | supervised_learning/classification/perceptron/perceptron.py | Ambitious-idiot/python-machine-learning | 6c057dd64fb47de3e822b825135d24896ce13a4a | [
"MIT"
] | null | null | null | import numpy as np
| 40.106383 | 116 | 0.682228 |
42a1c00f35b59908451cfee2563f53a899db2598 | 901 | py | Python | pygama/dsp/_processors/trap_filter.py | sweigart/pygama | 3c5fe4c69230814933b2de879b9a305ff0d4ad5e | [
"Apache-2.0"
] | 1 | 2022-01-19T14:31:56.000Z | 2022-01-19T14:31:56.000Z | pygama/dsp/_processors/trap_filter.py | sweigart/pygama | 3c5fe4c69230814933b2de879b9a305ff0d4ad5e | [
"Apache-2.0"
] | 1 | 2020-12-08T20:07:24.000Z | 2020-12-08T20:07:24.000Z | pygama/dsp/_processors/trap_filter.py | sweigart/pygama | 3c5fe4c69230814933b2de879b9a305ff0d4ad5e | [
"Apache-2.0"
] | null | null | null | import numpy as np
from numba import guvectorize
| 37.541667 | 102 | 0.558269 |
42a67cbf934d63272df061aa18d737365bf0fa29 | 5,109 | py | Python | pilferer/engine.py | Sebastian-dm/pilferer | 5126377154c7ba08fbea1a9dfad752bf8b1c72a9 | [
"MIT"
] | null | null | null | pilferer/engine.py | Sebastian-dm/pilferer | 5126377154c7ba08fbea1a9dfad752bf8b1c72a9 | [
"MIT"
] | null | null | null | pilferer/engine.py | Sebastian-dm/pilferer | 5126377154c7ba08fbea1a9dfad752bf8b1c72a9 | [
"MIT"
] | null | null | null | import tcod
from input_handlers import handle_keys
from game_states import GameStates
from render_functions import clear_all, render_all, RenderOrder
from map_objects.game_map import GameMap
from fov_functions import initialize_fov, recompute_fov
from entity import Entity, get_blocking_entity_at_location
from components.fighter import Fighter
from death_functions import kill_monster, kill_player
# --- Display / gameplay configuration ---
VERSION = "0.2"
# Bitmap font sheet handed to tcod.console_set_custom_font in main().
FONT = 'assets/arial10x10.png'
# Console size in character cells.
screen_width = 80
screen_height = 50
# Map size in tiles (5 rows shorter than the screen).
map_width = 80
map_height = 45
# Dungeon-generation parameters (room sizes in tiles).
room_max_size = 10
room_min_size = 6
max_rooms = 30
# Field-of-view settings passed to recompute_fov (algorithm 0 is presumably
# tcod's basic FOV -- confirm against fov_functions).
fov_algorithm = 0
fov_light_walls = False
fov_radius = 10
max_monsters_per_room = 3
# Tile colors keyed by lit/unlit wall/ground state; consumed by render_all.
colors = {
    'dark_wall': tcod.Color(0, 0, 0),
    'light_wall': tcod.Color(120, 120, 80),
    'dark_ground': tcod.Color(150, 150, 150),
    'light_ground': tcod.Color(200, 200, 150)
}
def main():
    """Entry point: set up tcod and the map, then run the turn loop.

    Returns True when an 'exit' action is received from input handling.
    """
    # Player entity with combat stats attached as a Fighter component.
    fighter_component = Fighter(hp=30, defense=2, power=5)
    player = Entity(0, 0, '@', tcod.white, 'Player', blocks=True,
                    render_order=RenderOrder.ACTOR, fighter=fighter_component)
    entities = [player]
    # Import font
    tcod.console_set_custom_font(FONT, tcod.FONT_TYPE_GREYSCALE | tcod.FONT_LAYOUT_TCOD)
    # Root window plus an off-screen console used for drawing.
    tcod.console_init_root(screen_width, screen_height, 'Pilferer %s'%VERSION, False, vsync=False)
    con = tcod.console.Console(screen_width, screen_height)
    # Dungeon generation; make_map also positions the player and appends
    # monsters to `entities` (mutates both arguments).
    game_map = GameMap(map_width, map_height)
    game_map.make_map(max_rooms, room_min_size, room_max_size, map_width,
                      map_height, player, entities, max_monsters_per_room)
    # FOV is recomputed lazily: only when the flag is set (player moved).
    fov_recompute = True
    fov_map = initialize_fov(game_map)
    # Variables for holding input
    key = tcod.Key()
    mouse = tcod.Mouse()
    # Game state
    game_state = GameStates.PLAYERS_TURN
    # Main game loop
    while not tcod.console_is_window_closed():
        # Recompute FOV only if flagged by a player move.
        if fov_recompute:
            recompute_fov(fov_map, player.x, player.y, fov_radius, fov_light_walls, fov_algorithm)
        # Draw everything, present the frame, then clear entities for the
        # next frame.
        render_all(con, entities, player, game_map, fov_map, fov_recompute, screen_width, screen_height, colors)
        fov_recompute = False
        tcod.console_flush()
        clear_all(con, entities)
        # Input handling: poll for a key press and translate it to an action dict.
        tcod.sys_check_for_event(tcod.EVENT_KEY_PRESS, key, mouse)
        action = handle_keys(key)
        # Player movement / melee: bumping into a blocking entity attacks it
        # instead of moving; either way the turn passes to the enemies.
        player_turn_results = []
        move = action.get('move')
        if move and game_state == GameStates.PLAYERS_TURN:
            dx, dy = move
            destination_x = player.x + dx
            destination_y = player.y + dy
            if not game_map.is_blocked(destination_x, destination_y):
                target = get_blocking_entity_at_location(entities, destination_x, destination_y)
                if target:
                    attack_results = player.fighter.attack(target)
                    player_turn_results.extend(attack_results)
                else:
                    player.move(dx, dy)
                    fov_recompute = True
                game_state = GameStates.ENEMY_TURN
        # Exit request.  NOTE(review): `exit` shadows the builtin (local scope only).
        exit = action.get('exit')
        if exit:
            return True
        # Fullscreen toggle.
        fullscreen = action.get('fullscreen')
        if fullscreen:
            tcod.console_set_fullscreen(not tcod.console_is_fullscreen())
        # Apply the results of the player's action: print combat messages and
        # resolve deaths (player death changes the game state).
        for player_turn_result in player_turn_results:
            message = player_turn_result.get('message')
            dead_entity = player_turn_result.get('dead')
            if message:
                print(message)
            if dead_entity:
                if dead_entity == player:
                    message, game_state = kill_player(dead_entity)
                else:
                    message = kill_monster(dead_entity)
                print(message)
        # Enemy turns: every AI-driven entity acts; stop early if the player dies.
        if game_state == GameStates.ENEMY_TURN:
            for entity in entities:
                if entity.ai:
                    enemy_turn_results = entity.ai.take_turn(player, fov_map, game_map, entities)
                    for enemy_turn_result in enemy_turn_results:
                        message = enemy_turn_result.get('message')
                        dead_entity = enemy_turn_result.get('dead')
                        if message:
                            print(message)
                        if dead_entity:
                            if dead_entity == player:
                                message, game_state = kill_player(dead_entity)
                            else:
                                message = kill_monster(dead_entity)
                            print(message)
                        if game_state == GameStates.PLAYER_DEAD:
                            break
                    if game_state == GameStates.PLAYER_DEAD:
                        break
            else:
                # for-else: runs only when no break occurred (player survived).
                game_state = GameStates.PLAYERS_TURN
            # NOTE(review): this unconditional reset duplicates the for-else
            # above and also runs after a break, which may clobber
            # PLAYER_DEAD -- confirm this is intended.
            game_state = GameStates.PLAYERS_TURN
if __name__ == '__main__':
main() | 32.335443 | 112 | 0.603249 |
42a6cbc1a232b14997c3952e709da0eebe84cd51 | 2,337 | py | Python | galaxy/api/v2/urls.py | SamyCoenen/galaxy | 7c17ef45e53b0fc2fe8a2c70a99f3947604e0b0e | [
"Apache-2.0"
] | null | null | null | galaxy/api/v2/urls.py | SamyCoenen/galaxy | 7c17ef45e53b0fc2fe8a2c70a99f3947604e0b0e | [
"Apache-2.0"
] | null | null | null | galaxy/api/v2/urls.py | SamyCoenen/galaxy | 7c17ef45e53b0fc2fe8a2c70a99f3947604e0b0e | [
"Apache-2.0"
] | null | null | null | # (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.urls import path
from galaxy.api.v2 import views
# v2 API routes.  Most resources are addressable both by numeric primary key
# and by (namespace, name) pair, hence the paired entries that share a route
# name.  Pattern order matters for the overlapping 'collections/...' routes.
app_name = 'api'
urlpatterns = [
    # Collection Imports URLs
    path('collection-imports/<int:pk>/',
         views.CollectionImportView.as_view(),
         name='collection-import-detail'),
    # Collection Version list URLs
    path('collections/<int:pk>/versions/',
         views.VersionListView.as_view(),
         name='version-list'),
    path('collections/<str:namespace>/<str:name>/versions/',
         views.VersionListView.as_view(),
         name='version-list'),
    # Collection Version detail URLs
    path('collection-versions/<int:version_pk>/',
         views.VersionDetailView.as_view(),
         name='version-detail'),
    path('collections/<str:namespace>/<str:name>/versions/<str:version>/',
         views.VersionDetailView.as_view(),
         name='version-detail'),
    # Collection Version Artifact download URLs
    path('collection-versions/<int:pk>/artifact/',
         views.CollectionArtifactView.as_view(),
         name='version-artifact'),
    path('collections/<namespace>/<name>/versions/<version>/artifact/',
         views.CollectionArtifactView.as_view(),
         name='version-artifact'),
    # Collection URLs
    path('collections/',
         views.CollectionListView.as_view(),
         name='collection-list'),
    path('collections/<int:pk>/',
         views.CollectionDetailView.as_view(),
         name='collection-detail'),
    # NOTE: needs to come after 'collections/<int:collection_pk>/versions/'
    path('collections/<str:namespace>/<str:name>/',
         views.CollectionDetailView.as_view(),
         name='collection-detail'),
]
| 35.953846 | 75 | 0.682071 |
42a78f723d388f6c17abd15949a96f2a870ca42a | 1,933 | py | Python | mindhome_alpha/erpnext/stock/doctype/stock_settings/test_stock_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/stock/doctype/stock_settings/test_stock_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/stock/doctype/stock_settings/test_stock_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
| 42.021739 | 338 | 0.698914 |
42a96ad3b83164695c47573ef1f876f36eb4d891 | 1,148 | py | Python | pybloxy/classes/http.py | R0bl0x10501050/roblox.py | cbbb25878627c2d837caaeb7edf37d0aeda615ae | [
"MIT"
] | null | null | null | pybloxy/classes/http.py | R0bl0x10501050/roblox.py | cbbb25878627c2d837caaeb7edf37d0aeda615ae | [
"MIT"
] | null | null | null | pybloxy/classes/http.py | R0bl0x10501050/roblox.py | cbbb25878627c2d837caaeb7edf37d0aeda615ae | [
"MIT"
] | null | null | null | import logging
import requests | 26.697674 | 93 | 0.722997 |
42a99e600220ea6f0c20b482db83263664318f69 | 1,305 | py | Python | resources/nuice_simulations/src/layers_sim/layers_sim_node.py | SpyGuyIan/NUice | 47991a848dac244b4c476b4a92f7a27a1f9e5dcc | [
"MIT"
] | 1 | 2021-08-17T00:40:42.000Z | 2021-08-17T00:40:42.000Z | resources/nuice_simulations/src/layers_sim/layers_sim_node.py | SpyGuyIan/NUice | 47991a848dac244b4c476b4a92f7a27a1f9e5dcc | [
"MIT"
] | 1 | 2021-01-31T17:15:40.000Z | 2021-01-31T17:15:40.000Z | resources/nuice_simulations/src/layers_sim/layers_sim_node.py | NUMarsIce/NUice | 47991a848dac244b4c476b4a92f7a27a1f9e5dcc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
import random
possibleLayers = [140, 50, 80, 200, 100]
cur_position = 0.0
#Build the layers simulation, then publish material strengths. Lasts 100 seconds.
#Get the strength of the next layer from the list of possible layer strengths.
#Build the next layer of the simulation.
if __name__ == '__main__':
runLayersSim()
| 29 | 81 | 0.691954 |
42a9a106ced30891f6bde30e0be69f4978578110 | 1,121 | py | Python | imagescraper/imagescraper/spiders/image_crawl_spider.py | karthikn2789/Scrapy-Projects | 84db4ed1a2f38d6fa03d1bfa6a6ebf9fb527f523 | [
"MIT"
] | 2 | 2021-04-08T12:48:10.000Z | 2021-06-16T09:42:39.000Z | imagescraper/imagescraper/spiders/image_crawl_spider.py | karthikn2789/Scrapy-Projects | 84db4ed1a2f38d6fa03d1bfa6a6ebf9fb527f523 | [
"MIT"
] | null | null | null | imagescraper/imagescraper/spiders/image_crawl_spider.py | karthikn2789/Scrapy-Projects | 84db4ed1a2f38d6fa03d1bfa6a6ebf9fb527f523 | [
"MIT"
] | 6 | 2020-08-05T09:45:39.000Z | 2021-11-16T14:05:20.000Z | import scrapy
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import ImagescraperItem
| 36.16129 | 92 | 0.611954 |
42a9d7f0de1fb5aee832bc5e97c48ecbdecd3930 | 10,460 | py | Python | scripts/pos_eval.py | ProKil/sparse-text-prototype | e7369dc981fb2c2a94ccb4edca4a7e7c7d7543cd | [
"MIT"
] | 19 | 2020-11-05T12:17:45.000Z | 2021-11-17T08:43:50.000Z | scripts/pos_eval.py | ProKil/sparse-text-prototype | e7369dc981fb2c2a94ccb4edca4a7e7c7d7543cd | [
"MIT"
] | 1 | 2021-07-08T13:30:15.000Z | 2021-07-08T13:30:15.000Z | scripts/pos_eval.py | ProKil/sparse-text-prototype | e7369dc981fb2c2a94ccb4edca4a7e7c7d7543cd | [
"MIT"
] | 2 | 2020-12-20T13:19:14.000Z | 2021-06-25T20:18:00.000Z | import os
import argparse
import subprocess
import random
import edlib
from typing import List
from collections import Counter
import stanza
# CLI script: evaluates prototype/example pairs (surface BLEU, POS BLEU,
# per-POS precision/recall/F1 and edit-operation counts) and writes a report
# to <exp-dir>/analysis_<prefix>_res.txt.  The helpers read_file, write_file,
# generate_rand_prototype, sentence_bleu, eval_f1 and eval_edit are defined
# elsewhere in this script (not shown here).
parser = argparse.ArgumentParser(description='Evaluate analysis metrics')
parser.add_argument('--prefix', type=str, choices=['inference', 'generation'],
                    help='prediction file prefix')
parser.add_argument('--exp-dir', type=str, help='output directory')
args = parser.parse_args()
fout = open(os.path.join(args.exp_dir, 'analysis_{}_res.txt'.format(args.prefix)), 'w')
len_cut = 1000
prototypes, examples = read_file(os.path.join(args.exp_dir, '{}_analysis_input.txt'.format(args.prefix)), len_cut=len_cut)
# Output paths for the raw and POS-tagged prototype/example files; the *_rand
# variants hold a randomly drawn prototype baseline.
prototype_path = os.path.join(args.exp_dir, 'prototype.txt')
prototype_pos_path = os.path.join(args.exp_dir, 'prototype_pos.txt')
prototype_rand_path = os.path.join(args.exp_dir, 'prototype_rand.txt')
prototype_pos_rand_path = os.path.join(args.exp_dir, 'prototype_pos_rand.txt')
example_path = os.path.join(args.exp_dir, 'example.txt')
example_pos_path = os.path.join(args.exp_dir, 'example_pos.txt')
prototypes_rand = generate_rand_prototype(args.exp_dir, len(examples))
write_file(prototype_path, prototypes)
write_file(example_path, examples)
write_file(prototype_rand_path, prototypes_rand)
# surface BLEU: random-prototype baseline first, then the real prototypes.
# bleu = subprocess.getoutput(
#     "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_path, example_rand_path))
bleu = sentence_bleu(prototype_rand_path, example_path)
print('Regular BLEU (random baseline): \n{}'.format(bleu))
fout.write('Regular BLEU (random baseline): \n{}'.format(bleu))
fout.write('\n\n\n')
# bleu = subprocess.getoutput(
#     "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_path, example_path))
bleu = sentence_bleu(prototype_path, example_path)
print('Regular BLEU: \n{}'.format(bleu))
fout.write('Regular BLEU: \n{}'.format(bleu))
fout.write('\n\n\n')
# POS tagging with stanza; pretokenized mode so one input line == one sentence.
print('POS tagging')
nlp = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos', tokenize_pretokenized=True)
prototype_doc = nlp('\n'.join(prototypes))
example_doc = nlp('\n'.join(examples))
prototype_rand_doc = nlp('\n'.join(prototypes_rand))
prototypes_pos = [[word.upos for word in sent.words] for sent in prototype_doc.sentences]
examples_pos = [[word.upos for word in sent.words] for sent in example_doc.sentences]
prototypes_pos_rand = [[word.upos for word in sent.words]for sent in prototype_rand_doc.sentences]
write_file(prototype_pos_path, prototypes_pos)
write_file(example_pos_path, examples_pos)
write_file(prototype_pos_rand_path, prototypes_pos_rand)
# POS BLEU: BLEU computed over UPOS tag sequences instead of tokens.
# bleu = subprocess.getoutput(
#     "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_pos_path, example_pos_rand_path))
bleu = sentence_bleu(prototype_pos_rand_path, example_pos_path)
print('POS BLEU (random baseline): \n{}'.format(bleu))
fout.write('POS BLEU (random baseline): \n{}'.format(bleu))
fout.write('\n\n\n')
# bleu = subprocess.getoutput(
#     "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_pos_path, example_pos_path))
bleu = sentence_bleu(prototype_pos_path, example_pos_path)
print('POS BLEU: \n{}'.format(bleu))
fout.write('POS BLEU: \n{}'.format(bleu))
fout.write('\n\n\n')
# Per-POS precision/recall/F1, sorted by descending F1.  eval_f1 presumably
# returns {pos_tag: object with .recall/.precision/.f1} -- confirm.
print("compute precision, recall, f1")
assert len(prototypes) == len(prototypes_pos)
assert len(examples) == len(examples_pos)
res = eval_f1(list(prototype_rand_doc.sentences), list(example_doc.sentences))
res = sorted(res.items(), key=lambda item: -item[1].f1)
fout.write('random baseline precision-recall\n')
fout.write('POS recall precision f1\n')
for k, v in res:
    fout.write('{} {} {} {}\n'.format(k, v.recall, v.precision, v.f1))
fout.write('\n\n\n')
res = eval_f1(list(prototype_doc.sentences), list(example_doc.sentences))
res = sorted(res.items(), key=lambda item: -item[1].f1)
fout.write('precision-recall\n')
fout.write('POS recall precision f1\n')
for k, v in res:
    fout.write('{} {} {} {}\n'.format(k, v.recall, v.precision, v.f1))
fout.write('\n\n\n')
# Edit-operation statistics; eval_edit presumably returns
# {operation: Counter} given the .most_common() call below -- confirm.
print("edit analysis")
res = eval_edit(list(prototype_doc.sentences), list(example_doc.sentences))
total = sum([sum(v.values()) for k, v in res.items()])
fout.write('total: {}\n'.format(total))
res = sorted(res.items(), key=lambda item: (-sum(item[1].values())))
for k, v in res:
    fout.write('{}: {}\n'.format(k, sum(v.values())))
    for k1, v1 in v.most_common():
        fout.write('{}: {} ({:.3f}), '.format(k1, v1, v1 / sum(v.values())))
    fout.write('\n\n')
fout.close()
| 33.41853 | 122 | 0.603537 |
42aa82728f6722cbbdd0c68a0e10c8dd5f0958ee | 582 | py | Python | tests/rules/test_git_stash_pop.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
] | null | null | null | tests/rules/test_git_stash_pop.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
] | null | null | null | tests/rules/test_git_stash_pop.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
] | null | null | null | import pytest
from thefuck.rules.git_stash_pop import get_new_command
from thefuck.rules.git_stash_pop import match
from thefuck.types import Command
| 26.454545 | 96 | 0.707904 |
42ab556174e9603454893f6f485c837afcd3bad8 | 3,642 | py | Python | src/arima_model.py | SaharCarmel/ARIMA | c54e8554f1c4a95c25687bdf35b4296ed6bd78d6 | [
"MIT"
] | null | null | null | src/arima_model.py | SaharCarmel/ARIMA | c54e8554f1c4a95c25687bdf35b4296ed6bd78d6 | [
"MIT"
] | null | null | null | src/arima_model.py | SaharCarmel/ARIMA | c54e8554f1c4a95c25687bdf35b4296ed6bd78d6 | [
"MIT"
] | null | null | null | """ The ARIMA model. """
import torch
import numpy as np
| 34.358491 | 77 | 0.549149 |
42ab6fa034b5730a8c76b4b76e6056f1b558984c | 687 | py | Python | problems/slidingwindow/Solution1100.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | problems/slidingwindow/Solution1100.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | problems/slidingwindow/Solution1100.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | """
Sliding window
Given a string S, return the number of substrings of length K with no
repeated characters.
Example 1:
Input: S = "havefunonleetcode", K = 5 Output: 6 Explanation: There are 6
substrings they are : 'havef','avefu','vefun','efuno','etcod','tcode'.
counter havefunonleetcode
IDEA:
1) for each letter in the string setup a counter and
2) update unique counter each time when counter[let] hits 0, 1 or 2 (magic numbers)
aaabac
|||
123
0) a:3 unique=0
1) a:2 b:1 unique=1
2) a:2 b:1 unique=1
3) a:2 b:1 c:1 unique=1+2=3
"""
| 18.078947 | 86 | 0.58952 |
42ab8cf968e58717ef4f86c899c0440ef99114b5 | 26 | py | Python | the_file_propagator/__init__.py | joeflack4/the-file-propagator | c72fdad7774c82c8bfa6bf5253b83f6bb1e4e713 | [
"MIT"
] | null | null | null | the_file_propagator/__init__.py | joeflack4/the-file-propagator | c72fdad7774c82c8bfa6bf5253b83f6bb1e4e713 | [
"MIT"
] | null | null | null | the_file_propagator/__init__.py | joeflack4/the-file-propagator | c72fdad7774c82c8bfa6bf5253b83f6bb1e4e713 | [
"MIT"
] | null | null | null | """The File Propagator"""
| 13 | 25 | 0.653846 |
42ab9f264f4ecd8a53e0ce06b3bb77538b433100 | 4,681 | py | Python | src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py | WebArchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 3 | 2017-03-23T12:59:21.000Z | 2017-11-22T08:23:14.000Z | src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py | WebArchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 89 | 2015-06-28T22:10:28.000Z | 2017-01-30T16:06:05.000Z | src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py | WebarchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 1 | 2015-12-17T02:56:59.000Z | 2015-12-17T02:56:59.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: brython (http://brython.info) (like python3)
#
# Imports =====================================================================
from os.path import join
from browser import window
from browser import document
# virtual filesystem / modules provided by REST API
from virtual_fs import settings
# Functions & classes =========================================================
UserKeywordHandler = KeywordListHandler("user_keyword_list")
AlephKeywordHandler = KeywordListHandler("aleph_keyword_list")
AanalysisKeywordHandler = KeywordListHandler("analysis_keyword_list")
KeywordAdder.set_kw_typeahead_input()
| 29.25625 | 79 | 0.580432 |
42abdb34b5121a34132a5ff61f5b37cf1ca828bc | 53 | py | Python | scripts/rnn/gru/__init__.py | bfeng/CryptoGRU | 65f6fe9eba981fea65fc665ff16938bf3a593001 | [
"MIT"
] | 1 | 2022-01-12T03:18:55.000Z | 2022-01-12T03:18:55.000Z | scripts/rnn/gru/__init__.py | bfeng/CryptoGRU | 65f6fe9eba981fea65fc665ff16938bf3a593001 | [
"MIT"
] | null | null | null | scripts/rnn/gru/__init__.py | bfeng/CryptoGRU | 65f6fe9eba981fea65fc665ff16938bf3a593001 | [
"MIT"
] | null | null | null | from .grucell import MyGRUCell
from .gru import MyGRU | 26.5 | 30 | 0.830189 |
42b002236c965251bc510639be4dce4dd1300339 | 2,946 | py | Python | ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/kext-missing-weak-bind/test.py | 1079278593/TreasureChest | 8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8 | [
"MIT"
] | null | null | null | ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/kext-missing-weak-bind/test.py | 1079278593/TreasureChest | 8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8 | [
"MIT"
] | null | null | null | ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/kext-missing-weak-bind/test.py | 1079278593/TreasureChest | 8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8 | [
"MIT"
] | null | null | null | #!/usr/bin/python2.7
import os
import KernelCollection
# Check that weak binds can be missing, so long as we check for the magic symbol
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie -Wl,-sectcreate,__LINKINFO,__symbolsets,SymbolSets.plist -Wl,-segprot,__LINKINFO,r--,r-- main.c -o main.kernel
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info foo.c -o extensions/foo.kext/foo
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info bar.c -o extensions/bar.kext/bar -Wl,-fixup_chains
# [~]> rm -r extensions/*.kext/*.ld
| 62.680851 | 316 | 0.681942 |
42b0f3205382f72fca408d985411165330e27a01 | 7,453 | py | Python | datahub/search/investment/models.py | alixedi/data-hub-api-cd-poc | a5e5ea45bb496c0d2a06635864514af0c7d4291a | [
"MIT"
] | null | null | null | datahub/search/investment/models.py | alixedi/data-hub-api-cd-poc | a5e5ea45bb496c0d2a06635864514af0c7d4291a | [
"MIT"
] | 16 | 2020-04-01T15:25:35.000Z | 2020-04-14T14:07:30.000Z | datahub/search/investment/models.py | alixedi/data-hub-api-cd-poc | a5e5ea45bb496c0d2a06635864514af0c7d4291a | [
"MIT"
] | null | null | null | from elasticsearch_dsl import Boolean, Date, Double, Integer, Keyword, Long, Object, Text
from datahub.search import dict_utils
from datahub.search import fields
from datahub.search.models import BaseESModel
DOC_TYPE = 'investment_project'
def _related_investment_project_field():
    """Nested object field describing a related investment project.

    Exposes the project's id, name and project code for search.
    """
    related_properties = {
        'id': Keyword(),
        'name': fields.NormalizedKeyword(),
        'project_code': fields.NormalizedKeyword(),
    }
    return Object(properties=related_properties)
| 38.417526 | 99 | 0.70106 |
42b106aaf54e3b2c19e17572d5a63e648baf43b4 | 1,670 | py | Python | robust_sleep_net/models/modulo_net/features_encoder/fully_connected.py | Dreem-Organization/RobustSleepNet | c8ff3f6f857299eb2bf2e9400483084d5ecd4106 | [
"MIT"
] | 16 | 2021-04-06T14:04:45.000Z | 2022-03-11T14:37:08.000Z | robust_sleep_net/models/modulo_net/features_encoder/fully_connected.py | Dreem-Organization/RobustSleepNet | c8ff3f6f857299eb2bf2e9400483084d5ecd4106 | [
"MIT"
] | null | null | null | robust_sleep_net/models/modulo_net/features_encoder/fully_connected.py | Dreem-Organization/RobustSleepNet | c8ff3f6f857299eb2bf2e9400483084d5ecd4106 | [
"MIT"
] | 4 | 2021-06-10T06:48:33.000Z | 2022-03-26T22:29:07.000Z | from collections import OrderedDict
import torch
from torch import nn
| 32.115385 | 96 | 0.426946 |
35e91cbc49c53f3ff38da3a05748e14783d919ce | 2,968 | py | Python | data/rawdata_dataset.py | weiyw16/pytorch-CycleGAN-and-pix2pix | 432a91ee6ca8dc606ba0116b27b0948abc48f295 | [
"BSD-3-Clause"
] | null | null | null | data/rawdata_dataset.py | weiyw16/pytorch-CycleGAN-and-pix2pix | 432a91ee6ca8dc606ba0116b27b0948abc48f295 | [
"BSD-3-Clause"
] | null | null | null | data/rawdata_dataset.py | weiyw16/pytorch-CycleGAN-and-pix2pix | 432a91ee6ca8dc606ba0116b27b0948abc48f295 | [
"BSD-3-Clause"
] | null | null | null | #import
import os
#import torch
#import torch.nn as nn
import torch.utils.data as Data
#import torchvision
import matplotlib.pyplot as plt
import h5py
#from torch.autograd import Variable
import numpy as np
import torch
#%hist -f rawdata_dataset.py
| 38.545455 | 97 | 0.597035 |
35eca7541efb5afc537b44ba4b6a0fc5cf5a30dd | 310 | py | Python | pythons/pythons/pythons_app/urls.py | BoyanPeychinov/python_web_framework | bb3a78c36790821d8b3a2b847494a1138d063193 | [
"MIT"
] | null | null | null | pythons/pythons/pythons_app/urls.py | BoyanPeychinov/python_web_framework | bb3a78c36790821d8b3a2b847494a1138d063193 | [
"MIT"
] | null | null | null | pythons/pythons/pythons_app/urls.py | BoyanPeychinov/python_web_framework | bb3a78c36790821d8b3a2b847494a1138d063193 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from .views import IndexView
urlpatterns = [
# path('', views.index, name="index"),
path('', IndexView.as_view(), name="index"),
# path('create/', views.create, name="create"),
path('create/', views.PythonCreateView.as_view(), name="create"),
] | 31 | 69 | 0.66129 |
35ed1f868aeb38f0c96a30ed7f9536e255837e20 | 356 | py | Python | tests/python/text_utility.py | Noxsense/mCRL2 | dd2fcdd6eb8b15af2729633041c2dbbd2216ad24 | [
"BSL-1.0"
] | 61 | 2018-05-24T13:14:05.000Z | 2022-03-29T11:35:03.000Z | tests/python/text_utility.py | Noxsense/mCRL2 | dd2fcdd6eb8b15af2729633041c2dbbd2216ad24 | [
"BSL-1.0"
] | 229 | 2018-05-28T08:31:09.000Z | 2022-03-21T11:02:41.000Z | tests/python/text_utility.py | Noxsense/mCRL2 | dd2fcdd6eb8b15af2729633041c2dbbd2216ad24 | [
"BSL-1.0"
] | 28 | 2018-04-11T14:09:39.000Z | 2022-02-25T15:57:39.000Z | #~ Copyright 2014 Wieger Wesselink.
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
| 29.666667 | 82 | 0.691011 |
35ed5dd8d0b8879efae47e20f5661656b1666fbb | 159 | py | Python | Dumper/temp.py | NeerajGulia/kafka-monitor | cfcd39a37d22c86d3cebffe289687a030bb84353 | [
"Apache-2.0"
] | null | null | null | Dumper/temp.py | NeerajGulia/kafka-monitor | cfcd39a37d22c86d3cebffe289687a030bb84353 | [
"Apache-2.0"
] | null | null | null | Dumper/temp.py | NeerajGulia/kafka-monitor | cfcd39a37d22c86d3cebffe289687a030bb84353 | [
"Apache-2.0"
] | null | null | null | import datetime
# Two timestamps exactly one day apart.
t1 = datetime.datetime(2019, 3, 9, 10, 55, 30, 991882)
t2 = datetime.datetime(2019, 3, 10, 10, 55, 30, 991882)
# Their difference is a timedelta; report it as a float of seconds (86400.0).
gap = t2 - t1
print(gap.total_seconds())
35ee497682f551e6df5ef747e053a1c6578b24fe | 1,401 | py | Python | listools/llogic/is_descending.py | jgarte/listools | 17ef56fc7dde701890213f248971d8dc7a6e6b7c | [
"MIT"
] | 2 | 2019-01-22T03:50:43.000Z | 2021-04-22T16:12:17.000Z | listools/llogic/is_descending.py | jgarte/listools | 17ef56fc7dde701890213f248971d8dc7a6e6b7c | [
"MIT"
] | 2 | 2019-01-22T03:57:49.000Z | 2021-04-22T22:03:47.000Z | listools/llogic/is_descending.py | jgarte/listools | 17ef56fc7dde701890213f248971d8dc7a6e6b7c | [
"MIT"
def is_descending(input_list: list, step: int = -1) -> bool:
    r"""llogic.is_descending(input_list[, step])

    Return True if ``input_list`` descends with a fixed negative ``step``,
    otherwise False. Usage:

    >>> llogic.is_descending([3, 2, 1, 0])
    True

    The final value can be other than zero and elements may be negative:

    >>> llogic.is_descending([12, 11, 10])
    True
    >>> llogic.is_descending([2, 1, 0, -1, -2])
    True

    It returns False if the list is not descending by ``step``:

    >>> llogic.is_descending([6, 5, 9, 2])
    False

    By default, the function uses steps of size 1, so the list below is not
    considered as descending:

    >>> llogic.is_descending([7, 5, 3, 1])
    False

    But the user can set the step argument to any negative value:

    >>> llogic.is_descending([7, 5, 3, 1], -2)
    True

    Raises TypeError for non-list / non-int arguments and ValueError when
    ``step`` is not negative.
    """
    if not isinstance(input_list, list):
        raise TypeError('\'input_list\' must be \'list\'')
    if not isinstance(step, int):
        raise TypeError('\'step\' must be \'int\'')
    # Bug fix: the original guard was `step > 1`, which let the invalid
    # values 0 and 1 through even though the error message (and the
    # algorithm) require a negative step. Reject every non-negative step.
    if step >= 0:
        raise ValueError('\'step\' must be < 0')
    # Robustness: an empty list is vacuously descending (the original
    # crashed inside max()/min() here).
    if not input_list:
        return True
    # The list descends by `step` exactly when it equals the arithmetic
    # sequence from its maximum down to its minimum.
    aux_list = list(range(max(input_list), min(input_list) - 1, step))
    return input_list == aux_list
| 27.470588 | 75 | 0.608851 |
35ef2ec3e738f6a7d680ddbb0d8cfed8a80181c4 | 384 | py | Python | blazer/hpc/local/__init__.py | radiantone/blazer | 4f369729a72a397a5a472f081002bf24cf22b69c | [
"CC0-1.0"
] | 4 | 2022-02-11T13:37:03.000Z | 2022-02-26T00:25:13.000Z | blazer/hpc/local/__init__.py | radiantone/blazer | 4f369729a72a397a5a472f081002bf24cf22b69c | [
"CC0-1.0"
] | null | null | null | blazer/hpc/local/__init__.py | radiantone/blazer | 4f369729a72a397a5a472f081002bf24cf22b69c | [
"CC0-1.0"
] | null | null | null | from functools import partial
from pipe import select, where
from pydash import chunk
from pydash import filter_ as filter
from pydash import flatten, get, omit
from .primitives import parallel, pipeline, scatter
__all__ = (
"parallel",
"scatter",
"pipeline",
"partial",
"select",
"where",
"flatten",
"chunk",
"omit",
"get",
"filter",
)
| 16.695652 | 51 | 0.648438 |
35f130f559ed7cd7af033555dccc66ba4d2035c4 | 304 | py | Python | resumebuilder/resumebuilder.py | kinshuk4/ResumeBuilder | 2c997f73b522c0668f3a66afb372bd91c6408b3c | [
"MIT"
] | 1 | 2020-01-04T05:54:19.000Z | 2020-01-04T05:54:19.000Z | resumebuilder/resumebuilder.py | kinshuk4/ResumeBuilder | 2c997f73b522c0668f3a66afb372bd91c6408b3c | [
"MIT"
] | null | null | null | resumebuilder/resumebuilder.py | kinshuk4/ResumeBuilder | 2c997f73b522c0668f3a66afb372bd91c6408b3c | [
"MIT"
] | null | null | null | import yaml
if __name__ == '__main__':
main()
| 17.882353 | 45 | 0.664474 |
35f16309c334902b0ed8ed87b8f07d61caa46a9a | 6,025 | py | Python | backend/tests/unittests/metric_source/test_report/junit_test_report_tests.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 25 | 2016-11-25T10:41:24.000Z | 2021-07-03T14:02:49.000Z | backend/tests/unittests/metric_source/test_report/junit_test_report_tests.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 783 | 2016-09-19T12:10:21.000Z | 2021-01-04T20:39:15.000Z | backend/tests/unittests/metric_source/test_report/junit_test_report_tests.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 15 | 2015-03-25T13:52:49.000Z | 2021-03-08T17:17:56.000Z | """
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import unittest
from unittest.mock import Mock
import urllib.error
from dateutil.tz import tzutc, tzlocal
from hqlib.metric_source import JunitTestReport
| 47.81746 | 113 | 0.64249 |
35f24e93301e26ad076b53b869df2630d390d615 | 965 | py | Python | lang/Python/compare-sorting-algorithms-performance-6.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | 1 | 2018-11-09T22:08:38.000Z | 2018-11-09T22:08:38.000Z | lang/Python/compare-sorting-algorithms-performance-6.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/compare-sorting-algorithms-performance-6.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z | sort_functions = [
builtinsort, # see implementation above
insertion_sort, # see [[Insertion sort]]
insertion_sort_lowb, # ''insertion_sort'', where sequential search is replaced
# by lower_bound() function
qsort, # see [[Quicksort]]
qsortranlc, # ''qsort'' with randomly choosen ''pivot''
# and the filtering via list comprehension
qsortranpart, # ''qsortranlc'' with filtering via ''partition'' function
qsortranpartis, # ''qsortranpart'', where for a small input sequence lengths
] # ''insertion_sort'' is called
if __name__=="__main__":
import sys
sys.setrecursionlimit(10000)
write_timings(npoints=100, maxN=1024, # 1 <= N <= 2**10 an input sequence length
sort_functions=sort_functions,
sequence_creators = (ones, range, shuffledrange))
plot_timings()
| 50.789474 | 85 | 0.598964 |
35f445a5ba07dee2c2143db897f87a8a3259db16 | 6,300 | py | Python | server/organization/tests.py | NicholasNagy/ALTA | ca07627481ee91f2969b0fc8e8f15e2a37b3e992 | [
"Apache-2.0"
] | 3 | 2020-09-09T23:26:29.000Z | 2020-10-17T22:58:34.000Z | server/organization/tests.py | NicholasNagy/ALTA | ca07627481ee91f2969b0fc8e8f15e2a37b3e992 | [
"Apache-2.0"
] | 294 | 2020-09-27T17:20:50.000Z | 2021-06-23T01:44:09.000Z | server/organization/tests.py | NicholasNagy/ALTA | ca07627481ee91f2969b0fc8e8f15e2a37b3e992 | [
"Apache-2.0"
] | 10 | 2020-10-07T05:25:30.000Z | 2021-05-01T05:32:59.000Z | from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.test import APIClient
from django.db.models import signals
import factory
from user_account.models import CustomUser
from .models import Organization
| 45.985401 | 161 | 0.686667 |
35f470bfac10a58409ff19aa1d364eb85ab7359d | 1,656 | py | Python | src/mumblecode/convert.py | Mumbleskates/mumblecode | 0221c33a09df154bf80ece73ff907c51d2a971f0 | [
"MIT"
] | 1 | 2016-05-17T23:07:38.000Z | 2016-05-17T23:07:38.000Z | src/mumblecode/convert.py | Mumbleskates/mumblecode | 0221c33a09df154bf80ece73ff907c51d2a971f0 | [
"MIT"
] | null | null | null | src/mumblecode/convert.py | Mumbleskates/mumblecode | 0221c33a09df154bf80ece73ff907c51d2a971f0 | [
"MIT"
] | null | null | null | # coding=utf-8
from math import log2, ceil
# valid chars for a url path component: a-z A-Z 0-9 .-_~!$&'()*+,;=:@
# For the default set here (base 72) we have excluded $'();:@
radix_alphabet = ''.join(sorted(
    "0123456789"
    "abcdefghijklmnopqrstuvwxyz"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    ".-_~!&*+,="
))
radix = len(radix_alphabet)
radix_lookup = {ch: i for i, ch in enumerate(radix_alphabet)}
length_limit = ceil(128 / log2(radix))  # don't decode numbers much over 128 bits


# TODO: add radix alphabet as parameter
# TODO: fix format so length conveys more information (e.g. 0 and 00 and 000 are different with decimal alphabet)
def natural_to_url(n):
    """Accepts a non-negative int and returns a url-compatible string
    representing it (digits are emitted least-significant first).

    Raises ValueError for negative input.
    """
    # Bug fix: a negative n previously made the divmod loop spin forever
    # (divmod(-1, radix) == (-1, radix - 1) keeps n at -1); reject it.
    if n < 0:
        raise ValueError("natural_to_url() requires a non-negative integer")
    url = ""
    while n:
        n, digit = divmod(n, radix)
        url += radix_alphabet[digit]
    # zero has no digits from the loop; represent it as the zero digit
    return url or radix_alphabet[0]


def url_to_natural(url):
    """Accepts a string and extracts the int it represents in this radix
    encoding. Returns None for an empty string, a string containing
    characters outside the alphabet, or a string longer than length_limit
    (numbers much over 128 bits are refused).
    """
    if not url or len(url) > length_limit:
        return None
    n = 0
    try:
        # digits are stored least-significant first, so decode in reverse
        for ch in reversed(url):
            n = n * radix + radix_lookup[ch]
    except KeyError:
        # character outside the alphabet
        return None
    return n
| 24.352941 | 114 | 0.634662 |
35f52784cb920f6695ea0214e66ce046c4ba0969 | 961 | py | Python | flaskapp/routes.py | vijay0707/Send-Email-Flask | 3e8f981c5ef4c4051f61b5229eb3e56a35142bc7 | [
"MIT"
] | null | null | null | flaskapp/routes.py | vijay0707/Send-Email-Flask | 3e8f981c5ef4c4051f61b5229eb3e56a35142bc7 | [
"MIT"
] | null | null | null | flaskapp/routes.py | vijay0707/Send-Email-Flask | 3e8f981c5ef4c4051f61b5229eb3e56a35142bc7 | [
"MIT"
] | null | null | null | from flaskapp import app, db, mail
from flask import render_template, url_for
from flask import request, flash, redirect
# from flaskapp.model import User
from flaskapp.form import SurveyForm
from flask_mail import Message
| 32.033333 | 97 | 0.632674 |
35f622ff3fa5187c3265b7d1252636eaf5af175d | 5,708 | py | Python | tests/test_camera.py | Gokender/kameramera | 7ebd9a196809c1e7ab117bb11b90bcea8d1eb8e7 | [
"MIT"
] | null | null | null | tests/test_camera.py | Gokender/kameramera | 7ebd9a196809c1e7ab117bb11b90bcea8d1eb8e7 | [
"MIT"
] | null | null | null | tests/test_camera.py | Gokender/kameramera | 7ebd9a196809c1e7ab117bb11b90bcea8d1eb8e7 | [
"MIT"
] | null | null | null | import unittest
from kameramera import camera | 31.711111 | 82 | 0.637176 |
35f678cde08c5ff864121819c46adfa1fdba45f0 | 887 | py | Python | app/coordinates.py | krasch/simply_landmarks | 8a5c3f2ff476377e44646a00e61b8287a53260e3 | [
"MIT"
] | 14 | 2020-02-03T22:30:48.000Z | 2021-11-01T09:41:34.000Z | app/coordinates.py | krasch/simply_landmarks | 8a5c3f2ff476377e44646a00e61b8287a53260e3 | [
"MIT"
] | 3 | 2020-11-28T17:24:28.000Z | 2022-01-26T19:56:35.000Z | app/coordinates.py | krasch/simply_landmarks | 8a5c3f2ff476377e44646a00e61b8287a53260e3 | [
"MIT"
] | 4 | 2020-10-11T21:26:53.000Z | 2021-09-14T03:59:20.000Z | from pathlib import Path
from PIL import Image
# coordinates are sent as slightly weird URL parameters (e.g. 0.png?214,243)
# parse them, will crash server if they are coming in unexpected format
# image was not displayed in original size -> need to convert the coordinates
| 27.71875 | 77 | 0.67982 |
35f6bdfd466ccfcc3ec731821bd0d70b92cb5b92 | 2,851 | py | Python | lib/tool_images.py | KTingLee/image-training | c02c7caa81a55b61e935d07ead27bcaed468eb0a | [
"MIT"
] | null | null | null | lib/tool_images.py | KTingLee/image-training | c02c7caa81a55b61e935d07ead27bcaed468eb0a | [
"MIT"
] | 2 | 2021-01-22T09:10:33.000Z | 2021-01-22T14:22:09.000Z | lib/tool_images.py | KTingLee/image-training | c02c7caa81a55b61e935d07ead27bcaed468eb0a | [
"MIT"
] | 1 | 2021-01-22T08:56:34.000Z | 2021-01-22T08:56:34.000Z | import matplotlib.pyplot as plt
import numpy as np
import math
import cv2
# 3x3 structuring element shared by the morphological helpers in this module.
kernel = np.ones((3, 3), np.int8)


# Demo pipeline: read a sample captcha image, clean it up, locate the
# character bounding boxes and cut the characters out for later training.
# NOTE(review): the helpers used below (eraseImage, blurImage, edgedImage,
# dilateImage, getCharBox, showCharBox, resizeImage) are defined elsewhere
# in this module and are not visible here -- confirm their contracts there.
if __name__ == '__main__':
    # Load the sample captcha (returns None if the relative path is wrong).
    pic = cv2.imread('../captcha_Images/0.png')
    print(pic)
    cv2.imshow('pic', pic)
    cv2.waitKey(0)
    # Cleanup chain (per the helper names): erode, blur, edge-detect, dilate.
    erosion = eraseImage(pic)
    blured = blurImage(erosion)
    edged = edgedImage(blured)
    dilated = dilateImage(edged)
    # Locate per-character boxes on the dilated image and visualise them.
    charBox = getCharBox(dilated)
    showCharBox(dilated, charBox)
    # Re-dilate with a (4, 4) kernel before extracting/resizing characters.
    dilated = dilateImage(edged, (4, 4))
    chars = resizeImage(dilated, charBox)
    # input("Press Enter to continue.")
    # c = result[0][0][0][0]
    # print(c)
    # plt.plot(c)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 25.684685 | 88 | 0.62785 |
35f6e6f91f9e05d76fd7957364cd9c3157a56978 | 2,965 | py | Python | Code/geneset_testing.py | dylkot/EbolaSC | d363f9d2c10911f01c7b1d22fec2b192df2569b1 | [
"MIT"
] | 2 | 2020-09-28T09:27:33.000Z | 2021-01-04T09:16:42.000Z | Code/geneset_testing.py | dylkot/SC-Ebola | d363f9d2c10911f01c7b1d22fec2b192df2569b1 | [
"MIT"
] | null | null | null | Code/geneset_testing.py | dylkot/SC-Ebola | d363f9d2c10911f01c7b1d22fec2b192df2569b1 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from scipy.stats import mannwhitneyu, fisher_exact, ranksums
def load_geneset(gmtfn, genes=None, minsize=0):
    '''
    Load genesets stored in gmt format (e.g. as provided by msigdb).

    Each gmt line is tab-separated: set name, description, then the
    member genes.

    gmtfn : str
        path to gmt file
    genes : list, optional
        only include genes in this input
    minsize : int, optional
        minimum geneset size (number of member genes) to keep

    Returns
    -------
    gsets : dict
        gene_set_name : set of genes
    allsetgenes : set
        set of genes found in all genesets combined
    '''
    allsetgenes = set()
    if genes is not None:
        genes = set(genes)
    gsets = {}
    with open(gmtfn) as gmt_file:
        for line in gmt_file:
            words = line.rstrip().split('\t')
            gsetname = words[0]
            # Columns 0 and 1 are the set name and description; member
            # genes start at column 2.
            setgenes = set(words[2:])
            if genes is not None:
                setgenes = setgenes.intersection(genes)
            # Bug fix: the original sliced the gene list twice (words[2:]
            # and then setgenes[2:]), silently dropping the first two genes
            # of every set, and compared the count against minsize + 2.
            # Both adjustments belonged to a draft where the name and
            # description columns had not yet been stripped; compare the
            # actual gene count against minsize instead.
            if setgenes and len(setgenes) >= minsize:
                gsets[gsetname] = setgenes
                allsetgenes = allsetgenes.union(setgenes)
    return gsets, allsetgenes
| 29.949495 | 89 | 0.57774 |
35f85f5cb5fab6226fab7a5a01b0882ca5ca7ca9 | 54 | py | Python | tests/src/import_func.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
] | null | null | null | tests/src/import_func.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
] | 1 | 2022-03-12T20:41:21.000Z | 2022-03-13T06:34:30.000Z | tests/src/import_func.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
] | null | null | null | from testlib_a.main_a import print_name
print_name()
| 13.5 | 39 | 0.833333 |
35f901a5b14d9bb965c94938ad6cacba20eb8f77 | 2,167 | py | Python | nn_wtf/parameter_optimizers/brute_force_optimizer.py | lene/nn-wtf | 4696f143d936e0c0c127847e3bb1e93a6e756d35 | [
"Apache-2.0"
] | null | null | null | nn_wtf/parameter_optimizers/brute_force_optimizer.py | lene/nn-wtf | 4696f143d936e0c0c127847e3bb1e93a6e756d35 | [
"Apache-2.0"
] | 20 | 2016-02-20T12:43:04.000Z | 2016-12-23T13:57:25.000Z | nn_wtf/parameter_optimizers/brute_force_optimizer.py | lene/nn-wtf | 4696f143d936e0c0c127847e3bb1e93a6e756d35 | [
"Apache-2.0"
] | null | null | null | import pprint
from nn_wtf.parameter_optimizers.neural_network_optimizer import NeuralNetworkOptimizer
__author__ = 'Lene Preuss <lene.preuss@gmail.com>'
| 38.696429 | 119 | 0.677434 |
35f926086eaca9043bf3f10e9c0ac0804430ebb4 | 1,856 | py | Python | tests/test_get_value.py | mdpiper/bmi-example-python | e6b1e9105daef44fe1f0adba5b857cde1bbd032a | [
"MIT"
] | 3 | 2020-10-20T08:59:19.000Z | 2021-10-18T17:57:06.000Z | tests/test_get_value.py | mdpiper/bmi-example-python | e6b1e9105daef44fe1f0adba5b857cde1bbd032a | [
"MIT"
] | 4 | 2019-04-19T20:07:15.000Z | 2021-01-28T23:34:35.000Z | tests/test_get_value.py | mdpiper/bmi-example-python | e6b1e9105daef44fe1f0adba5b857cde1bbd032a | [
"MIT"
] | 7 | 2020-08-05T17:25:34.000Z | 2021-09-08T21:38:33.000Z | #!/usr/bin/env python
from numpy.testing import assert_array_almost_equal, assert_array_less
import numpy as np
from heat import BmiHeat
| 24.746667 | 82 | 0.715517 |
35fac5891884a7fafbd906447065470f94dbe9cf | 9,158 | py | Python | tensorflow/dgm/exp.py | goldfarbDave/vcl | 24fb33a1dcadfa6c6cf5e9e9838b64f4fd23143a | [
"Apache-2.0"
] | null | null | null | tensorflow/dgm/exp.py | goldfarbDave/vcl | 24fb33a1dcadfa6c6cf5e9e9838b64f4fd23143a | [
"Apache-2.0"
] | null | null | null | tensorflow/dgm/exp.py | goldfarbDave/vcl | 24fb33a1dcadfa6c6cf5e9e9838b64f4fd23143a | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorflow as tf
import sys, os
sys.path.extend(['alg/', 'models/'])
from visualisation import plot_images
from encoder_no_shared import encoder, recon
from utils import init_variables, save_params, load_params, load_data
from eval_test_ll import construct_eval_func
# Experiment hyper-parameters, passed straight through to main() below.
dimZ = 50  # latent code size (z); presumably the VAE latent dim -- confirm in main()
dimH = 500  # hidden-layer width
n_channel = 128
batch_size = 50
lr = 1e-4  # NOTE(review): defined here but not referenced in this snippet
K_mc = 10  # Monte Carlo sample count passed to main()
checkpoint = -1  # -1 presumably means "no checkpoint to resume" -- confirm in main()
# CLI usage: exp.py <data_name> <method> [<lambda>]
# <method> selects the continual-learning scheme; <lambda> (sys.argv[3]) is
# only read for methods other than 'onlinevi'.
if __name__ == '__main__':
    data_name = str(sys.argv[1])
    method = str(sys.argv[2])
    assert method in ['noreg', 'laplace', 'ewc', 'si', 'onlinevi']
    if method == 'onlinevi':
        lbd = 1.0 # some placeholder, doesn't matter
    else:
        lbd = float(sys.argv[3])
    # NOTE(review): main() is defined elsewhere in this module.
    main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd)
| 40.166667 | 102 | 0.597183 |
35fb641cc4c232d5e95579ae3bf4fec4904fbdf7 | 1,663 | py | Python | src/cltl/combot/infra/config/k8config.py | leolani/cltl-combot | 7008742ba9db782166f79322658a8cb49890d61b | [
"MIT"
] | 1 | 2020-11-21T18:53:22.000Z | 2020-11-21T18:53:22.000Z | src/cltl/combot/infra/config/k8config.py | leolani/cltl-combot | 7008742ba9db782166f79322658a8cb49890d61b | [
"MIT"
] | null | null | null | src/cltl/combot/infra/config/k8config.py | leolani/cltl-combot | 7008742ba9db782166f79322658a8cb49890d61b | [
"MIT"
] | null | null | null | import logging
import os
import cltl.combot.infra.config.local as local_config
logger = logging.getLogger(__name__)
K8_CONFIG_DIR = "/cltl_k8_config"
K8_CONFIG = "config/k8.config"
| 36.955556 | 116 | 0.710764 |
35fb6a7aec8441ab62bd7a834d5a31a1a31bbbcf | 17,640 | py | Python | act_map/scripts/exp_compare_diff_maps.py | debugCVML/rpg_information_field | 56f9ffba83aaee796502116e1cf651c5bc405bf6 | [
"MIT"
] | 149 | 2020-06-23T12:08:47.000Z | 2022-03-31T08:18:52.000Z | act_map/scripts/exp_compare_diff_maps.py | debugCVML/rpg_information_field | 56f9ffba83aaee796502116e1cf651c5bc405bf6 | [
"MIT"
] | 4 | 2020-08-28T07:51:15.000Z | 2021-04-09T13:18:49.000Z | act_map/scripts/exp_compare_diff_maps.py | debugCVML/rpg_information_field | 56f9ffba83aaee796502116e1cf651c5bc405bf6 | [
"MIT"
] | 34 | 2020-06-26T14:50:34.000Z | 2022-03-04T06:45:55.000Z | #!/usr/bin/env python
import os
import argparse
import yaml
import numpy as np
from colorama import init, Fore, Style
from matplotlib import rc
import matplotlib.pyplot as plt
import plot_utils as pu
init(autoreset=True)
rc('font', **{'serif': ['Cardo'], 'size': 20})
rc('text', usetex=True)
kMetrics = ['det', 'mineig', 'trace']
kMetricsLabels = ['$\det$', '$\lambda_{min}$', '${Tr}$']
kSecToUs = 1.0e6
kPallete = [
'blue', 'green', 'red', 'gold', 'purple', 'gray', 'cyan',
'midnightblue', 'lime', 'lightcoral', 'darkgoldenrod', 'violet', 'dimgray', 'darkorange',
'black'
]
# Entry point: load per-map evaluation results and generate all comparison
# tables/plots (complexity, FIM difference, optimal orientations, continuous
# motion), first for every configured map and then for the optional
# hand-picked subsets listed in the analyze config.
# NOTE(review): readResults and the _-prefixed analysis helpers used below
# are defined elsewhere in this module and are not visible here.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--res_dir', required=True)
    parser.add_argument('--analyze_config', required=True)
    parser.add_argument('--save_dir', type=str, default='analysis_results')
    parser.add_argument('--pc_res_key', type=str, default='quad_info_0p5')
    parser.set_defaults(map_names=['quad_info', 'quad_trace', 'gp_info', 'gp_trace'])
    args = parser.parse_args()
    print(Fore.YELLOW + args.__dict__.__str__())
    # The YAML config provides 'all_maps' (a list of {map_name: label}
    # dicts) plus optional 'sel_*' subset entries handled further down.
    with open(args.analyze_config, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    print("Read configurations:\n{}".format(cfg))
    map_names = []
    map_nm_to_label = {}
    for d in cfg['all_maps']:
        map_nm_to_label.update(d)
        for k in d:
            map_names.append(k)
    print(Fore.GREEN + "Maps to compare:\n- {}".format('\n- '.join(map_names)))
    print(Fore.GREEN + "Labels:\n{}".format(map_nm_to_label))
    # Only maps whose name contains 'info' take part in the FIM,
    # orientation and continuous-motion comparisons.
    fim_map_nms = [v for v in map_names if 'info' in v]
    compare_orient_map_nms = [v for v in map_names if 'info' in v]
    compare_cont_motion_map_nms = [v for v in map_names if 'info' in v]
    print("Will analyze FIM for {}".format(fim_map_nms))
    print("Will compare orientations for {}".format(compare_orient_map_nms))
    print("Will compare cont. motion for {}".format(compare_cont_motion_map_nms))
    # All outputs go to <res_dir>/<save_dir>; an existing folder is re-used
    # and its contents overwritten.
    save_dir = os.path.join(args.res_dir, args.save_dir)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    else:
        print(Fore.RED + "Save folder exists, will re-use and overwrite.")
    print("Going to save to {}".format(save_dir))
    # Load the result bundle for each map once; every analysis below reads
    # from this dict.
    map_nm_to_res = {}
    for map_nm in map_names:
        print(Fore.GREEN + "====> Reading {}...".format(map_nm))
        map_nm_to_res[map_nm] = readResults(args.res_dir, map_nm)
    print(Fore.YELLOW + Style.BRIGHT + "Start analysis.")
    print(Fore.GREEN + "1. Table of complexity.")
    _writeComplexityTable(map_nm_to_res, args.pc_res_key, map_names, map_nm_to_label,
                          os.path.join(save_dir, 'complexity_table.txt'))
    print(Fore.GREEN + "2. FIM difference.")
    map_nm_to_fim_diff_perc = _computeAndWriteFIMDiff(
        map_nm_to_res, fim_map_nms, map_nm_to_label, save_dir)
    _boxplotFIMDiffs(map_nm_to_fim_diff_perc, fim_map_nms, map_nm_to_label, save_dir)
    print(Fore.GREEN + "3. Optimal views.")
    map_nm_to_orient_diff = _computeAndWriteOptimalOrientDiff(
        map_nm_to_res, compare_orient_map_nms, map_nm_to_label, save_dir)
    _boxplotOrientDiffs(map_nm_to_orient_diff, compare_orient_map_nms, map_nm_to_label, save_dir)
    print(Fore.GREEN + "4. Continous motion.")
    _compareContinuousMotion(map_nm_to_res, compare_cont_motion_map_nms, map_nm_to_label, save_dir)
    # Re-run selected analyses for the subsets named in the config, writing
    # to a separate 'selected_results' folder.
    print(Fore.GREEN + Style.BRIGHT + "Start processing specified subsets...")
    sel_dir = os.path.join(save_dir, 'selected_results')
    if not os.path.exists(sel_dir):
        os.makedirs(sel_dir)
    if 'sel_complexity_table_entries' in cfg:
        print(Fore.GREEN + "- complexity table")
        _writeComplexityTable(map_nm_to_res, args.pc_res_key, cfg['sel_complexity_table_entries'], map_nm_to_label, os.path.join(
            sel_dir, 'complexity_table.txt'))
    if 'sel_fro_norm_table_entries' in cfg:
        print(Fore.GREEN + "- FIM diff. table")
        sel_fim_nms = cfg['sel_fro_norm_table_entries']
        sel_nm_to_fim_diff = _computeAndWriteFIMDiff(
            map_nm_to_res, sel_fim_nms, map_nm_to_label, sel_dir)
        _boxplotFIMDiffs(sel_nm_to_fim_diff, sel_fim_nms, map_nm_to_label, sel_dir)
    if 'sel_hist_orient_entries' in cfg:
        sel_orient_nms = cfg['sel_hist_orient_entries']
        print(Fore.GREEN + "- Orientation diff.")
        sel_nm_to_orient_diff = _computeAndWriteOptimalOrientDiff(
            map_nm_to_res, sel_orient_nms, map_nm_to_label, sel_dir)
        _boxplotOrientDiffs(sel_nm_to_orient_diff, sel_orient_nms, map_nm_to_label, sel_dir)
    if 'sel_cont_motion_plot' in cfg:
        print(Fore.GREEN + "- continuous motion")
        _compareContinuousMotion(
            map_nm_to_res, cfg['sel_cont_motion_plot'], map_nm_to_label, sel_dir)
| 41.505882 | 129 | 0.626361 |
35fbe8e8b4f1e1aa102f85306945ce878960b4de | 52 | py | Python | tests/conftest.py | grintor/Hello-Wolrd-CI | 1f1b8c40f55d0b35cd73601ed90567a84abf03db | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | grintor/Hello-Wolrd-CI | 1f1b8c40f55d0b35cd73601ed90567a84abf03db | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | grintor/Hello-Wolrd-CI | 1f1b8c40f55d0b35cd73601ed90567a84abf03db | [
"Apache-2.0"
] | null | null | null | # see: https://stackoverflow.com/a/34520971/3238695
| 26 | 51 | 0.769231 |
35fc69cf4551ec557452a3db41e67d9efead2ebf | 1,318 | py | Python | Files/SpeechRecognition/speechDandR.py | JahnaviDoneria/HomeAutomationSystem | 0419ba4a0fefd16b9a5c7a19fef7897d76850dc2 | [
"MIT"
] | null | null | null | Files/SpeechRecognition/speechDandR.py | JahnaviDoneria/HomeAutomationSystem | 0419ba4a0fefd16b9a5c7a19fef7897d76850dc2 | [
"MIT"
] | null | null | null | Files/SpeechRecognition/speechDandR.py | JahnaviDoneria/HomeAutomationSystem | 0419ba4a0fefd16b9a5c7a19fef7897d76850dc2 | [
"MIT"
] | 1 | 2020-01-20T13:04:55.000Z | 2020-01-20T13:04:55.000Z | import json
import apiai
import speech_recognition as sr
#speechRecognition()
| 23.122807 | 63 | 0.651745 |
35fcbb05f8e3b57b8ab5311822807b3114647a9f | 4,667 | py | Python | mylib/dataset/coco.py | duducheng/deeplabv3p_gluon | fd8e3e8d834838a9a221785b825499c62cee578f | [
"Apache-2.0"
] | 66 | 2018-07-20T04:01:41.000Z | 2021-11-08T10:40:49.000Z | mylib/dataset/coco.py | duducheng/deeplabv3p_gluon | fd8e3e8d834838a9a221785b825499c62cee578f | [
"Apache-2.0"
] | 6 | 2018-08-16T08:06:39.000Z | 2020-11-28T13:07:21.000Z | mylib/dataset/coco.py | duducheng/deeplabv3p_gluon | fd8e3e8d834838a9a221785b825499c62cee578f | [
"Apache-2.0"
] | 11 | 2018-07-20T18:00:29.000Z | 2020-04-28T15:21:58.000Z | # raise NotImplementedError("Did not check!")
"""MSCOCO Semantic Segmentation pretraining for VOC."""
import os
from tqdm import trange
from PIL import Image, ImageOps, ImageFilter
import numpy as np
import pickle
from gluoncv.data.segbase import SegmentationDataset
| 40.582609 | 92 | 0.555817 |
35fd4da34b0954ed2f821de46d87379191733efa | 1,045 | py | Python | find_other_news_sources.py | sr33/OtherNewsSources | 17857381a5690d5e89d4a034f1fc60f61c2377dc | [
"MIT"
] | 10 | 2015-07-17T09:57:38.000Z | 2020-05-24T20:09:20.000Z | find_other_news_sources.py | sr33/OtherNewsSources | 17857381a5690d5e89d4a034f1fc60f61c2377dc | [
"MIT"
] | null | null | null | find_other_news_sources.py | sr33/OtherNewsSources | 17857381a5690d5e89d4a034f1fc60f61c2377dc | [
"MIT"
] | null | null | null | # __author__ = 'sree'
import urllib2
from lxml import html
import requests
| 40.192308 | 119 | 0.702392 |
35fda7f9b73a414c879824f59fa81da72f267f5a | 35,235 | py | Python | code/client/munkilib/adobeutils/adobeinfo.py | Rippling/munki | 115832687d4411ca825202ec82d9a27053fef7c8 | [
"Apache-2.0"
] | 1 | 2021-10-06T12:56:14.000Z | 2021-10-06T12:56:14.000Z | code/client/munkilib/adobeutils/adobeinfo.py | Rippling/munki | 115832687d4411ca825202ec82d9a27053fef7c8 | [
"Apache-2.0"
] | null | null | null | code/client/munkilib/adobeutils/adobeinfo.py | Rippling/munki | 115832687d4411ca825202ec82d9a27053fef7c8 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
# Copyright 2009-2020 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
adobeutils.adobeinfo
Created by Greg Neagle on 2017-01-06.
Utilities to get info about Adobe installers/uninstallers
"""
from __future__ import absolute_import, print_function
import os
import json
import sqlite3
from glob import glob
from xml.dom import minidom
from .. import osutils
from .. import pkgutils
def find_install_app(dirpath):
    '''Searches dirpath and enclosed directories for Install.app.
    Returns the path to the actual executable, or '' when none is found.'''
    # Walk the tree looking for a directory named Install.app that really
    # contains the bundled binary.
    for current_dir, _unused_dirs, _unused_files in os.walk(dirpath):
        if not current_dir.endswith("Install.app"):
            continue
        executable = os.path.join(current_dir, "Contents", "MacOS", "Install")
        if os.path.exists(executable):
            return executable
    return ''
def find_setup_app(dirpath):
    '''Search dirpath and enclosed directories for Setup.app.
    Returns the path to the actual executable, or '' when no bundle
    containing the binary exists.'''
    bundles = (d for d, _, _ in os.walk(dirpath) if d.endswith("Setup.app"))
    for bundle in bundles:
        candidate = os.path.join(bundle, "Contents", "MacOS", "Setup")
        # Only accept bundles that actually contain the executable.
        if os.path.exists(candidate):
            return candidate
    return ''
def find_adobepatchinstaller_app(dirpath):
    '''Searches dirpath and enclosed directories for AdobePatchInstaller.app.
    Returns the path to the actual executable ('' when absent).'''
    for walk_root, _dirs, _files in os.walk(dirpath):
        if walk_root.endswith("AdobePatchInstaller.app"):
            # The executable lives in the standard bundle location.
            installer = os.path.join(
                walk_root, "Contents", "MacOS", "AdobePatchInstaller")
            if os.path.exists(installer):
                return installer
    return ''
def find_adobe_deployment_manager(dirpath):
    '''Searches dirpath and enclosed directories for AdobeDeploymentManager.
    Returns path to the executable, or '' if none is found.'''
    # The deployment manager sits inside a pkg's Contents/Resources dir.
    wanted_suffix = "pkg/Contents/Resources"
    for walk_dir, _subdirs, _filenames in os.walk(dirpath):
        if walk_dir.endswith(wanted_suffix):
            candidate = os.path.join(walk_dir, "AdobeDeploymentManager")
            if os.path.exists(candidate):
                return candidate
    return ''
def find_acrobat_patch_app(dirpath):
    '''Attempts to find an AcrobatPro patching application in dirpath.
    Returns the path to the .app bundle whose Resources contain Adobe's
    ApplyOperation.py patching script, or '' when no such bundle exists.'''
    for bundle_path, _subdirs, _filenames in os.walk(dirpath):
        if not bundle_path.endswith(".app"):
            continue
        # A patcher bundle is identified by the ApplyOperation.py script
        # Adobe ships inside its Resources directory.
        script = os.path.join(
            bundle_path, 'Contents', 'Resources', 'ApplyOperation.py')
        if os.path.exists(script):
            return bundle_path
    return ''
def get_payload_info(dirpath):
    '''Parses Adobe payloads, pulling out info useful to munki.

    .proxy.xml files are used if available, or for CC-era updates
    which do not contain one, the Media_db.db file, which contains
    identical XML, is instead used.

    CS3/CS4: contain only .proxy.xml
    CS5/CS5.5/CS6: contain both
    CC: contain only Media_db.db

    Args:
        dirpath: path to a single payload directory.
    Returns:
        dict that may contain 'AdobeCode', 'display_name', 'version' and
        'installed_size' (KBytes) keys; empty dict if no metadata found.
    '''
    payloadinfo = {}
    dom = None
    if os.path.isdir(dirpath):
        # prefer a .proxy.xml file if one is present (CS3-CS6)
        proxy_paths = glob(os.path.join(dirpath, '*.proxy.xml'))
        if proxy_paths:
            dom = minidom.parse(proxy_paths[0])
        else:
            # no .proxy.xml: hope for a Media_db.db (CC-era), which stores
            # the same XML in its PayloadData table
            db_path = os.path.join(dirpath, 'Media_db.db')
            if os.path.exists(db_path):
                conn = sqlite3.connect(db_path)
                try:
                    cur = conn.cursor()
                    cur.execute("SELECT value FROM PayloadData WHERE "
                                "PayloadData.key = 'PayloadInfo'")
                    result = cur.fetchone()
                    cur.close()
                finally:
                    # always release the db handle, even on query errors
                    conn.close()
                if result:
                    info_xml = result[0].encode('UTF-8')
                    dom = minidom.parseString(info_xml)
    if dom is None:
        # not a directory, or no xml and no db: no payload info!
        # (previously this fell through and raised NameError on 'dom')
        return payloadinfo
    payload_info = dom.getElementsByTagName('PayloadInfo')
    if payload_info:
        installer_properties = payload_info[0].getElementsByTagName(
            'InstallerProperties')
        if installer_properties:
            properties = installer_properties[0].getElementsByTagName(
                'Property')
            for prop in properties:
                if 'name' in list(prop.attributes.keys()):
                    # keep this a str: encoding to bytes would make the
                    # comparisons with the str literals below always fail
                    # under Python 3
                    propname = prop.attributes['name'].value
                    propvalue = ''
                    for node in prop.childNodes:
                        propvalue += node.nodeValue
                    if propname == 'AdobeCode':
                        payloadinfo['AdobeCode'] = propvalue
                    if propname == 'ProductName':
                        payloadinfo['display_name'] = propvalue
                    if propname == 'ProductVersion':
                        payloadinfo['version'] = propvalue
        installmetadata = payload_info[0].getElementsByTagName(
            'InstallDestinationMetadata')
        if installmetadata:
            totalsizes = installmetadata[0].getElementsByTagName(
                'TotalSize')
            if totalsizes:
                installsize = ''
                for node in totalsizes[0].childNodes:
                    installsize += node.nodeValue
                # TotalSize is in bytes; store KBytes
                payloadinfo['installed_size'] = int(int(installsize)/1024)
    return payloadinfo
def get_adobe_setup_info(installroot):
    '''Given the root of a mounted Adobe DMG,
    look for info about the installer or updater.

    Returns a dict which may contain 'display_name', 'version',
    'AdobeSetupType', 'payloads' and 'installed_size' keys; empty dict
    if nothing recognizable is found.'''
    info = {}
    payloads = []
    # look for all the payloads folders
    for (path, dummy_dirs, dummy_files) in os.walk(installroot):
        if path.endswith('/payloads'):
            driverfolder = ''
            media_signature = ''
            setupxml = os.path.join(path, 'setup.xml')
            if os.path.exists(setupxml):
                dom = minidom.parse(setupxml)
                drivers = dom.getElementsByTagName('Driver')
                if drivers:
                    driver = drivers[0]
                    if 'folder' in list(driver.attributes.keys()):
                        # keep this a str: encoding to bytes would make the
                        # comparison with listdir() items below always fail
                        # under Python 3
                        driverfolder = driver.attributes['folder'].value
                if driverfolder == '':
                    # look for mediaSignature (CS5 AAMEE install)
                    setup_elements = dom.getElementsByTagName('Setup')
                    if setup_elements:
                        media_signature_elements = setup_elements[
                            0].getElementsByTagName('mediaSignature')
                        if media_signature_elements:
                            element = media_signature_elements[0]
                            for node in element.childNodes:
                                media_signature += node.nodeValue
            for item in osutils.listdir(path):
                payloadpath = os.path.join(path, item)
                payloadinfo = get_payload_info(payloadpath)
                if payloadinfo:
                    payloads.append(payloadinfo)
                    # the driver/mediaSignature payload carries the overall
                    # product name and version; use .get() because not every
                    # payload records an AdobeCode
                    if ((driverfolder and item == driverfolder) or
                            (media_signature and
                             payloadinfo.get('AdobeCode') ==
                             media_signature)):
                        info['display_name'] = payloadinfo['display_name']
                        info['version'] = payloadinfo['version']
                        info['AdobeSetupType'] = 'ProductInstall'
    if not payloads:
        # look for an extensions folder; almost certainly this is an Updater
        for (path, dummy_dirs, dummy_files) in os.walk(installroot):
            if path.endswith("/extensions"):
                for item in osutils.listdir(path):
                    # skip LanguagePacks
                    if item.find("LanguagePack") == -1:
                        itempath = os.path.join(path, item)
                        payloadinfo = get_payload_info(itempath)
                        if payloadinfo:
                            payloads.append(payloadinfo)
                # we found an extensions dir,
                # so no need to keep walking the install root
                break
    if payloads:
        if len(payloads) == 1:
            info['display_name'] = payloads[0]['display_name']
            info['version'] = payloads[0]['version']
        else:
            if 'display_name' not in info:
                info['display_name'] = "ADMIN: choose from payloads"
            if 'version' not in info:
                info['version'] = "ADMIN please set me"
        info['payloads'] = payloads
        # total installed_size is the sum of the per-payload sizes (KBytes)
        installed_size = 0
        for payload in payloads:
            installed_size = installed_size + payload.get('installed_size', 0)
        info['installed_size'] = installed_size
    return info
def get_adobe_package_info(installroot):
    '''Gets the package name from the AdobeUberInstaller.xml file;
    other info from the payloads folder.'''
    info = get_adobe_setup_info(installroot)
    info['description'] = ""
    uber_installer_xml = os.path.join(installroot, "AdobeUberInstaller.xml")
    if os.path.exists(uber_installer_xml):
        # CS4-style package: the description lives in PackageDescription
        description = ''
        dom = minidom.parse(uber_installer_xml)
        installinfo = dom.getElementsByTagName("InstallInfo")
        if installinfo:
            package_descriptions = installinfo[0].getElementsByTagName(
                "PackageDescription")
            if package_descriptions:
                for node in package_descriptions[0].childNodes:
                    description += node.nodeValue
        if description:
            # convention is "display name : description"
            parts = description.split(' : ', 1)
            info['display_name'] = parts[0]
            info['description'] = parts[1] if len(parts) > 1 else ""
        return info
    # newer-style package: the name lives in optionXML.xml
    option_xml = os.path.join(installroot, "optionXML.xml")
    if os.path.exists(option_xml):
        dom = minidom.parse(option_xml)
        installinfo = dom.getElementsByTagName("InstallInfo")
        if installinfo:
            pkgname_elems = installinfo[0].getElementsByTagName("PackageName")
            if pkgname_elems:
                pkgname = ''
                for node in pkgname_elems[0].childNodes:
                    pkgname += node.nodeValue
                info['display_name'] = pkgname
    if not info.get('display_name'):
        # last resort: derive the display name from the directory name
        info['display_name'] = os.path.basename(installroot)
    return info
def get_xml_text_element(dom_node, name):
    '''Return the concatenated text of the first child element of
    dom_node with tag *name*, or None when no such element exists.'''
    matches = dom_node.getElementsByTagName(name)
    if not matches:
        return None
    # join the text of all child nodes (an element with no children
    # yields the empty string)
    return ''.join(node.nodeValue for node in matches[0].childNodes)
def parse_option_xml(option_xml_file):
    '''Parses an optionXML.xml file and pulls the items of interest, returning
    them in a dictionary.
    Keys that may appear in the returned dict:
      packager_id, packager_version: attributes of the <InstallInfo> element
      package_name, package_id: text of the matching child elements
      products: a list of per-product dicts. RIBS (<Medias>) entries carry
          prodName/prodVersion/SAPCode and possibly mediaSignature;
          HyperDrive (<HDMedias>) entries carry hd_installer=True plus
          mediaLEID/prodVersion/productVersion/SAPCode/MediaType/
          TargetFolderName.
    '''
    info = {}
    dom = minidom.parse(option_xml_file)
    installinfo = dom.getElementsByTagName('InstallInfo')
    if installinfo:
        # packager identity and version are attributes on <InstallInfo>
        if 'id' in list(installinfo[0].attributes.keys()):
            info['packager_id'] = installinfo[0].attributes['id'].value
        if 'version' in list(installinfo[0].attributes.keys()):
            info['packager_version'] = installinfo[
                0].attributes['version'].value
        info['package_name'] = get_xml_text_element(
            installinfo[0], 'PackageName')
        info['package_id'] = get_xml_text_element(installinfo[0], 'PackageID')
        info['products'] = []
        # CS5 to CC 2015.0-2015.2 releases use RIBS, and we retrieve a
        # display name, version and 'mediaSignature' for building installs
        # items. SAPCode is also stored so that we can later search by this
        # key across both RIBS and HyperDrive installer metadata.
        medias_elements = installinfo[0].getElementsByTagName('Medias')
        if medias_elements:
            media_elements = medias_elements[0].getElementsByTagName('Media')
            if media_elements:
                for media in media_elements:
                    product = {}
                    product['prodName'] = get_xml_text_element(
                        media, 'prodName')
                    product['prodVersion'] = get_xml_text_element(
                        media, 'prodVersion')
                    product['SAPCode'] = get_xml_text_element(media, 'SAPCode')
                    # mediaSignature is nested under <Setup>
                    setup_elements = media.getElementsByTagName('Setup')
                    if setup_elements:
                        media_signature_elements = setup_elements[
                            0].getElementsByTagName('mediaSignature')
                        if media_signature_elements:
                            product['mediaSignature'] = ''
                            element = media_signature_elements[0]
                            for node in element.childNodes:
                                product['mediaSignature'] += node.nodeValue
                    info['products'].append(product)
        # HD (HyperDrive) media for new mid-June 2016 products. We need the
        # SAP codes, versions, and which ones are MediaType 'Product'. Support
        # payloads seem to all be 'STI', and are listed as STIDependencies under
        # the main product.
        hd_medias_elements = installinfo[0].getElementsByTagName('HDMedias')
        if hd_medias_elements:
            hd_media_elements = hd_medias_elements[0].getElementsByTagName(
                'HDMedia')
            if hd_media_elements:
                for hd_media in hd_media_elements:
                    product = {}
                    # marker so later code can distinguish HD from RIBS items
                    product['hd_installer'] = True
                    # productVersion is the 'full' version number
                    # prodVersion seems to be the "customer-facing" version for
                    # this update
                    # baseVersion is the first/base version for this standalone
                    # product/channel/LEID,
                    # not really needed here so we don't copy it
                    for elem in [
                            'mediaLEID',
                            'prodVersion',
                            'productVersion',
                            'SAPCode',
                            'MediaType',
                            'TargetFolderName']:
                        product[elem] = get_xml_text_element(hd_media, elem)
                    info['products'].append(product)
    return info
def get_hd_installer_info(hd_payload_root, sap_code):
    '''Attempts to extract some information from a HyperDrive payload
    Application.json file and return a reduced set in a dict.

    Args:
        hd_payload_root: directory holding the HD payload metadata dirs
            (typically <pkg>/HD).
        sap_code: Adobe SAP code naming the payload subdirectory.
    Returns:
        dict with 'BaseVersion', 'Name', 'ProductVersion', 'SAPCode',
        'version' and 'Packages' keys.
    Raises:
        OSError if Application.json is missing, ValueError if it is not
        valid JSON, KeyError if an expected key is absent.
    '''
    hd_app_info = {}
    app_json_path = os.path.join(hd_payload_root, sap_code, 'Application.json')
    # use a context manager so the file handle isn't leaked
    with open(app_json_path, 'r') as fileobj:
        json_info = json.load(fileobj)
    # Copy some useful top-level keys, useful later for:
    # - Name: display_name pkginfo key
    # - ProductVersion: version pkginfo key and uninstall XML location
    # - SAPCode: an uninstallXml for an installs item if it's a 'core' Type
    # - BaseVersion and version: not currently used but may be useful once
    #   there are more HD installers in the future
    for key in ['BaseVersion', 'Name', 'ProductVersion', 'SAPCode', 'version']:
        hd_app_info[key] = json_info[key]
    # Adobe puts an array of dicts in a dict with one key called 'Package'
    hd_app_info['Packages'] = list(json_info['Packages']['Package'])
    return hd_app_info
def get_cs5_media_signature(dirpath):
    '''Returns the CS5 mediaSignature for an AAMEE CS5 install.
    dirpath is typically the root of a mounted dmg; returns '' when no
    signature can be found.'''
    payloads_dir = ""
    # walk the whole tree; the last payloads directory found wins,
    # matching os.walk's top-down ordering
    for walked_path, _unused_dirs, _unused_files in os.walk(dirpath):
        if walked_path.endswith('/payloads'):
            payloads_dir = walked_path
    if not payloads_dir:
        # no payloads folder: nothing to parse
        return ''
    setup_xml_path = os.path.join(payloads_dir, 'Setup.xml')
    if not (os.path.exists(setup_xml_path) and
            os.path.isfile(setup_xml_path)):
        return ""
    dom = minidom.parse(setup_xml_path)
    setup_elements = dom.getElementsByTagName('Setup')
    if setup_elements:
        signature_elements = setup_elements[0].getElementsByTagName(
            'mediaSignature')
        if signature_elements:
            # concatenate the text content of the first mediaSignature
            return ''.join(
                node.nodeValue for node in signature_elements[0].childNodes)
    return ""
def get_cs5_uninstall_xml(option_xml_file):
    '''Gets the uninstall deployment data from a CS5 installer.

    Returns the concatenated XML (as str) of every Deployment element
    found under DeploymentInfo/DeploymentUninstall, or '' if none.'''
    xml = ''
    dom = minidom.parse(option_xml_file)
    deployment_info = dom.getElementsByTagName('DeploymentInfo')
    if deployment_info:
        for info_item in deployment_info:
            deployment_uninstall = info_item.getElementsByTagName(
                'DeploymentUninstall')
            if deployment_uninstall:
                deployment_data = deployment_uninstall[0].getElementsByTagName(
                    'Deployment')
                if deployment_data:
                    deployment = deployment_data[0]
                    # toxml() with no encoding returns str; the previous
                    # toxml('UTF-8') returned bytes under Python 3, making
                    # the str += concatenation raise TypeError
                    xml += deployment.toxml()
    return xml
def count_payloads(dirpath):
    '''Attempts to count the payloads in the Adobe installation item.
    Used for rough percent-done progress feedback.'''
    total = 0
    for walked_path, _unused_dirs, file_names in os.walk(dirpath):
        if walked_path.endswith("/payloads"):
            # RIBS-style installers: each payload is a subdirectory
            total += sum(
                1 for entry in osutils.listdir(walked_path)
                if os.path.isdir(os.path.join(walked_path, entry)))
        elif "/HD/" in walked_path and "Application.json" in file_names:
            # HD installer directory: the payloads/packages are .zip files
            total += sum(1 for name in file_names if name.endswith(".zip"))
    return total
def get_adobe_install_info(installdir):
    '''Encapsulates info used by the Adobe Setup/Install app.

    Returns a dict with 'media_signature', 'payload_count' and possibly
    'uninstallxml' keys; empty dict when installdir is falsy.'''
    adobe_install_info = {}
    if not installdir:
        # nothing to inspect
        return adobe_install_info
    adobe_install_info['media_signature'] = get_cs5_media_signature(installdir)
    adobe_install_info['payload_count'] = count_payloads(installdir)
    option_xml_file = os.path.join(installdir, "optionXML.xml")
    if os.path.exists(option_xml_file):
        adobe_install_info['uninstallxml'] = get_cs5_uninstall_xml(
            option_xml_file)
    return adobe_install_info
# Disable PyLint complaining about 'invalid' camelCase names
# pylint: disable=invalid-name
def getAdobeCatalogInfo(mountpoint, pkgname=""):
    '''Used by makepkginfo to build pkginfo data for Adobe
    installers/updaters.
    Tries each known Adobe installer layout in turn (AAMEE/CCP package,
    CS5 patch installer, CS4 Uber installer, CS3/updater Setup.app,
    Acrobat 9 Pro patcher) and returns a pkginfo-style dict for the first
    layout that matches, or None if the disk image isn't recognized.'''
    # look for AdobeDeploymentManager (AAMEE installer)
    deploymentmanager = find_adobe_deployment_manager(mountpoint)
    if deploymentmanager:
        # AAMEE/CCP package: optionXML.xml next to the deployment manager
        # describes the packaged products
        dirpath = os.path.dirname(deploymentmanager)
        option_xml_file = os.path.join(dirpath, 'optionXML.xml')
        option_xml_info = {}
        if os.path.exists(option_xml_file):
            option_xml_info = parse_option_xml(option_xml_file)
        cataloginfo = get_adobe_package_info(dirpath)
        if cataloginfo:
            # add some more data
            if option_xml_info.get('packager_id') == u'CloudPackager':
                # CCP package
                cataloginfo['display_name'] = option_xml_info.get(
                    'package_name', 'unknown')
                cataloginfo['name'] = cataloginfo['display_name'].replace(
                    ' ', '')
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeCCPUninstaller"
                cataloginfo['installer_type'] = "AdobeCCPInstaller"
                cataloginfo['minimum_os_version'] = "10.6.8"
                mediasignatures = [
                    item['mediaSignature']
                    for item in option_xml_info.get('products', [])
                    if 'mediaSignature' in item]
            else:
                # AAMEE package
                cataloginfo['name'] = cataloginfo['display_name'].replace(
                    ' ', '')
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeCS5AAMEEPackage"
                cataloginfo['installer_type'] = "AdobeCS5AAMEEPackage"
                cataloginfo['minimum_os_version'] = "10.5.0"
                cataloginfo['adobe_install_info'] = get_adobe_install_info(
                    installdir=dirpath)
                mediasignature = cataloginfo['adobe_install_info'].get(
                    "media_signature")
                mediasignatures = [mediasignature]
            # Determine whether we have HD media as well in this installer
            hd_metadata_dirs = [
                product['TargetFolderName']
                for product in option_xml_info['products']
                if product.get('hd_installer')]
            hd_app_infos = []
            for sap_code in hd_metadata_dirs:
                hd_app_info = get_hd_installer_info(
                    os.path.join(dirpath, 'HD'), sap_code)
                hd_app_infos.append(hd_app_info)
            # 'installs' array will be populated if we have either RIBS
            # or HD installers, which may be mixed together in one
            # CCP package.
            # Acrobat Pro DC doesn't currently generate any useful installs
            # info if it's part of a CCP package.
            installs = []
            # media signatures are used for RIBS (CS5 to CC mid-2015)
            if mediasignatures:
                # make a default <key>installs</key> array
                uninstalldir = "/Library/Application Support/Adobe/Uninstall"
                for mediasignature in mediasignatures:
                    signaturefile = mediasignature + ".db"
                    filepath = os.path.join(uninstalldir, signaturefile)
                    installitem = {}
                    installitem['path'] = filepath
                    installitem['type'] = 'file'
                    installs.append(installitem)
            # Custom installs items for HD installers seem to need only HDMedias
            # from optionXML.xml with a MediaType of 'Product' and their
            # 'core' packages (e.g. language packs are 'non-core')
            if hd_app_infos:
                if 'payloads' not in cataloginfo:
                    cataloginfo['payloads'] = []
                cataloginfo['payloads'].extend(hd_app_infos)
                # Calculate installed_size by counting packages in payloads
                # in these indexed HD medias. installed_size may exist already
                # if this package contained RIBS payloads, so try reading it
                # and default to 0. This will typically include several very
                # small packages (language or regional recommended settings)
                # which would not actually get installed. These seem to be
                # no larger than a few MB, so in practice it increases the
                # 'installed_size' value by only ~1%.
                installed_size = cataloginfo.get('installed_size', 0)
                for hd_payload in hd_app_infos:
                    for package in hd_payload['Packages']:
                        # Generally, all app installs will include 1-3 'core'
                        # packages and then additional language/settings/color
                        # packages which are regional or language-specific.
                        # If we filter this by including both unconditional
                        # installs and those which are language/region specific,
                        # we get a rough approximation of the total size of
                        # supplemental packages, as their equivalents for other
                        # languages are very close to the same size. We also
                        # get one included language package which would be the
                        # case for any install.
                        #
                        # Because InDesign CC 2017 is not like any other package
                        # and contains a 'Condition' key but as an empty
                        # string, we explicitly test this case as well.
                        if ('Condition' not in list(package.keys()) or
                                package.get('Condition') == '' or
                                '[installLanguage]==en_US' in
                                package.get('Condition', '')):
                            installed_size += int(package.get(
                                'ExtractSize', 0) / 1024)
                            # We get much closer to Adobe's "HDSetup" calculated
                            # install space requirement if we include both the
                            # DownloadSize and ExtractSize data
                            # (DownloadSize is just the zip file size)
                            installed_size += int(package.get(
                                'DownloadSize', 0) / 1024)
                # Add another 300MB for the CC app and plumbing in case they've
                # never been installed on the system
                installed_size += 307200
                cataloginfo['installed_size'] = installed_size
                uninstalldir = (
                    '/Library/Application Support/Adobe/Installers/uninstallXml'
                )
                product_saps = [
                    prod['SAPCode'] for
                    prod in option_xml_info['products']
                    if prod.get('MediaType') == 'Product'
                ]
                product_app_infos = [app for app in hd_app_infos
                                     if app['SAPCode'] in product_saps]
                # if we had only a single HD and no legacy apps, set a sane
                # version and display_name derived from the app's metadata
                if (len(product_app_infos) == 1) and not mediasignatures:
                    cataloginfo.update({
                        'display_name': product_app_infos[0]['Name'],
                        'version': product_app_infos[0]['ProductVersion'],
                    })
                for app_info in product_app_infos:
                    for pkg in app_info['Packages']:
                        # Don't assume 'Type' key always exists. At least the
                        #'AdobeIllustrator20-Settings'
                        # package doesn't have this key set.
                        if pkg.get('Type') == 'core':
                            # We can't use 'ProductVersion' from
                            # Application.json for the part following the
                            # SAPCode, because it's usually too specific and
                            # won't match the "short" product version.
                            # We can take 'prodVersion' from the optionXML.xml
                            # instead.
                            # We filter out any non-HD installers to avoid
                            # matching up the wrong versions for packages that
                            # may contain multiple different major versions of
                            # a given SAPCode
                            pkg_prod_vers = [
                                prod['prodVersion']
                                for prod in option_xml_info['products']
                                if prod.get('hd_installer') and
                                prod['SAPCode'] == app_info['SAPCode']][0]
                            uninstall_file_name = '_'.join([
                                app_info['SAPCode'],
                                pkg_prod_vers.replace('.', '_'),
                                pkg['PackageName'],
                                pkg['PackageVersion']]) + '.pimx'
                            filepath = os.path.join(
                                uninstalldir, uninstall_file_name)
                            installitem = {}
                            installitem['path'] = filepath
                            installitem['type'] = 'file'
                            installs.append(installitem)
            if installs:
                cataloginfo['installs'] = installs
        return cataloginfo
    # Look for Install.app (Bare metal CS5 install)
    # we don't handle this type, but we'll report it
    # back so makepkginfo can provide an error message
    # installapp = find_install_app(mountpoint)
    # if installapp:
    #     cataloginfo = {}
    #     cataloginfo['installer_type'] = "AdobeCS5Installer"
    #     return cataloginfo
    # Look for AdobePatchInstaller.app (CS5 updater)
    installapp = find_adobepatchinstaller_app(mountpoint)
    if os.path.exists(installapp):
        # this is a CS5 updater disk image
        cataloginfo = get_adobe_package_info(mountpoint)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['uninstallable'] = False
            cataloginfo['installer_type'] = "AdobeCS5PatchInstaller"
            if pkgname:
                cataloginfo['package_path'] = pkgname
            # make some (hopefully functional) installs items from the payloads
            installs = []
            uninstalldir = "/Library/Application Support/Adobe/Uninstall"
            # first look for a payload with a display_name matching the
            # overall display_name
            for payload in cataloginfo.get('payloads', []):
                if (payload.get('display_name', '') ==
                        cataloginfo['display_name']):
                    if 'AdobeCode' in payload:
                        dbfile = payload['AdobeCode'] + ".db"
                        filepath = os.path.join(uninstalldir, dbfile)
                        installitem = {}
                        installitem['path'] = filepath
                        installitem['type'] = 'file'
                        installs.append(installitem)
                        break
            if installs == []:
                # didn't find a payload with matching name
                # just add all of the non-LangPack payloads
                # to the installs list.
                for payload in cataloginfo.get('payloads', []):
                    if 'AdobeCode' in payload:
                        if ("LangPack" in payload.get("display_name") or
                                "Language Files" in payload.get(
                                    "display_name")):
                            # skip Language Packs
                            continue
                        dbfile = payload['AdobeCode'] + ".db"
                        filepath = os.path.join(uninstalldir, dbfile)
                        installitem = {}
                        installitem['path'] = filepath
                        installitem['type'] = 'file'
                        installs.append(installitem)
            cataloginfo['installs'] = installs
        return cataloginfo
    # Look for AdobeUberInstaller items (CS4 install)
    pkgroot = os.path.join(mountpoint, pkgname)
    adobeinstallxml = os.path.join(pkgroot, "AdobeUberInstaller.xml")
    if os.path.exists(adobeinstallxml):
        # this is a CS4 Enterprise Deployment package
        cataloginfo = get_adobe_package_info(pkgroot)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['uninstallable'] = True
            cataloginfo['uninstall_method'] = "AdobeUberUninstaller"
            cataloginfo['installer_type'] = "AdobeUberInstaller"
            if pkgname:
                cataloginfo['package_path'] = pkgname
        return cataloginfo
    # maybe this is an Adobe update DMG or CS3 installer
    # look for Adobe Setup.app
    setuppath = find_setup_app(mountpoint)
    if setuppath:
        cataloginfo = get_adobe_setup_info(mountpoint)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['installer_type'] = "AdobeSetup"
            if cataloginfo.get('AdobeSetupType') == "ProductInstall":
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeSetup"
            else:
                cataloginfo['description'] = "Adobe updater"
                cataloginfo['uninstallable'] = False
                cataloginfo['update_for'] = ["PleaseEditMe-1.0.0.0.0"]
        return cataloginfo
    # maybe this is an Adobe Acrobat 9 Pro patcher?
    acrobatpatcherapp = find_acrobat_patch_app(mountpoint)
    if acrobatpatcherapp:
        cataloginfo = {}
        cataloginfo['installer_type'] = "AdobeAcrobatUpdater"
        cataloginfo['uninstallable'] = False
        plist = pkgutils.getBundleInfo(acrobatpatcherapp)
        cataloginfo['version'] = pkgutils.getVersionString(plist)
        cataloginfo['name'] = "AcrobatPro9Update"
        cataloginfo['display_name'] = "Adobe Acrobat Pro Update"
        cataloginfo['update_for'] = ["AcrobatPro9"]
        cataloginfo['RestartAction'] = 'RequireLogout'
        cataloginfo['requires'] = []
        cataloginfo['installs'] = [
            {'CFBundleIdentifier': 'com.adobe.Acrobat.Pro',
             'CFBundleName': 'Acrobat',
             'CFBundleShortVersionString': cataloginfo['version'],
             'path': '/Applications/Adobe Acrobat 9 Pro/Adobe Acrobat Pro.app',
             'type': 'application'}
        ]
        return cataloginfo
    # didn't find any Adobe installers/updaters we understand
    return None
# pylint: enable=invalid-name
if __name__ == '__main__':
    # this module is a support library; running it directly does nothing useful
    print('This is a library of support tools for the Munki Suite.')
| 44.657795 | 80 | 0.565319 |
35fe055b65de9e34581ebd9b036ec7f195d41986 | 645 | py | Python | mandrel/config/helpers.py | gf-atebbe/python-mandrel | 64b90e3265a522ff72019960752bcc716533347f | [
"MIT"
] | null | null | null | mandrel/config/helpers.py | gf-atebbe/python-mandrel | 64b90e3265a522ff72019960752bcc716533347f | [
"MIT"
] | null | null | null | mandrel/config/helpers.py | gf-atebbe/python-mandrel | 64b90e3265a522ff72019960752bcc716533347f | [
"MIT"
] | null | null | null | from .. import util
| 25.8 | 81 | 0.626357 |
35ff001cebfbaa2f16c6208ca4d5a99ce422a736 | 1,606 | py | Python | Components/MoveComponent.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | [
"Apache-2.0"
] | 13 | 2018-06-16T12:52:18.000Z | 2021-08-14T02:43:24.000Z | Components/MoveComponent.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | [
"Apache-2.0"
] | null | null | null | Components/MoveComponent.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | [
"Apache-2.0"
] | 6 | 2019-06-20T21:06:01.000Z | 2021-08-14T02:43:28.000Z | #!/usr/bin/python
#-*-coding:utf-8-*-
from Component import Component
| 34.170213 | 104 | 0.52802 |
35ff5a9fe6f25456cafae5f86dcd151f7638267e | 35,016 | py | Python | poshc2/server/Tasks.py | slackr/PoshC2 | d4804f1f534dac53b95dd6dd6578431beaf79360 | [
"BSD-3-Clause"
] | 1,504 | 2016-07-12T04:14:00.000Z | 2022-03-31T02:59:30.000Z | poshc2/server/Tasks.py | PhilKeeble/PoshC2 | 498b30097e12e46b5aa454feaeaa4bbae3c04c0d | [
"BSD-3-Clause"
] | 139 | 2016-10-13T10:41:18.000Z | 2022-03-31T13:22:47.000Z | poshc2/server/Tasks.py | PhilKeeble/PoshC2 | 498b30097e12e46b5aa454feaeaa4bbae3c04c0d | [
"BSD-3-Clause"
] | 377 | 2016-07-12T03:10:03.000Z | 2022-03-31T10:04:13.000Z | import datetime, hashlib, base64, traceback, os, re
import poshc2.server.database.DB as DB
from poshc2.Colours import Colours
from poshc2.server.Config import ModulesDirectory, DownloadsDirectory, ReportsDirectory
from poshc2.server.Implant import Implant
from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad
from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response
from poshc2.server.payloads.Payloads import Payloads
from poshc2.server.PowerStatus import translate_power_status
from poshc2.Utils import randomuri
| 64.486188 | 474 | 0.499714 |
c4005a008048988474573247edb485bd20d1bb6d | 1,029 | py | Python | Leetcode/89.grayCode.py | Song2017/Leetcode_python | 99d9f3cec0e47ddab6ec107392a6b33bf6c1d046 | [
"MIT"
] | 1 | 2019-05-14T00:55:30.000Z | 2019-05-14T00:55:30.000Z | LeetcodeView/89.grayCode.md | Song2017/Leetcode_python | 99d9f3cec0e47ddab6ec107392a6b33bf6c1d046 | [
"MIT"
] | null | null | null | LeetcodeView/89.grayCode.md | Song2017/Leetcode_python | 99d9f3cec0e47ddab6ec107392a6b33bf6c1d046 | [
"MIT"
] | null | null | null |
s = Solution()
print(s.grayCode(3))
| 25.725 | 62 | 0.433431 |