Dataset schema (column · dtype · min .. max, or class/list count):

hexsha                                     stringlengths    40 .. 40
size                                       int64            5 .. 2.06M
ext                                        stringclasses    11 values
lang                                       stringclasses    1 value
max_stars_repo_path                        stringlengths    3 .. 251
max_stars_repo_name                        stringlengths    4 .. 130
max_stars_repo_head_hexsha                 stringlengths    40 .. 78
max_stars_repo_licenses                    listlengths      1 .. 10
max_stars_count                            int64            1 .. 191k
max_stars_repo_stars_event_min_datetime    stringlengths    24 .. 24
max_stars_repo_stars_event_max_datetime    stringlengths    24 .. 24
max_issues_repo_path                       stringlengths    3 .. 251
max_issues_repo_name                       stringlengths    4 .. 130
max_issues_repo_head_hexsha                stringlengths    40 .. 78
max_issues_repo_licenses                   listlengths      1 .. 10
max_issues_count                           int64            1 .. 116k
max_issues_repo_issues_event_min_datetime  stringlengths    24 .. 24
max_issues_repo_issues_event_max_datetime  stringlengths    24 .. 24
max_forks_repo_path                        stringlengths    3 .. 251
max_forks_repo_name                        stringlengths    4 .. 130
max_forks_repo_head_hexsha                 stringlengths    40 .. 78
max_forks_repo_licenses                    listlengths      1 .. 10
max_forks_count                            int64            1 .. 105k
max_forks_repo_forks_event_min_datetime    stringlengths    24 .. 24
max_forks_repo_forks_event_max_datetime    stringlengths    24 .. 24
content                                    stringlengths    1 .. 1.05M
avg_line_length                            float64          1 .. 1.02M
max_line_length                            int64            3 .. 1.04M
alphanum_fraction                          float64          0 .. 1
1c9707dc1574081d46ce438a0fbd3d659ca252fc
7,985
py
Python
openverse_catalog/dags/providers/provider_api_scripts/science_museum.py
yavik-kapadia/openverse-catalog
853766f2176a96450f456a9fd6675e134c0866e1
[ "MIT" ]
25
2021-05-06T20:53:45.000Z
2022-03-30T23:18:50.000Z
openverse_catalog/dags/providers/provider_api_scripts/science_museum.py
yavik-kapadia/openverse-catalog
853766f2176a96450f456a9fd6675e134c0866e1
[ "MIT" ]
272
2021-05-17T05:53:00.000Z
2022-03-31T23:57:20.000Z
openverse_catalog/dags/providers/provider_api_scripts/science_museum.py
yavik-kapadia/openverse-catalog
853766f2176a96450f456a9fd6675e134c0866e1
[ "MIT" ]
13
2021-06-12T07:09:06.000Z
2022-03-29T17:39:13.000Z
import logging

from common.licenses import get_license_info
from common.loader import provider_details as prov
from common.requester import DelayedRequester
from common.storage.image import ImageStore

logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s: %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)

LIMIT = 100
DELAY = 5.0
RETRIES = 3
PROVIDER = prov.SCIENCE_DEFAULT_PROVIDER
ENDPOINT = "https://collection.sciencemuseumgroup.org.uk/search/"

delay_request = DelayedRequester(delay=DELAY)
image_store = ImageStore(provider=PROVIDER)

HEADERS = {"Accept": "application/json"}

DEFAULT_QUERY_PARAMS = {
    "has_image": 1,
    "image_license": "CC",
    "page[size]": LIMIT,
    "page[number]": 0,
    "date[from]": 0,
    "date[to]": 1500,
}

YEAR_RANGE = [
    (0, 1500),
    (1500, 1750),
    (1750, 1825),
    (1825, 1850),
    (1850, 1875),
    (1875, 1900),
    (1900, 1915),
    (1915, 1940),
    (1940, 1965),
    (1965, 1990),
    (1990, 2020),
]

# global variable to keep track of records pulled
RECORD_IDS = []


if __name__ == "__main__":
    main()
30.830116
87
0.628053
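The science_museum.py extract above keeps the module constants but drops every function body, including the main() it calls. A minimal sketch of the paging loop those constants imply; the DelayedRequester.get() and ImageStore.commit() calls are assumptions, not confirmed Openverse API, and the real provider script differs:

def main():
    # Sketch: walk the year buckets and page until a bucket returns no data.
    for year_from, year_to in YEAR_RANGE:
        page = 0
        while True:
            query_params = dict(
                DEFAULT_QUERY_PARAMS,
                **{"date[from]": year_from, "date[to]": year_to, "page[number]": page},
            )
            response = delay_request.get(ENDPOINT, params=query_params, headers=HEADERS)  # assumed API
            batch = response.json().get("data", []) if response is not None else []
            if not batch:
                break
            for record in batch:
                if record.get("id") not in RECORD_IDS:
                    RECORD_IDS.append(record.get("id"))
                    # license extraction via get_license_info(...) omitted in this sketch
            page += 1
    image_store.commit()  # assumed flush of buffered images
    logger.info(f"Total records pulled: {len(RECORD_IDS)}")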
1c97af3344054a3843093ee257c735adccd419f3
1,089
py
Python
digitaltape.py
heerdyes/tapegame
d6e0c6f81fe9c7c85a54edbd037be318ff7ed391
[ "Artistic-2.0" ]
null
null
null
digitaltape.py
heerdyes/tapegame
d6e0c6f81fe9c7c85a54edbd037be318ff7ed391
[ "Artistic-2.0" ]
null
null
null
digitaltape.py
heerdyes/tapegame
d6e0c6f81fe9c7c85a54edbd037be318ff7ed391
[ "Artistic-2.0" ]
null
null
null
# tape variables
TS_MAX=1000

# the digital tape model
26.560976
86
0.602388
1c980836374b3fb5fedf0a12599c8c546395b546
422
py
Python
webhook-cdk/lambda/vars.py
ncalteen/github-webhook-lambda-example
414daf1a70343abf207ff37dc4a9d65d6892197d
[ "MIT" ]
null
null
null
webhook-cdk/lambda/vars.py
ncalteen/github-webhook-lambda-example
414daf1a70343abf207ff37dc4a9d65d6892197d
[ "MIT" ]
null
null
null
webhook-cdk/lambda/vars.py
ncalteen/github-webhook-lambda-example
414daf1a70343abf207ff37dc4a9d65d6892197d
[ "MIT" ]
1
2022-03-29T14:42:25.000Z
2022-03-29T14:42:25.000Z
import json

# Output must be returned in the format mentioned below:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
lambda_response = {
    "isBase64Encoded": False,
    "statusCode": 200,
    "headers": {
        "Content-Type": "application/json",
    },
    "body": json.dumps({
        "Status": "OK"
    })
}
26.375
152
0.668246
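vars.py above only defines the canned proxy-integration response; a hypothetical handler that returns it (lambda_handler is AWS's conventional entry-point name, not something this extract shows):

from vars import lambda_response


def lambda_handler(event, context):
    # API Gateway's Lambda proxy integration expects exactly this response shape.
    return lambda_response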
1c988d19204c6f421dff8e8f0c696fe6f0e5ec4f
3,737
py
Python
gym_unblockme/envs/unblockme_render.py
fedingo/gym-unblockme
a4dd20a7608122e09862d681259111e2634f3d4b
[ "MIT" ]
3
2019-02-12T15:53:17.000Z
2019-07-03T12:00:32.000Z
gym_unblockme/envs/unblockme_render.py
fedingo/gym-unblockme
a4dd20a7608122e09862d681259111e2634f3d4b
[ "MIT" ]
null
null
null
gym_unblockme/envs/unblockme_render.py
fedingo/gym-unblockme
a4dd20a7608122e09862d681259111e2634f3d4b
[ "MIT" ]
null
null
null
import pygame
import time
import numpy as np
import sys

gray = (150, 150, 150)
white = (255, 255, 255)
black = (0, 0, 0, )
red_block = (255, 0, 0)
red_border = (76, 0, 19)

block_color = (255, 128, 0)
border_color = (165,42,42)

screen = None
SIDE = 50
BORDER = 5
MARGIN = 5
LINE = 1
h_switch = True

## Render function for the unblockme_class

if __name__ == "__main__":
    from unblockme_class import *
    matrix, goal = get_example()
    game = unblock_me(matrix, goal)
    render_unblockme(game)
36.637255
116
0.557399
1c98d508fd84565e1b07d0b60db1b387344d3b53
2,852
py
Python
scaffolds/__init__.py
chhsiao1981/frontend_template
fcd68c47d9ba3b04c8eb59c684c93baa20a688aa
[ "MIT" ]
null
null
null
scaffolds/__init__.py
chhsiao1981/frontend_template
fcd68c47d9ba3b04c8eb59c684c93baa20a688aa
[ "MIT" ]
null
null
null
scaffolds/__init__.py
chhsiao1981/frontend_template
fcd68c47d9ba3b04c8eb59c684c93baa20a688aa
[ "MIT" ]
null
null
null
# API
from pyramid.scaffolds import PyramidTemplate
import os
import re
import logging
29.102041
141
0.678822
1c990cbd7a7616bc0cdb891dffbb562850c5ab57
21,364
py
Python
phy/cluster/tests/test_supervisor.py
mikailweston/phy
d774cb989152a4b7344ac9b70c79c204a5036763
[ "BSD-3-Clause" ]
null
null
null
phy/cluster/tests/test_supervisor.py
mikailweston/phy
d774cb989152a4b7344ac9b70c79c204a5036763
[ "BSD-3-Clause" ]
null
null
null
phy/cluster/tests/test_supervisor.py
mikailweston/phy
d774cb989152a4b7344ac9b70c79c204a5036763
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-

"""Test GUI component."""

#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------

#from contextlib import contextmanager

from pytest import yield_fixture, fixture, raises
import numpy as np
from numpy.testing import assert_array_equal as ae

from .. import supervisor as _supervisor
from ..supervisor import (Supervisor,
                          TaskLogger,
                          ClusterView,
                          SimilarityView,
                          ActionCreator,
                          )
from phy.gui import GUI
from phy.gui.widgets import Barrier
from phy.gui.qt import qInstallMessageHandler
from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready
from phy.utils.context import Context
from phylib.utils import connect, Bunch, emit

qInstallMessageHandler(handler)

#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
# Test tasks
#------------------------------------------------------------------------------

def test_task_1(tl):
    assert tl.last_state(None) is None


def test_task_2(tl):
    tl.enqueue(tl.cluster_view, 'select', [0])
    tl.process()
    assert tl.last_state() == ([0], 1, None, None)


def test_task_3(tl):
    tl.enqueue(tl.cluster_view, 'select', [0])
    tl.enqueue(tl.similarity_view, 'select', [100])
    tl.process()
    assert tl.last_state() == ([0], 1, [100], 101)


def test_task_merge(tl):
    tl.enqueue(tl.cluster_view, 'select', [0])
    tl.enqueue(tl.similarity_view, 'select', [100])
    tl.enqueue(tl.supervisor, 'merge', [0, 100], 1000)
    tl.process()
    assert tl.last_state() == ([1000], 1001, None, None)

    tl.enqueue(tl.supervisor, 'undo')
    tl.process()
    assert tl.last_state() == ([0], 1, [100], 101)

    tl.enqueue(tl.supervisor, 'redo')
    tl.process()
    assert tl.last_state() == ([1000], 1001, None, None)


def test_task_split(tl):
    tl.enqueue(tl.cluster_view, 'select', [0])
    tl.enqueue(tl.similarity_view, 'select', [100])
    tl.enqueue(tl.supervisor, 'split', [0, 100], [1000, 1001])
    tl.process()
    assert tl.last_state() == ([1000, 1001], 1002, None, None)


def test_task_move_1(tl):
    tl.enqueue(tl.cluster_view, 'select', [0])
    tl.enqueue(tl.supervisor, 'move', [0], 'good')
    tl.process()
    assert tl.last_state() == ([1], 2, None, None)


def test_task_move_best(tl):
    tl.enqueue(tl.cluster_view, 'select', [0])
    tl.enqueue(tl.similarity_view, 'select', [100])
    tl.enqueue(tl.supervisor, 'move', 'best', 'good')
    tl.process()
    assert tl.last_state() == ([1], 2, None, None)


def test_task_move_similar(tl):
    tl.enqueue(tl.cluster_view, 'select', [0])
    tl.enqueue(tl.similarity_view, 'select', [100])
    tl.enqueue(tl.supervisor, 'move', 'similar', 'good')
    tl.process()
    assert tl.last_state() == ([0], 1, [101], 102)


def test_task_move_all(tl):
    tl.enqueue(tl.cluster_view, 'select', [0])
    tl.enqueue(tl.similarity_view, 'select', [100])
    tl.enqueue(tl.supervisor, 'move', 'all', 'good')
    tl.process()
    assert tl.last_state() == ([1], 2, [101], 102)


#------------------------------------------------------------------------------
# Test cluster and similarity views
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
# Test ActionCreator
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
# Test GUI component
#------------------------------------------------------------------------------

def test_supervisor_merge_move(qtbot, supervisor):
    """Check that merge then move selects the next cluster in the original
    cluster view, not the updated cluster view."""
    _select(supervisor, [20, 11], [])
    _assert_selected(supervisor, [20, 11])

    supervisor.actions.merge()
    supervisor.block()
    _assert_selected(supervisor, [31])

    supervisor.actions.move('good', 'all')
    supervisor.block()
    _assert_selected(supervisor, [30])

    supervisor.actions.move('good', 'all')
    supervisor.block()
    _assert_selected(supervisor, [2])
27.460154
92
0.637521
1c9997692782ea5187e69a11b0059d2cc2e4c11c
2,733
py
Python
Source/CommandManager.py
SOBotics/Botpy
8e3eb48fcc2a46fd60f4d49832941fa1b71bc223
[ "WTFPL" ]
5
2017-09-19T10:19:33.000Z
2020-10-11T13:29:43.000Z
Source/CommandManager.py
SOBotics/Botpy
8e3eb48fcc2a46fd60f4d49832941fa1b71bc223
[ "WTFPL" ]
33
2018-05-14T09:05:06.000Z
2020-04-20T08:48:59.000Z
Source/CommandManager.py
SOBotics/Botpy
8e3eb48fcc2a46fd60f4d49832941fa1b71bc223
[ "WTFPL" ]
1
2017-09-27T10:40:34.000Z
2017-09-27T10:40:34.000Z
#
# CommandManager.py
# Botpy
#
# Created by Ashish Ahuja on 4th September 2017.
#
#

import threading
import chatexchange as ce
35.038462
98
0.555799
1c9a0955e72ca504725f135176d44e72aae8607c
1,237
py
Python
tests/periodicities/gen_makefile.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
377
2016-10-13T20:52:44.000Z
2022-03-29T18:04:14.000Z
tests/periodicities/gen_makefile.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
160
2016-10-13T16:11:53.000Z
2022-03-28T04:21:34.000Z
tests/periodicities/gen_makefile.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
63
2017-03-09T14:51:18.000Z
2022-03-27T20:52:57.000Z
import os
import glob

subdirs = glob.glob("tests/periodicities/*");

subdirs = ['tests/periodicities/Month', 'tests/periodicities/Minute', 'tests/periodicities/Week',
           'tests/periodicities/Business_Hour', 'tests/periodicities/Business_Day',
           'tests/periodicities/Second', 'tests/periodicities/Semi_Month',
           'tests/periodicities/Hour', 'tests/periodicities/Day']

#print(subdirs)

print("PYTHON=python3\n\n");

lAllTarget = "";
for subdir1 in sorted(subdirs):
    lBase = os.path.basename(subdir1);
    test_target = "";
    for filename in sorted(glob.glob(subdir1 + "/*.py")):
        bn = os.path.basename(filename);
        logfile = bn.replace("/" , "_");
        logfile = "logs/periodicities_" + logfile.replace(".py" , ".log");
        print("#PROCESSING FILE : " , filename, bn , logfile);
        print(bn , " : " , "\n\t", "-$(PYTHON) " , filename , " > " , logfile , " 2>&1");
        test_target = bn + " " + test_target;
    lAllTarget = lAllTarget + " " + lBase;
    print("\n\n", lBase , ": ", test_target, "\n" , "\n");

print("\n# ********************************************** \n");

print("all: " , lAllTarget , "\n\t\n");
32.552632
89
0.5481
1c9a500d9b0aad2641a02cde2360c7ed9e056cc6
3,494
py
Python
test/test_util_registry.py
SimulatedANeal/carpedm
22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89
[ "MIT" ]
2
2020-09-30T04:59:06.000Z
2021-03-30T20:42:44.000Z
test/test_util_registry.py
SimulatedANeal/carpedm
22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89
[ "MIT" ]
null
null
null
test/test_util_registry.py
SimulatedANeal/carpedm
22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89
[ "MIT" ]
1
2018-05-25T07:15:16.000Z
2018-05-25T07:15:16.000Z
#
# Copyright (C) 2018 Neal Digre.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
#
# Portions of this module are copied or lightly modified from the
# Tensor2Tensor registry_test module, so here is their license:
#
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for utils.registry

References:
    Slight modification of `Tensor2Tensor registry_test`_.

    .. _Tensor2Tensor registry_test: https://github.com/tensorflow/
        tensor2tensor/blob/master/tensor2tensor/utils/registry_test.py
"""
import unittest

from carpedm.util import registry
from carpedm.models.generic import Model
from carpedm.models.baseline import SingleCharBaseline


if __name__ == '__main__':
    unittest.main()
29.361345
79
0.697195
1c9a93508b958132a0c4b43ee8f248a5003366fb
383
py
Python
pay-api/migrations/versions/8f7565cf50c1_.py
stevenc987/sbc-pay
04f02f362f88a30c082b0643583b8d0ebff6063f
[ "Apache-2.0" ]
null
null
null
pay-api/migrations/versions/8f7565cf50c1_.py
stevenc987/sbc-pay
04f02f362f88a30c082b0643583b8d0ebff6063f
[ "Apache-2.0" ]
null
null
null
pay-api/migrations/versions/8f7565cf50c1_.py
stevenc987/sbc-pay
04f02f362f88a30c082b0643583b8d0ebff6063f
[ "Apache-2.0" ]
null
null
null
"""empty message Revision ID: 8f7565cf50c1 Revises: 872760122cc9, 8652bf9c03ff Create Date: 2020-10-02 11:11:49.823678 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8f7565cf50c1' down_revision = ('872760122cc9', '8652bf9c03ff') branch_labels = None depends_on = None
15.32
48
0.741514
1c9adb0e11b484554e5e9a324f68f256e624b588
13,217
py
Python
iotronic/wamp/agent.py
smartmeio/stack4things-openstack-iotronic
3e5782eb1fb33b7c3c8c9362e24d30241153c371
[ "Apache-2.0" ]
1
2021-11-04T09:43:49.000Z
2021-11-04T09:43:49.000Z
iotronic/wamp/agent.py
smartmeio/stack4things-openstack-iotronic
3e5782eb1fb33b7c3c8c9362e24d30241153c371
[ "Apache-2.0" ]
null
null
null
iotronic/wamp/agent.py
smartmeio/stack4things-openstack-iotronic
3e5782eb1fb33b7c3c8c9362e24d30241153c371
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import asyncio
import json
import subprocess
import time
import txaio

from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common.i18n import _LI
from iotronic.common.i18n import _LW
from iotronic.db import api as dbapi
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging.rpc import dispatcher

import importlib
from threading import Thread

import ssl
import os
import signal

from autobahn.asyncio.component import Component

LOG = logging.getLogger(__name__)

service_opts = [
    cfg.StrOpt('notification_level',
               choices=[('debug', _('"debug" level')),
                        ('info', _('"info" level')),
                        ('warning', _('"warning" level')),
                        ('error', _('"error" level')),
                        ('critical', _('"critical" level'))],
               help=_('Specifies the minimum level for which to send '
                      'notifications. If not set, no notifications will '
                      'be sent. The default is for this option to be unset.')),
]

wamp_opts = [
    cfg.StrOpt('wamp_transport_url',
               default='ws://localhost:8181/',
               help=('URL of wamp broker')),
    cfg.StrOpt('wamp_realm',
               default='s4t',
               help=('realm broker')),
    cfg.BoolOpt('register_agent',
                default=False,
                help=('Flag for marking this agent as a registration agent')),
    cfg.BoolOpt('skip_cert_verify',
                default=False,
                help=('Flag for skipping the verification of the server cert '
                      '(for the auto-signed ones)')),
    cfg.IntOpt('autoPingInterval',
               default=2,
               help=('autoPingInterval parameter for wamp')),
    cfg.IntOpt('autoPingTimeout',
               default=2,
               help=('autoPingInterval parameter for wamp')),
    cfg.BoolOpt('service_allow_list',
                default=False,
                help='Enable service allow list checks.'),
    cfg.StrOpt('service_allow_list_path',
               default="(/var/lib/wstun/allowlist)",
               help='Path of allowlist.json file.'),
]

proxy_opts = [
    cfg.StrOpt('proxy',
               choices=[('nginx', _('nginx proxy')), ],
               help=_('Proxy for webservices')),
]

CONF = cfg.CONF
cfg.CONF.register_opts(service_opts)
cfg.CONF.register_opts(proxy_opts)
CONF.register_opts(wamp_opts, 'wamp')

txaio.start_logging(level="info")

wamp_session_caller = None
AGENT_HOST = None
LOOP = None
connected = False


# OSLO ENDPOINT
class WampEndpoint(object):
    pass  # (method bodies omitted in this extract)


def read_allowlist():
    try:
        with open(CONF.wamp.service_allow_list_path, "r") as allow_file:
            allow_list_str = allow_file.read()
            allow_list = json.loads(allow_list_str)
            #LOG.debug(allow_list)
            return allow_list
    except Exception as err:
        LOG.error(err)


class AgentEndpoint(object):
    pass  # used for testing
31.544153
81
0.557464
1c9ae3c6c99371fef5bc7aaa5ea9deed848c23c0
1,406
py
Python
src/export_to_poseviz.py
anibali/metro-pose3d
dd0c8a82ae271ce69441d216d615428e5ab1d5d1
[ "MIT" ]
52
2020-03-10T05:18:02.000Z
2021-12-23T04:03:38.000Z
src/export_to_poseviz.py
anibali/metro-pose3d
dd0c8a82ae271ce69441d216d615428e5ab1d5d1
[ "MIT" ]
2
2020-03-30T08:08:06.000Z
2020-03-31T04:26:10.000Z
src/export_to_poseviz.py
anibali/metro-pose3d
dd0c8a82ae271ce69441d216d615428e5ab1d5d1
[ "MIT" ]
7
2020-04-02T09:02:00.000Z
2020-12-12T07:11:07.000Z
#!/usr/bin/env python3

import argparse
import logging
import sys

import numpy as np

import util


if __name__ == '__main__':
    main()
29.914894
94
0.69559
1c9b5d5805105a181cbbe52dc9cadbd70001e7f9
1,606
py
Python
xcube/core/gen2/local/helpers.py
bcdev/xcube
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
[ "MIT" ]
97
2018-06-26T13:02:55.000Z
2022-03-26T21:03:13.000Z
xcube/core/gen2/local/helpers.py
bcdev/xcube
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
[ "MIT" ]
524
2018-11-09T12:00:08.000Z
2022-03-31T17:00:13.000Z
xcube/core/gen2/local/helpers.py
bcdev/xcube
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
[ "MIT" ]
15
2019-07-09T08:46:03.000Z
2022-02-07T18:47:34.000Z
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import numpy as np
import xarray as xr
41.179487
81
0.71731
98bad417651d09071c208452c18aa4573b275f66
1,683
py
Python
core/log.py
dl-stuff/dl9
1cbe98afc53a1de9d413797fb130946acc4b6ba4
[ "MIT" ]
null
null
null
core/log.py
dl-stuff/dl9
1cbe98afc53a1de9d413797fb130946acc4b6ba4
[ "MIT" ]
null
null
null
core/log.py
dl-stuff/dl9
1cbe98afc53a1de9d413797fb130946acc4b6ba4
[ "MIT" ]
null
null
null
"""Simulation logs""" from __future__ import annotations # default once 3.10 import sys from enum import Enum from typing import Type, TYPE_CHECKING if TYPE_CHECKING: from core.timeline import Timeline
25.119403
97
0.60309
98bba1bb3ab4288928a1669d17e724ae9a0d33c2
3,760
py
Python
zipline/__init__.py
chalant/pluto
e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc
[ "Apache-2.0" ]
null
null
null
zipline/__init__.py
chalant/pluto
e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc
[ "Apache-2.0" ]
null
null
null
zipline/__init__.py
chalant/pluto
e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc
[ "Apache-2.0" ]
null
null
null
# #
# # Copyright 2015 Quantopian, Inc.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# #     http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# from distutils.version import StrictVersion
# import os
# import numpy as np
#
# # This is *not* a place to dump arbitrary classes/modules for convenience,
# # it is a place to expose the public interfaces.
# from trading_calendars import get_calendar
#
# from . import data
# from . import finance
# from . import gens
# from . import utils
# from .utils.numpy_utils import numpy_version
# from .utils.pandas_utils import new_pandas
# from .utils.run_algo import run_algorithm
# from ._version import get_versions
#
# # These need to happen after the other imports.
# from . algorithm import TradingAlgorithm
# from . import api
# from zipline import extensions as ext
# from zipline.finance.blotter import Blotter
#
# # PERF: Fire a warning if calendars were instantiated during zipline import.
# # Having calendars doesn't break anything per-se, but it makes zipline imports
# # noticeably slower, which becomes particularly noticeable in the Zipline CLI.
# from trading_calendars.calendar_utils import global_calendar_dispatcher
# if global_calendar_dispatcher._calendars:
#     import warnings
#     warnings.warn(
#         "Found TradingCalendar instances after zipline import.\n"
#         "Zipline startup will be much slower until this is fixed!",
#     )
#     del warnings
# del global_calendar_dispatcher
#
#
# __version__ = get_versions()['version']
# del get_versions
#
# extension_args = ext.Namespace()
#
#
# def load_ipython_extension(ipython):
#     from .__main__ import zipline_magic
#     ipython.register_magic_function(zipline_magic, 'line_cell', 'zipline')
#
#
# if os.name == 'nt':
#     # we need to be able to write to our temp directoy on windows so we
#     # create a subdir in %TMP% that has write access and use that as %TMP%
#     def _():
#         import atexit
#         import tempfile
#
#         tempfile.tempdir = tempdir = tempfile.mkdtemp()
#
#         @atexit.register
#         def cleanup_tempdir():
#             import shutil
#             shutil.rmtree(tempdir)
#     _()
#     del _
#
#
# __all__ = [
#     'Blotter',
#     'TradingAlgorithm',
#     'api',
#     'data',
#     'finance',
#     'get_calendar',
#     'gens',
#     'run_algorithm',
#     'utils',
#     'extension_args'
# ]
#
#
# def setup(self,
#           np=np,
#           numpy_version=numpy_version,
#           StrictVersion=StrictVersion,
#           new_pandas=new_pandas):
#     """Lives in zipline.__init__ for doctests."""
#
#     if numpy_version >= StrictVersion('1.14'):
#         self.old_opts = np.get_printoptions()
#         np.set_printoptions(legacy='1.13')
#     else:
#         self.old_opts = None
#
#     if new_pandas:
#         self.old_err = np.geterr()
#         # old pandas has numpy compat that sets this
#         np.seterr(all='ignore')
#     else:
#         self.old_err = None
#
#
# def teardown(self, np=np):
#     """Lives in zipline.__init__ for doctests."""
#
#     if self.old_err is not None:
#         np.seterr(**self.old_err)
#
#     if self.old_opts is not None:
#         np.set_printoptions(**self.old_opts)
#
#
# del os
# del np
# del numpy_version
# del StrictVersion
# del new_pandas
29.147287
80
0.668617
98bbff443455dac48b3d58e95d525a9389d58413
7,896
py
Python
smarts/core/utils/traffic_history_service.py
c-h-a-r-l-i-e/SMARTS
6cb8ffda74e235c25d63b74313623b2e03e402f7
[ "MIT" ]
null
null
null
smarts/core/utils/traffic_history_service.py
c-h-a-r-l-i-e/SMARTS
6cb8ffda74e235c25d63b74313623b2e03e402f7
[ "MIT" ]
null
null
null
smarts/core/utils/traffic_history_service.py
c-h-a-r-l-i-e/SMARTS
6cb8ffda74e235c25d63b74313623b2e03e402f7
[ "MIT" ]
null
null
null
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import pickle
from dataclasses import dataclass
from multiprocessing import Pipe, Process, Queue

import ijson

import smarts.core.scenario as scenario


# NOTE: this takes `self`; in the full module it is a method of a class
# that this extract omits.
def fetch_history_at_timestep(self, timestep: str):
    if timestep not in self._all_timesteps:
        return {}
    elif timestep in self.traffic_history:
        return self.traffic_history[timestep]

    # ask child process to prepare the next batch:
    self._prepare_next_batch()
    self._prev_batch_history = self._current_traffic_history
    # receives the previous batch child process prepared
    self._current_traffic_history = self._receive_data_conn.recv()

    if timestep in self._current_traffic_history:
        return self._current_traffic_history[timestep]
    # no history exists at requested timestamp
    return {}
37.245283
90
0.630699
98bd00128cc88c306abd43b1058840e641926a91
195
py
Python
src/basics/sql_lite/update_data.py
FoxNeo/MyPythonProjects
3499ef0853f0087f6f143e1633b0a88a3d7b9818
[ "MIT" ]
null
null
null
src/basics/sql_lite/update_data.py
FoxNeo/MyPythonProjects
3499ef0853f0087f6f143e1633b0a88a3d7b9818
[ "MIT" ]
null
null
null
src/basics/sql_lite/update_data.py
FoxNeo/MyPythonProjects
3499ef0853f0087f6f143e1633b0a88a3d7b9818
[ "MIT" ]
null
null
null
import sqlite3

connect = sqlite3.connect("production.db")
cursor = connect.cursor()

cursor.execute("UPDATE PERSON SET edad = 19 WHERE nombre = 'Conker'")

connect.commit()
connect.close()
24.375
70
0.723077
98bd3099195cf49ba522ba023294ea3a974ffe7f
1,599
py
Python
calvin/runtime/south/plugins/media/defaultimpl/image.py
josrolgil/exjobbCalvin
976459eaa50246586360c049b9880d753623d574
[ "Apache-2.0" ]
1
2016-05-10T22:36:31.000Z
2016-05-10T22:36:31.000Z
calvin/runtime/south/plugins/media/defaultimpl/image.py
josrolgil/exjobbCalvin
976459eaa50246586360c049b9880d753623d574
[ "Apache-2.0" ]
null
null
null
calvin/runtime/south/plugins/media/defaultimpl/image.py
josrolgil/exjobbCalvin
976459eaa50246586360c049b9880d753623d574
[ "Apache-2.0" ]
null
null
null
import pygame
from StringIO import StringIO
import cv2
import os
import numpy
26.65
69
0.56035
98bd3fd17ab9f4b238b6d43814353c33f950c7b3
3,340
py
Python
durin/models.py
mlodic/django-rest-durin
b31a7257fb9765a4928c08bb1e68e80f48159229
[ "MIT" ]
null
null
null
durin/models.py
mlodic/django-rest-durin
b31a7257fb9765a4928c08bb1e68e80f48159229
[ "MIT" ]
null
null
null
durin/models.py
mlodic/django-rest-durin
b31a7257fb9765a4928c08bb1e68e80f48159229
[ "MIT" ]
null
null
null
import binascii
from os import urandom

import humanize
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _

from durin.settings import durin_settings
from durin.signals import token_renewed

User = settings.AUTH_USER_MODEL


def __str__(self) -> str:
    return self.token
26.299213
82
0.606886
98be793b0386d37224cb563ae56376daaaeb6f10
507
py
Python
linter.py
CudaText-addons/cuda_lint_htmltidy
afcf1dbfaa2dfc2d63e1ded4781d5f3e4b40a21c
[ "MIT" ]
null
null
null
linter.py
CudaText-addons/cuda_lint_htmltidy
afcf1dbfaa2dfc2d63e1ded4781d5f3e4b40a21c
[ "MIT" ]
null
null
null
linter.py
CudaText-addons/cuda_lint_htmltidy
afcf1dbfaa2dfc2d63e1ded4781d5f3e4b40a21c
[ "MIT" ]
null
null
null
# Copyright (c) 2013 Aparajita Fishman
# Change for CudaLint: Alexey T.
# License: MIT

import os
from cuda_lint import Linter, util

if os.name=='nt':
    _exe = os.path.join(os.path.dirname(__file__), 'tidy_win32', 'tidy')
else:
    _exe = 'tidy'
26.684211
115
0.627219
98bf3939045052dd4fba91a19ad1fdf6be1101a5
640
py
Python
PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py
AngelLiang/PP4E
3a7f63b366e1e4700b4d2524884696999a87ba9d
[ "MIT" ]
null
null
null
PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py
AngelLiang/PP4E
3a7f63b366e1e4700b4d2524884696999a87ba9d
[ "MIT" ]
null
null
null
PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py
AngelLiang/PP4E
3a7f63b366e1e4700b4d2524884696999a87ba9d
[ "MIT" ]
null
null
null
"set operations for multiple sequences"
33.684211
68
0.504688
98bf61f5f3abef89b085be204210156d6a5477f5
3,006
py
Python
airtech_api/utils/error_messages/serialization_errors.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
1
2019-04-04T12:27:55.000Z
2019-04-04T12:27:55.000Z
airtech_api/utils/error_messages/serialization_errors.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
34
2019-03-26T11:18:17.000Z
2022-02-10T08:12:36.000Z
airtech_api/utils/error_messages/serialization_errors.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
null
null
null
msg_dict = {
    'resource_not_found': 'The resource you specified was not found',
    'invalid_gender': "The gender you specified is invalid!!",
    'many_invalid_fields': 'Some errors occured while validating some fields. Please check and try again',
    'unique': 'The {} you inputted already exists',
    'user_not_found': 'The user with that username/email and password combination was not found',
    'email_not_found': 'A user with email `{}` does not exist',
    'user_already_verified': 'The user with that email has already been verified',
    'invalid_flight_type': 'Flight type must be either international or local',
    'invalid_flight_schedule': 'Flight schedule must be at least 12 hours before it is created',
    'resource_id_not_found': 'The {} with that id was not found',
    'user_book_flight_twice': 'You had previously booked for this Flight and thus cannot do it again',
    'flight_booking_expired': 'You cannot book for a flight less than 24 hours before the flight',
    'flight_schedule_expired': 'The schedule of this flight has already passed and thus you cannot book it',
    'missing_field': 'You forgot to include this field',
    'value_not_a_file': 'The value you inputted is not a file',
    'not_an_image': 'The file you uploaded is not a valid image',
    'image_too_large': 'Image must not be more than 2MB',
    'payment_link_error': 'An error occurred while creating payment link',
    'booking_already_paid': 'You have already paid for this flight',
    'booking_expired': 'Your booking has expired, thus you cannot pay for this ticket',
    'invalid_url': 'The `{}` field must be a valid URL with protocols `http` or `https`',
    "invalid_url_field": 'This field must be a valid URL with protocols `http` or `https`',
    'paystack_threw_error': "There was an unexpected error while processing request. "
                            "Please raise this as an issue in at "
                            "https://github.com/chidioguejiofor/airtech-api/issues",
    'empty_request': 'You did not specify any `{}` data in your request',
    'paid_booking_cannot_be_deleted': 'You cannot delete this Booking because you have already paid for it',
    'cannot_delete_expired_booking': 'You cannot delete an expired booking',
    'cannot_delete_flight_with_bookings': 'You cannot delete this flight because users have started booking it',
    'cannot_delete_flight_that_has_flown': 'You cannot delete this flight because the schedule date has been passed',
    'cannot_update_flight_field_with_bookings':
        'You cannot update the `{}` of this flight because it has already been booked',
    'cannot_update_field': 'You cannot update a {} {}',
    'regular_user_only': 'This endpoint is for only regular users',
    'profile_not_updated': 'You need to update your profile picture before you can do this',
    'only_alpha_and_numbers': 'This field can contain only alphabets and numbers'
}
42.338028
83
0.706254
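Several msg_dict values carry {} placeholders, evidently meant for str.format(); a quick illustration (the keys are from the dict above, the filled-in values are made up):

print(msg_dict['unique'].format('email'))
# The email you inputted already exists
print(msg_dict['resource_id_not_found'].format('Flight'))
# The Flight with that id was not found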
98c098366590ca27e9adc66a721c84a992752bc9
109
py
Python
blog/be/server/serialization/__init__.py
kamko/lnu_ht19_4ME310_final_project
ccb5d3c659cde0dac49c1bd6c3d46c46e73a111e
[ "MIT" ]
null
null
null
blog/be/server/serialization/__init__.py
kamko/lnu_ht19_4ME310_final_project
ccb5d3c659cde0dac49c1bd6c3d46c46e73a111e
[ "MIT" ]
2
2020-06-07T19:02:54.000Z
2020-06-07T19:03:02.000Z
blog/be/server/serialization/__init__.py
kamko/lnu_ht19_4ME310_final_project
ccb5d3c659cde0dac49c1bd6c3d46c46e73a111e
[ "MIT" ]
null
null
null
from .marshmallow import ma
from .schemas import ArticleSchema

__all__ = [
    'ma',
    'ArticleSchema'
]
12.111111
34
0.688073
98c0a5642acac614148ed6f1d7bcaa9979233d3b
8,950
py
Python
scripts/fast_queue.py
ourresearch/openalex-guts
f6c3e9992361e4bb1dbe76fbfb644c80f081319a
[ "MIT" ]
48
2021-11-20T08:17:53.000Z
2022-03-19T13:57:15.000Z
scripts/fast_queue.py
ourresearch/openalex-guts
f6c3e9992361e4bb1dbe76fbfb644c80f081319a
[ "MIT" ]
null
null
null
scripts/fast_queue.py
ourresearch/openalex-guts
f6c3e9992361e4bb1dbe76fbfb644c80f081319a
[ "MIT" ]
2
2022-01-04T16:28:48.000Z
2022-02-05T21:25:01.000Z
import argparse
from time import sleep, time
from collections import defaultdict

from sqlalchemy import orm, text, insert, delete
from sqlalchemy.orm import selectinload

import models
from app import db
from app import logger
from scripts.queue import JsonWorks, JsonAuthors, JsonConcepts, JsonInstitutions, JsonVenues
from util import elapsed

# python -m scripts.fast_queue --entity=work --method=add_everything --limit=3

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run fast queue.")
    parser.add_argument('--entity', type=str, help="the entity type to run")
    parser.add_argument('--method', type=str, help="the method to run")
    parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)")
    parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many objects to work on")
    parser.add_argument(
        '--chunk', "-ch", nargs="?", default=100, type=int,
        help="how many objects to take off the queue at once"
    )

    parsed_args = parser.parse_args()
    run(**vars(parsed_args))
42.216981
149
0.639777
98c14c64fb91ce8b039d5c03cf8ab0036d83b74c
3,810
py
Python
cogs/memes.py
Code-Cecilia/botman-rewrite
9d8baeebf267c62df975d2f209e85589b81934af
[ "MIT" ]
2
2022-02-21T14:10:15.000Z
2022-02-21T14:10:50.000Z
cogs/memes.py
Code-Cecilia/botman-rewrite
9d8baeebf267c62df975d2f209e85589b81934af
[ "MIT" ]
null
null
null
cogs/memes.py
Code-Cecilia/botman-rewrite
9d8baeebf267c62df975d2f209e85589b81934af
[ "MIT" ]
null
null
null
import json

import discord
from discord.ext import commands

from assets import internet_funcs
from assets.list_funcs import chunks
38.877551
109
0.574803
98c1adf25f25e2b996bb3df26aef461028014a20
55
py
Python
examples/forest_fire/run.py
fire-suppression-abm/mesa
8498eea3e5d4a739aee3b003107a0e7de59c5026
[ "Apache-2.0" ]
1,704
2015-02-01T17:59:44.000Z
2022-03-30T13:25:47.000Z
examples/forest_fire/run.py
fire-suppression-abm/mesa
8498eea3e5d4a739aee3b003107a0e7de59c5026
[ "Apache-2.0" ]
1,048
2015-01-12T01:16:05.000Z
2022-03-31T11:44:33.000Z
examples/forest_fire/run.py
fire-suppression-abm/mesa
8498eea3e5d4a739aee3b003107a0e7de59c5026
[ "Apache-2.0" ]
831
2015-03-04T13:41:25.000Z
2022-03-30T14:33:17.000Z
from forest_fire.server import server

server.launch()
13.75
37
0.818182
98c1fbeb0d5441c90960a350fd079ea801185651
2,298
py
Python
scripts/collect_timelines1.py
tedhchen/twitter_timeline_tools
bc21e8c7c4e976409281e2697e1ec75044648eb8
[ "MIT" ]
null
null
null
scripts/collect_timelines1.py
tedhchen/twitter_timeline_tools
bc21e8c7c4e976409281e2697e1ec75044648eb8
[ "MIT" ]
null
null
null
scripts/collect_timelines1.py
tedhchen/twitter_timeline_tools
bc21e8c7c4e976409281e2697e1ec75044648eb8
[ "MIT" ]
null
null
null
# Prep
import json, configparser, pickle, csv, logging, os
import pandas as pd
from tweepy import AppAuthHandler, API, Cursor

# Reading in configuration
params = configparser.ConfigParser()
params.read('config.ini')

# Functions

# Takes config file and returns authenticated api object

# Get relevant user ids

# takes user ids, and writes out a txt file with each user's status jsons

# Running script

# Setting up logger
logging.basicConfig(filename = filename, filemode = 'a',
                    format = '(%(asctime)s) %(levelname)s: %(message)s', level = logging.INFO)

# Authenticating api
api = twitter_auth(params)

# Get users from pre-parsed data
# csv file with:
# user, subset
# ..., ...
# subset is just a way to subset users from the csv file
# if subset == None, then no subsetting is performed
users = get_ids(path, subset)

# Getting timelines
get_timelines(users, api, outpath)

# Double checking errors
retry_missed_users(logfile, api, outpath)
31.479452
150
0.700174
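The comments in collect_timelines1.py describe four helpers whose bodies the extract omits. A sketch of the first one, twitter_auth, assuming config.ini holds a [twitter] section with consumer_key/consumer_secret (section and key names are guesses):

def twitter_auth(params):
    # Takes config file and returns authenticated api object (app-only auth).
    auth = AppAuthHandler(
        params['twitter']['consumer_key'],
        params['twitter']['consumer_secret'],
    )
    return API(auth, wait_on_rate_limit=True)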
98c28b91f69f483e365aff3baa53ce90ba90427d
937
py
Python
aquarius/app/auth_util.py
oceanprotocol/provider-backend
f9e36e3d6b880de548c6b92c38d10d76daf369ba
[ "Apache-2.0" ]
null
null
null
aquarius/app/auth_util.py
oceanprotocol/provider-backend
f9e36e3d6b880de548c6b92c38d10d76daf369ba
[ "Apache-2.0" ]
1
2018-08-15T09:57:01.000Z
2018-08-15T09:57:01.000Z
aquarius/app/auth_util.py
oceanprotocol/provider-backend
f9e36e3d6b880de548c6b92c38d10d76daf369ba
[ "Apache-2.0" ]
null
null
null
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
from eth_utils import is_address
from web3 import Web3


def compare_eth_addresses(address, checker, logger):
    """
    Compare two addresses and return TRUE if there is a match

    :param str address: Address
    :param str checker: Address to compare with
    :param logger: instance of logging
    :return: boolean
    """
    logger.debug("compare_eth_addresses address: %s" % address)
    logger.debug("compare_eth_addresses checker: %s" % checker)
    if not is_address(address):
        logger.debug("Address is not web3 valid")
        return False
    if not is_address(checker):
        logger.debug("Checker is not web3 valid")
        return False
    return Web3.toChecksumAddress(address) == Web3.toChecksumAddress(checker)
31.233333
77
0.71825
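A short usage sketch for compare_eth_addresses as defined above; the two addresses are arbitrary examples:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

addr = "0x281055afc982d96fab65b3a49cac8b878184cb16"
other = "0x6f46cf5569aefa1acc1009290c8e043747172d89"
assert compare_eth_addresses(addr, addr, logger) is True
assert compare_eth_addresses(addr, other, logger) is False
assert compare_eth_addresses("not-an-address", addr, logger) is False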
98c37885dc6ddaa81fc5bc670f8b3da95afaa94e
673
py
Python
oscar/lib/python2.7/site-packages/phonenumbers/data/region_DJ.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
oscar/lib/python2.7/site-packages/phonenumbers/data/region_DJ.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
oscar/lib/python2.7/site-packages/phonenumbers/data/region_DJ.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
"""Auto-generated file, do not edit by hand. DJ metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_DJ = PhoneMetadata(id='DJ', country_code=253, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='[27]\\d{7}', possible_length=(8,)), fixed_line=PhoneNumberDesc(national_number_pattern='2(?:1[2-5]|7[45])\\d{5}', example_number='21360003', possible_length=(8,)), mobile=PhoneNumberDesc(national_number_pattern='77\\d{6}', example_number='77831001', possible_length=(8,)), number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4')])
74.777778
132
0.708767
98c56839c6c8ff2db03c4e3a4f565347728ce5e0
4,541
py
Python
llvm-7.0.0.src/utils/unicode-case-fold.py
sillywalk/grazz
a0adb1a90d41ff9006d8c1476546263f728b3c83
[ "Apache-2.0" ]
171
2018-09-17T13:15:12.000Z
2022-03-18T03:47:04.000Z
llvm-7.0.0.src/utils/unicode-case-fold.py
sillywalk/grazz
a0adb1a90d41ff9006d8c1476546263f728b3c83
[ "Apache-2.0" ]
51
2019-10-23T11:55:08.000Z
2021-12-21T06:32:11.000Z
llvm-7.0.0.src/utils/unicode-case-fold.py
sillywalk/grazz
a0adb1a90d41ff9006d8c1476546263f728b3c83
[ "Apache-2.0" ]
35
2018-09-18T07:46:53.000Z
2022-03-27T07:59:48.000Z
#!/usr/bin/env python

"""
Unicode case folding database conversion utility

Parses the database and generates a C++ function which implements the case
folding algorithm. The database entries are of the form:

  <code>; <status>; <mapping>; # <name>

<status> can be one of four characters:
  C - Common mappings
  S - mappings for Simple case folding
  F - mappings for Full case folding
  T - special case for Turkish I characters

Right now this generates a function which implements simple case folding
(C+S entries).
"""

import sys
import re
import urllib2

# This variable will hold the body of the mappings function
body = ""

# Reads file line-by-line, extracts Common and Simple case fold mappings and
# returns a (from_char, to_char, from_name) tuple.

# Computes the shift (to_char - from_char) in a mapping.
def shift(mapping):
    return mapping[1] - mapping[0]

# Computes the stride (from_char2 - from_char1) of two mappings.

# Computes the stride of a list of mappings. The list should have at least two
# mappings. All mappings in the list are assumed to have the same stride.

# b is a list of mappings. All the mappings are assumed to have the same
# shift and the stride between adjacent mappings (if any) is constant.

current_block = []
f = urllib2.urlopen(sys.argv[1])
for m in mappings(f):
    if len(current_block) == 0:
        current_block.append(m)
        continue

    if shift(current_block[0]) != shift(m):
        # Incompatible shift, start a new block.
        dump_block(current_block)
        current_block = [m]
        continue

    if len(current_block) == 1 or stride(current_block) == stride2(current_block[-1], m):
        current_block.append(m)
        continue

    # Incompatible stride, start a new block.
    dump_block(current_block)
    current_block = [m]
f.close()

dump_block(current_block)

print '//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//'
print '//'
print '// This file was generated by utils/unicode-case-fold.py from the Unicode'
print '// case folding database at'
print '// ', sys.argv[1]
print '//'
print '// To regenerate this file, run:'
print '// utils/unicode-case-fold.py \\'
print '// "{}" \\'.format(sys.argv[1])
print '// > lib/Support/UnicodeCaseFold.cpp'
print '//'
print '//===----------------------------------------------------------------------===//'
print ''
print '#include "llvm/Support/Unicode.h"'
print ''
print "int llvm::sys::unicode::foldCharSimple(int C) {"
print body
print "  return C;"
print "}"
32.905797
89
0.618146
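unicode-case-fold.py keeps the comments documenting four helpers (mappings, stride, stride2, dump_block) but drops their bodies, while the driver loop still calls them. A sketch reconstructed from those comments alone; the parsing regex and the C++ that dump_block emits are guesses, not LLVM's actual generator output:

def mappings(f):
    # Yields (from_char, to_char, from_name) for C and S entries (assumed line format).
    for line in f:
        m = re.match(r'^([0-9A-F]+); [CS]; ([0-9A-F]+); # (.*)$', line)
        if m:
            yield (int(m.group(1), 16), int(m.group(2), 16), m.group(3))

def stride2(m1, m2):
    # Stride (from_char2 - from_char1) of two mappings.
    return m2[0] - m1[0]

def stride(b):
    # Stride of a list of mappings; assumed constant across the list.
    return stride2(b[0], b[1])

def dump_block(b):
    # Append C++ handling block b to `body` (emitted code is illustrative only).
    global body
    if not b:
        return
    body += "  // {0} mapping(s) with shift {1}\n".format(len(b), shift(b[0]))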
98c615953ef0bbcfd93b9c52b023ec8e35bea466
115,101
py
Python
trade_remedies_caseworker/cases/views.py
uktrade/trade-remedies-caseworker
fece9fde3cb241d96cbc1aaf7188d976f8621600
[ "MIT" ]
1
2020-08-27T09:53:00.000Z
2020-08-27T09:53:00.000Z
trade_remedies_caseworker/cases/views.py
uktrade/trade-remedies-caseworker
fece9fde3cb241d96cbc1aaf7188d976f8621600
[ "MIT" ]
7
2020-10-14T16:23:42.000Z
2021-09-24T14:18:47.000Z
trade_remedies_caseworker/cases/views.py
uktrade/trade-remedies-caseworker
fece9fde3cb241d96cbc1aaf7188d976f8621600
[ "MIT" ]
null
null
null
import itertools
import json
import logging
import re

from django.views.generic import TemplateView
from django.http import HttpResponse
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.decorators import method_decorator

from django_chunk_upload_handlers.clam_av import VirusFoundInFileException

from core.base import GroupRequiredMixin
from core.utils import (
    deep_index_items_by,
    deep_index_items_by_exists,
    get,
    key_by,
    index_users_by_group,
    compact_list,
    submission_contact,
    public_login_url,
    parse_notify_template,
    parse_api_datetime,
    pluck,
    to_json,
    from_json,
    deep_update,
    internal_redirect,
    is_date,
    notify_footer,
    notify_contact_email,
)

from django_countries import countries
from django.conf import settings

from cases.submissions import SUBMISSION_TYPE_HELPERS, get_submission_deadline
from cases.utils import decorate_orgs

from core.constants import (
    ALL_REGION_ALLOWED_TYPE_IDS,
    SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
    SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
    SECURITY_GROUPS_TRA,
    SECURITY_GROUP_TRA_ADMINISTRATOR,
    SECURITY_GROUPS_TRA_ADMINS,
    SECURITY_GROUP_ORGANISATION_OWNER,
    SUBMISSION_TYPE_QUESTIONNAIRE,
    SUBMISSION_TYPE_APPLICATION,
    SUBMISSION_NOTICE_TYPE_INVITE,
    SUBMISSION_NOTICE_TYPE_DEFICIENCY,
    SUBMISSION_TYPE_THIRD_PARTY,
    CASE_ROLE_AWAITING_APPROVAL,
    CASE_ROLE_REJECTED,
    CASE_ROLE_APPLICANT,
    CASE_ROLE_PREPARING,
    DIRECTION_TRA_TO_PUBLIC,
)

from trade_remedies_client.mixins import TradeRemediesAPIClientMixin
from trade_remedies_client.exceptions import APIException

logger = logging.getLogger(__name__)

org_fields = json.dumps(
    {
        "Organisation": {
            "id": 0,
            "has_non_draft_subs": 0,
            "gov_body": 0,
            "has_roi": 0,
        }
    }
)
41.063503
150
0.574391
98c73bdbed560b9a7619895e79b9cc268edc26d3
10,352
py
Python
analyze-ir-signal.py
hnw/analyze-ir-signal
f810c0ced955ec70c5e4c0d3556efa0a6d0d3138
[ "MIT" ]
null
null
null
analyze-ir-signal.py
hnw/analyze-ir-signal
f810c0ced955ec70c5e4c0d3556efa0a6d0d3138
[ "MIT" ]
null
null
null
analyze-ir-signal.py
hnw/analyze-ir-signal
f810c0ced955ec70c5e4c0d3556efa0a6d0d3138
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import numpy as np
from sklearn.neighbors import KernelDensity

# Relative tolerance (in percent) for some comparisons on measured data.
TOLERANCE = 25
# Lower tolerance for comparison of measured data
LTOL = 100 - TOLERANCE
# Upper tolerance for comparison of measured data
UTOL = 100 + TOLERANCE

# Resolution of the raw input buffer data. Corresponds to 2 pulses of each 26.3 at 38 kHz.
MICROS_PER_TICK = 50
# Value is subtracted from all marks and added to all spaces before decoding, to compensate for the signal forming of different IR receiver modules.
MARK_EXCESS_MICROS = 20

NEC_ADDRESS_BITS = 16  # 16 bit address or 8 bit address and 8 bit inverted address
NEC_COMMAND_BITS = 16  # Command and inverted command

NEC_BITS = (NEC_ADDRESS_BITS + NEC_COMMAND_BITS)
NEC_UNIT = 560

NEC_HEADER_MARK = (16 * NEC_UNIT)  # 9000
NEC_HEADER_SPACE = (8 * NEC_UNIT)  # 4500

NEC_BIT_MARK = NEC_UNIT
NEC_ONE_SPACE = (3 * NEC_UNIT)  # 1690
NEC_ZERO_SPACE = NEC_UNIT

PROTOCOL_IS_LSB_FIRST = False
PROTOCOL_IS_MSB_FIRST = True

#C8E880=?
#131780
data_onoff=[3016,1561,344,1186,343,1189,343,425,341,421,348,1185,348,425,341,424,346,419,345,1189,343,1187,342,1188,341,428,342,1184,347,425,344,439,328,423,351,415,351,414,348,428,342,1188,341,436,330,424,348,421,343,422,348,8272,3011,1563,343,1185,344,1183,346,422,346,422,349,1182,346,422,344,425,345,421,349,1185,342,1187,348,1184,342,422,348,1183,346,423,346,419,351,419,348,424,344,427,340,445,323,1190,342,442,325,423,345,422,347,419,348,8272,3014,1559,348,1201,326,1206,326,419,348,425,343,1183,347,419,349,424,343,427,340,1189,343,1186,343,1187,342,422,348,1184,344,436,330,422,351,423,344,424,341,422,348,438,329,1205,324,425,343,422,351,419,348,425,344]
#131720
data_30=[3015,1558,345,1183,346,1201,329,419,347,426,344,1185,347,419,345,444,325,442,328,1201,328,1204,325,1204,330,418,345,1186,343,422,348,422,348,425,342,445,322,1205,325,425,342,426,346,419,345,424,345,440,327,426,345,8262,3012,1562,341,1189,344,1186,341,424,345,424,343,1186,343,429,341,425,343,425,345,1185,344,1186,343,1187,346,425,341,1187,340,440,305,448,354,404,357,419,350,1185,341,426,341,440,329,419,348,426,320,448,345,421,345,8260,3013,1563,343,1187,352,1166,328,450,342,439,328,1208,323,425,343,421,347,422,349,1187,342,1186,352,1165,353,439,330,1204,300,453,341,424,342,428,318,462,333,1203,299,446,346,424,344,428,349,431,301,446,348,424,342]
#131750
data_50=[3020,1555,345,1188,344,1188,342,419,347,424,346,1183,345,419,351,424,342,441,326,1204,328,1201,326,1205,324,422,356,1176,348,419,344,422,350,425,342,1186,343,416,350,1185,345,423,347,419,347,421,348,442,331,418,348,8258,3016,1558,345,1184,346,1183,346,425,342,421,348,1188,343,424,344,417,349,422,348,1185,344,1184,348,1181,346,419,350,1187,342,424,345,424,343,422,350,1183,343,442,328,1201,327,419,349,423,346,424,343,421,349,435,331,8258,3018,1557,346,1184,348,1181,346,422,347,423,344,1186,346,419,354,415,355,407,350,1186,343,1190,343,1183,346,419,348,1203,326,419,350,419,348,424,344,1184,344,422,346,1187,343,417,350,421,348,419,347,442,325,425,346]
#131760
data_100=[3023,1556,346,1183,346,1183,346,419,349,422,348,1184,348,417,352,419,350,420,347,1183,348,1201,328,1185,346,422,345,1185,358,412,345,422,348,419,347,441,328,1187,345,1183,346,425,343,424,346,422,344,440,329,439,332,8267,3017,1558,346,1185,344,1185,344,423,346,423,347,1184,345,420,349,423,346,438,328,1190,342,1184,346,1180,355,416,349,1199,330,419,351,419,351,418,345,422,348,1186,345,1202,327,422,350,414,353,421,348,420,346,419,350,8268,3017,1560,343,1187,345,1202,330,414,352,417,356,1178,348,418,350,417,352,417,350,1184,347,1182,348,1185,344,438,331,1189,343,441,325,417,353,419,362,397,357,1184,346,1184,348,424,342,425,345,419,349,436,331,425,345]
# 070710
data_brighten=[3021,1557,348,1183,348,1183,345,1184,344,423,347,437,330,421,348,421,349,418,348,1184,348,1182,351,1197,329,421,348,421,348,417,350,422,347,423,347,1185,343,419,350,440,327,441,329,421,348,421,353,418,344,423,346,8252,3019,1559,347,1184,343,1184,345,1184,348,417,350,419,350,424,345,439,328,435,335,1201,328,1188,341,1186,347,438,328,441,330,420,347,419,349,423,345,1185,345,427,344,438,328,420,349,416,354,418,348,425,344,424,346,8252,3017,1576,329,1202,327,1186,349,1180,346,419,360,429,326,442,328,422,345,419,350,1184,346,1184,347,1203,330,418,345,422,348,422,351,435,331,437,330,1200,329,419,350,421,345,425,345,419,351,434,332,442,327,421,346,8255,3017,1558,345,1185,348,1199,331,1200,327,418,353,420,347,425,343,422,348,423,344,1189,342,1181,348,1184,345,424,346,436,331,437,332,421,349,420,346,1185,347,425,342,424,346,439,327,419,350,419,348,423,347,422,347,8251,3018,1561,342,1187,342,1185,347,1203,327,417,352,421,348,424,343,419,349,438,349,1155,354,1186,346,1186,344,422,347,419,347,436,333,420,349,424,343,1184,348,421,346,423,346,419,351,438,328,423,346,422,348,421,345]
# 070730
data_darken=[3018,1562,345,1189,372,1147,353,1184,348,423,347,420,346,423,346,424,345,422,347,1185,345,1183,346,1184,348,417,352,419,348,424,346,424,344,423,347,1185,344,1184,345,419,350,424,345,427,343,439,331,414,353,419,349,8261,3018,1558,351,1182,343,1190,350,1176,346,425,345,425,340,422,348,436,333,422,347,1201,328,1186,343,1190,343,424,341,443,327,421,348,417,351,419,352,1181,345,1186,348,415,353,421,345,424,346,419,347,428,341,441,329,8263,3018,1556,347,1188,342,1189,341,1186,346,423,347,440,336,398,361,421,346,428,341,1184,345,1187,342,1185,347,421,349,423,343,423,347,424,344,426,341,1186,343,1189,344,423,343,441,328,417,353,417,349,424,346,423,346]
# 131740
data_nightlight=[3020,1557,348,1185,350,1180,350,419,349,421,348,1185,346,419,348,441,327,442,331,1180,348,1184,348,1188,343,424,346,1186,343,419,350,424,345,424,346,419,350,421,350,1201,329,421,347,417,352,419,350,421,348,421,349,8260,3019,1557,351,1183,346,1184,348,422,347,419,347,1204,327,441,328,441,328,437,335,1184,344,1184,349,1185,346,438,331,1185,348,419,346,426,343,420,350,418,350,420,350,1183,352,418,347,417,354,423,344,422,347,417,354,8259,3019,1557,348,1189,343,1184,346,421,345,440,331,1184,346,422,345,424,345,438,331,1186,346,1186,344,1184,345,419,350,1184,347,421,348,417,353,419,347,424,345,424,346,1184,345,423,346,423,346,427,342,419,353,419,345]

data_hitachi=[8917,4558,525,590,525,1725,526,1728,527,590,524,1727,526,606,513,1726,521,592,527,1725,525,594,528,605,509,1727,526,588,531,1723,527,588,532,1736,511,1726,526,588,531,583,533,593,529,1720,529,1723,526,1730,523,588,530,588,528,1722,528,1727,535,1703,537,586,534,603,512,594,525,1728,522,39873]

#a = np.array(data_nightlight)
a = np.array(data_hitachi)

#show_aeha(a)
decode_nec(a)
57.511111
1,100
0.681897
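analyze-ir-signal.py keeps the NEC timing constants and the raw captures but omits show_aeha() and decode_nec(). A minimal decode_nec sketch built only from the constants above; the header check and LSB-first bit order follow the NEC convention, everything else is an assumption:

def in_tolerance(measured, expected):
    # True if measured is within ±TOLERANCE percent of expected.
    return expected * LTOL / 100 <= measured <= expected * UTOL / 100


def decode_nec(raw):
    # raw alternates mark, space, mark, space, ... in microseconds.
    marks = raw[0::2] - MARK_EXCESS_MICROS
    spaces = raw[1::2] + MARK_EXCESS_MICROS
    if not (in_tolerance(marks[0], NEC_HEADER_MARK)
            and in_tolerance(spaces[0], NEC_HEADER_SPACE)):
        print("no NEC header found")
        return None
    value = 0
    for i in range(1, NEC_BITS + 1):
        if in_tolerance(spaces[i], NEC_ONE_SPACE):
            value |= 1 << (i - 1)  # NEC sends bits LSB first
        elif not in_tolerance(spaces[i], NEC_ZERO_SPACE):
            print("unexpected space length at bit", i)
            return None
    print("decoded 0x%08X" % value)
    return value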
98c872b368191fe4e11021c3430aca414eab1a34
2,698
py
Python
mmdet/models/emod_ops/ar_module.py
zhenglab/EMOD
68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89
[ "Apache-2.0" ]
2
2020-12-09T08:40:04.000Z
2021-07-27T08:44:46.000Z
mmdet/models/emod_ops/ar_module.py
zhenglab/EMOD
68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89
[ "Apache-2.0" ]
null
null
null
mmdet/models/emod_ops/ar_module.py
zhenglab/EMOD
68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89
[ "Apache-2.0" ]
null
null
null
import torch
from torch import nn

from mmcv.cnn.utils import constant_init, kaiming_init
29.326087
81
0.546331
98c9bbdcbfc1d4a76b6ddc9df442f68e0236c7a7
519
py
Python
prayer_times_v2.py
danish09/request_api
67aac9079cb30fc0069a9273c8b4074122ea4d3b
[ "MIT" ]
null
null
null
prayer_times_v2.py
danish09/request_api
67aac9079cb30fc0069a9273c8b4074122ea4d3b
[ "MIT" ]
null
null
null
prayer_times_v2.py
danish09/request_api
67aac9079cb30fc0069a9273c8b4074122ea4d3b
[ "MIT" ]
null
null
null
import json
import requests
from datetime import datetime
from playsound import playsound

tday=datetime.today().strftime('%Y-%m-%d')
right_now=datetime.today().strftime('%I-%M-%p')

response = requests.get("https://www.londonprayertimes.com/api/times/?format=json&key=0239f686-4423-408e-9a0c-7968a403d197&year=&month=")
data=response.json()

for key,value in data.items():
    if value >= '03:30' and value < '06:00':
        print('It is asr time')
        #playsound('/home/danish/Downloads/adan.mp3')
23.590909
137
0.693642
98ca5c7bd9f6d4e14adea6a5004535831845ac15
6,763
py
Python
pokemon/pokemon_tests/test_serializers.py
pessman/pokemon_utils
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
[ "MIT" ]
1
2019-03-11T04:12:50.000Z
2019-03-11T04:12:50.000Z
pokemon/pokemon_tests/test_serializers.py
pessman/pokemon_utils
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
[ "MIT" ]
null
null
null
pokemon/pokemon_tests/test_serializers.py
pessman/pokemon_utils
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
[ "MIT" ]
2
2019-03-13T03:17:29.000Z
2019-04-04T20:06:50.000Z
import pytest

from django.test import TestCase
from rest_framework import serializers as drf_serializers

from pokemon import models, serializers
32.990244
67
0.546799
98ca9c54fc93a4a5630df7be404c28ca3e935a2c
4,962
py
Python
sqlpuzzle/_common/argsparser.py
Dundee/python-sqlpuzzle
260524922a0645c9bf94a9779195f93ef2c78cba
[ "MIT" ]
8
2015-03-19T11:25:32.000Z
2020-09-02T11:30:10.000Z
sqlpuzzle/_common/argsparser.py
Dundee/python-sqlpuzzle
260524922a0645c9bf94a9779195f93ef2c78cba
[ "MIT" ]
7
2015-03-23T14:34:28.000Z
2022-02-21T12:36:01.000Z
sqlpuzzle/_common/argsparser.py
Dundee/python-sqlpuzzle
260524922a0645c9bf94a9779195f93ef2c78cba
[ "MIT" ]
4
2018-11-28T21:59:27.000Z
2020-01-05T01:50:08.000Z
from sqlpuzzle.exceptions import InvalidArgumentException

__all__ = ('parse_args',)


# pylint: disable=dangerous-default-value,keyword-arg-before-vararg
def parse_args(options={}, *args, **kwds):
    """
    Parser of arguments.

    dict options {
        int min_items: Min of required items to fold one tuple. (default: 1)
        int max_items: Count of items in one tuple. Last `max_items-min_items`
            items is by default set to None. (default: 1)
        bool allow_dict: Flag allowing dictionary as first (and only one)
            argument or dictionary as **kwds. (default: False)
        bool allow_list: Flag allowing list as first (and only one) argument.
            (default: False)
    }

    Examples:

        calling with min_items=1, max_items=2, allow_dict=False:
            arg1, arg2 => ((arg1, None), (arg2, None))
            (arg1a, arg1b), arg2 => ((arg1a, arg1b), arg2, None))
            arg1=val1 => FAIL
            {key1: val1} => FAIL

        calling with min_items=2, max_items=3, allow_dict=True:
            arg1, arg2 => ((arg1, arg2, None),)
            arg1, arg2, arg3 => ((arg1, arg2, arg3),)
            (arg1a, arg1b, arg1c) => ((arg1a, arg1b, arg1c),)
            arg1=val1, arg2=val2 => ((arg1, val1, None), (arg2, val2, None))
            {key1: val1, key2: val2} => ((key1, val1, None), (key2, val2, None))
            (arg1a, arg1b), arg2a, arg2b => FAIL
    """
    parser_options = ParserOptions(options)
    parser_input = ParserInput(args, kwds)

    parser = Parser(parser_options, parser_input)
    parser.parse()

    return parser.output_data


# pylint: disable=too-few-public-methods
class Parser:
    def __init__(self, options, input_data):
        self.options = options
        self.input_data = input_data
        self.output_data = []
33.755102
112
0.620314
98cab2bad7becb5d77b33c01de7f7ffa0e4c8c44
16,809
py
Python
reviewboard/webapi/tests/test_review_screenshot_comment.py
ParikhKadam/reviewboard
7395902e4c181bcd1d633f61105012ffb1d18e1b
[ "MIT" ]
921
2015-01-01T15:26:28.000Z
2022-03-29T11:30:38.000Z
reviewboard/webapi/tests/test_review_screenshot_comment.py
ParikhKadam/reviewboard
7395902e4c181bcd1d633f61105012ffb1d18e1b
[ "MIT" ]
5
2015-03-17T18:57:47.000Z
2020-10-02T13:24:31.000Z
reviewboard/webapi/tests/test_review_screenshot_comment.py
ParikhKadam/reviewboard
7395902e4c181bcd1d633f61105012ffb1d18e1b
[ "MIT" ]
285
2015-01-12T06:24:36.000Z
2022-03-29T11:03:50.000Z
from __future__ import unicode_literals

from django.contrib.auth.models import User
from djblets.webapi.errors import PERMISSION_DENIED

from reviewboard.reviews.models import ScreenshotComment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
    screenshot_comment_item_mimetype,
    screenshot_comment_list_mimetype)
from reviewboard.webapi.tests.mixins import (
    BasicTestsMetaclass,
    ReviewRequestChildItemMixin,
    ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_comment import (
    CommentItemMixin,
    CommentListMixin)
from reviewboard.webapi.tests.urls import (
    get_review_screenshot_comment_item_url,
    get_review_screenshot_comment_list_url)
41.198529
79
0.636861
98caf2eb8158bde50b1d44dd5a0629d9a33340c7
1,163
py
Python
qbapi/app.py
dimddev/qb
fbf9f4cac8aaf14243229e3193960da7114bb7ba
[ "BSD-3-Clause" ]
null
null
null
qbapi/app.py
dimddev/qb
fbf9f4cac8aaf14243229e3193960da7114bb7ba
[ "BSD-3-Clause" ]
null
null
null
qbapi/app.py
dimddev/qb
fbf9f4cac8aaf14243229e3193960da7114bb7ba
[ "BSD-3-Clause" ]
null
null
null
""" Command line tool """ import asyncio from qbapi.request import create_request from qbapi.services.clients import Producer, Consumer
21.943396
66
0.638005
98cd18a83142f071207fd03be7967e2e0520ebe6
9,063
py
Python
test/test_literal.py
hrnciar/rdflib
d507fdac93be2ec3e35882e3efaa5e7c7349fa93
[ "BSD-3-Clause" ]
null
null
null
test/test_literal.py
hrnciar/rdflib
d507fdac93be2ec3e35882e3efaa5e7c7349fa93
[ "BSD-3-Clause" ]
null
null
null
test/test_literal.py
hrnciar/rdflib
d507fdac93be2ec3e35882e3efaa5e7c7349fa93
[ "BSD-3-Clause" ]
null
null
null
import unittest
import datetime

import rdflib  # needed for eval(repr(...)) below
from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind, _XSD_BOOLEAN
from rdflib.namespace import XSD


if __name__ == "__main__":
    unittest.main()
33.69145
98
0.580051
98cd74ddbd3fcee3b46641490c05dc6a010713cd
2,472
py
Python
src/messages/text/ruling.py
rkulyn/telegram-dutch-taxbot
f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1
[ "MIT" ]
2
2020-02-27T13:15:07.000Z
2020-09-19T15:19:29.000Z
src/messages/text/ruling.py
rkulyn/telegram-dutch-taxbot
f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1
[ "MIT" ]
null
null
null
src/messages/text/ruling.py
rkulyn/telegram-dutch-taxbot
f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1
[ "MIT" ]
null
null
null
import telegram

from emoji import emojize

from .base import TextMessageBase
44.142857
121
0.607605
98cecf3619ad0f5f809b91b86260d60284ee57d7
14,312
py
Python
extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py
pyaf/severstal-steel-defect-detection
68a0df4164e84803b6cba78597a079d3736b4e00
[ "MIT" ]
null
null
null
extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py
pyaf/severstal-steel-defect-detection
68a0df4164e84803b6cba78597a079d3736b4e00
[ "MIT" ]
null
null
null
extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py
pyaf/severstal-steel-defect-detection
68a0df4164e84803b6cba78597a079d3736b4e00
[ "MIT" ]
null
null
null
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from common import *
from dataset import *
from model import *


#------------------------------------
# main #################################################################

if __name__ == '__main__':
    print('%s: calling main function ... ' % os.path.basename(__file__))

    run_train()
33.995249
180
0.523896
98d09bdc81e45d8b676af2c3e285dd5d038ee1da
1,283
py
Python
city_coord_download.py
Yuchen971/Chinese-city-level-geojson
51f8d3d336f3e335b15bbf37882a9f248f0e6461
[ "MIT" ]
null
null
null
city_coord_download.py
Yuchen971/Chinese-city-level-geojson
51f8d3d336f3e335b15bbf37882a9f248f0e6461
[ "MIT" ]
null
null
null
city_coord_download.py
Yuchen971/Chinese-city-level-geojson
51f8d3d336f3e335b15bbf37882a9f248f0e6461
[ "MIT" ]
null
null
null
import requests
import os

get_json('/Users/yuchenli/Downloads/city_geojson-master', 100000)
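# NOTE: get_json() is called above but its definition was stripped from this
# snippet. A minimal sketch of what it plausibly does, given the repository
# topic (downloading Chinese city-level GeoJSON boundaries by adcode). The
# DataV GeoAtlas URL below is an assumption, not taken from the original:
def get_json(out_dir, adcode):
    url = "https://geo.datav.aliyun.com/areas_v3/bound/%s_full.json" % adcode
    resp = requests.get(url)
    if resp.status_code == 200:
        # Save the boundary file for this administrative code.
        with open(os.path.join(out_dir, "%s.json" % adcode), "w") as f:
            f.write(resp.text)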
44.241379
87
0.626656
98d0ac63dc3b2801dd2a6ec85e229c55affc71b1
703
py
Python
myproject/core/clusterAnalysis.py
xiaoxiansheng19/data_analysis
9e05aada3f545472500e04225d8537b7f0f90a85
[ "MIT" ]
null
null
null
myproject/core/clusterAnalysis.py
xiaoxiansheng19/data_analysis
9e05aada3f545472500e04225d8537b7f0f90a85
[ "MIT" ]
null
null
null
myproject/core/clusterAnalysis.py
xiaoxiansheng19/data_analysis
9e05aada3f545472500e04225d8537b7f0f90a85
[ "MIT" ]
null
null
null
# from sklearn.cluster import DBSCAN,KMeans
#
#
# def run(data,radius=300):
#     res={}
#     # epsilon=0.001, min_samples=200
#     epsilon = radius / 100000
#     # epsilon = 0.003
#     min_samples = 100
#     db = DBSCAN(eps=epsilon, min_samples=min_samples)
#     # eps
#     # min_samples,,
#     y_pred = db.fit_predict(data)
#     # print(y_pred)
#     # df_user_info['label'] = y_pred
#     n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0)
#
#     if n_clusters_<1:
#         model = KMeans(n_clusters=1, random_state=0)
#         model.fit(data)
#         centroid = model.cluster_centers_
#         res['point']=
33.47619
76
0.627312
98d0b391f82dbbbda80cf6f637cf8415548b806e
1,881
py
Python
verticapy/tests/vDataFrame/test_vDF_create.py
sitingren/VerticaPy
aa18f4f1277e264005de2d1a8646c28acd1ba137
[ "Apache-2.0" ]
null
null
null
verticapy/tests/vDataFrame/test_vDF_create.py
sitingren/VerticaPy
aa18f4f1277e264005de2d1a8646c28acd1ba137
[ "Apache-2.0" ]
null
null
null
verticapy/tests/vDataFrame/test_vDF_create.py
sitingren/VerticaPy
aa18f4f1277e264005de2d1a8646c28acd1ba137
[ "Apache-2.0" ]
null
null
null
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest, warnings

from verticapy import vDataFrame, drop_table
from verticapy import set_option

set_option("print_info", False)
34.2
88
0.725678
98d33c72cdff1bb8b3302772a68873ef14217bfa
353
py
Python
Solutions/beta/beta_is_it_an_isogram.py
citrok25/Codewars-1
dc641c5079e2e8b5955eb027fd15427e5bdb2e26
[ "MIT" ]
46
2017-08-24T09:27:57.000Z
2022-02-25T02:24:33.000Z
Solutions/beta/beta_is_it_an_isogram.py
abbhishek971/Codewars
9e761811db724da1e8aae44594df42b4ee879a16
[ "MIT" ]
null
null
null
Solutions/beta/beta_is_it_an_isogram.py
abbhishek971/Codewars
9e761811db724da1e8aae44594df42b4ee879a16
[ "MIT" ]
35
2017-08-01T22:09:48.000Z
2022-02-18T17:21:37.000Z
import re
from collections import Counter
32.090909
67
0.430595
98d39e717fc52a479b273f0813ba804a39854ac0
1,011
py
Python
p23_Merge_k_Sorted_Lists.py
bzhou26/leetcode_sol
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
[ "MIT" ]
null
null
null
p23_Merge_k_Sorted_Lists.py
bzhou26/leetcode_sol
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
[ "MIT" ]
null
null
null
p23_Merge_k_Sorted_Lists.py
bzhou26/leetcode_sol
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
[ "MIT" ]
null
null
null
'''
- Leetcode problem: 23
- Difficulty: Hard
- Brief problem description:

  Merge k sorted linked lists and return it as one sorted list.
  Analyze and describe its complexity.

  Example:
  Input:
  [
    1->4->5,
    1->3->4,
    2->6
  ]
  Output: 1->1->2->3->4->4->5->6

- Solution Summary:
- Used Resources:

--- Bo Zhou
'''

# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
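# NOTE: the solution body was stripped from this snippet. Below is a standard
# O(N log k) heap-based merge consistent with the problem statement, assuming
# the ListNode class shown (commented out) above:
import heapq

def mergeKLists(lists):
    # Seed the heap with the head of each non-empty list; the index i is a
    # tie-breaker so nodes themselves are never compared.
    heap = [(node.val, i, node) for i, node in enumerate(lists) if node]
    heapq.heapify(heap)
    dummy = tail = ListNode()
    while heap:
        val, i, node = heapq.heappop(heap)
        tail.next = node
        tail = node
        if node.next:
            heapq.heappush(heap, (node.next.val, i, node.next))
    return dummy.next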
20.22
98
0.565776
98d3b83719bd7419b5fbeccf9de154866fb50efc
117
py
Python
flocx_ui/content/flocx/views.py
whitel/flocx-ui
3887882f1a7a650850bda9d7627cf6ebcc6c32e8
[ "Apache-2.0" ]
null
null
null
flocx_ui/content/flocx/views.py
whitel/flocx-ui
3887882f1a7a650850bda9d7627cf6ebcc6c32e8
[ "Apache-2.0" ]
null
null
null
flocx_ui/content/flocx/views.py
whitel/flocx-ui
3887882f1a7a650850bda9d7627cf6ebcc6c32e8
[ "Apache-2.0" ]
null
null
null
from django.views import generic
29.25
44
0.811966
98d56156be74bebcd376e40f41b92a8ab49e898e
5,833
py
Python
wificontrol/utils/networkstranslate.py
patrislav1/pywificontrol
1edf9cdb95158804033dba8fcb860e5214ded10f
[ "BSD-3-Clause" ]
1
2019-02-12T14:08:08.000Z
2019-02-12T14:08:08.000Z
wificontrol/utils/networkstranslate.py
patrislav1/pywificontrol
1edf9cdb95158804033dba8fcb860e5214ded10f
[ "BSD-3-Clause" ]
null
null
null
wificontrol/utils/networkstranslate.py
patrislav1/pywificontrol
1edf9cdb95158804033dba8fcb860e5214ded10f
[ "BSD-3-Clause" ]
2
2018-12-05T15:55:22.000Z
2019-01-28T03:44:21.000Z
# Written by Ivan Sapozhkov and Denis Chagin <denis.chagin@emlid.com>
#
# Copyright (c) 2016, Emlid Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
#    may be used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


if __name__ == '__main__':
    network = {'ssid': "MySSID",
               'password': "NewPassword",
               'security': "wpaeap",
               'identity': "alex@example.com"}

    conv = convert_to_wpas_network(network)
    reconv = convert_to_wificontrol_network(conv)
    print(conv, reconv)
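# NOTE: convert_to_wpas_network() and convert_to_wificontrol_network() were
# stripped from this snippet. A minimal sketch of the round trip the demo
# above exercises; the quoted-field convention follows wpa_supplicant style
# and is an assumption, not the original Emlid implementation:
def convert_to_wpas_network(network):
    # wpa_supplicant expects string fields wrapped in double quotes.
    wpas = {'ssid': '"{}"'.format(network['ssid'])}
    if network.get('password'):
        wpas['password'] = '"{}"'.format(network['password'])
    if network.get('identity'):
        wpas['identity'] = '"{}"'.format(network['identity'])
    return wpas

def convert_to_wificontrol_network(network):
    # Strip the wpa_supplicant quoting again on the way back.
    return {key: value.strip('"') for key, value in network.items()}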
36.006173
113
0.603806
98d57c6c79fbcfbe80f6e85abd3550ed59d42da1
22,613
py
Python
src/LaminariaCore.py
MrKelpy/IFXG
695865a8140fdf258a643ee29d6439a59037bc99
[ "MIT" ]
null
null
null
src/LaminariaCore.py
MrKelpy/IFXG
695865a8140fdf258a643ee29d6439a59037bc99
[ "MIT" ]
null
null
null
src/LaminariaCore.py
MrKelpy/IFXG
695865a8140fdf258a643ee29d6439a59037bc99
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
This module is distributed as part of the Laminaria Core (Python Version).
Get the Source Code in GitHub:
https://github.com/MrKelpy/LaminariaCore

The LaminariaCore is Open Source and distributed under the
MIT License
"""

# Built-in Imports
import datetime
import random
import asyncio
import os

# Third Party Imports
import screeninfo
from discord.ext import commands
import discord
from fpdf import FPDF

# Local Application Imports

###############################################################################
###                              DATE & TIME                                ###
###############################################################################

def twochars(arg):
    """
    Formats a string of two characters into the format of (0X), useful for date formatting.
    :param arg: The string
    :return: String
    """
    if len(arg) == 1:
        return f"0{arg}"
    return arg


def get_formatted_date(date: datetime, include_seconds: bool = False):
    """
    Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
    :param date: The date to be formatted -> datetime.datetime
    :param include_seconds: If set to True, include seconds in the format.
    :return: String
    """
    date_string = f"{twochars(str(date.day))}/{twochars(str(date.month))}/{twochars(str(date.year))} - " \
                  f"{twochars(str(date.hour))}:{twochars(str(date.minute))}"

    if include_seconds:
        date_string += f":{twochars(str(date.second))}"

    return date_string


def get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
    """
    Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
    :param formatting: Format type -> int
    :param include_seconds: If set to True, include seconds in the format.
    :return: String
    """
    now = datetime.datetime.now()

    if formatting == 1:
        date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
                      f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
    elif formatting == 2:
        date_string = f"{twochars(str(now.day))}.{twochars(str(now.month))}.{twochars(str(now.year))}_" \
                      f"{twochars(str(now.hour))}.{twochars(str(now.minute))}"
    else:
        date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
                      f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"

    if include_seconds:
        date_string += f":{twochars(str(now.second))}"

    return date_string


def time_until_midnight():
    """
    Get seconds left until midnight
    """
    tomorrow = datetime.date.today() + datetime.timedelta(days=1)
    timedelta_until_midnight = datetime.datetime.combine(tomorrow, datetime.time.min) - datetime.datetime.now()
    return timedelta_until_midnight.seconds


###############################################################################
###                                GENERAL                                  ###
###############################################################################

def get_absolute_screen_coords(relx, rely):
    """
    Returns absolute screen coordinates based on the given relative coordinates.
    For instance, in a 1920x720 screen, the x50, y50 input would be x960, y360.
    :param relx: Relative X Coordinate
    :param rely: Relative Y Coordinate
    :return: Absolute Coordinates
    """
    monitor = screeninfo.get_monitors()[0]
    x = (relx*monitor.width)/100
    y = (rely*monitor.height)/100
    return x, y


def get_relative_screen_coords(x, y):
    """
    Returns relative screen coordinates based on the given absolute coordinates.
    The relative coordinates are percentage-based values calculated relative
    to the monitor specs and the given coords.
    :param x: Absolute X
    :param y: Absolute Y
    :return:
    """
    monitor = screeninfo.get_monitors()[0]
    relx = (x*100)/monitor.width
    rely = (y*100)/monitor.height
    return relx, rely

###############################################################################
###                              PLACEHOLDERS                               ###
###############################################################################

###############################################################################
###                               DISCORD.PY                                ###
###############################################################################
35.388106
139
0.618007
98d580323bddffeab7acfa9058f08c58277e005a
1,280
py
Python
examples/api/default_value.py
clamdad/atom
45d8a2e696002914dd5b71c150edbe54e9ba1e59
[ "BSD-3-Clause-Clear" ]
222
2015-01-01T10:16:41.000Z
2022-03-15T21:28:08.000Z
examples/api/default_value.py
clamdad/atom
45d8a2e696002914dd5b71c150edbe54e9ba1e59
[ "BSD-3-Clause-Clear" ]
100
2015-01-28T16:26:04.000Z
2022-03-29T07:17:44.000Z
examples/api/default_value.py
clamdad/atom
45d8a2e696002914dd5b71c150edbe54e9ba1e59
[ "BSD-3-Clause-Clear" ]
45
2015-01-05T14:08:13.000Z
2022-01-31T14:17:20.000Z
# --------------------------------------------------------------------------------------
# Copyright (c) 2013-2021, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
"""Demonstrate all the ways to initialize a value

1. Pass the value directly
2. Assign the default value explicitly
3. Provide the value during initialization of the object
4. Provide factory callable that returns a value
5. Use a _default_* static method

"""
import sys

from atom.api import Atom, Int, Str


def get_last_name():
    """Return a last name based on the system byteorder."""
    return sys.byteorder.capitalize()


if __name__ == "__main__":
    bob = Person(address="101 Main")
    print((bob.first_name, bob.last_name, bob.age))
    print(bob.mother)
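# NOTE: the Person class exercised above was stripped from this snippet. A
# minimal sketch covering the five initialization styles from the module
# docstring (member names are inferred from the __main__ block; the exact
# default values are assumptions):
class Person(Atom):
    first_name = Str("Bob")                  # 1. pass the value directly
    age = Int(default=40)                    # 2. assign the default explicitly
    address = Str()                          # 3. provided when the object is created
    last_name = Str(factory=get_last_name)   # 4. factory callable
    mother = Str()

    def _default_mother(self):               # 5. _default_* static method
        return "Maude " + self.last_name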
24.615385
88
0.604688
98d5c4c121e4fde76563c5c0ac59d5c2ef8f0cbc
23,139
py
Python
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py
mdavid/nuxleus
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
[ "BSD-3-Clause" ]
1
2017-03-28T06:41:51.000Z
2017-03-28T06:41:51.000Z
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py
mdavid/nuxleus
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
[ "BSD-3-Clause" ]
null
null
null
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py
mdavid/nuxleus
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
[ "BSD-3-Clause" ]
1
2016-12-13T21:08:58.000Z
2016-12-13T21:08:58.000Z
#!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
#     All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
#     http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
#     not this notice.
# (2) Reproduced in the COPYING file, and at:
#     http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
"""\
=============================================
Parsing and Creation of YUV4MPEG format files
=============================================

YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and
sends video frame data structures to its "outbox" outbox.

FrameToYUV4MPEG does the reverse - taking frame data structures sent to its
"inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox.

The YUV4MPEG file format is supported by many tools, such as mjpegtools,
mplayer/mencoder, and ffmpeg.



Example Usage
-------------

Playback a YUV4MPEG format file::

    Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...),
              YUV4MPEGToFrame(),
              VideoOverlay()
            ).run()

Decode a dirac encoded video file to a YUV4MPEG format file::

    Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...),
              DiracDecoder(),
              FrameToYUV4MPEG(),
              SimpleFileWriter("output.yuv4mpeg")
            ).run()



YUV4MPEGToFrame Behaviour
-------------------------

Send binary data as strings containing YUV4MPEG format data to the "inbox"
inbox and frame data structures will be sent out of the "outbox" outbox as
soon as they are parsed.

See below for a description of the uncompressed frame data structure format.

This component supports sending data out of its outbox to a size limited
inbox. If the size limited inbox is full, this component will pause until it
is able to send out the data. Data will not be consumed from the inbox if
this component is waiting to send to the outbox.

If a producerFinished message is received on the "control" inbox, this
component will complete parsing any data pending in its inbox, and finish
sending any resulting data to its outbox. It will then send the
producerFinished message on out of its "signal" outbox and terminate.

If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and
immediately terminate. It will not complete processing, or sending on any
pending data.



FrameToYUV4MPEG Behaviour
-------------------------

Send frame data structures to the "inbox" inbox of this component. YUV4MPEG
format binary string data will be sent out of the "outbox" outbox.

See below for a description of the uncompressed frame data structure format.

The header data for the YUV4MPEG file is determined from the first frame.
All frames sent to this component must therefore be in the same pixel format
and size, otherwise the output data will not be valid YUV4MPEG.

This component supports sending data out of its outbox to a size limited
inbox. If the size limited inbox is full, this component will pause until it
is able to send out the data. Data will not be consumed from the inbox if
this component is waiting to send to the outbox.

If a producerFinished message is received on the "control" inbox, this
component will complete parsing any data pending in its inbox, and finish
sending any resulting data to its outbox. It will then send the
producerFinished message on out of its "signal" outbox and terminate.

If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and
immediately terminate. It will not complete processing, or sending on any
pending data.



=========================
UNCOMPRESSED FRAME FORMAT
=========================

A frame is a dictionary data structure. It must, at minimum, contain the
first 3 ("yuv", "size" and "pixformat")::

    {
      "yuv" : (y_data, u_data, v_data)  # a tuple of strings
      "size" : (width, height)          # in pixels
      "pixformat" : pixelformat         # format of raw video data
      "frame_rate" : fps                # frames per second
      "interlaced" : 0 or not 0         # non-zero if the frame is two interlaced fields
      "topfieldfirst" : 0 or not 0      # non-zero the first field comes first in the data
      "pixel_aspect" : fraction         # aspect ratio of pixels
      "sequence_meta" : metadata        # string containing extended metadata
                                        # (no whitespace or control characters)
    }

All other fields are optional when providing frames to FrameToYUV4MPEG.

YUV4MPEGToFrame only guarantees to fill in the YUV data itself. All other
fields will be filled in if the relevant header data is detected in the
file.

The pixel formats recognised (and therefore supported) are::

    "YUV420_planar"
    "YUV411_planar"
    "YUV422_planar"
    "YUV444_planar"
    "YUV4444_planar"
    "Y_planar"

"""

from Axon.Component import component
#from Axon.Ipc import WaitComplete
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
import re

from Kamaelia.Support.Data.Rationals import rational


def parse_seq_tags(fields):
    """Parses YUV4MPEG header tags"""
    params = {}
    tags = {}
    while fields:
        m = re.match("^ (.)(\S*)(.*)$", fields)
        (tag, value, fields) = m.groups()
        tags[tag] = value

    if "W" in tags and "H" in tags:
        params['size'] = (int(tags["W"]), int(tags["H"]))
    else:
        raise ValueError("YUV4MPEG header did not specify frame size (W and H tags)")

    if "C" in tags:
        C = tags["C"]
        if C == "420jpeg":        # 4:2:0 with JPEG/MPEG-1 siting (default)
            params['pixformat'] = "YUV420_planar"
            params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
        elif C == "420mpeg2":     # 4:2:0 with MPEG-2 siting
            params['pixformat'] = "YUV420_planar"
            params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
        elif C == "420paldv":     # 4:2:0 with PAL-DV siting
            params['pixformat'] = "YUV420_planar"
            params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
        elif C == "411":          # 4:1:1, cosited
            params['pixformat'] = "YUV411_planar"
            params['chroma_size'] = (params['size'][0]/4, params['size'][1])
        elif C == "422":          # 4:2:2, cosited
            params['pixformat'] = "YUV422_planar"
            params['chroma_size'] = (params['size'][0]/2, params['size'][1])
        elif C == "444":          # 4:4:4 (no subsampling)
            params['pixformat'] = "YUV444_planar"
            params['chroma_size'] = (params['size'][0], params['size'][1])
        elif C == "444alpha":     # 4:4:4 with an alpha channel
            params['pixformat'] = "YUV4444_planar"
            params['chroma_size'] = (params['size'][0], params['size'][1])
        elif C == "mono":         # luma (Y') plane only
            params['pixformat'] = "Y_planar"
            params['chroma_size'] = (0, 0)
    else:
        params['pixformat'] = "YUV420_planar"
        params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)

    if "I" in tags:
        I = tags["I"]
        if I == "?":       # unknown (default)
            pass
        elif I == "p":     # progressive/none
            params["interlaced"] = False
        elif I == "t":     # top-field-first
            params["interlaced"] = True
            params["topfieldfirst"] = True
        elif I == "b":     # bottom-field-first
            params["interlaced"] = True
            params["topfieldfirst"] = False
        elif I == "m":     # mixed-mode: refer to 'I' tag in frame header
            pass

    if "F" in tags:
        m = re.match("^(\d+):(\d+)$", tags["F"])
        num, denom = float(m.groups()[0]), float(m.groups()[1])
        if denom > 0:
            params["frame_rate"] = num/denom

    if "A" in tags:
        m = re.match("^(\d+):(\d+)$", tags["A"])
        num, denom = float(m.groups()[0]), float(m.groups()[1])
        if denom > 0:
            params["pixel_aspect"] = num/denom

    if "X" in tags:
        params["sequence_meta"] = tags["X"]

    return params


def parse_frame_tags(fields):
    """\
    Parses YUV4MPEG frame tags.
    """
    params = {}
    tags = {}
    while fields:
        m = re.match("^ (.)(\S*)(.*)$", fields)
        (tag, value, fields) = m.groups()
        tags[tag] = value

    if "I" in tags:
        x, y, z = tags["I"][0], tags["I"][1], tags["I"][2]
        if x == "t":       # top-field-first
            params["interlaced"] = True
            params["topfieldfirst"] = True
        elif x == "T":     # top-field-first and repeat
            params["interlaced"] = True
            params["topfieldfirst"] = True
        elif x == "b":     # bottom-field-first
            params["interlaced"] = True
            params["topfieldfirst"] = False
        elif x == "B":     # bottom-field-first and repeat
            params["interlaced"] = True
            params["topfieldfirst"] = False
        elif x == "1":     # single progressive frame
            params["interlaced"] = False
        elif x == "2":     # double progressive frame (repeat)
            params["interlaced"] = False
        elif x == "3":     # triple progressive frame (repeat)
            params["interlaced"] = False

        if y == "p":       # fields sampled at same time
            params["interlaced"] = False
        elif y == "i":     # fields sampled at different times
            params["interlaced"] = True

        if z == "p":       # progressive (subsampling over whole frame)
            pass
        elif z == "i":     # interlaced (each field subsampled independently)
            pass
        elif z == "?":     # unknown (allowed only for non-4:2:0 subsampling)
            pass

    if "X" in tags:
        params["meta"] = tags["X"]

    return params


__kamaelia_components__ = ( YUV4MPEGToFrame, FrameToYUV4MPEG, )


if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.File.Reading import RateControlledFileReader
    from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay

    Pipeline( RateControlledFileReader("/data/stream.yuv",readmode="bytes",rate=25*(608256+128)),
              YUV4MPEGToFrame(),
              FrameToYUV4MPEG(),
              YUV4MPEGToFrame(),
              VideoOverlay(),
            ).run()
35.543779
107
0.563075
98d7520f9994f6836e73faaf42f63009eee0dc64
697
py
Python
project/cli/event.py
DanielGrams/gsevp
e94034f7b64de76f38754b56455e83092378261f
[ "MIT" ]
1
2021-06-01T14:49:18.000Z
2021-06-01T14:49:18.000Z
project/cli/event.py
DanielGrams/gsevp
e94034f7b64de76f38754b56455e83092378261f
[ "MIT" ]
286
2020-12-04T14:13:00.000Z
2022-03-09T19:05:16.000Z
project/cli/event.py
DanielGrams/gsevpt
a92f71694388e227e65ed1b24446246ee688d00e
[ "MIT" ]
null
null
null
import click
from flask.cli import AppGroup

from project import app, db
from project.dateutils import berlin_tz
from project.services.event import (
    get_recurring_events,
    update_event_dates_with_recurrence_rule,
)

event_cli = AppGroup("event")
app.cli.add_command(event_cli)
24.034483
72
0.746055
98d900684301053ffd4e6344e16abaa1c0d10ed9
3,647
py
Python
test/functional/examples/test_examples.py
ymn1k/testplan
b1bde8495c449d75a74a7fe4e7c6501b0476f833
[ "Apache-2.0" ]
null
null
null
test/functional/examples/test_examples.py
ymn1k/testplan
b1bde8495c449d75a74a7fe4e7c6501b0476f833
[ "Apache-2.0" ]
null
null
null
test/functional/examples/test_examples.py
ymn1k/testplan
b1bde8495c449d75a74a7fe4e7c6501b0476f833
[ "Apache-2.0" ]
1
2019-09-11T09:13:18.000Z
2019-09-11T09:13:18.000Z
import os
import re
import sys
import subprocess

import pytest

from testplan.common.utils.path import change_directory

import platform

ON_WINDOWS = platform.system() == 'Windows'

KNOWN_EXCEPTIONS = [
    "TclError: Can't find a usable init\.tcl in the following directories:",  # Matplotlib module improperly installed. Will skip Data Science example.
    "ImportError: lib.*\.so\..+: cannot open shared object file: No such file or directory",  # Matplotlib module improperly installed. Will skip Data Science example.
    "ImportError: No module named sklearn.*",  # Missing module sklearn. Will skip Data Science example.
    "ImportError: No module named Tkinter",  # Missing module Tkinter. Will skip Data Science example.
    "ImportError: No module named _tkinter.*",  # Missing module Tkinter. Will skip Data Science example.
    "RuntimeError: Download pyfixmsg library .*",  # Missing module pyfixmsg. Will skip FIX example.
    "No spec file set\. You should download .*",  # Missing FIX spec file. Will skip FIX example.
    "AttributeError: 'module' object has no attribute 'poll'",
    "RuntimeError: You need to compile test binary first."  # Need to compile cpp binary first. Will skip GTest example.
]

SKIP_ON_WINDOWS = [
    os.path.join('Cpp', 'GTest', 'test_plan.py'),
]

ROOT_DIR_CONTENTS = [
    "setup.py",
    "requirements.txt",
    "README.rst",
    "LICENSE.md"
]
34.733333
166
0.636413
98db431598c035c6864fd313e00c493666f532f6
1,223
py
Python
peco/template/template.py
Tikubonn/peco
c77fc163ad31d3c271d299747914ce4ef3386987
[ "MIT" ]
null
null
null
peco/template/template.py
Tikubonn/peco
c77fc163ad31d3c271d299747914ce4ef3386987
[ "MIT" ]
null
null
null
peco/template/template.py
Tikubonn/peco
c77fc163ad31d3c271d299747914ce4ef3386987
[ "MIT" ]
null
null
null
from io import StringIO
23.519231
66
0.555192
98dc08bcdfcddaf7d2d055024948658ae151bf17
2,342
py
Python
mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py
ministryofjustice/mtp-api
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
[ "MIT" ]
5
2016-01-05T12:21:35.000Z
2020-10-28T17:06:02.000Z
mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py
ministryofjustice/mtp-api
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
[ "MIT" ]
209
2015-06-12T09:39:41.000Z
2022-03-21T16:01:19.000Z
mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py
ministryofjustice/mtp-api
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
[ "MIT" ]
1
2021-04-11T06:19:23.000Z
2021-04-11T06:19:23.000Z
from core import getattr_path
from rest_framework import status

from credit.tests.test_views.test_credit_list.test_security_credit_list import SecurityCreditListTestCase
36.59375
105
0.634927
98dc59660d9259931f06beb23b9db7e987e199a4
3,800
py
Python
vipermonkey/core/filetype.py
lap1nou/ViperMonkey
631d242f43108226bb25ed91e773a274012dc8c2
[ "Unlicense" ]
874
2016-09-29T08:19:00.000Z
2022-03-28T03:34:16.000Z
vipermonkey/core/filetype.py
Mercury-180/ViperMonkey
1045dadcf7bebedc126ca36d25475e413196d053
[ "Unlicense" ]
94
2016-09-30T17:03:36.000Z
2022-03-01T17:25:26.000Z
vipermonkey/core/filetype.py
Mercury-180/ViperMonkey
1045dadcf7bebedc126ca36d25475e413196d053
[ "Unlicense" ]
186
2016-09-29T10:59:37.000Z
2022-03-26T10:20:38.000Z
""" Check for Office file types ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft VBA macros (Visual Basic for Applications), mainly for malware analysis. Author: Philippe Lagadec - http://www.decalage.info License: BSD, see source code or documentation Project Repository: https://github.com/decalage2/ViperMonkey """ # === LICENSE ================================================================== # ViperMonkey is copyright (c) 2015-2016 Philippe Lagadec (http://www.decalage.info) # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Office magic numbers. magic_nums = { "office97" : "D0 CF 11 E0 A1 B1 1A E1", # Office 97 "office2007" : "50 4B 3 4", # Office 2007+ (PKZip) } # PE magic number. pe_magic_num = "4D 5A" def is_pe_file(fname, is_data): """ Check to see if the given file is a PE executable. return - True if it is a PE file, False if not. """ # Read the 1st 8 bytes of the file. curr_magic = get_1st_8_bytes(fname, is_data) # See if we the known magic #. return (curr_magic.startswith(pe_magic_num)) def is_office_file(fname, is_data): """ Check to see if the given file is a MS Office file format. return - True if it is an Office file, False if not. """ # Read the 1st 8 bytes of the file. curr_magic = get_1st_8_bytes(fname, is_data) # See if we have 1 of the known magic #s. for typ in magic_nums.keys(): magic = magic_nums[typ] if (curr_magic.startswith(magic)): return True return False
33.043478
84
0.678158
98dec69515aeffc54b77de9f6161248b53aa1b30
2,699
py
Python
packs/kubernetes/tests/test_third_party_resource.py
userlocalhost2000/st2contrib
1a5f759e76401743ed9023d298a3d767e3885db1
[ "Apache-2.0" ]
164
2015-01-17T16:08:33.000Z
2021-08-03T02:34:07.000Z
packs/kubernetes/tests/test_third_party_resource.py
userlocalhost2000/st2contrib
1a5f759e76401743ed9023d298a3d767e3885db1
[ "Apache-2.0" ]
442
2015-01-01T11:19:01.000Z
2017-09-06T23:26:17.000Z
packs/kubernetes/tests/test_third_party_resource.py
userlocalhost2000/st2contrib
1a5f759e76401743ed9023d298a3d767e3885db1
[ "Apache-2.0" ]
202
2015-01-13T00:37:40.000Z
2020-11-07T11:30:10.000Z
from st2tests.base import BaseSensorTestCase

from third_party_resource import ThirdPartyResource
40.283582
90
0.567247
98df6d63c240e8262eac8f0396a8b8f0ecd76ac8
10,728
py
Python
PrometheusScrapper/scrapper.py
masterchef/webscraper
f47220e941980e2a6dda593d74696062784062e1
[ "MIT" ]
null
null
null
PrometheusScrapper/scrapper.py
masterchef/webscraper
f47220e941980e2a6dda593d74696062784062e1
[ "MIT" ]
null
null
null
PrometheusScrapper/scrapper.py
masterchef/webscraper
f47220e941980e2a6dda593d74696062784062e1
[ "MIT" ]
null
null
null
import datetime
import getpass
import logging
import os
import pathlib
import platform
import re
import smtplib
import sys
from contextlib import contextmanager
from email.message import EmailMessage
from functools import wraps

import azure.functions as func
import click
import gspread
import pandas as pd
from apscheduler.schedulers.background import BlockingScheduler
from oauth2client.service_account import ServiceAccountCredentials
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)


def run(email, username, email_to, password, gsheet, doc_key):
    log.info('In run')
    content = []
    for link in os.environ["searchLinks"].split():
        content += get_prometheus_apartments(link)

    formatted_content = format_email(content)
    if gsheet:
        log.info('Updating gsheet')
        update_historical_data(doc_key, content)
        formatted_content += f'For historical data click the link below:\nhttps://docs.google.com/spreadsheets/d/1XZocxmyQ91e1exBvwDAaSR8Rhavy9WPnwLSz0Z5SKsM/edit?usp=sharing'
    if email:
        log.info('Sending email')
        send_email(username, password, email_to, formatted_content)
    log.info(content)


if __name__ == '__main__':
    cli()
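# NOTE: send_email() (like the other helpers called in run(), and the click
# cli() group) was stripped from this snippet. A minimal sketch using the
# smtplib/EmailMessage imports already present; the SMTP host, port and
# subject line are assumptions:
def send_email(username, password, email_to, content):
    msg = EmailMessage()
    msg['Subject'] = 'Prometheus apartment listings'
    msg['From'] = username
    msg['To'] = email_to
    msg.set_content(content)
    # SMTP_SSL supports the context-manager protocol in Python 3.3+.
    with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:
        server.login(username, password)
        server.send_message(msg)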
34.495177
175
0.646905
98e0601566ba652e64eedad746be214634e5e438
17,357
py
Python
MrWorldwide.py
AnonymousHacker1279/MrWorldwide
a782194e1ebe3a1cd73409e3d4dc9946700bcc0e
[ "MIT" ]
null
null
null
MrWorldwide.py
AnonymousHacker1279/MrWorldwide
a782194e1ebe3a1cd73409e3d4dc9946700bcc0e
[ "MIT" ]
null
null
null
MrWorldwide.py
AnonymousHacker1279/MrWorldwide
a782194e1ebe3a1cd73409e3d4dc9946700bcc0e
[ "MIT" ]
null
null
null
from PyQt6.QtWidgets import QApplication, QWidget, QFileDialog
import PyQt6.QtCore as QtCore
import PyQt6.QtGui as QtGui
import sys, time, json, requests, traceback, configparser, os
import MrWorldwideUI, ConfigurationUI, UpdateManagerUI

version = "v1.0.0"

def readConfigurationFile(config):
    try:
        configFile = open("config.ini")
        configFile.close()
        return config.read("config.ini")
    except:
        config['general'] = {}
        config['general']['libretranslate_mirror'] = 'https://translate.astian.org/translate'
        config['defaults'] = {}
        config['defaults']['default_source_language'] = LangTypes.ENGLISH
        config['defaults']['default_target_language'] = LangTypes.SPANISH
        with open('config.ini', 'w') as configFile:
            config.write(configFile)
        configFile.close()
        return config

def main():
    global app
    app = QApplication(sys.argv)
    app.setQuitOnLastWindowClosed(False)
    app.setStyle("Fusion")
    form = MrWorldwide()
    form.show()
    app.exec()

if __name__ == '__main__':
    main()
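# NOTE: LangTypes (and the MrWorldwide window class) are referenced above but
# their definitions were stripped from this snippet. A minimal sketch of
# LangTypes using LibreTranslate-style language codes; the actual member
# names and values in the original may differ:
class LangTypes:
    ENGLISH = "en"
    SPANISH = "es"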
37.008529
399
0.75157
98e15c2d42b427bf4ffb23842980cd80d4cd57bf
7,429
py
Python
tools/az_cli.py
google/cloud-forensics-utls
719093b4a229e5e97c30d93faabb1ccf3b6ee422
[ "Apache-2.0" ]
null
null
null
tools/az_cli.py
google/cloud-forensics-utls
719093b4a229e5e97c30d93faabb1ccf3b6ee422
[ "Apache-2.0" ]
null
null
null
tools/az_cli.py
google/cloud-forensics-utls
719093b4a229e5e97c30d93faabb1ccf3b6ee422
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo CLI tool for Azure."""

import os
from datetime import datetime
from typing import TYPE_CHECKING

from Crypto.PublicKey import RSA

from libcloudforensics import logging_utils
from libcloudforensics.providers.azure.internal import account
from libcloudforensics.providers.azure.internal import monitoring
from libcloudforensics.providers.azure import forensics

logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)

if TYPE_CHECKING:
  import argparse


def ListInstances(args: 'argparse.Namespace') -> None:
  """List instances in Azure subscription.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """

  az_account = account.AZAccount(args.default_resource_group_name)
  instances = az_account.compute.ListInstances(
      resource_group_name=args.resource_group_name)

  logger.info('Instances found:')
  for instance in instances.values():
    boot_disk = instance.GetBootDisk()
    logger.info(
        'Name: {0:s}, Boot disk: {1:s}'.format(instance.name, boot_disk.name))


def ListDisks(args: 'argparse.Namespace') -> None:
  """List disks in Azure subscription.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """

  az_account = account.AZAccount(args.default_resource_group_name)
  disks = az_account.compute.ListDisks(
      resource_group_name=args.resource_group_name)

  logger.info('Disks found:')
  for disk_name, disk in disks.items():
    logger.info('Name: {0:s}, Region: {1:s}'.format(disk_name, disk.region))


def CreateDiskCopy(args: 'argparse.Namespace') -> None:
  """Create an Azure disk copy.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """
  logger.info('Starting disk copy...')
  disk_copy = forensics.CreateDiskCopy(args.default_resource_group_name,
                                       instance_name=args.instance_name,
                                       disk_name=args.disk_name,
                                       disk_type=args.disk_type,
                                       region=args.region,
                                       src_profile=args.src_profile,
                                       dst_profile=args.dst_profile)
  logger.info(
      'Done! Disk {0:s} successfully created. You will find it in '
      'your Azure subscription under the name {1:s}.'.format(
          disk_copy.resource_id, disk_copy.name))


def StartAnalysisVm(args: 'argparse.Namespace') -> None:
  """Start forensic analysis VM.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """
  attach_disks = []
  if args.attach_disks:
    attach_disks = args.attach_disks.split(',')
    # Check if attach_disks parameter exists and if there
    # are any empty entries.
    if not (attach_disks and all(elements for elements in attach_disks)):
      logger.error('error: parameter --attach_disks: {0:s}'.format(
          args.attach_disks))
      return

  ssh_public_key = args.ssh_public_key

  if not ssh_public_key:
    # According to https://docs.microsoft.com/cs-cz/samples/azure-samples/
    # resource-manager-python-template-deployment/resource-manager-python-
    # template-deployment/ there's no API to generate a new SSH key pair in
    # Azure, so we do this manually...
    ssh_public_key = _GenerateSSHKeyPair(args.instance_name)

  logger.info('Starting analysis VM...')
  vm = forensics.StartAnalysisVm(args.default_resource_group_name,
                                 args.instance_name,
                                 int(args.disk_size),
                                 ssh_public_key,
                                 cpu_cores=int(args.cpu_cores),
                                 memory_in_mb=int(args.memory_in_mb),
                                 region=args.region,
                                 attach_disks=attach_disks,
                                 dst_profile=args.dst_profile)

  logger.info('Analysis VM started.')
  logger.info('Name: {0:s}, Started: {1:s}'.format(vm[0].name, str(vm[1])))


def _GenerateSSHKeyPair(vm_name: str) -> str:
  """Generate an SSH key pair and return its public key.

  Both public and private keys will be saved in the current directory.

  Args:
    vm_name (str): The VM name for which to generate the key pair.

  Returns:
    str: The public key for the generated SSH key pair.

  Raises:
    ValueError: If vm_name is None.
  """

  if not vm_name:
    raise ValueError('Parameter vm_name must not be None.')

  logger.info('Generating a new SSH key pair for VM: {0:s}'.format(vm_name))

  key = RSA.generate(2048)
  key_name = '{0:s}-ssh'.format(vm_name)

  public_key = key.publickey().exportKey('OpenSSH')
  path_public_key = os.path.join(os.getcwd(), key_name + '.pub')

  private_key = key.exportKey('PEM')
  path_private_key = os.path.join(os.getcwd(), key_name + '.pem')

  with open(path_private_key, 'wb') as f:
    f.write(private_key)
  with open(path_public_key, 'wb') as f:
    f.write(public_key)

  logger.info('SSH key pair generated. Public key saved in {0:s}, private key '
              'saved in {1:s}'.format(path_public_key, path_private_key))

  return public_key.decode('utf-8')


def ListMetrics(args: 'argparse.Namespace') -> None:
  """List Azure Monitoring metrics for a resource.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """
  az_account = account.AZAccount(args.default_resource_group_name)
  az_monitoring = monitoring.AZMonitoring(az_account)
  metrics = az_monitoring.ListAvailableMetricsForResource(args.resource_id)
  for metric in metrics:
    logger.info('Available metric: {0:s}'.format(metric))


def QueryMetrics(args: 'argparse.Namespace') -> None:
  """Query Azure Monitoring metrics for a resource.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.

  Raises:
    RuntimeError: If from_date or to_date could not be parsed.
  """
  az_account = account.AZAccount(args.default_resource_group_name)
  az_monitoring = monitoring.AZMonitoring(az_account)
  from_date, to_date = args.from_date, args.to_date
  if from_date and to_date:
    try:
      from_date = datetime.strptime(from_date, '%Y-%m-%dT%H:%M:%SZ')
      to_date = datetime.strptime(to_date, '%Y-%m-%dT%H:%M:%SZ')
    except ValueError as exception:
      raise RuntimeError(
          'Cannot parse date: {0!s}'.format(exception)) from exception
  metrics = az_monitoring.GetMetricsForResource(
      args.resource_id,
      metrics=args.metrics,
      from_date=from_date,
      to_date=to_date,
      interval=args.interval,
      aggregation=args.aggregation or 'Total',
      qfilter=args.qfilter)

  for metric, metric_value in metrics.items():
    logger.info('Metric: {0:s}'.format(metric))
    for timestamp, value in metric_value.items():
      logger.info('  Timestamp: {0:s}, value: {1:s}'.format(timestamp, value))
35.208531
79
0.679768
98e581895367116db85fb5bcc24f1ed7b42ed751
2,181
py
Python
bbio/bbio.py
timgates42/PyBBIO
0d46115059ed7ec0c17afb6dd7ed2f507b4f2b8a
[ "MIT" ]
102
2015-01-29T04:28:49.000Z
2022-01-03T18:27:50.000Z
bbio/bbio.py
timgates42/PyBBIO
0d46115059ed7ec0c17afb6dd7ed2f507b4f2b8a
[ "MIT" ]
62
2015-01-29T11:05:13.000Z
2019-12-03T04:30:34.000Z
bbio/bbio.py
timgates42/PyBBIO
0d46115059ed7ec0c17afb6dd7ed2f507b4f2b8a
[ "MIT" ]
58
2015-02-10T14:31:18.000Z
2022-03-29T13:24:03.000Z
""" PyBBIO - bbio.py Copyright (c) 2012-2015 - Alexander Hiam <alex@graycat.io> Released under the MIT license https://github.com/graycatlabs/PyBBIO """ import sys, atexit from .platform import platform_init, platform_cleanup from .common import ADDITIONAL_CLEANUP, util_init def bbio_init(): """ Pre-run initialization, i.e. starting module clocks, etc. """ util_init() platform_init() def bbio_cleanup(): """ Post-run cleanup, i.e. stopping module clocks, etc. """ # Run user cleanup routines: for cleanup in ADDITIONAL_CLEANUP: try: cleanup() except Exception as e: # Something went wrong with one of the cleanup routines, but we # want to keep going; just print the error and continue print "*Exception raised trying to call cleanup routine '%s':\n %s" %\ (cleanup, e) platform_cleanup() # The following code detects if Python is running interactively, # and if so initializes PyBBIO on import and registers PyBBIO's # cleanup to be called at exit, otherwise it defines the run() and # stop() methods for the file based control flow: import __main__ if not hasattr(__main__, '__file__'): # We're in the interpreter, see: # http://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode bbio_init() print "PyBBIO initialized" atexit.register(interactive_cleanup) else: bbio_init() atexit.register(bbio_cleanup) # Imported in a Python file, define run() and stop(): def run(setup, loop): """ The main loop; must be passed a setup and a loop function. First the setup function will be called once, then the loop function wil be called continuously until a stop signal is raised, e.g. CTRL-C or a call to the stop() function from within the loop. """ try: setup() while (True): loop() except KeyboardInterrupt: # Manual exit signal, clean up and exit happy exit(0) def stop(): """ Preferred way for a program to stop itself. """ raise KeyboardInterrupt # Expected happy stop condition in run()
32.073529
85
0.692343
98e5e44eba98b059fc30bc12fb7cf43b26e82f78
365
py
Python
app/models/endeavors.py
theLaborInVain/kdm-manager-api
fa8744c9b8a739262d1b94900648254cc69d16e1
[ "MIT" ]
2
2020-03-04T13:43:45.000Z
2020-11-03T20:34:21.000Z
app/models/endeavors.py
theLaborInVain/kdm-manager-api
fa8744c9b8a739262d1b94900648254cc69d16e1
[ "MIT" ]
64
2019-07-19T19:19:50.000Z
2022-03-03T21:19:28.000Z
app/models/endeavors.py
theLaborInVain/kdm-manager-api
fa8744c9b8a739262d1b94900648254cc69d16e1
[ "MIT" ]
null
null
null
""" The Endeavors asset collection has a number of irregular assets. Be careful writing any custom code here. """ from app.assets import endeavors from app import models
20.277778
79
0.706849
98e710a1b1cb3e42d4cbdb66250958e21888c440
804
py
Python
interface/inter5.py
CeciliaDornelas/Python
883959ed2e10cd8e8ace2b640e1944edc0c1d8a3
[ "MIT" ]
null
null
null
interface/inter5.py
CeciliaDornelas/Python
883959ed2e10cd8e8ace2b640e1944edc0c1d8a3
[ "MIT" ]
null
null
null
interface/inter5.py
CeciliaDornelas/Python
883959ed2e10cd8e8ace2b640e1944edc0c1d8a3
[ "MIT" ]
null
null
null
import sys

from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget
from PyQt5.QtCore import QSize


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    mainWin = HelloWindow()
    mainWin.show()
    sys.exit( app.exec_() )
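# NOTE: the HelloWindow class used above was stripped from this snippet. A
# minimal sketch consistent with the PyQt5 imports; the window title, size
# and centered label are assumptions:
class HelloWindow(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        self.setMinimumSize(QSize(320, 140))
        self.setWindowTitle("Hello World")
        # A central widget with a grid layout holding one centered label.
        centralWidget = QWidget(self)
        self.setCentralWidget(centralWidget)
        gridLayout = QGridLayout(centralWidget)
        title = QLabel("Hello World from PyQt5", self)
        title.setAlignment(QtCore.Qt.AlignCenter)
        gridLayout.addWidget(title, 0, 0)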
26.8
69
0.691542
98e753afbcdb25feef4bb770897b167108c721b5
1,523
py
Python
setup.py
notwa/scipybiteopt
62e1510789b680483ad867984849af215a9848c5
[ "MIT" ]
null
null
null
setup.py
notwa/scipybiteopt
62e1510789b680483ad867984849af215a9848c5
[ "MIT" ]
null
null
null
setup.py
notwa/scipybiteopt
62e1510789b680483ad867984849af215a9848c5
[ "MIT" ]
null
null
null
#!/usr/bin/env python
import os
import sys
import numpy
from setuptools import setup, Extension

#include markdown description in pip page
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# https://github.com/pypa/packaging-problems/issues/84
# no sensible way to include header files by default
headers = ['scipybiteopt/biteopt.h',
           'scipybiteopt/biteoptort.h',
           'scipybiteopt/spheropt.h',
           'scipybiteopt/biteaux.h',
           'scipybiteopt/nmsopt.h']

module1 = Extension('scipybiteopt.biteopt',
                    sources=get_c_sources(['scipybiteopt/biteopt_py_ext.cpp'], include_headers=(sys.argv[1] == "sdist")),
                    language="c++",
                    include_dirs=[numpy.get_include()],
                    extra_compile_args=['-std=c++11', '-O3'] if os.name != 'nt' else ['-O3'])

setup(name='scipybiteopt',
      version='1.1.1',
      description="Scipy style wrapper for Aleksey Vaneev's BiteOpt",
      author='dschmitz89',
      author_email='danielschmitzsiegen@gmail.com',
      license='MIT',
      long_description=long_description,
      long_description_content_type='text/markdown',
      url='https://github.com/dschmitz89/scipybiteopt',
      packages=['scipybiteopt'],
      ext_modules=[module1],
      install_requires=['numpy'])
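# NOTE: get_c_sources() is called above but was stripped from this snippet.
# A minimal sketch consistent with its call site and the packaging-problems
# link: return the C++ sources, appending the header files when building an
# sdist so they are shipped with the source distribution (an assumption,
# not the original implementation):
def get_c_sources(files, include_headers=False):
    return files + (headers if include_headers else [])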
35.418605
119
0.670387
98e97be18c63f8ef9e8f59a9c1da5ea5229f6454
2,619
py
Python
qiskit_experiments/data_processing/__init__.py
yoshida-ryuhei/qiskit-experiments
82561acf86b407dcda0a9ec69fe18de2b0a592a2
[ "Apache-2.0" ]
null
null
null
qiskit_experiments/data_processing/__init__.py
yoshida-ryuhei/qiskit-experiments
82561acf86b407dcda0a9ec69fe18de2b0a592a2
[ "Apache-2.0" ]
null
null
null
qiskit_experiments/data_processing/__init__.py
yoshida-ryuhei/qiskit-experiments
82561acf86b407dcda0a9ec69fe18de2b0a592a2
[ "Apache-2.0" ]
null
null
null
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""
===========================================================
Data Processing (:mod:`qiskit_experiments.data_processing`)
===========================================================

.. currentmodule:: qiskit_experiments.data_processing

Data processing is the act of taking the data returned by the backend and
converting it into a format that can be analyzed. It is implemented as a chain
of data processing steps that transform various input data, e.g. IQ data,
into a desired format, e.g. population, which can be analyzed.

These data transformations may consist of multiple steps, such as kerneling and
discrimination. Each step is implemented by a
:class:`~qiskit_experiments.data_processing.data_action.DataAction` also called a `node`.

The data processor implements the :meth:`__call__` method. Once initialized, it
can thus be used as a standard python function:

.. code-block:: python

    processor = DataProcessor(input_key="memory", [Node1(), Node2(), ...])
    out_data = processor(in_data)

The data input to the processor is a sequence of dictionaries each representing the result
of a single circuit. The output of the processor is a numpy array whose shape and data type
depend on the combination of the nodes in the data processor.

Uncertainties that arise from quantum measurements or finite sampling can be taken into
account in the nodes: a standard error can be generated in a node and can be propagated
through the subsequent nodes in the data processor. Correlation between computed values
is also considered.

Classes
=======

.. autosummary::
    :toctree: ../stubs/

    DataProcessor
    DataAction
    TrainableDataAction

Data Processing Nodes
=====================

.. autosummary::
    :toctree: ../stubs/

    Probability
    MarginalizeCounts
    ToImag
    ToReal
    SVD
    AverageData
    BasisExpectationValue
    MinMaxNormalize
"""

from .data_action import DataAction, TrainableDataAction
from .nodes import (
    Probability,
    MarginalizeCounts,
    ToImag,
    ToReal,
    SVD,
    AverageData,
    BasisExpectationValue,
    MinMaxNormalize,
)

from .data_processor import DataProcessor
31.178571
97
0.71974
98e9db17617d3ce2f8dbdda50ebfbe93ce11f25b
10,064
py
Python
models/pointnet2_sem_seg_msg_haptic.py
yufeiwang63/Pointnet_Pointnet2_pytorch
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
[ "MIT" ]
null
null
null
models/pointnet2_sem_seg_msg_haptic.py
yufeiwang63/Pointnet_Pointnet2_pytorch
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
[ "MIT" ]
null
null
null
models/pointnet2_sem_seg_msg_haptic.py
yufeiwang63/Pointnet_Pointnet2_pytorch
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
[ "MIT" ]
null
null
null
import torch.nn as nn
import torch.nn.functional as F
from haptic.Pointnet_Pointnet2_pytorch.models.pointnet2_utils import PointNetSetAbstractionMsg, PointNetFeaturePropagation


if __name__ == '__main__':
    import torch
    model = get_model(13)
    xyz = torch.rand(6, 9, 2048)
    (model(xyz))
54.695652
207
0.64527
98eaf0ff524a7491427b7b19f617c3c6aaefc6a4
100
py
Python
backend/src/notifications/admin.py
YujithIsura/request-management
3c683274881ef7798779e03a24042034edcd941c
[ "MIT" ]
3
2021-11-21T20:46:00.000Z
2021-12-02T14:47:18.000Z
notification/admin.py
lautarianoo/django_social_network
ec83af7267f830a2463cb591138dae1a088f9a4e
[ "BSD-3-Clause" ]
169
2020-04-09T08:39:25.000Z
2021-09-03T01:07:01.000Z
notification/admin.py
lautarianoo/django_social_network
ec83af7267f830a2463cb591138dae1a088f9a4e
[ "BSD-3-Clause" ]
13
2020-04-05T20:53:11.000Z
2022-02-28T14:52:17.000Z
from django.contrib import admin

from .models import Notification

admin.site.register(Notification)
25
33
0.85
98eb89e6efe4554abbe1506f10c8ccfbcb3dedf8
2,234
py
Python
HoverSlam.py
GiantWaffleCode/WafflePython
d3e85ce6d9c792e7338eb825307f7bb48113742a
[ "MIT" ]
13
2020-10-13T00:19:21.000Z
2020-12-31T02:38:58.000Z
HoverSlam.py
GiantWaffleCode/WafflePython
d3e85ce6d9c792e7338eb825307f7bb48113742a
[ "MIT" ]
null
null
null
HoverSlam.py
GiantWaffleCode/WafflePython
d3e85ce6d9c792e7338eb825307f7bb48113742a
[ "MIT" ]
10
2020-10-13T00:19:52.000Z
2020-12-31T02:39:42.000Z
import krpc
import time
import math
from simple_pid import PID

conn = krpc.connect(name="UI Test")
vessel = conn.space_center.active_vessel
kerbin_frame = vessel.orbit.body.reference_frame
orb_frame = vessel.orbital_reference_frame
srf_frame = vessel.surface_reference_frame
surface_gravity = vessel.orbit.body.surface_gravity

current_met = conn.add_stream(getattr, vessel, 'met')
current_roll = conn.add_stream(getattr, vessel.flight(), 'roll')
current_pitch = conn.add_stream(getattr, vessel.flight(), 'pitch')
current_heading = conn.add_stream(getattr, vessel.flight(), 'heading')
current_alt = conn.add_stream(getattr, vessel.flight(), 'surface_altitude')
lowest = conn.add_stream(vessel.bounding_box, srf_frame)
current_drag = conn.add_stream(getattr, vessel.flight(), 'drag')
current_aero = conn.add_stream(getattr, vessel.flight(), 'aerodynamic_force')
current_speed = conn.add_stream(getattr, vessel.flight(kerbin_frame), 'speed')

vessel.control.activate_next_stage()
vessel.control.sas = True
time.sleep(.2)
vessel.control.sas_mode = conn.space_center.SASMode.retrograde

for engine in vessel.parts.engines:
    engine.gimbal_locked = True

while True:
    aero_amp = math.sqrt(current_aero()[0] ** 2 + current_aero()[1] ** 2 + current_aero()[2] ** 2)
    time_to_zero = current_speed() / ((((vessel.max_thrust * .9) + aero_amp) / vessel.mass) + vessel.orbit.body.surface_gravity)
    if (time_to_zero * current_speed()) >= bottom_altitude() - current_speed():
        print(current_speed())
        print(f"Start Hover Slam Burn")
        vessel.control.throttle = .9
        break

while current_speed() > 50:
    print(current_speed())
    time.sleep(.01)
    pass

print(f"Switch to Stab")
for leg in vessel.parts.legs:
    leg.deployed = True

pid1 = PID(.15, 0, .5, setpoint=0)
pid1.output_limits = (0, 1)
pid1.sample_time = 0.01

while bottom_altitude() > 1:
    vessel.control.throttle = pid1(bottom_altitude())
    # pid1.setpoint *= .98
    time.sleep(.01)

vessel.control.sas_mode = conn.space_center.SASMode.radial
vessel.control.throttle = 0
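# NOTE: bottom_altitude() is called above but was stripped from this snippet.
# A minimal sketch using the streams already defined: in kRPC's surface
# reference frame the x axis points up, so the bounding box's minimum x gives
# the offset of the vessel's lowest point below its reference position (this
# reading of the original helper is an assumption):
def bottom_altitude():
    return max(0, current_alt() - abs(lowest()[0][0]))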
33.848485
91
0.705461
98ee2fa044a20258e55e590fef0af310684f4e34
433
py
Python
tests/unit_tests/cx_core/integration/integration_test.py
clach04/controllerx
b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe
[ "MIT" ]
204
2020-01-18T10:12:13.000Z
2022-03-27T09:40:17.000Z
tests/unit_tests/cx_core/integration/integration_test.py
clach04/controllerx
b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe
[ "MIT" ]
329
2020-01-17T17:18:53.000Z
2022-03-29T11:20:30.000Z
tests/unit_tests/cx_core/integration/integration_test.py
clach04/controllerx
b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe
[ "MIT" ]
66
2020-01-19T20:17:21.000Z
2022-03-13T15:03:41.000Z
from cx_core import integration as integration_module
from cx_core.controller import Controller
27.0625
75
0.678984
98ee487f9a2345f91b85bcae94f9855580455dc1
478
py
Python
asystem-adoc/src/main/template/python/script_util.py
ggear/asystem_archive
b97f67218e8aa60991fba386c9e73d27d20d6c47
[ "Apache-2.0" ]
null
null
null
asystem-adoc/src/main/template/python/script_util.py
ggear/asystem_archive
b97f67218e8aa60991fba386c9e73d27d20d6c47
[ "Apache-2.0" ]
2
2021-03-25T21:27:09.000Z
2022-02-11T03:38:48.000Z
asystem-adoc/src/main/template/python/script_util.py
ggear/asystem_archive
b97f67218e8aa60991fba386c9e73d27d20d6c47
[ "Apache-2.0" ]
null
null
null
###############################################################################
#
# Python script utilities as included from the cloudera-framework-assembly,
# do not edit directly
#
###############################################################################

import os
import re
29.875
79
0.433054
98ee7596428318903272a404f3751220eec8a490
11,760
py
Python
datapackage_pipelines/web/server.py
gperonato/datapackage-pipelines
72b98918db1c19590586a3a85c5b087227cbbc3b
[ "MIT" ]
109
2016-09-01T08:41:55.000Z
2021-11-10T10:08:35.000Z
datapackage_pipelines/web/server.py
gperonato/datapackage-pipelines
72b98918db1c19590586a3a85c5b087227cbbc3b
[ "MIT" ]
144
2016-08-30T16:26:50.000Z
2021-04-18T09:06:12.000Z
datapackage_pipelines/web/server.py
gperonato/datapackage-pipelines
72b98918db1c19590586a3a85c5b087227cbbc3b
[ "MIT" ]
34
2016-09-05T12:46:53.000Z
2022-03-05T01:53:49.000Z
import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter

import slugify
import yaml
import mistune
import requests

from flask import \
    Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth

from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats

YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper

markdown = mistune.Markdown(hard_wrap=True)

status = status_mgr()


def basic_auth_required(view_func):
    """
    A decorator that can be used to protect specific views with HTTP
    basic access authentication. Conditional on having BASIC_AUTH_USERNAME and
    BASIC_AUTH_PASSWORD set as env vars.
    """
    # The wrapper body is elided in this record; a minimal sketch, assuming
    # flask_basicauth's authenticate()/challenge() API (Python's late binding
    # lets it reference `app` and `basic_auth`, which are defined below):
    @wraps(view_func)
    def wrapper(*args, **kwargs):
        if app.config.get('BASIC_AUTH_ACTIVE', False):
            if basic_auth.authenticate():
                return view_func(*args, **kwargs)
            return basic_auth.challenge()
        return view_func(*args, **kwargs)
    return wrapper


blueprint = Blueprint('dpp', 'dpp')


def _make_badge_response(subject, text, colour):
    image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
        subject, text, colour)
    r = requests.get(image_url)
    buffer_image = BytesIO(r.content)
    buffer_image.seek(0)
    res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
    res.headers['Cache-Control'] = \
        'max-age=0, no-cache, no-store, must-revalidate'
    res.headers['Expires'] = '0'
    return res


app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True

if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
        and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
    app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
    app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
    app.config['BASIC_AUTH_ACTIVE'] = True

basic_auth = BasicAuth(app)

CORS(app)

url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
    url_prefix += '/'

logging.info('Serving on path %s', url_prefix)

app.register_blueprint(blueprint, url_prefix=url_prefix)
33.696275
79
0.631463
98eec9960afb05f934f3e80b57d22d6b3147c3f1
1,425
py
Python
MoveSim/code/models/losses.py
tobinsouth/privacy-preserving-synthetic-mobility-data
fd4d1851b47e3e7304761a894b460e8345fae5db
[ "MIT" ]
null
null
null
MoveSim/code/models/losses.py
tobinsouth/privacy-preserving-synthetic-mobility-data
fd4d1851b47e3e7304761a894b460e8345fae5db
[ "MIT" ]
null
null
null
MoveSim/code/models/losses.py
tobinsouth/privacy-preserving-synthetic-mobility-data
fd4d1851b47e3e7304761a894b460e8345fae5db
[ "MIT" ]
null
null
null
# coding: utf-8

import numpy as np
import torch.nn as nn
27.403846
67
0.523509
98ef6a5aa62915725ae521746cef94f51adfcf47
1,316
py
Python
board/game.py
petthauk/chess_ml
2a66ca8511fd4eef71607a7f56417d039d94dbf9
[ "MIT" ]
null
null
null
board/game.py
petthauk/chess_ml
2a66ca8511fd4eef71607a7f56417d039d94dbf9
[ "MIT" ]
null
null
null
board/game.py
petthauk/chess_ml
2a66ca8511fd4eef71607a7f56417d039d94dbf9
[ "MIT" ]
null
null
null
import pygame as pg
from pygame.locals import *
import sys
import board.chess_board as board

w = 60 * 8
h = 60 * 8


if __name__ == "__main__":
    # Launch main-function if running this script
    game = Game()
    game.run()
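# The Game class is elided from this record; in the original it would precede
# the __main__ block. A minimal hypothetical sketch (the chess_board.Board
# constructor signature is assumed):
class Game:
    def __init__(self):
        pg.init()
        self.screen = pg.display.set_mode((w, h))
        self.board = board.Board(w, h)  # assumed constructor

    def run(self):
        # Basic event loop: quit cleanly, otherwise keep redrawing the board.
        while True:
            for event in pg.event.get():
                if event.type == QUIT:
                    pg.quit()
                    sys.exit()
            pg.display.update()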
24.830189
73
0.458207
98efb4404db7ca8bc8ddf99fbe40494ec2e70aa1
2,515
py
Python
pix2pix/Dataset_util.py
Atharva-Phatak/Season-Tranfer
d6a0d4d42e396677920ffb81ab0086b0aa05d3c3
[ "MIT" ]
2
2019-07-02T14:00:15.000Z
2019-07-11T15:50:41.000Z
pix2pix/Dataset_util.py
Atharva-Phatak/Season-Tranfer
d6a0d4d42e396677920ffb81ab0086b0aa05d3c3
[ "MIT" ]
null
null
null
pix2pix/Dataset_util.py
Atharva-Phatak/Season-Tranfer
d6a0d4d42e396677920ffb81ab0086b0aa05d3c3
[ "MIT" ]
null
null
null
#importing libraries
import torch
import torch.utils.data as data
import os
import random
from PIL import Image
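# The dataset class itself is elided from this record. A minimal sketch of a
# pix2pix-style aligned-pair dataset, assuming each image file holds the A|B
# pair side by side (the class name and directory layout are hypothetical):
class AlignedDataset(data.Dataset):
    def __init__(self, root, transform=None):
        self.paths = sorted(
            os.path.join(root, f) for f in os.listdir(root)
            if f.lower().endswith(('.png', '.jpg', '.jpeg')))
        self.transform = transform

    def __getitem__(self, index):
        pair = Image.open(self.paths[index]).convert('RGB')
        w, h = pair.size
        a = pair.crop((0, 0, w // 2, h))   # input half
        b = pair.crop((w // 2, 0, w, h))   # target half
        if self.transform is not None:
            a, b = self.transform(a), self.transform(b)
        return a, b

    def __len__(self):
        return len(self.paths)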
36.985294
150
0.553082
98efd5c91e56c42872a45ff29528b847156d1400
20,126
py
Python
crslab/system/C2CRS_System.py
Zyh716/WSDM2022-C2CRS
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
[ "MIT" ]
4
2022-03-24T02:14:50.000Z
2022-03-30T02:28:19.000Z
crslab/system/C2CRS_System.py
RUCAIBox/WSDM2022-C2CRS
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
[ "MIT" ]
null
null
null
crslab/system/C2CRS_System.py
RUCAIBox/WSDM2022-C2CRS
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
[ "MIT" ]
2
2022-03-23T02:24:24.000Z
2022-03-28T12:45:43.000Z
# @Time   : 2022/1/1
# @Author : Yuanhang Zhou
# @email  : sdzyh002@gmail.com

import os
from math import floor

import torch
from loguru import logger
from typing import List, Dict
from copy import copy, deepcopy
import pickle
import numpy
import ipdb

from crslab.config import PRETRAIN_PATH, SAVE_PATH
from crslab.data import get_dataloader, dataset_language_map
from crslab.evaluator.metrics.base import AverageMetric
from crslab.evaluator.metrics.gen import PPLMetric
from crslab.system.base import BaseSystem
from crslab.system.utils.functions import ind2txt, ind2txt2

import random
from tqdm import tqdm
42.549683
154
0.622131
98efe77eec76324cc9234c09e5f48bc8417b9d98
1,198
py
Python
morepath/__init__.py
hugovk/morepath
5596f9ce43ee4e5cd73eaa2ab9ef37825f88ae28
[ "BSD-3-Clause" ]
314
2015-01-01T01:42:52.000Z
2022-01-07T21:46:15.000Z
morepath/__init__.py
hugovk/morepath
5596f9ce43ee4e5cd73eaa2ab9ef37825f88ae28
[ "BSD-3-Clause" ]
369
2015-01-02T19:10:40.000Z
2021-07-03T04:37:27.000Z
morepath/__init__.py
hugovk/morepath
5596f9ce43ee4e5cd73eaa2ab9ef37825f88ae28
[ "BSD-3-Clause" ]
37
2015-01-11T09:22:02.000Z
2021-07-02T20:48:20.000Z
# flake8: noqa
"""This is the main public API of Morepath.

Additional public APIs can be imported from the :mod:`morepath.error`
and :mod:`morepath.pdbsupport` modules. For custom directive
implementations that interact with core directives for grouping or
subclassing purposes, or that need to use one of the Morepath
registries, you may need to import from :mod:`morepath.directive`.

The other submodules are considered private. If you find yourself
needing to import from them in application or extension code, please
report an issue about it on the Morepath issue tracker.
"""

from dectate import commit
from .app import App, dispatch_method
from .core import (
    excview_tween_factory as EXCVIEW,
    poisoned_host_header_protection_tween_factory as HOST_HEADER_PROTECTION,
    model_predicate,
    name_predicate,
    request_method_predicate,
)
from .core import request_method_predicate as LAST_VIEW_PREDICATE
from .view import render_json, render_html, redirect
from .request import Request, Response
from .autosetup import scan, autoscan
from .authentication import Identity, IdentityPolicy, NO_IDENTITY
from .converter import Converter
from .reify import reify
from .run import run
37.4375
76
0.810518
98f2204e0eeff6cafe4a1031fc879a4bec0db151
37
py
Python
src/AuShadha/demographics/email_and_fax/dijit_fields_constants.py
GosthMan/AuShadha
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
[ "PostgreSQL" ]
46
2015-03-04T14:19:47.000Z
2021-12-09T02:58:46.000Z
src/AuShadha/demographics/email_and_fax/dijit_fields_constants.py
aytida23/AuShadha
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
[ "PostgreSQL" ]
2
2015-06-05T10:29:04.000Z
2015-12-06T16:54:10.000Z
src/AuShadha/demographics/email_and_fax/dijit_fields_constants.py
aytida23/AuShadha
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
[ "PostgreSQL" ]
24
2015-03-23T01:38:11.000Z
2022-01-24T16:23:42.000Z
EMAIL_AND_FAX_FORM_CONSTANTS = {}
12.333333
32
0.756757
98f428d0ea0b7f44539193898ee9647b5c6c689f
2,242
py
Python
marketDataRetrieval.py
amertx/Monte-Carlo-Simulation
6c3a616bc67e668d80a73247ca279e10f6d46cd5
[ "MIT" ]
null
null
null
marketDataRetrieval.py
amertx/Monte-Carlo-Simulation
6c3a616bc67e668d80a73247ca279e10f6d46cd5
[ "MIT" ]
null
null
null
marketDataRetrieval.py
amertx/Monte-Carlo-Simulation
6c3a616bc67e668d80a73247ca279e10f6d46cd5
[ "MIT" ]
null
null
null
#Prediction model using an instance of the Monte Carlo simulation and Brownian Motion equation

#import of libraries
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
from scipy.stats import norm

#ticker selection
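# The rest of the script is elided from this record. A hedged sketch of how
# such scripts typically proceed from the "#ticker selection" comment above
# (the ticker, date range, and data source are hypothetical):
ticker = 'PG'
data = pd.DataFrame()
data[ticker] = wb.DataReader(ticker, data_source='yahoo', start='2010-1-1')['Adj Close']

# Daily log returns and the drift/volatility terms of the Brownian Motion
# equation mentioned in the header comment.
log_returns = np.log(1 + data.pct_change())
drift = log_returns.mean() - 0.5 * log_returns.var()
stdev = log_returns.std()

# One batch of simulated daily returns over 250 days x 10 paths:
daily_returns = np.exp(drift.values + stdev.values * norm.ppf(np.random.rand(250, 10)))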
29.116883
118
0.711864
98f43fcd4c7844a9b69d2baa890a95f4841f18e8
31,716
py
Python
HelloDeepSpeed/train_bert_ds.py
mrwyattii/DeepSpeedExamples
6bd444a7c62e9d7d320dd4c1e1142062f50c861d
[ "MIT" ]
null
null
null
HelloDeepSpeed/train_bert_ds.py
mrwyattii/DeepSpeedExamples
6bd444a7c62e9d7d320dd4c1e1142062f50c861d
[ "MIT" ]
null
null
null
HelloDeepSpeed/train_bert_ds.py
mrwyattii/DeepSpeedExamples
6bd444a7c62e9d7d320dd4c1e1142062f50c861d
[ "MIT" ]
null
null
null
""" Modified version of train_bert.py that adds DeepSpeed """ import os import datetime import json import pathlib import re import string from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union import random import datasets import fire import logging import loguru import numpy as np import pytz import sh import torch import torch.nn as nn import deepspeed from torch.utils.data import DataLoader, Dataset from torch.utils.tensorboard import SummaryWriter from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast from transformers.models.roberta import RobertaConfig, RobertaModel from transformers.models.roberta.modeling_roberta import ( RobertaLMHead, RobertaPreTrainedModel, ) ###################################################################### ####################### Logging Functions ############################ ###################################################################### logger = loguru.logger def log_dist(message: str, ranks: List[int] = [], level: int = logging.INFO) -> None: """Log messages for specified ranks only""" my_rank = int(os.environ.get("RANK", "0")) if my_rank in ranks: if level == logging.INFO: logger.info(f'[Rank {my_rank}] {message}') if level == logging.ERROR: logger.error(f'[Rank {my_rank}] {message}') if level == logging.DEBUG: logger.debug(f'[Rank {my_rank}] {message}') ###################################################################### ############### Dataset Creation Related Functions ################### ###################################################################### TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] def collate_function(batch: List[Tuple[List[int], List[int]]], pad_token_id: int) -> Dict[str, torch.Tensor]: """Collect a list of masked token indices, and labels, and batch them, padding to max length in the batch. """ max_length = max(len(token_ids) for token_ids, _ in batch) padded_token_ids = [ token_ids + [pad_token_id for _ in range(0, max_length - len(token_ids))] for token_ids, _ in batch ] padded_labels = [ labels + [pad_token_id for _ in range(0, max_length - len(labels))] for _, labels in batch ] src_tokens = torch.LongTensor(padded_token_ids) tgt_tokens = torch.LongTensor(padded_labels) attention_mask = src_tokens.ne(pad_token_id).type_as(src_tokens) return { "src_tokens": src_tokens, "tgt_tokens": tgt_tokens, "attention_mask": attention_mask, } def masking_function( text: str, tokenizer: TokenizerType, mask_prob: float, random_replace_prob: float, unmask_replace_prob: float, max_length: int, ) -> Tuple[List[int], List[int]]: """Given a text string, randomly mask wordpieces for Bert MLM training. Args: text (str): The input text tokenizer (TokenizerType): The tokenizer for tokenization mask_prob (float): What fraction of tokens to mask random_replace_prob (float): Of the masked tokens, how many should be replaced with random tokens (improves performance) unmask_replace_prob (float): Of the masked tokens, how many should be replaced with the original token (improves performance) max_length (int): The maximum sequence length to consider. 
Note that for Bert style models, this is a function of the number of positional embeddings you learn Returns: Tuple[List[int], List[int]]: The masked token ids (based on the tokenizer passed), and the output labels (padded with `tokenizer.pad_token_id`) """ # Note: By default, encode does add the BOS and EOS token # Disabling that behaviour to make this more clear tokenized_ids = ([tokenizer.bos_token_id] + tokenizer.encode(text, add_special_tokens=False, truncation=True, max_length=max_length - 2) + [tokenizer.eos_token_id]) seq_len = len(tokenized_ids) tokenized_ids = np.array(tokenized_ids) subword_mask = np.full(len(tokenized_ids), False) # Masking the BOS and EOS token leads to slightly worse performance low = 1 high = len(subword_mask) - 1 mask_choices = np.arange(low, high) num_subwords_to_mask = max( int((mask_prob * (high - low)) + np.random.rand()), 1) subword_mask[np.random.choice(mask_choices, num_subwords_to_mask, replace=False)] = True # Create the labels first labels = np.full(seq_len, tokenizer.pad_token_id) labels[subword_mask] = tokenized_ids[subword_mask] tokenized_ids[subword_mask] = tokenizer.mask_token_id # Now of the masked tokens, choose how many to replace with random and how many to unmask rand_or_unmask_prob = random_replace_prob + unmask_replace_prob if rand_or_unmask_prob > 0: rand_or_unmask = subword_mask & (np.random.rand(len(tokenized_ids)) < rand_or_unmask_prob) if random_replace_prob == 0: unmask = rand_or_unmask rand_mask = None elif unmask_replace_prob == 0: unmask = None rand_mask = rand_or_unmask else: unmask_prob = unmask_replace_prob / rand_or_unmask_prob decision = np.random.rand(len(tokenized_ids)) < unmask_prob unmask = rand_or_unmask & decision rand_mask = rand_or_unmask & (~decision) if unmask is not None: tokenized_ids[unmask] = labels[unmask] if rand_mask is not None: weights = np.ones(tokenizer.vocab_size) weights[tokenizer.all_special_ids] = 0 probs = weights / weights.sum() num_rand = rand_mask.sum() tokenized_ids[rand_mask] = np.random.choice(tokenizer.vocab_size, num_rand, p=probs) return tokenized_ids.tolist(), labels.tolist() T = TypeVar("T") def create_data_iterator( mask_prob: float, random_replace_prob: float, unmask_replace_prob: float, batch_size: int, max_seq_length: int = 512, tokenizer: str = "roberta-base", ) -> InfiniteIterator: """Create the dataloader. Args: mask_prob (float): Fraction of tokens to mask random_replace_prob (float): Fraction of masked tokens to replace with random token unmask_replace_prob (float): Fraction of masked tokens to replace with the actual token batch_size (int): The batch size of the generated tensors max_seq_length (int, optional): The maximum sequence length for the MLM task. Defaults to 512. tokenizer (str, optional): The tokenizer to use. Defaults to "roberta-base". 
Returns: InfiniteIterator: The torch DataLoader, wrapped in an InfiniteIterator class, to be able to continuously generate samples """ wikitext_dataset = datasets.load_dataset("wikitext", "wikitext-2-v1", split="train") wikitext_dataset = wikitext_dataset.filter( lambda record: record["text"] != "").map( lambda record: {"text": record["text"].rstrip("\n")}) tokenizer = AutoTokenizer.from_pretrained(tokenizer) masking_function_partial = partial( masking_function, tokenizer=tokenizer, mask_prob=mask_prob, random_replace_prob=random_replace_prob, unmask_replace_prob=unmask_replace_prob, max_length=max_seq_length, ) dataset = WikiTextMLMDataset(wikitext_dataset, masking_function_partial) collate_fn_partial = partial(collate_function, pad_token_id=tokenizer.pad_token_id) dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn_partial) return InfiniteIterator(dataloader) ###################################################################### ############### Model Creation Related Functions ##################### ###################################################################### def create_model(num_layers: int, num_heads: int, ff_dim: int, h_dim: int, dropout: float) -> RobertaMLMModel: """Create a Bert model with the specified `num_heads`, `ff_dim`, `h_dim` and `dropout` Args: num_layers (int): The number of layers num_heads (int): The number of attention heads ff_dim (int): The intermediate hidden size of the feed forward block of the transformer h_dim (int): The hidden dim of the intermediate representations of the transformer dropout (float): The value of dropout to be used. Note that we apply the same dropout to both the attention layers and the FF layers Returns: RobertaMLMModel: A Roberta model for MLM task """ roberta_config_dict = { "attention_probs_dropout_prob": dropout, "bos_token_id": 0, "eos_token_id": 2, "hidden_act": "gelu", "hidden_dropout_prob": dropout, "hidden_size": h_dim, "initializer_range": 0.02, "intermediate_size": ff_dim, "layer_norm_eps": 1e-05, "max_position_embeddings": 514, "model_type": "roberta", "num_attention_heads": num_heads, "num_hidden_layers": num_layers, "pad_token_id": 1, "type_vocab_size": 1, "vocab_size": 50265, } roberta_config = RobertaConfig.from_dict(roberta_config_dict) roberta_encoder = RobertaModel(roberta_config) roberta_model = RobertaMLMModel(roberta_config, roberta_encoder) return roberta_model ###################################################################### ########### Experiment Management Related Functions ################## ###################################################################### def get_unique_identifier(length: int = 8) -> str: """Create a unique identifier by choosing `length` random characters from list of ascii characters and numbers """ alphabet = string.ascii_lowercase + string.digits uuid = "".join(alphabet[ix] for ix in np.random.choice(len(alphabet), length)) return uuid def create_experiment_dir(checkpoint_dir: pathlib.Path, all_arguments: Dict[str, Any]) -> pathlib.Path: """Create an experiment directory and save all arguments in it. Additionally, also store the githash and gitdiff. Finally create a directory for `Tensorboard` logs. 
The structure would look something like checkpoint_dir `-experiment-name |- hparams.json |- githash.log |- gitdiff.log `- tb_dir/ Args: checkpoint_dir (pathlib.Path): The base checkpoint directory all_arguments (Dict[str, Any]): The arguments to save Returns: pathlib.Path: The experiment directory """ # experiment name follows the following convention # {exp_type}.{YYYY}.{MM}.{DD}.{HH}.{MM}.{SS}.{uuid} current_time = datetime.datetime.now(pytz.timezone("US/Pacific")) expname = "bert_pretrain.{0}.{1}.{2}.{3}.{4}.{5}.{6}".format( current_time.year, current_time.month, current_time.day, current_time.hour, current_time.minute, current_time.second, get_unique_identifier(), ) exp_dir = checkpoint_dir / expname if not is_rank_0(): return exp_dir exp_dir.mkdir(exist_ok=False) hparams_file = exp_dir / "hparams.json" with hparams_file.open("w") as handle: json.dump(obj=all_arguments, fp=handle, indent=2) # Save the git hash try: gitlog = sh.git.log("-1", format="%H", _tty_out=False, _fg=False) with (exp_dir / "githash.log").open("w") as handle: handle.write(gitlog.stdout.decode("utf-8")) except sh.ErrorReturnCode_128: log_dist( "Seems like the code is not running from" " within a git repo, so hash will" " not be stored. However, it" " is strongly advised to use" " version control.", ranks=[0], level=logging.INFO) # And the git diff try: gitdiff = sh.git.diff(_fg=False, _tty_out=False) with (exp_dir / "gitdiff.log").open("w") as handle: handle.write(gitdiff.stdout.decode("utf-8")) except sh.ErrorReturnCode_129: log_dist( "Seems like the code is not running from" " within a git repo, so diff will" " not be stored. However, it" " is strongly advised to use" " version control.", ranks=[0], level=logging.INFO) # Finally create the Tensorboard Dir tb_dir = exp_dir / "tb_dir" tb_dir.mkdir(exist_ok=False) return exp_dir ###################################################################### ################ Checkpoint Related Functions ######################## ###################################################################### def load_model_checkpoint( load_checkpoint_dir: pathlib.Path, model: torch.nn.Module, optimizer: torch.optim.Optimizer, ) -> Tuple[int, torch.nn.Module, torch.optim.Optimizer]: """Loads the optimizer state dict and model state dict from the load_checkpoint_dir into the passed model and optimizer. 
Searches for the most recent checkpoint to load from Args: load_checkpoint_dir (pathlib.Path): The base checkpoint directory to load from model (torch.nn.Module): The model to load the checkpoint weights into optimizer (torch.optim.Optimizer): The optimizer to load the checkpoint weigths into Returns: Tuple[int, torch.nn.Module, torch.optim.Optimizer]: The checkpoint step, model with state_dict loaded and optimizer with state_dict loaded """ log_dist( f"Loading model and optimizer checkpoint from {load_checkpoint_dir}", ranks=[0], level=logging.INFO) checkpoint_files = list( filter( lambda path: re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name) is not None, load_checkpoint_dir.glob("*.pt"), )) assert len(checkpoint_files) > 0, "No checkpoints found in directory" checkpoint_files = sorted( checkpoint_files, key=lambda path: int( re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name).group("iter_no") ), ) latest_checkpoint_path = checkpoint_files[-1] checkpoint_step = int( re.search(r"iter_(?P<iter_no>\d+)\.pt", latest_checkpoint_path.name).group("iter_no")) state_dict = torch.load(latest_checkpoint_path) model.load_state_dict(state_dict["model"], strict=True) optimizer.load_state_dict(state_dict["optimizer"]) log_dist( f"Loading model and optimizer checkpoints done. Loaded from {latest_checkpoint_path}", ranks=[0], level=logging.INFO) return checkpoint_step, model, optimizer ###################################################################### ######################## Driver Functions ############################ ###################################################################### def train( checkpoint_dir: str = None, load_checkpoint_dir: str = None, # Dataset Parameters mask_prob: float = 0.15, random_replace_prob: float = 0.1, unmask_replace_prob: float = 0.1, max_seq_length: int = 512, tokenizer: str = "roberta-base", # Model Parameters num_layers: int = 6, num_heads: int = 8, ff_dim: int = 512, h_dim: int = 256, dropout: float = 0.1, # Training Parameters batch_size: int = 8, num_iterations: int = 10000, checkpoint_every: int = 1000, log_every: int = 10, local_rank: int = -1, ) -> pathlib.Path: """Trains a [Bert style](https://arxiv.org/pdf/1810.04805.pdf) (transformer encoder only) model for MLM Task Args: checkpoint_dir (str): The base experiment directory to save experiments to mask_prob (float, optional): The fraction of tokens to mask. Defaults to 0.15. random_replace_prob (float, optional): The fraction of masked tokens to replace with random token. Defaults to 0.1. unmask_replace_prob (float, optional): The fraction of masked tokens to leave unchanged. Defaults to 0.1. max_seq_length (int, optional): The maximum sequence length of the examples. Defaults to 512. tokenizer (str, optional): The tokenizer to use. Defaults to "roberta-base". num_layers (int, optional): The number of layers in the Bert model. Defaults to 6. num_heads (int, optional): Number of attention heads to use. Defaults to 8. ff_dim (int, optional): Size of the intermediate dimension in the FF layer. Defaults to 512. h_dim (int, optional): Size of intermediate representations. Defaults to 256. dropout (float, optional): Amout of Dropout to use. Defaults to 0.1. batch_size (int, optional): The minibatch size. Defaults to 8. num_iterations (int, optional): Total number of iterations to run the model for. Defaults to 10000. checkpoint_every (int, optional): Save checkpoint after these many steps. 
..note :: You want this to be frequent enough that you can resume training in case it crashes, but not so much that you fill up your entire storage ! Defaults to 1000. log_every (int, optional): Print logs after these many steps. Defaults to 10. local_rank (int, optional): Which GPU to run on (-1 for CPU). Defaults to -1. Returns: pathlib.Path: The final experiment directory """ device = (torch.device("cuda", local_rank) if (local_rank > -1) and torch.cuda.is_available() else torch.device("cpu")) ################################ ###### Create Exp. Dir ######### ################################ if checkpoint_dir is None and load_checkpoint_dir is None: log_dist( "Need to specify one of checkpoint_dir" " or load_checkpoint_dir", ranks=[0], level=logging.ERROR) return if checkpoint_dir is not None and load_checkpoint_dir is not None: log_dist( "Cannot specify both checkpoint_dir" " and load_checkpoint_dir", ranks=[0], level=logging.ERROR) return if checkpoint_dir: log_dist("Creating Experiment Directory", ranks=[0], level=logging.INFO) checkpoint_dir = pathlib.Path(checkpoint_dir) checkpoint_dir.mkdir(exist_ok=True) all_arguments = { # Dataset Params "mask_prob": mask_prob, "random_replace_prob": random_replace_prob, "unmask_replace_prob": unmask_replace_prob, "max_seq_length": max_seq_length, "tokenizer": tokenizer, # Model Params "num_layers": num_layers, "num_heads": num_heads, "ff_dim": ff_dim, "h_dim": h_dim, "dropout": dropout, # Training Params "batch_size": batch_size, "num_iterations": num_iterations, "checkpoint_every": checkpoint_every, } exp_dir = create_experiment_dir(checkpoint_dir, all_arguments) log_dist(f"Experiment Directory created at {exp_dir}", ranks=[0], level=logging.INFO) else: log_dist("Loading from Experiment Directory", ranks=[0], level=logging.INFO) load_checkpoint_dir = pathlib.Path(load_checkpoint_dir) assert load_checkpoint_dir.exists() with (load_checkpoint_dir / "hparams.json").open("r") as handle: hparams = json.load(handle) # Set the hparams # Dataset Params mask_prob = hparams.get("mask_prob", mask_prob) tokenizer = hparams.get("tokenizer", tokenizer) random_replace_prob = hparams.get("random_replace_prob", random_replace_prob) unmask_replace_prob = hparams.get("unmask_replace_prob", unmask_replace_prob) max_seq_length = hparams.get("max_seq_length", max_seq_length) # Model Params ff_dim = hparams.get("ff_dim", ff_dim) h_dim = hparams.get("h_dim", h_dim) dropout = hparams.get("dropout", dropout) num_layers = hparams.get("num_layers", num_layers) num_heads = hparams.get("num_heads", num_heads) # Training Params batch_size = hparams.get("batch_size", batch_size) _num_iterations = hparams.get("num_iterations", num_iterations) num_iterations = max(num_iterations, _num_iterations) checkpoint_every = hparams.get("checkpoint_every", checkpoint_every) exp_dir = load_checkpoint_dir # Tensorboard writer if is_rank_0(): tb_dir = exp_dir / "tb_dir" assert tb_dir.exists() summary_writer = SummaryWriter(log_dir=tb_dir) ################################ ###### Create Datasets ######### ################################ log_dist("Creating Datasets", ranks=[0], level=logging.INFO) data_iterator = create_data_iterator( mask_prob=mask_prob, random_replace_prob=random_replace_prob, unmask_replace_prob=unmask_replace_prob, tokenizer=tokenizer, max_seq_length=max_seq_length, batch_size=batch_size, ) log_dist("Dataset Creation Done", ranks=[0], level=logging.INFO) ################################ ###### Create Model ############ ################################ log_dist("Creating 
Model", ranks=[0], level=logging.INFO) model = create_model( num_layers=num_layers, num_heads=num_heads, ff_dim=ff_dim, h_dim=h_dim, dropout=dropout, ) log_dist("Model Creation Done", ranks=[0], level=logging.INFO) ################################ ###### DeepSpeed engine ######## ################################ log_dist("Creating DeepSpeed engine", ranks=[0], level=logging.INFO) ds_config = { "train_micro_batch_size_per_gpu": batch_size, "optimizer": { "type": "Adam", "params": { "lr": 1e-4 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": 1, "offload_optimizer": { "device": "cpu" } } } model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=ds_config) log_dist("DeepSpeed engine created", ranks=[0], level=logging.INFO) ################################ #### Load Model checkpoint ##### ################################ start_step = 1 if load_checkpoint_dir is not None: _, client_state = model.load_checkpoint(load_dir=load_checkpoint_dir) checkpoint_step = client_state['checkpoint_step'] start_step = checkpoint_step + 1 ################################ ####### The Training Loop ###### ################################ log_dist( f"Total number of model parameters: {sum([p.numel() for p in model.parameters()]):,d}", ranks=[0], level=logging.INFO) model.train() losses = [] for step, batch in enumerate(data_iterator, start=start_step): if step >= num_iterations: break # Move the tensors to device for key, value in batch.items(): batch[key] = value.to(device) # Forward pass loss = model(**batch) # Backward pass model.backward(loss) # Optimizer Step model.step() losses.append(loss.item()) if step % log_every == 0: log_dist("Loss: {0:.4f}".format(np.mean(losses)), ranks=[0], level=logging.INFO) if is_rank_0(): summary_writer.add_scalar(f"Train/loss", np.mean(losses), step) if step % checkpoint_every == 0: model.save_checkpoint(save_dir=exp_dir, client_state={'checkpoint_step': step}) log_dist("Saved model to {0}".format(exp_dir), ranks=[0], level=logging.INFO) # Save the last checkpoint if not saved yet if step % checkpoint_every != 0: model.save_checkpoint(save_dir=exp_dir, client_state={'checkpoint_step': step}) log_dist("Saved model to {0}".format(exp_dir), ranks=[0], level=logging.INFO) return exp_dir if __name__ == "__main__": torch.manual_seed(42) np.random.seed(0) random.seed(0) fire.Fire(train)
36.836237
95
0.576334
98f5a9225473ea31a925278ee4add1b0f458f788
825
py
Python
programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py
vamsitallapudi/Coderefer-Python-Projects
a7acc682251661e296c64533f4a85d47e6eedda2
[ "Apache-2.0" ]
1
2021-01-03T06:42:58.000Z
2021-01-03T06:42:58.000Z
programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py
vamsitallapudi/Coderefer-Python-Projects
a7acc682251661e296c64533f4a85d47e6eedda2
[ "Apache-2.0" ]
null
null
null
programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py
vamsitallapudi/Coderefer-Python-Projects
a7acc682251661e296c64533f4a85d47e6eedda2
[ "Apache-2.0" ]
null
null
null
# Given a singly linked list, determine if it is a palindrome.

# Definition for singly-linked list.
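# The list definition and solution are elided from this record. A sketch using
# the standard LeetCode scaffolding plus the usual O(n)-time / O(1)-space
# reverse-second-half approach (the exact approach taken by the original
# author is not recorded):
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def isPalindrome(self, head):
        # Find the middle with slow/fast pointers.
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        # Reverse the second half in place.
        prev = None
        while slow:
            slow.next, prev, slow = prev, slow, slow.next
        # Compare the two halves.
        left, right = head, prev
        while right:
            if left.val != right.val:
                return False
            left, right = left.next, right.next
        return True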
22.916667
62
0.530909
98f70d5ddc8fc406905d54058613214bd95d62ce
290
py
Python
__init__.py
CloudCIX/rolly
8fafd655cb82881ae2cf75a475904cddc39e2f9a
[ "Apache-2.0" ]
6
2019-12-09T16:13:21.000Z
2020-07-16T11:42:33.000Z
__init__.py
CloudCIX/rolly
8fafd655cb82881ae2cf75a475904cddc39e2f9a
[ "Apache-2.0" ]
null
null
null
__init__.py
CloudCIX/rolly
8fafd655cb82881ae2cf75a475904cddc39e2f9a
[ "Apache-2.0" ]
1
2021-01-02T09:44:39.000Z
2021-01-02T09:44:39.000Z
""" Rocky is a CLI based provisioning and management tool for CloudCIX Cloud software. Rocky is designed to operate in an out of band (OOB) network, serarated from other CloudCIX networks. Rocky's purpose is to facilitate monitoring, testing, debug and recovery """ __version__ = '0.3.5'
32.222222
101
0.772414
98f808b42f55c190413c10c0ee75bee408ae97c6
1,671
py
Python
calculator.py
harshitbansal373/Python-Games
4e879b0a97b4b420ed6d440cd2d6a0332a2109b7
[ "MIT" ]
null
null
null
calculator.py
harshitbansal373/Python-Games
4e879b0a97b4b420ed6d440cd2d6a0332a2109b7
[ "MIT" ]
null
null
null
calculator.py
harshitbansal373/Python-Games
4e879b0a97b4b420ed6d440cd2d6a0332a2109b7
[ "MIT" ]
null
null
null
from tkinter import *
import time

root = Tk()
root.title('Calculator')
root.config(bg='wheat')

s = ''
text = StringVar()

f = Frame(root, bg='#dcdde1')
e = Entry(f, textvariable=text, bg='#f5f6fa', fg='#353b48', font='roboto 34 bold', justify='right', relief=RAISED)
e.pack(side=LEFT, padx=10, pady=10, expand=YES, fill=BOTH)
f.pack(side=TOP, padx=10, pady=10, expand=YES, fill=BOTH)

l = ['#aabbcc', '#bbccdd', '#ccddee', '#ddeeff']
for i in ['789/', '456*', '123+', '.0-=']:
    f = Frame(root, bg=l.pop())
    for j in i:
        b = Button(f, text=j, bg='#00a8ff', fg='#353b48', font='roboto 34 italic',
                   command=(lambda x=j: display(x)) if j != '=' else solve)
        b.pack(side=LEFT, padx=10, pady=10, expand=YES, fill=BOTH)
    f.pack(side=TOP, padx=10, pady=10, expand=YES, fill=BOTH)

f1 = Frame(root, bg='#dcdde1')
clear = Button(f1, text='C', bg='#00a8ff', fg='#353b48', font='Roboto 34', command=clear)
clear.pack(side=LEFT, padx=10, pady=10, expand=YES, fill=BOTH)
clear1 = Button(f1, text='CE', bg='#00a8ff', fg='#353b48', font='Roboto 34', command=clear1)
clear1.pack(side=LEFT, padx=10, pady=10, expand=YES, fill=BOTH)
f1.pack(side=TOP, padx=10, pady=10, expand=YES, fill=BOTH)

f2 = Frame(root, bg='#dcdde1')
label = Label(f2, bg='#00a8ff', fg='#353b48', font='roboto 34')
label.pack(padx=10, pady=10, expand=YES, fill=BOTH)
f2.pack(padx=10, pady=10, expand=YES, fill=BOTH)

con()
root.mainloop()
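# The callbacks wired up above (display, solve, clear, clear1, con) are elided
# from this record; in the original they precede the UI construction. Minimal
# hypothetical sketches consistent with how they are bound:
def display(ch):
    global s
    s += ch
    text.set(s)

def solve():
    global s
    try:
        s = str(eval(s))
    except Exception:
        s = 'error'
    text.set(s)

def clear():
    global s
    s = ''
    text.set(s)

def clear1():
    global s
    s = s[:-1]
    text.set(s)

def con():
    # assumed: keeps a running clock in the bottom label
    label.config(text=time.strftime('%H:%M:%S'))
    label.after(1000, con)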
25.318182
125
0.668462
98f8ea06315deb6bd9599f36bf3f99bf2965db61
8,280
py
Python
src/Main.py
OlavH96/Master
f98476063e579b7b2a80b81a2c0ca4005f5fce80
[ "MIT" ]
null
null
null
src/Main.py
OlavH96/Master
f98476063e579b7b2a80b81a2c0ca4005f5fce80
[ "MIT" ]
null
null
null
src/Main.py
OlavH96/Master
f98476063e579b7b2a80b81a2c0ca4005f5fce80
[ "MIT" ]
null
null
null
import glob import os import keras import tensorflow as tf from keras.models import load_model from keras.callbacks import ModelCheckpoint import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import src.util.Files as Files from src.util.ImageLoader import load_images_generator, resize_image, load_images_generator_with_filename import numpy as np import logging as log import random from src.util.Arguments import anomaly_arguments, get_model_choice import src.util.Arguments as Arguments from scipy.stats import norm from PIL import Image from src.train.Models import autoencoder, conv_autoencoder, vae_autoencoder, vae_loss, get_dummy_loss, from_argument_choice import src.train.Models as Models import src.util.Filenames as Filenames import math os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' def train_on_images(epochs, max_x, max_y, path, model_type, model_name, arg_steps, validation_path, color_mode="RGB"): sess = tf.Session() keras.backend.set_session(sess) # max_x = max([i.shape[0] for i in images]) # max_y = max([i.shape[1] for i in images]) # max_x, max_y = find_max_min_image_size(path = 'detected_images/*.png') # print(max_x, max_y) # 304, 298 epochs = epochs shape = (max_y, max_x, 3) model = Models.from_argument_choice(model_type, shape) steps = len(glob.glob(path)) if arg_steps != 0: steps = arg_steps model.summary() # define the checkpoint checkpoint = ModelCheckpoint(model_name, monitor='loss', verbose=1, save_best_only=True, mode='min') callbacks_list = [checkpoint] log.info('Fitting model...') if validation_path: history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode), validation_data=centered_image_generator(validation_path, max_x, max_y, color_mode=color_mode), validation_steps=100, epochs=epochs, steps_per_epoch=steps, callbacks=callbacks_list) else: history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode), epochs=epochs, steps_per_epoch=steps, callbacks=callbacks_list) model.save(model_name) loss = history.history['loss'] try: plt.plot(loss) if validation_path: val_loss = history.history['val_loss'] plt.plot(val_loss, color='g') plt.title(model_name) plt.ylabel("Loss") plt.xlabel("Epoch") plt.savefig(f'training_loss_{model_name}.png') except: log.info("Failed to create loss graph") log.info('Finished fitting model') return model def load_model_and_predict(model_path, num_predictions, path, max_x, max_y, model_type, model=None, color_mode="RGB", template_only=False): # vae_loss(image_shape=(max_x, max_y, 3), log_var=0.5, mu=0.5) im_shape = (max_x, max_y, 3) if model_type == get_model_choice(Arguments.VAE) and not model: model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)}) mu = model.get_layer('mu').output log_var = model.get_layer('log').output model.summary() print(mu, log_var) model.compile(optimizer='rmsprop', loss=vae_loss(im_shape, log_var, mu)) if model_type == get_model_choice(Arguments.CONVVAE) and not model: model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)}) encoder = model.get_layer('encoder') decoder = model.get_layer('decoder') mu = encoder.get_layer('mu').output log_var = encoder.get_layer('log').output model.compile(optimizer='adam', loss=vae_loss(im_shape, log_var, mu)) if model_type != get_model_choice(Arguments.VAE) and not model: model = load_model(model_path) model.summary() print("Loaded Model", model, model.input_shape) 
max_x = model.input_shape[1] max_y = model.input_shape[2] images = list(image_generator_with_filename(path, max_x, max_y, color_mode=color_mode)) random.shuffle(images) index = 0 print(f'Loaded {len(images)} images') model_name = model_path.split('.')[0] save_dir = Files.makedir_else_cleardir(f'./predictions/{model_name}_{Filenames.remove_path(Filenames.strip_path_modifier(path))}') for i, filename in images: # centered_image_generator(path, max_x, max_y): hashed = Filenames.md5hash(filename) anomaly = "anomaly" in filename extra = "_anomaly_" if anomaly else "_normal_" pred = model.predict(i) print(pred.shape) for ii in i: if color_mode == 'HSV': ii = Image.fromarray((ii * 255).astype(np.uint8), 'HSV') ii = ii.convert("RGB") ii = np.array(ii) plt.imsave(str(save_dir / f'orig{extra}{hashed}_{index}.png'), ii) #plt.imsave(str(save_dir / f'temp.png'), pred[0], vmin=0, vmax=1) print("input shape",i.shape) evaluate = model.evaluate(i, i) if type(evaluate) is list: evaluate = evaluate[0] print(index, evaluate) for p in pred: #print("prediction",p) p = p / np.max(p) if color_mode == 'HSV': p = Image.fromarray((p * 255).astype(np.uint8), 'HSV') p = p.convert('RGB') p = np.array(p) if template_only: # Hacky solution, oh well template_path = './src/sign_detection/image_generation/images/signs/png/362.50/362_5.png' im = Image.open(template_path) im = im.convert('RGB') im = im.resize(size=(64,64)) im = np.array(im) score = image_mse(i[0], im) plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{score}.png'), im) else: plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{str(evaluate)}.png'), p) index += 1 if index == num_predictions: break if __name__ == '__main__': args = anomaly_arguments() log.info('Arguments', args) print("Arguments", args) model = None if args.do_training: model = train_on_images( epochs=args.epochs, path=args.path, max_x=args.max_x, max_y=args.max_y, model_type=args.model_type, model_name=args.model, arg_steps=args.steps, color_mode=args.color, validation_path=args.validation_path ) if args.do_predict: load_model_and_predict( model_path=args.model, num_predictions=args.num_predictions, max_x=args.max_x, max_y=args.max_y, path=args.pred_path if args.pred_path else args.path, model_type=args.model_type, model=model, color_mode=args.color, template_only=args.template )
35.844156
139
0.620411
98f921edc0f4676c7070bf0e769ce5e1dab739bb
1,353
py
Python
daproli/manipulation.py
ermshaua/daproli
c1f7aeec431d9c60ae06eeac23455c1a03bc82cf
[ "BSD-3-Clause" ]
null
null
null
daproli/manipulation.py
ermshaua/daproli
c1f7aeec431d9c60ae06eeac23455c1a03bc82cf
[ "BSD-3-Clause" ]
null
null
null
daproli/manipulation.py
ermshaua/daproli
c1f7aeec431d9c60ae06eeac23455c1a03bc82cf
[ "BSD-3-Clause" ]
null
null
null
from .utils import _get_return_type


def windowed(data, size, step=1, ret_type=None):
    '''
    dp.windowed applies a window function to a collection of data items.

    Parameters
    -----------
    :param data: an iterable collection of data
    :param size: the window size
    :param step: the window step
    :param ret_type: if provided the used return type, otherwise ret_type(data)
    :return: the windowed data list

    Examples
    -----------
    >>> import daproli as dp
    >>> numbers = range(10)
    >>> dp.windowed(numbers, 2, step=2)
    [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
    '''
    if ret_type is None:
        ret_type = _get_return_type(data)
    return [ret_type(data[i:i+size]) for i in range(0, len(data)-(size-1), step)]


def flatten(data, ret_type=None):
    '''
    dp.flatten applies a flatten function to a collection of data items.

    Parameters
    -----------
    :param data: an iterable collection of data
    :param ret_type: if provided the used return type, otherwise ret_type(data)
    :return: the flattened data collection

    Examples
    -----------
    >>> import daproli as dp
    >>> dp.flatten([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    '''
    if ret_type is None:
        ret_type = _get_return_type(data)
    return ret_type([item for sub in data for item in sub])
30.066667
81
0.610495
98fc678951f86f4c4317fc775c6ba763f66da302
8,717
py
Python
ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py
gcxtx/ambari
133d9c4661b21182482c25f96c3f0bf0a9740a9f
[ "Apache-2.0" ]
1
2021-05-06T06:24:04.000Z
2021-05-06T06:24:04.000Z
ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py
gcxtx/ambari
133d9c4661b21182482c25f96c3f0bf0a9740a9f
[ "Apache-2.0" ]
null
null
null
ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py
gcxtx/ambari
133d9c4661b21182482c25f96c3f0bf0a9740a9f
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python

'''
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements.  See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''

from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import json
import sys
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
46.367021
141
0.494207
98fd965b02157810b02af85a0eee51f0f9a9f9e1
5,040
py
Python
Udacity P3 Additional Files/model.py
sayeayed/Udacity-Project4
da39d0013d35d90818f9aa24ef097e185e705489
[ "MIT" ]
null
null
null
Udacity P3 Additional Files/model.py
sayeayed/Udacity-Project4
da39d0013d35d90818f9aa24ef097e185e705489
[ "MIT" ]
null
null
null
Udacity P3 Additional Files/model.py
sayeayed/Udacity-Project4
da39d0013d35d90818f9aa24ef097e185e705489
[ "MIT" ]
null
null
null
import os
import csv
import numpy as np
from sklearn.utils import shuffle

## Read in frame data
samples = []
with open('/../opt/carnd_p3/data/driving_log.csv') as csvfile:  #open the log file
    reader = csv.reader(csvfile)  #as a readable csv
    for line in reader:
        samples.append(line)  #add each line of the log file to samples
samples = samples[1:]  # to remove table header
samples = shuffle(samples)  # shuffle entire sample set before splitting into training and validation so that training isn't biased

from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2)  #split samples into 80% training, 20% validation

from scipy import ndimage  #because cv2.imread() imports the image as BGR, and we want RGB

## Define generator to handle small portions of images at a time so that training is not as memory-heavy

# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)

ch, row, col = 3, 160, 320  # Full image format

#import Keras model layers
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.pooling import MaxPooling2D

# BUILD MODEL
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x/127.5 - 1.0, input_shape=(row, col, ch)))
# Crop incoming data (training, validation, and autonomous so that everything is consistent)
model.add(Cropping2D(cropping=((60, 20), (0, 0))))  # could be first layer to reduce memory used in Lambda calculation, and thus faster training
# Begin CNN (similar to NVIDIA architecture)
# Convolution layers 1-3, kernel size 5 with stride of 2
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
# Convolution layers 4-5, kernel size 3 with stride of 1
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
# Flatten convolution output to yield single numerical result
model.add(Flatten())
# Fully connected layers to complete computations, gradually decreasing in parameters until final value
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

## Training hyper parameters to play with
## Stop training checkpoints...
# save_path = 'model{epoch:02d}-{val_loss:.2f}.h5'
# checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
# stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
## OR
batch_size = 32
epochs = 5  #***

## Compile and train the model
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])  #use Mean Squared Error to measure loss, use Adam optimizer for tuning
model.fit_generator(train_generator, steps_per_epoch=len(train_samples)/batch_size, validation_data=validation_generator, validation_steps=len(validation_samples)/batch_size, epochs=5, verbose=1)  # train using generators

#save the trained model
model.save('model.h5')
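# The generator referenced above is elided from this record; in the original
# it sits under the "## Define generator" comment. A sketch of the standard
# Udacity batching pattern (the image directory path and CSV column layout
# are assumptions):
def generator(samples, batch_size=32):
    num_samples = len(samples)
    while 1:  # loop forever so Keras can pull batches indefinitely
        samples = shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images, angles = [], []
            for batch_sample in batch_samples:
                name = '/../opt/carnd_p3/data/IMG/' + batch_sample[0].split('/')[-1]
                images.append(ndimage.imread(name))  # RGB read, per the import note above
                angles.append(float(batch_sample[3]))  # steering angle column
            X_train = np.array(images)
            y_train = np.array(angles)
            yield shuffle(X_train, y_train)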
48.461538
222
0.709127
98fe28b6ed135c40a04274c069f20df97e941299
2,357
py
Python
utils/wavelengthfit_prim.py
GeminiDRSoftware/GHOSTDR
79cd1ac81a7458e06668d6dac51fc6f9c9c61b31
[ "BSD-3-Clause" ]
1
2019-09-05T15:29:25.000Z
2019-09-05T15:29:25.000Z
utils/wavelengthfit_prim.py
GeminiDRSoftware/GHOSTDR
79cd1ac81a7458e06668d6dac51fc6f9c9c61b31
[ "BSD-3-Clause" ]
null
null
null
utils/wavelengthfit_prim.py
GeminiDRSoftware/GHOSTDR
79cd1ac81a7458e06668d6dac51fc6f9c9c61b31
[ "BSD-3-Clause" ]
2
2017-10-10T23:23:53.000Z
2022-02-15T23:28:22.000Z
#!/usr/bin/env python3
"""A script containing the basic principles of the extraction primitive inner
workings"""
from __future__ import division, print_function

import astropy.io.fits as pyfits  # assumed import; the record uses pyfits.getdata/writeto throughout
import numpy as np

from ghostdr import polyfit

# Firstly, let's find all the needed files
fitsdir = '/Users/mireland/data/ghost/cal_frames/'

# Define the files in use (NB xmod.txt and wavemod.txt should be correct)
arc_file = fitsdir + "arc_extracted.fits"
# load it in now:
extracted_flux, extracted_vars = pyfits.getdata(arc_file)

# Where is the default location for the model? By default it is a parameter
# in the ghost class. If this needs to be overwritten, go ahead.
# This is the xmod file. Wherever it is saved from the flat reduction.
xmodel_file = fitsdir + 'GHOST_1_1_blue_std_xmodPolyfit.fits'

# All the other models... which are currently in the "test" directory.
test_files_dir = fitsdir  # assumption: test_files_dir is never defined in this record
wmodel_file = test_files_dir + 'wparams_blue_std.fits'
spatmod_file = test_files_dir + 'spatmod.fits'
specmod_file = test_files_dir + 'specmod.fits'
rotmod_file = test_files_dir + 'rotmod2.fits'

# Find the arc line list file
arclinefile = '/home/jbento/code/ghostdr/ghostdr/ADCONFIG_GHOST/lookups/GHOST/Polyfit/mnras0378-0221-SD1.txt'
arcwaves, arcfluxes = np.loadtxt(arclinefile, usecols=[1, 2]).T

# Get the initial default model from the lookup location (loaded before use;
# the original listing called spectral_format_with_matrix before defining these)
xpars = pyfits.getdata(xmodel_file)
wpars = pyfits.getdata(wmodel_file)
spatpars = pyfits.getdata(spatmod_file)
specpars = pyfits.getdata(specmod_file)
rotpars = pyfits.getdata(rotmod_file)

# instantiate the ghost arm
arm = polyfit.GhostArm('blue', mode='std')
arm.spectral_format_with_matrix(xpars, wpars, spatpars, specpars, rotpars)

slitview = polyfit.SlitView(image_array, flat_image_array, mode='std')  # slit-viewer arrays are elided from this record

# The extractor is given the polyfit "arm" object, and a slitview object which has
# been instantiated with the slit viewer data.
extractor = polyfit.Extractor(arm, slitview)

# Now find the other lines, after first re-loading into the extractor.
# the inspect parameter is a verbose option for visualising the line
# finding results
lines_out = extractor.find_lines(extracted_flux, arcwaves, inspect=False)

# Now finally do the wavelength fit!
fitted_params, wave_and_resid = arm.read_lines_and_fit(wpars, lines_out, ydeg=3, xdeg=3)

# Optionally show residuals?

# Now write the output to a file, in whatever format suits the recipe system best.
pyfits.writeto('outputs.fits', fitted_params)
35.712121
107
0.801018
98ff5d19bcbfb3d13ae61a0ad4df7649e741ec52
1,506
py
Python
time_management/test/kronos_test.py
AyushRawal/time-management
a8876f7b681da837c41f17cf896eaa895017f17f
[ "MIT" ]
1
2021-11-15T19:35:51.000Z
2021-11-15T19:35:51.000Z
time_management/test/kronos_test.py
AyushRawal/time-management
a8876f7b681da837c41f17cf896eaa895017f17f
[ "MIT" ]
null
null
null
time_management/test/kronos_test.py
AyushRawal/time-management
a8876f7b681da837c41f17cf896eaa895017f17f
[ "MIT" ]
null
null
null
import unittest
import datetime

import kronos

string_format_time = "%Y-%m-%d %H:%M:%S"
date_time_str = "2020-07-19 18:14:21"
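# The actual assertions against the kronos module are elided from this record;
# a minimal hypothetical round-trip test using only the constants above would
# look like:
class KronosTimeTest(unittest.TestCase):
    def test_time_format_round_trip(self):
        dt = datetime.datetime.strptime(date_time_str, string_format_time)
        self.assertEqual(dt.strftime(string_format_time), date_time_str)


if __name__ == "__main__":
    unittest.main()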
41.833333
83
0.717131
98ffa0f6e3c8edf444c1fa0391cb1792a90df5ec
1,368
py
Python
mfc/mfc.py
FuelCellUAV/FC_datalogger
1b4b4fecb6a842f3ba685115db01a50cca7596c7
[ "CC0-1.0" ]
null
null
null
mfc/mfc.py
FuelCellUAV/FC_datalogger
1b4b4fecb6a842f3ba685115db01a50cca7596c7
[ "CC0-1.0" ]
null
null
null
mfc/mfc.py
FuelCellUAV/FC_datalogger
1b4b4fecb6a842f3ba685115db01a50cca7596c7
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python3

# Mass Flow Controller Arduino driver
# Copyright (C) 2015  Simon Howroyd, Jason James
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

#############################################################################

# Import libraries
from time import sleep
#from quick2wire.i2c import I2CMaster, reading

# Define class
# (the controller class definition is elided from this record; getMoles below
# reads as one of its methods, hence the `self` parameter)

# External getter
def getMoles(self, fun, ch):
    rate = self.get(fun, ch) * (7.0 / 6280.0)  # TODO should be *125.718/134.82 (density H2 at 1.5bar)
    return rate
32.571429
101
0.638889
c70208d0f7ec90b8fef96ebe7d673c28540df5bc
2,558
py
Python
odm/dialects/postgresql/green.py
quantmind/pulsar-odm
5955c20beca0a89270c2b390335838deb7d5915e
[ "BSD-3-Clause" ]
16
2015-02-17T22:23:48.000Z
2020-08-08T09:35:53.000Z
odm/dialects/postgresql/green.py
quantmind/pulsar-odm
5955c20beca0a89270c2b390335838deb7d5915e
[ "BSD-3-Clause" ]
11
2015-02-25T11:37:09.000Z
2016-03-04T12:08:11.000Z
odm/dialects/postgresql/green.py
quantmind/pulsar-odm
5955c20beca0a89270c2b390335838deb7d5915e
[ "BSD-3-Clause" ]
3
2017-02-27T10:24:31.000Z
2020-10-08T05:43:15.000Z
from asyncio import Future from greenlet import getcurrent import psycopg2 from psycopg2 import * # noqa from psycopg2 import extensions, OperationalError __version__ = psycopg2.__version__ def psycopg2_wait_callback(conn): """A wait callback to allow greenlet to work with Psycopg. The caller must be from a greenlet other than the main one. :param conn: psycopg2 connection or file number This function must be invoked from a coroutine with parent, therefore invoking it from the main greenlet will raise an exception. """ while True: state = conn.poll() if state == extensions.POLL_OK: # Done with waiting break elif state == extensions.POLL_READ: _wait_fd(conn) elif state == extensions.POLL_WRITE: _wait_fd(conn, read=False) else: # pragma nocover raise OperationalError("Bad result from poll: %r" % state) # INTERNALS def _wait_fd(conn, read=True): '''Wait for an event on file descriptor ``fd``. :param conn: file descriptor :param read: wait for a read event if ``True``, otherwise a wait for write event. This function must be invoked from a coroutine with parent, therefore invoking it from the main greenlet will raise an exception. ''' current = getcurrent() parent = current.parent assert parent, '"_wait_fd" must be called by greenlet with a parent' try: fileno = conn.fileno() except AttributeError: fileno = conn future = Future() # When the event on fd occurs switch back to the current greenlet if read: future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read) else: future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read) # switch back to parent greenlet parent.switch(future) # Back on the child greenlet. Raise error if there is one future.result() try: extensions.POLL_OK except AttributeError: # pragma nocover from pulsar import ImproperlyConfigured raise ImproperlyConfigured( 'Psycopg2 does not have support for asynchronous connections. ' 'You need at least version 2.2.0 of Psycopg2.') extensions.set_wait_callback(psycopg2_wait_callback)
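# The _done_wait_fd callback registered above is elided from this record; a
# minimal sketch consistent with its call sites (remove the watcher, then wake
# the waiting greenlet by resolving the future) would be:
def _done_wait_fd(fileno, future, read):
    if read:
        future._loop.remove_reader(fileno)
    else:
        future._loop.remove_writer(fileno)
    future.set_result(None)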
29.744186
76
0.67631
c70375d862917fab136e0bc4321aa240c2c6c44e
27,984
py
Python
test/test_replica_set_connection.py
h4ck3rm1k3/mongo-python-driver
dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c
[ "Apache-2.0" ]
1
2019-04-27T20:15:11.000Z
2019-04-27T20:15:11.000Z
test/test_replica_set_connection.py
h4ck3rm1k3/mongo-python-driver
dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c
[ "Apache-2.0" ]
null
null
null
test/test_replica_set_connection.py
h4ck3rm1k3/mongo-python-driver
dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c
[ "Apache-2.0" ]
null
null
null
# Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test the replica_set_connection module."""

import copy
import datetime
import os
import signal
import socket
import sys
import time
import thread
import traceback
import unittest

sys.path[0:0] = [""]

from nose.plugins.skip import SkipTest

from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.read_preferences import ReadPreference
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.replica_set_connection import _partition_node
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
                            ConfigurationError,
                            ConnectionFailure,
                            InvalidName,
                            OperationFailure)
from test import version
from test.utils import delay, assertReadFrom, assertReadFromAll, read_from_which_host

host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))
pair = '%s:%d' % (host, port)


if __name__ == "__main__":
    unittest.main()
36.966975
85
0.611885
c703e56a113105edf215384785217acba5d2eb75
2,177
py
Python
jqi/cmd.py
jan-g/jqi
f304f9fda33ac9b9eae98848d2a64acbe0893131
[ "CC-BY-3.0", "Apache-2.0" ]
3
2020-04-15T13:40:59.000Z
2021-06-30T10:09:33.000Z
jqi/cmd.py
jan-g/jqi
f304f9fda33ac9b9eae98848d2a64acbe0893131
[ "CC-BY-3.0", "Apache-2.0" ]
null
null
null
jqi/cmd.py
jan-g/jqi
f304f9fda33ac9b9eae98848d2a64acbe0893131
[ "CC-BY-3.0", "Apache-2.0" ]
null
null
null
import argparse_helper as argparse import config_dir import sys from .editor import Editor if __name__ == '__main__': main("-f", "foo", "/tmp/x")
31.550725
109
0.601746
c704254fb8b8187007babe4836f7f7b5682b3b65
888
py
Python
setup.py
ASKBOT/python-import-utils
9cc317cc2a42dd46d41d53e8209203ccfe528c11
[ "BSD-2-Clause" ]
1
2015-07-19T10:36:42.000Z
2015-07-19T10:36:42.000Z
setup.py
ASKBOT/python-import-utils
9cc317cc2a42dd46d41d53e8209203ccfe528c11
[ "BSD-2-Clause" ]
null
null
null
setup.py
ASKBOT/python-import-utils
9cc317cc2a42dd46d41d53e8209203ccfe528c11
[ "BSD-2-Clause" ]
null
null
null
import ez_setup ez_setup.use_setuptools() from setuptools import setup, find_packages import import_utils setup( name = "import-utils", version = import_utils.__version__, description = 'A module that supports simple programmatic module imports', packages = find_packages(), author = 'Evgeny.Fadeev', author_email = 'evgeny.fadeev@gmail.com', license = 'BSD', keywords = 'import, module', url = 'http://askbot.org', include_package_data = True, classifiers = [ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], long_description = import_utils.__doc__ )
30.62069
78
0.647523
c704a4dc1d06546eaf240da05c092e6fa0ab7b9d
1,704
py
Python
visual_dynamics/policies/random_offset_camera_target_policy.py
alexlee-gk/visual_dynamics
90227bb0d0aebb1989117b5c25ca311655ca7cc7
[ "MIT" ]
30
2017-04-05T12:55:09.000Z
2022-03-14T14:31:31.000Z
visual_dynamics/policies/random_offset_camera_target_policy.py
alexlee-gk/visual_dynamics
90227bb0d0aebb1989117b5c25ca311655ca7cc7
[ "MIT" ]
1
2017-06-19T02:39:03.000Z
2017-06-19T02:39:03.000Z
visual_dynamics/policies/random_offset_camera_target_policy.py
alexlee-gk/visual_dynamics
90227bb0d0aebb1989117b5c25ca311655ca7cc7
[ "MIT" ]
13
2017-04-05T12:55:09.000Z
2021-03-16T01:59:12.000Z
import numpy as np from visual_dynamics.policies import CameraTargetPolicy
47.333333
112
0.629695
c70662701931e0df30976bfadaca0ac6c230e738
1,401
py
Python
Day3/Day3.py
ErAgOn-AmAnSiRoHi/Advent-of-Code-2021
0f0d59483d93f6fce4aa06fb36101aea08b02fc3
[ "MIT" ]
null
null
null
Day3/Day3.py
ErAgOn-AmAnSiRoHi/Advent-of-Code-2021
0f0d59483d93f6fce4aa06fb36101aea08b02fc3
[ "MIT" ]
null
null
null
Day3/Day3.py
ErAgOn-AmAnSiRoHi/Advent-of-Code-2021
0f0d59483d93f6fce4aa06fb36101aea08b02fc3
[ "MIT" ]
null
null
null
with open("inputday3.txt") as f: data = [x for x in f.read().split()] gamma = "" epsilon = "" for b in range(0, len(data[0])): one = 0 zero = 0 for c in range(0, len(data)): if data[c][b] == '0': zero += 1 else: one += 1 if zero > one: gamma += '0' epsilon += '1' else: gamma += '1' epsilon += '0' g = int(gamma, 2) e = int(epsilon, 2) print("PART 1", g * e) gamma = "" epsilon = "" data2 = data.copy() index = 0 while len(data) > 1: one = 0 zero = 0 ones = [] zeroes = [] for c in range(0, len(data)): if data[c][index] == "0": zero += 1 zeroes.append(data[c]) else: one += 1 ones.append(data[c]) if zero > one: data = zeroes else: data = ones index += 1 oxygen = int(data[0], 2) data = data2 index = 0 while len(data) > 1: one = 0 zero = 0 ones = [] zeroes = [] for c in range(0, len(data)): if data[c][index] == '0': zero += 1 zeroes.append(data[c]) else: one += 1 ones.append(data[c]) if one < zero: data = ones else: data = zeroes index += 1 co2 = int(data[0], 2) print("PART 2", oxygen * co2)
18.932432
41
0.417559
c706f98a7ed12b68d12a292394d4a9f058dbea40
12,449
py
Python
keras2pytorch_dataset.py
MPCAICDM/MPCA
c996435a0578ea4160f934bc01041c2ef23468f3
[ "MIT" ]
null
null
null
keras2pytorch_dataset.py
MPCAICDM/MPCA
c996435a0578ea4160f934bc01041c2ef23468f3
[ "MIT" ]
null
null
null
keras2pytorch_dataset.py
MPCAICDM/MPCA
c996435a0578ea4160f934bc01041c2ef23468f3
[ "MIT" ]
null
null
null
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
from misc import AverageMeter
from eval_accuracy import simple_accuracy

if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle

import torch.utils.data as data
import torch

from multiprocessing import Value


def train_reorganized(trainloader, model, criterion, optimizer, epochs):
    """Train on batches of stacked views.

    Each batch is expected to have shape (batch, n_views, C, H, W). The
    views are flattened into one large batch and the view index is used
    as the classification target, so the model learns to predict which
    transformation produced each image. The model's forward must return
    a (logits, features) pair.
    """
    # train the model
    model.train()
    top1 = AverageMeter()
    losses = AverageMeter()
    for epoch in range(epochs):
        for batch_idx, inputs in enumerate(trainloader):
            # target for row (b, v) is the view index v
            targets = torch.LongTensor(np.tile(np.arange(inputs.size(1)), inputs.size(0)))
            inputs = inputs.reshape(-1, inputs.size(-3), inputs.size(-2), inputs.size(-1))
            inputs, targets = torch.autograd.Variable(inputs.cuda()), torch.autograd.Variable(targets.cuda())

            outputs, _ = model(inputs)

            loss = criterion(outputs, targets)

            prec1 = simple_accuracy(outputs.data.cpu(), targets.data.cpu())

            top1.update(prec1, inputs.size(0))
            losses.update(loss.data.cpu(), inputs.size(0))

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % 10 == 0:
                print('Epoch: [{} | {}], batch: {}, loss: {}, Accuracy: {}'.format(epoch + 1, epochs, batch_idx + 1, losses.avg, top1.avg))
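
# A minimal, hypothetical driver for train_reorganized (not part of the
# original file): TinyNet, the fake data and all sizes below are
# illustrative stand-ins. It assumes a CUDA device, since the loop above
# moves inputs and targets to the GPU.
if __name__ == '__main__':
    import torch.nn as nn
    import torch.optim as optim

    class TinyNet(nn.Module):
        # Returns (logits, features), matching what the training loop
        # above unpacks from model(inputs).
        def __init__(self, n_views=4):
            super(TinyNet, self).__init__()
            self.body = nn.Sequential(
                nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
                nn.AdaptiveAvgPool2d(1), nn.Flatten())
            self.head = nn.Linear(8, n_views)

        def forward(self, x):
            feats = self.body(x)
            return self.head(feats), feats

    # 16 fake samples, each with 4 views of a 3x32x32 image
    fake = torch.randn(16, 4, 3, 32, 32)
    loader = data.DataLoader(fake, batch_size=4)
    model = TinyNet().cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    train_reorganized(loader, model, criterion, optimizer, epochs=1)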
34.969101
139
0.618684
c7075ad8e2a1229e14b617586ca8b05a9f86dd2f
1,920
py
Python
mir/tools/mir_repo_utils.py
fenrir-z/ymir-cmd
6fbffd3c1ff5dd1c9a44b55de411523b50567661
[ "Apache-2.0" ]
1
2022-01-12T03:12:47.000Z
2022-01-12T03:12:47.000Z
mir/tools/mir_repo_utils.py
fenrir-z/ymir-cmd
6fbffd3c1ff5dd1c9a44b55de411523b50567661
[ "Apache-2.0" ]
null
null
null
mir/tools/mir_repo_utils.py
fenrir-z/ymir-cmd
6fbffd3c1ff5dd1c9a44b55de411523b50567661
[ "Apache-2.0" ]
null
null
null
import json import logging import os from typing import Optional from mir import scm from mir.tools import mir_storage
32.542373
110
0.655729
c70864a5d3c270e78a0bc9da8738245a6e27664f
3,624
py
Python
utils/edit_utils.py
ermekaitygulov/STIT
93dca8d589b555fa99a5c5438a8517a52d8898c3
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
6
2022-03-11T23:42:12.000Z
2022-03-28T09:39:25.000Z
utils/edit_utils.py
bycloudai/STIT-Windows
cadb2a01457bfd1c90bcd8d220587b48e1c2327a
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
utils/edit_utils.py
bycloudai/STIT-Windows
cadb2a01457bfd1c90bcd8d220587b48e1c2327a
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
import argparse import math import os import pickle from typing import List import cv2 import numpy as np import torch from PIL import Image, ImageDraw, ImageFont import configs.paths_config from configs import paths_config from training.networks import SynthesisBlock
32.648649
117
0.683499
c7087550ae8556b1933bc7961a3ed0e9783aaa07
6,845
py
Python
conll_df/conll_df.py
interrogator/conll-df
35611f295e3f8230f574142151e3a19098edfdca
[ "MIT" ]
27
2017-03-17T15:39:16.000Z
2021-11-23T09:10:10.000Z
conll_df/conll_df.py
interrogator/conll-df
35611f295e3f8230f574142151e3a19098edfdca
[ "MIT" ]
2
2017-11-21T05:33:04.000Z
2018-09-22T13:05:06.000Z
conll_df/conll_df.py
interrogator/conll-df
35611f295e3f8230f574142151e3a19098edfdca
[ "MIT" ]
8
2017-03-17T14:59:34.000Z
2022-02-25T19:09:27.000Z
import pandas as pd

# UD 1.0
CONLL_COLUMNS = ['i', 'w', 'l', 'p', 'n', 'm', 'g', 'f', 'd', 'c']

# UD 2.0
CONLL_COLUMNS_V2 = ['i', 'w', 'l', 'x', 'p', 'm', 'g', 'f', 'e', 'o']

# possible morphological attributes
MORPH_ATTS = ['type',
              'animacy',
              #'gender',
              'number',
              "Abbr",
              "Animacy",
              "Aspect",
              "Case",
              "Definite",
              "Degree",
              "Evident",
              "Foreign",
              "Gender",
              "Mood",
              "NumType",
              "Number",
              "Person",
              "Polarity",
              "Polite",
              "Poss",
              "PronType",
              "Reflex",
              "Tense",
              "VerbForm",
              "Voice",
              "Type"]


def _make_sent_csv(sentstring, fname, meta, splitter, i, skip_meta=False):
    """
    Take one CONLL-U sentence and add all metadata to each row
    Return: str (CSV data) and dict (sent level metadata)
    """
    fixed_lines = []
    raw_lines = sentstring.splitlines()
    for line in raw_lines:
        if not line:
            continue
        if line.startswith('#'):
            if not skip_meta:
                try:
                    k, v = line.lstrip('# ').split(splitter, 1)
                except ValueError:
                    k, v = line.lstrip('# ').split(splitter.strip(), 1)
                meta[k.lower().strip()] = v.strip()
        else:
            line = '%s\t%s\t%s' % (fname, i, line)
            fixed_lines.append(line)
    return '\n'.join(fixed_lines), meta


def _add_governors_to_df(df):
    """
    Add governor info to a DF. Increases memory usage quite a bit.
    """
    # save the original index
    i = df.index.get_level_values('i')
    # add g
    dfg = df.set_index('g', append=True)
    # remove i
    dfg = dfg.reset_index('i')
    dfg = df.loc[dfg.index]
    dfg = dfg[['w', 'l', 'p', 'f']]
    dfg['i'] = i
    dfg = dfg.set_index('i', append=True)
    dfg.index.names = ['file', 's', 'g', 'i']
    dfg = dfg.reset_index('g', drop=True)
    for c in list(dfg.columns):
        try:
            dfg[c] = dfg[c].cat.add_categories(['ROOT'])
        except (AttributeError, ValueError):
            pass
    dfg = dfg.fillna('ROOT')
    dfg.columns = ['gw', 'gl', 'gp', 'gf']
    dfg = df.join(dfg, how="inner")
    return dfg


def conll_df(path,
             corpus_name=False,
             corp_folder=False,
             v2="auto",
             skip_morph=False,
             skip_meta=False,
             add_gov=False,
             drop=['text', 'newdoc id'],
             file_index=True,
             categories=True,
             extra_fields='auto',
             drop_redundant=True,
             **kwargs):
    """
    Optimised CONLL-U reader for v2.0 data

    Args:
        path (str): the file to prepare

    Returns:
        pd.DataFrame: 2d array representation of file data
    """
    import os
    import re
    try:
        from io import StringIO
    except ImportError:
        from StringIO import StringIO

    splitter = ' = ' if v2 else '='

    with open(path, 'r') as fo:
        data = fo.read().strip('\n')

    if v2 == 'auto':
        v2 = 'sent_id = ' in data[:9999]

    fname = os.path.basename(path)

    # metadata that applies filewide
    # a little bonus for those with annual data
    basedict = {}
    if not skip_meta:
        year = re.search(r'[12][0-9][0-9][0-9]', fname)
        if year:
            basedict['year'] = year.group(0)

    sents = data.split('\n\n')
    sents_meta = [_make_sent_csv(sstring, fname, dict(basedict), splitter, i, skip_meta=skip_meta)
                  for i, sstring in enumerate(sents, start=1)]
    sents, metadata = zip(*sents_meta)

    # make the sent df
    sents = '\n\n'.join(sents)
    sents = StringIO(sents)

    if v2:
        cols = ['file', 's'] + CONLL_COLUMNS_V2
    else:
        cols = ['file', 's'] + CONLL_COLUMNS

    df = pd.read_csv(sents, sep="\t", header=None, names=cols,
                     quoting=kwargs.pop('quoting', 3),
                     index_col=[0, 1, 2],
                     engine='c',
                     na_filter=False,
                     **kwargs)

    if v2 and not skip_morph:
        df['m'] = df['m'].fillna('')
        df['o'] = df['o'].fillna('')
        if extra_fields == 'auto':
            # evil line to get all possible keys in the final column
            extra_fields = list(df['o'].str.extractall(r'(?:^|\|)([^=]+?)=')[0].unique())

        cats = MORPH_ATTS + extra_fields
        if 'SpaceAfter' not in cats:
            cats.append('SpaceAfter')
        cats = list(set(cats))

        om = df['o'].str.cat(df['m'], sep='|').str.strip('|_')
        # this is a very slow list comp, but i can't think of a better way to do it.
        # the 'extractall' solution makes columns for not just the value, but the key...
        extra = [om.str.extract('%s=([^|$]+)' % cat.title(), expand=True) for cat in cats]
        extra = pd.concat(extra, axis=1)
        extra.columns = cats
        df = pd.concat([df, extra], axis=1)

    # make and join the meta df
    if not skip_meta:
        metadata = {i: d for i, d in enumerate(metadata, start=1)}
        metadata = pd.DataFrame(metadata).T
        metadata.index.name = 's'
        df = metadata.join(df, how='inner')

    # we never want these to show up as a dataframe column
    badcols = ['sent_id', 's', 'i', 'file']
    # if we aren't parsing morph and extra columns, we should at least keep them
    if not skip_morph:
        badcols += ['o', 'm']
    if drop:
        badcols = badcols + drop
    df = df.drop(badcols, axis=1, errors='ignore')

    # some evil code to handle conll-u files where g col could be a string
    if 'g' in df.columns:
        df['g'] = df['g'].fillna(0)
        if df['g'].dtype in [object, str]:
            df['g'] = df['g'].str.replace('_', '0').astype(int)
        df['g'] = df['g'].astype(int)

    df = df.fillna('_')

    # attempt to categorise data
    if categories:
        for c in list(df.columns):
            if c in ['g', 'date']:
                continue
            try:
                df[c] = df[c].astype('category')
            except Exception:
                pass

    if add_gov:
        df = _add_governors_to_df(df)

    if not file_index:
        df.index = df.index.droplevel('file')

    if drop_redundant:
        empty_cols = []
        for c in df.columns:
            if len(df[c].unique()) == 1:
                empty_cols.append(c)
        df = df.drop(empty_cols, axis=1)

    # reorder columns so that important things are first
    firsts = CONLL_COLUMNS_V2 if v2 else CONLL_COLUMNS
    firsts = [i for i in firsts if i in list(df.columns)]
    lasts = [i for i in list(df.columns) if i not in firsts]
    df = df[firsts + lasts]

    return df
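
if __name__ == '__main__':
    # Quick usage sketch (not part of the original module; the filename is
    # hypothetical, any UD-style .conllu file should work): parse one file
    # into a token-level DataFrame indexed by (sentence, token).
    df = conll_df('en_ewt-ud-dev.conllu', file_index=False, add_gov=True)
    # word, lemma, POS tag and the governor's word for the first sentence
    print(df.loc[1][['w', 'l', 'p', 'gw']].head())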
30.154185
100
0.512491
c708da26fb5e59e5b2a82edc62ad3d6177cc9df2
2,491
py
Python
scripts/postgres_to_lmdb_bars_60m.py
alexanu/atpy
3f4b5cfe7de7633ef053d2feaddae421806a9799
[ "MIT" ]
24
2018-03-22T06:22:11.000Z
2022-03-14T09:04:44.000Z
scripts/postgres_to_lmdb_bars_60m.py
alexanu/atpy
3f4b5cfe7de7633ef053d2feaddae421806a9799
[ "MIT" ]
null
null
null
scripts/postgres_to_lmdb_bars_60m.py
alexanu/atpy
3f4b5cfe7de7633ef053d2feaddae421806a9799
[ "MIT" ]
9
2018-03-22T06:22:11.000Z
2020-09-19T16:47:13.000Z
#!/bin/python3 import argparse import datetime import functools import logging import os import psycopg2 from dateutil.relativedelta import relativedelta from atpy.data.cache.lmdb_cache import * from atpy.data.cache.postgres_cache import BarsInPeriodProvider from atpy.data.cache.postgres_cache import request_adjustments from atpy.data.splits_dividends import adjust_df if __name__ == "__main__": logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="PostgreSQL to LMDB configuration") parser.add_argument('-lmdb_path', type=str, default=None, help="LMDB Path") parser.add_argument('-delta_back', type=int, default=8, help="Default number of years to look back") parser.add_argument('-adjust_splits', action='store_true', default=True, help="Adjust splits before saving") parser.add_argument('-adjust_dividends', action='store_true', default=False, help="Adjust dividends before saving") args = parser.parse_args() lmdb_path = args.lmdb_path if args.lmdb_path is not None else os.environ['ATPY_LMDB_PATH'] con = psycopg2.connect(os.environ['POSTGRESQL_CACHE']) adjustments = None if args.adjust_splits and args.adjust_dividends: adjustments = request_adjustments(conn=con, table_name='splits_dividends') elif args.adjust_splits: adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='split') elif args.adjust_dividends: adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='dividend') now = datetime.datetime.now() bgn_prd = datetime.datetime(now.year - args.delta_back, 1, 1) bgn_prd = bgn_prd + relativedelta(days=7 - bgn_prd.weekday()) cache_read = functools.partial(read_pickle, lmdb_path=lmdb_path) bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table='bars_60m', bgn_prd=bgn_prd, delta=relativedelta(days=7), overlap=relativedelta(microseconds=-1), cache=cache_read) for i, df in enumerate(bars_in_period): if cache_read(bars_in_period.current_cache_key()) is None: if adjustments is not None: adjust_df(df, adjustments) write(bars_in_period.current_cache_key(), df, lmdb_path) logging.info('Saving ' + bars_in_period.current_cache_key()) else: logging.info('Cache hit on ' + bars_in_period.current_cache_key())
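
# Example invocation (an illustrative sketch, not part of the original
# script; paths and the connection string are hypothetical):
#
#   POSTGRESQL_CACHE='dbname=atpy user=postgres' \
#   ATPY_LMDB_PATH=/var/cache/atpy/lmdb \
#       python postgres_to_lmdb_bars_60m.py -delta_back 4 -adjust_dividends
#
# Cached weekly frames can later be read back with
# read_pickle(key, lmdb_path=os.environ['ATPY_LMDB_PATH']), which returns
# None for absent keys; the loop above relies on that to detect cache misses.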
43.701754
158
0.733842