hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c4a3dde4993c9a95e8e97065cdef59f7fea5aa64 | 3,988 | py | Python | tests/store/test_fetch_purchases_to_ship.py | yuzi-ziyu/alphasea-agent | a8ff803fd7dedd50c757630aa83b79fb624c0414 | [
"CC0-1.0"
] | 1 | 2022-01-27T14:29:39.000Z | 2022-01-27T14:29:39.000Z | tests/store/test_fetch_purchases_to_ship.py | yuzi-ziyu/alphasea-agent | a8ff803fd7dedd50c757630aa83b79fb624c0414 | [
"CC0-1.0"
] | null | null | null | tests/store/test_fetch_purchases_to_ship.py | yuzi-ziyu/alphasea-agent | a8ff803fd7dedd50c757630aa83b79fb624c0414 | [
"CC0-1.0"
] | null | null | null | from unittest import TestCase
from ..helpers import (
create_web3,
create_contract,
get_future_execution_start_at_timestamp,
proceed_time,
get_prediction_time_shift,
get_purchase_time_shift,
get_shipping_time_shift,
get_publication_time_shift,
get_tournament_id,
get_chain_id,
create_store,
generate_redis_namespace,
BaseHardhatTestCase
)
from src.web3 import get_account_address
execution_start_at = get_future_execution_start_at_timestamp()
content = 'abc'.encode()
model_id = 'model1'
model_id_other = 'model_other'
| 31.904 | 84 | 0.654965 |
c4a4559c8dad0a9248c5e83d6827ea4d86bb7ecb | 579 | py | Python | tests/test_add_option_backtrace.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | [
"MIT"
] | 11,391 | 2018-12-08T17:44:13.000Z | 2022-03-31T17:55:24.000Z | tests/test_add_option_backtrace.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | [
"MIT"
] | 610 | 2018-12-08T18:03:03.000Z | 2022-03-31T22:28:14.000Z | tests/test_add_option_backtrace.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | [
"MIT"
] | 601 | 2018-12-08T17:46:42.000Z | 2022-03-30T04:23:56.000Z | from loguru import logger
# See "test_catch_exceptions.py" for extended testing
| 23.16 | 75 | 0.651123 |
c4a4edff90f57692413fd77c390b6d607d322a51 | 251 | py | Python | BasicPythonPrograms/PythonDestructor.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | BasicPythonPrograms/PythonDestructor.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | BasicPythonPrograms/PythonDestructor.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | obj=Employee()
del obj | 25.1 | 55 | 0.609562 |
c4a64cd498868ef1b6019445d7127a1f346b9fe4 | 13,670 | py | Python | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-12-11T19:13:59.000Z | 2019-12-11T19:13:59.000Z | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Similar to the memory subsystem, this is a unified way to
access information about objects which contain registers
"""
import envi.bits as e_bits
from envi.const import *
def addLocalEnums(l, regdef):
    """
    Populate ``l`` (a dict, typically a module's ``locals()``) with one
    ``REG_<NAME> -> index`` entry per base register defined in ``regdef``.

    The value stored for each register is simply its position within
    ``regdef``.
    """
    position = 0
    for regname, _width in regdef:
        l["REG_%s" % regname.upper()] = position
        position += 1
def addLocalStatusMetas(l, metas, statmetas, regname):
    '''
    Dynamically create data based on the status register meta register
    definition.

    For every ``(metaname, idx, offset, width, desc)`` entry in
    ``statmetas`` this:

    * appends a ``(metaname, idx, offset, width)`` meta-register record
      to ``metas``
    * defines a bitmask constant named ``<regname>_<metaname>`` in the
      dictionary ``l`` (e.g. ``EFLAGS_CF``)

    The bitmask now covers the full ``width`` bits of the field
    (resolving the previous "fix for arbitrary width" TODO), so
    multi-bit status fields get a correct mask instead of only their
    lowest bit.  Single-bit fields (the common case) are unchanged.
    '''
    for metaname, idx, offset, width, desc in statmetas:
        # create meta registers
        metas.append( (metaname, idx, offset, width) )
        # create local bitmask constants (EFLAGS_%): mask all `width`
        # bits of the field, shifted into place at `offset`.
        l['%s_%s' % (regname, metaname)] = ((1 << width) - 1) << offset
def addLocalMetas(l, metas):
    """
    Install a ``REG_<NAME>`` constant in ``l`` (a dict or module locals)
    for each meta register described in ``metas``.

    The stored value packs the meta register description into one int:
    bits 24+ hold the bit offset, bits 16-23 hold the width, and the low
    16 bits hold the index of the real register it maps onto.
    """
    for metaname, real_idx, bitoff, nbits in metas:
        packed = (bitoff << 24) | (nbits << 16) | real_idx
        l["REG_%s" % metaname.upper()] = packed
| 31.643519 | 88 | 0.59744 |
c4a6ac024777e5d5757393235c2f8a34ef55a681 | 531 | py | Python | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null |
from flask_caching import Cache
from flask_jwt_oidc import JwtManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from elasticapm.contrib.flask import ElasticAPM
from .config import Config
from .helper import Api
# Shared Flask extension singletons.  They are created unbound here;
# presumably each is attached to the application via init_app() in the
# app factory -- TODO confirm against the application setup code.
apm = ElasticAPM()
db = SQLAlchemy()
migrate = Migrate()
jwt = JwtManager()
cache = Cache()
# REST API object rooted at the configured base path; `default` /
# `default_label` name the default namespace shown in the API docs.
api = Api(
    prefix=f'{Config.BASE_PATH}',
    doc=f'{Config.BASE_PATH}/',
    default='nris_api',
    default_label='NRIS related operations')
| 23.086957 | 49 | 0.770245 |
c4a933bda29b080bd1aab7e22c3ee0df61cffb17 | 3,510 | py | Python | tests/test_mqtt_async.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | 7 | 2016-09-27T00:21:46.000Z | 2017-03-18T20:04:29.000Z | tests/test_mqtt_async.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | null | null | null | tests/test_mqtt_async.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | 2 | 2017-03-16T21:47:43.000Z | 2020-10-20T22:58:03.000Z | # Copyright 2017 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Test async version of mqtt libraries. Depends on hbmqtt
(https://github.com/beerfactory/hbmqtt)
"""
import unittest
import sys
import asyncio
import string
from random import choice, seed
from antevents.base import Scheduler, SensorPub, SensorEvent
import antevents.linq.output
import antevents.linq.combinators
import antevents.linq.select
from antevents.adapters.mqtt_async import QueueWriter, QueueReader
from antevents.linq.transducer import PeriodicMedianTransducer
from utils import ValueListSensor, ValidateAndStopSubscriber
seed()
# Feature probe: record whether the optional `hbmqtt` package is
# importable so dependent code can check HBMQT_AVAILABLE-style flags
# instead of failing at import time.
try:
    import hbmqtt
    HBMQTT_AVAILABLE = True
except ImportError:
    HBMQTT_AVAILABLE = False
URL = "mqtt://localhost:1883"
VALUES = [
1.0,
2.5,
3.7,
4.1,
8.1,
0.5,
6.5,
4.5,
3.9,
6.5
]
EXPECTED = [
2.5,
4.1,
4.5,
6.5
]
CHARS=string.ascii_letters+string.digits
if __name__ == '__main__':
unittest.main()
| 30 | 88 | 0.665242 |
c4ac27bb61de371ddbaa59cbf8dcb19f1eb8972f | 7,407 | py | Python | edit/main.py | team-alpha-kr/Partner-pyWeb | da88f10b4c511616219a701a794fa0a5ef33f01b | [
"Apache-2.0"
] | null | null | null | edit/main.py | team-alpha-kr/Partner-pyWeb | da88f10b4c511616219a701a794fa0a5ef33f01b | [
"Apache-2.0"
] | null | null | null | edit/main.py | team-alpha-kr/Partner-pyWeb | da88f10b4c511616219a701a794fa0a5ef33f01b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
import os
from flask import Flask, request, render_template, request, redirect, url_for, jsonify
from flask_discord import DiscordOAuth2Session, requires_authorization
from discord import Webhook, RequestsWebhookAdapter
webhook = Webhook.partial(814742019489660939, "rvSBVHtGPflSASjeGEEKdZxC5Z_w1UM_ovc_xD0ZPcFy1UeUybFM4ClGANu6CEWTQame", adapter=RequestsWebhookAdapter())
run_webhook = Webhook.partial(804602090537091072, "6ZMww14Nh7OVeeHUt5bWeixreoWQmSzPVfFmIpU3BEr8OYLGqickY1VyoqH2IeMs1Kd8", adapter=RequestsWebhookAdapter())
app = Flask(__name__)
app.secret_key = b"%\xe0'\x01\xdeH\x8e\x85m|\xb3\xffCN\xc9g"
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "false"
app.config["DISCORD_CLIENT_ID"] = "801279922722045962"
app.config["DISCORD_CLIENT_SECRET"] = "zosKMQ95etnO1dZv7D5vet7TyVhyXwt5" # Discord client secret.
# app.config["DISCORD_REDIRECT_URI"] = "http://localhost:3333/callback" # URL to your callback endpoint.
app.config["DISCORD_REDIRECT_URI"] = "https://partner-e.alphakr.xyz/callback" # URL to your callback endpoint.
app.config["DISCORD_BOT_TOKEN"] = "ODAxMjc5OTIyNzIyMDQ1OTYy.YAeYFA.G9TddtDdPZ3Xlb7AAHD6ddVWVbY"
discord = DiscordOAuth2Session(app)
# S: 2021
# S: 210210
# E: 210210
# E: 2021
run_webhook.send(" - !")
app.run(host='0.0.0.0', port=3333, debug=False) | 37.984615 | 212 | 0.699743 |
c4ad485be7bdd5e1ac650e5ab444023836dc2e62 | 257 | py | Python | protocols/tpkt.py | dparnishchev/s7scan | 87a7aeeb3c932491745dfded2577d221083f87df | [
"Unlicense"
] | 98 | 2018-10-12T10:36:55.000Z | 2022-03-31T15:55:46.000Z | protocols/tpkt.py | FOGSEC/s7scan | d7f9c3bbd6a97a7f83991ea865be95b0e9280346 | [
"Unlicense"
] | null | null | null | protocols/tpkt.py | FOGSEC/s7scan | d7f9c3bbd6a97a7f83991ea865be95b0e9280346 | [
"Unlicense"
] | 35 | 2018-10-12T17:08:25.000Z | 2022-03-28T20:12:27.000Z | from scapy.fields import ByteField, ShortField
from scapy.packet import Packet
| 25.7 | 48 | 0.614786 |
c4ad9991f367ca79cfc5f643798ad08df02746df | 905 | py | Python | pylbm_ui/widgets/message.py | pylbm/pylbm_ui | 0a7202ee6ee5424486ce6ade1d3b18d8139d4ffb | [
"BSD-3-Clause"
] | 3 | 2021-05-17T20:38:32.000Z | 2021-11-16T17:54:26.000Z | pylbm_ui/widgets/message.py | pylbm/pylbm_ui | 0a7202ee6ee5424486ce6ade1d3b18d8139d4ffb | [
"BSD-3-Clause"
] | 32 | 2021-04-29T13:27:13.000Z | 2021-07-01T07:22:58.000Z | pylbm_ui/widgets/message.py | pylbm/pylbm_ui | 0a7202ee6ee5424486ce6ade1d3b18d8139d4ffb | [
"BSD-3-Clause"
] | 1 | 2021-04-30T06:40:21.000Z | 2021-04-30T06:40:21.000Z | import ipyvuetify as v
| 26.617647 | 53 | 0.340331 |
c4adaa3c0b3bb1d07161a72f61bcbc0a5a83b810 | 1,946 | py | Python | args_parser.py | vmartinv/capital_gains_calculator | bb322ca4bc3ab1ab7db702848d3cb5d0b5677fff | [
"MIT"
] | null | null | null | args_parser.py | vmartinv/capital_gains_calculator | bb322ca4bc3ab1ab7db702848d3cb5d0b5677fff | [
"MIT"
] | 23 | 2022-02-07T05:29:52.000Z | 2022-03-30T05:29:29.000Z | args_parser.py | vmartinv/capital_gains_calculator | bb322ca4bc3ab1ab7db702848d3cb5d0b5677fff | [
"MIT"
] | null | null | null | import argparse
import datetime
| 29.044776 | 90 | 0.631552 |
c4ae3cca9fef2f5e1a6a2cb205c909acf0583766 | 286 | py | Python | src/pydts/examples_utils/datasets.py | tomer1812/pydts | 7891a0b4b66dc7b986ebb7344c2c8f8d54e56ccc | [
"MIT"
] | null | null | null | src/pydts/examples_utils/datasets.py | tomer1812/pydts | 7891a0b4b66dc7b986ebb7344c2c8f8d54e56ccc | [
"MIT"
] | null | null | null | src/pydts/examples_utils/datasets.py | tomer1812/pydts | 7891a0b4b66dc7b986ebb7344c2c8f8d54e56ccc | [
"MIT"
] | null | null | null | import pandas as pd
from pydts.config import *
DATASETS_DIR = os.path.join(os.path.dirname((os.path.dirname(__file__))), 'datasets') | 35.75 | 85 | 0.758741 |
c4aef0df820c8e4498c5c1703e7a91b20097e686 | 621 | py | Python | busker/migrations/0013_auto_20200906_1933.py | tinpan-io/django-busker | 52df06b82e15572d0cd9c9d13ba2d5136585bc2d | [
"MIT"
] | 2 | 2020-09-01T12:06:07.000Z | 2021-09-24T09:54:57.000Z | busker/migrations/0013_auto_20200906_1933.py | tinpan-io/django-busker | 52df06b82e15572d0cd9c9d13ba2d5136585bc2d | [
"MIT"
] | null | null | null | busker/migrations/0013_auto_20200906_1933.py | tinpan-io/django-busker | 52df06b82e15572d0cd9c9d13ba2d5136585bc2d | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-06 19:33
from django.db import migrations, models
import django.db.models.deletion
| 25.875 | 133 | 0.615137 |
c4aef217e9c718184f0d3ad8cf65a1b19d71e9bd | 12,799 | py | Python | livy/cli/submit.py | tzing/python-livy | 0f2b0bf7832ae8fc65399506da294f4e8e019626 | [
"MIT"
] | 1 | 2022-01-27T03:04:29.000Z | 2022-01-27T03:04:29.000Z | livy/cli/submit.py | tzing/python-livy | 0f2b0bf7832ae8fc65399506da294f4e8e019626 | [
"MIT"
] | null | null | null | livy/cli/submit.py | tzing/python-livy | 0f2b0bf7832ae8fc65399506da294f4e8e019626 | [
"MIT"
] | null | null | null | """Submit a batch task to livy server."""
import argparse
import datetime
import importlib
import json
import logging
import re
import typing
import livy
import livy.cli.config
import livy.cli.logging
logger = logging.getLogger(__name__)
def main(argv=None):
    """CLI entrypoint for ``livy submit``.

    Builds the argument parser (seeded with defaults from the user's
    config file), runs pre-submit hook plugins, submits the batch to the
    Livy server and - unless watching is disabled - tails the batch log
    until it finishes, finally running the task-ended hook plugins.

    Parameters
    ----------
    argv : list, optional
        Argument vector handed to ``parser.parse_args``; ``None`` means
        use ``sys.argv[1:]``.

    Returns
    -------
    int
        Process exit code: 0 when the batch succeeds, 1 on any failure.
    """
    # parse argument; config file values become argparse defaults so CLI
    # flags always win over the stored configuration.
    cfg = livy.cli.config.load()
    parser = argparse.ArgumentParser(
        prog="livy submit",
        description=__doc__,
    )
    parser.add_argument(
        "script",
        help="Path to the script that contains the application to be executed",
    )
    parser.add_argument(
        "args",
        nargs="*",
        help="Arguments for the task script",
    )
    parser.add_argument(
        "--class-name",
        metavar="COM.EXAMPLE.FOO",
        help="Application Java/Spark main class (for Java/Scala task)",
    )
    parser.add_argument(
        "--jars",
        nargs="+",
        metavar="FOO.JAR",
        help="Java dependencies to be used in this batch",
    )
    parser.add_argument(
        "--py-files",
        nargs="+",
        metavar="FOO.ZIP",
        help="Python dependencies to be used in this batch",
    )
    parser.add_argument(
        "--files",
        nargs="+",
        metavar="FOO.TXT",
        help="Files to be used in this batch",
    )
    parser.add_argument(
        "--archives",
        nargs="+",
        metavar="FOO.TAR",
        help="Archives to be used in this batch",
    )
    parser.add_argument(
        "--queue-name",
        metavar="DEFAULT",
        help="The name of the YARN queue to which submitted",
    )
    parser.add_argument(
        "--session-name",
        metavar="HELLO",
        help="The session name to execute this batch",
    )
    group = parser.add_argument_group("pre-submit actions")
    group.add_argument(
        "--on-pre-submit",
        metavar="PLUG",
        nargs="+",
        default=cfg.submit.pre_submit,
        help="Run plugin(s) before submit",
    )
    group = parser.add_argument_group("livy server configuration")
    group.add_argument(
        "--api-url",
        required=cfg.root.api_url is None,
        default=cfg.root.api_url,
        help="Base-URL for Livy API server",
    )
    group.add_argument(
        "--driver-memory",
        metavar="10G",
        default=cfg.submit.driver_memory,
        type=argmem,
        help="Amount of memory to use for the driver process.",
    )
    group.add_argument(
        "--driver-cores",
        metavar="N",
        default=cfg.submit.driver_cores,
        type=int,
        help="Number of cores to use for the driver process.",
    )
    group.add_argument(
        "--executor-memory",
        metavar="10G",
        default=cfg.submit.executor_memory,
        type=argmem,
        help="Amount of memory to use for the executor process.",
    )
    group.add_argument(
        "--executor-cores",
        metavar="N",
        default=cfg.submit.executor_cores,
        type=int,
        help="Number of cores to use for each executor.",
    )
    group.add_argument(
        "--num-executors",
        metavar="N",
        default=cfg.submit.num_executors,
        type=int,
        help="Number of executors to launch for this batch.",
    )
    group.add_argument(
        "--spark-conf",
        metavar="CONF_NAME=VALUE",
        nargs="+",
        default=cfg.submit.spark_conf,
        type=argkvpair,
        help="Spark configuration properties.",
    )
    group = parser.add_argument_group("post-submit actions")
    g = group.add_mutually_exclusive_group()
    g.set_defaults(watch_log=cfg.submit.watch_log)
    g.add_argument(
        "--watch-log",
        dest="watch_log",
        action="store_true",
        help="Watching for logs until it is finished",
    )
    g.add_argument(
        "--no-watch-log",
        dest="watch_log",
        action="store_false",
        help="Not to watch for logs. Only submit the task and quit.",
    )
    group = parser.add_argument_group("after-task-finish actions")
    group.add_argument(
        "--on-task-success",
        metavar="PLUG",
        nargs="+",
        default=cfg.submit.task_success,
        help="Run plugin(s) on task is finished and success",
    )
    group.add_argument(
        "--on-task-failed",
        metavar="PLUG",
        nargs="+",
        default=cfg.submit.task_fail,
        help="Run plugin(s) on task is ended and failed",
    )
    group.add_argument(
        "--on-task-ended",
        metavar="PLUG",
        nargs="+",
        default=cfg.submit.task_fail,
        help="Run plugin(s) on task is ended and ended and regardless to its state",
    )
    livy.cli.logging.setup_argparse(parser)
    args: PreSubmitArguments = parser.parse_args(argv)
    # time stamping
    # NOTE(review): `tzlocal` is computed but never used in this
    # function -- candidate for removal, or for use when formatting the
    # timestamps below.
    tzlocal = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
    args.time_prog_start = now()
    # setup logger
    livy.cli.logging.init(args)
    console = livy.cli.logging.get("livy-read-log.main")
    console.info("Submission task started")
    # run pre-submit actions; hooks may rewrite `args` before submission
    args: TaskEndedArguments = run_hook(console, "PRE-SUBMIT", args, args.on_pre_submit)
    # check server state
    client = livy.LivyClient(url=args.api_url)
    try:
        client.check(False)
    except livy.RequestError as e:
        console.error("Failed to connect to server: %s", e)
        return 1
    # build request payload; falsy values (None, empty list/str, 0) are
    # dropped so the server applies its own defaults for them.
    submit_parameter = {}
    for key, value in [
        ("file", args.script),
        ("class_name", args.class_name),
        ("args", args.args),
        ("jars", args.jars),
        ("py_files", args.py_files),
        ("files", args.files),
        ("driver_memory", args.driver_memory),
        ("driver_cores", args.driver_cores),
        ("executor_memory", args.executor_memory),
        ("executor_cores", args.executor_cores),
        ("num_executors", args.num_executors),
        ("archives", args.archives),
        ("queue", args.queue_name),
        ("name", args.session_name),
        ("conf", {k: v for k, v in args.spark_conf}),
    ]:
        if value:
            submit_parameter[key] = value
    console.info(
        "Creating batch with parameters: %s",
        json.dumps(submit_parameter, indent=2),
    )
    # timing
    args.time_task_submit = now()
    console.debug("Batch submission time= %s", args.time_task_submit)
    # submit
    try:
        submit_resp = client.create_batch(**submit_parameter)
    except livy.RequestError as e:
        console.error("Failed to connect to server: %s", e)
        return 1
    console.info("Server response: %s", json.dumps(submit_resp, indent=2))
    # batch id is required for all follow-up queries; bail out if the
    # server response does not carry a sane one.
    args.batch_id = submit_resp.get("id", None)
    if not isinstance(args.batch_id, int) or args.batch_id < 0:
        console.error("Failed to get batch id. Something goes wrong.")
        return 1
    # watch log
    if not args.watch_log:
        console.info("Batch %d created.", args.batch_id)
        return 0
    console.info("Start reading logs of batch %d", args.batch_id)
    reader = livy.LivyBatchLogReader(client, args.batch_id)
    try:
        reader.read_until_finish()
    except livy.RequestError as e:
        console.error(
            "Error occurs during read log. HTTP code=%d, Reason=%s", e.code, e.reason
        )
        return 1
    except KeyboardInterrupt:
        # Ctrl-C only stops the local watcher; the remote batch keeps
        # running, so print the follow-up commands for the user.
        msg_args = args.batch_id, args.api_url # just for shorten
        console.warning("Keyboard interrupt. Local livy-submit process terminating.")
        console.warning("Your task might be still running on the server.")
        console.warning("For reading the logs, call:")
        console.warning("    livy read-log %d --api-url %s", *msg_args)
        console.warning("For stopping the task, call:")
        console.warning("    livy kill %d --api-url %s", *msg_args)
        return 1
    # timing
    args.time_task_ended = now()
    console.debug("Batch finishing time= %s", args.time_task_ended)
    # get ending state
    try:
        args.state = client.get_batch_state(args.batch_id)
    except livy.RequestError:
        console.error("Error during query batch ending state.")
        return 1
    if args.state == "success":
        exit_code = 0
        state_level = logging.INFO
    else:
        exit_code = 1
        state_level = logging.WARNING
    console.log(state_level, "Batch#%d ended with state= %s", args.batch_id, args.state)
    elapsed_time = args.time_task_ended - args.time_task_submit
    console.info(
        "Batch execution time: %dsec (%s)",
        elapsed_time.total_seconds(),
        human_readable_timeperiod(elapsed_time),
    )
    # run task-end actions: success/failed hooks first, then the
    # unconditional task-ended hooks.
    if args.state == "success":
        args = run_hook(console, "TASK-SUCCESS", args, args.on_task_success)
    else:
        args = run_hook(console, "TASK-FAILED", args, args.on_task_failed)
    args = run_hook(console, "TASK", args, args.on_task_ended)
    return exit_code
def argmem(s: str):
    """Argparse type hook validating a memory-size string.

    Accepts digits followed by a ``g``/``m`` unit and an optional ``b``,
    case-insensitively (e.g. ``'10G'``, ``'512mb'``) and returns the
    string unchanged.

    Raises
    ------
    argparse.ArgumentTypeError
        If the string does not match the expected format.
    """
    # fullmatch: the whole token must be <digits><g|m>[b]
    if not re.fullmatch(r"\d+[gm]b?", s, re.RegexFlag.IGNORECASE):
        # fix: error message previously read "please specific ..."
        raise argparse.ArgumentTypeError(
            "please specify memory size in format '1234mb'"
        )
    return s
def argkvpair(val):
    """Argparse type hook: split one ``KEY=VALUE`` token on its first
    ``=`` and return the ``(key, value)`` tuple.  The value part may
    itself contain ``=`` characters; a token without ``=`` raises
    ``ValueError`` (which argparse reports as an invalid argument).
    """
    key, value = val.split("=", 1)
    return key, value
def run_hook(
    logger: logging.Logger,
    identifier: str,
    args: argparse.Namespace,
    actions: typing.List[str],
) -> argparse.Namespace:
    """Run the named hook plugins in order, threading ``args`` through.

    Each entry of ``actions`` is a ``module:function`` spec resolved via
    :func:`get_function`.  Every hook is invoked as
    ``func(identifier, args)`` and must return an
    :class:`argparse.Namespace`, which becomes the input to the next
    hook.  Any failure (unresolvable spec, exception raised by the hook,
    or a non-namespace return value) aborts the process via ``exit(1)``.

    Parameters
    ----------
    logger : logging.Logger
        Logger used for progress and error reporting.
    identifier : str
        Hook stage name (e.g. ``"PRE-SUBMIT"``), passed to each hook.
    args : argparse.Namespace
        Current argument namespace; may be replaced by each hook.
    actions : typing.List[str]
        Hook specs in ``module:function`` form.

    Returns
    -------
    argparse.Namespace
        The namespace produced by the last hook (``args`` unchanged when
        ``actions`` is empty).
    """
    for action_name in actions:
        logger.info("Run %s action %s", identifier.lower(), action_name)
        func = get_function(action_name)
        if not func:
            logger.warning("Failed to get action function instance. Stop process.")
            exit(1)
        try:
            args = func(identifier, args)
        except Exception:
            # fix: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; catch only real errors.
            logger.exception(
                "Error occurs during %s action. Stop process.", identifier.lower()
            )
            exit(1)
        if not isinstance(args, argparse.Namespace):
            logger.error(
                "Expect namespace object from %s's return value. Got %s",
                action_name,
                type(args).__name__,
            )
            exit(1)
    return args
def get_function(name: str) -> typing.Callable:
    """Resolve a ``module:func`` spec string to the callable it names.

    Returns ``None`` (after logging the reason) when the spec is
    malformed, the module cannot be imported, or the attribute does not
    exist in the module.
    """
    m = re.fullmatch(r"([\w.]+):(\w+)", name, re.RegexFlag.I)
    if not m:
        logger.error("Failed to resolve function name: %s", name)
        # fix: message previously read "Please specific it ..."
        logger.error("Please specify it in module:func format")
        return None
    module_name, func_name = m.groups()
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        logger.error("Failed to find module: %s", module_name)
        return None
    try:
        return getattr(module, func_name)
    except AttributeError:
        logger.error("Failed to find function %s in %s", func_name, module_name)
        return None
def human_readable_timeperiod(period: datetime.timedelta):
    """Convert a :class:`datetime.timedelta` into a compact human
    readable string such as ``"1d 2h 3m 4s"``.

    Zero-valued units are omitted.  A zero-length (or sub-second) period
    now yields ``"0s"`` instead of an empty string, so callers always
    receive a printable value.
    """
    total_seconds = int(period.total_seconds())
    terms = []
    days = total_seconds // 86400
    if days:
        terms.append(f"{days}d")
    hours = total_seconds // 3600 % 24
    if hours:
        terms.append(f"{hours}h")
    minutes = total_seconds // 60 % 60
    if minutes:
        terms.append(f"{minutes}m")
    seconds = total_seconds % 60
    if seconds:
        terms.append(f"{seconds}s")
    if not terms:
        # robustness: sub-second periods previously produced ""
        return "0s"
    return " ".join(terms)
# Script entry point: run the CLI and propagate its integer status code
# to the shell.
if __name__ == "__main__":
    exit(main())
| 27.823913 | 88 | 0.612313 |
c4affc4f83188559537e4d8ac9659aa6dc764f0f | 1,283 | py | Python | setup.py | nickyfoto/premoji | c8da5f713fe90175923e47212905b7dd39825f92 | [
"MIT"
] | null | null | null | setup.py | nickyfoto/premoji | c8da5f713fe90175923e47212905b7dd39825f92 | [
"MIT"
] | null | null | null | setup.py | nickyfoto/premoji | c8da5f713fe90175923e47212905b7dd39825f92 | [
"MIT"
] | null | null | null | """Minimal setup file for learn project."""
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
setup(
name = 'premoji',
version = '0.1.4',
description = 'predict emoji on given text',
long_description = README,
long_description_content_type = "text/markdown",
license = "MIT",
author = 'Qiang Huang',
author_email = 'nickyfoto@gmail.com',
url = 'https://macworks.io',
download_url = 'https://github.com/nickyfoto/premoji/archive/v0.1.3-alpha.tar.gz',
packages = find_packages(where='src'),
package_dir = {'': 'src'},
include_package_data=True,
install_requires = [
'numpy',
'scikit-learn',
],
classifiers = [
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3.7',
]
)
| 32.897436 | 150 | 0.639127 |
c4b0600fb893dd2bf03b8ad35d0df6623f45ef39 | 272 | py | Python | 02-current-time.py | KeithWilliamsGMIT/Emerging-Technologies-Python-Fundamentals | 5eae96818baa3bf10e091bfe39498efe237c9e7f | [
"MIT"
] | null | null | null | 02-current-time.py | KeithWilliamsGMIT/Emerging-Technologies-Python-Fundamentals | 5eae96818baa3bf10e091bfe39498efe237c9e7f | [
"MIT"
] | null | null | null | 02-current-time.py | KeithWilliamsGMIT/Emerging-Technologies-Python-Fundamentals | 5eae96818baa3bf10e091bfe39498efe237c9e7f | [
"MIT"
] | null | null | null | # Author: Keith Williams
# Date: 21/09/2017
from time import strftime
# This line prints the current date and time to the console in the format 01-10-2017 13:15:30.
# strftime must be imported from the time package before being used.
print(strftime("%d-%m-%Y %H:%M:%S")) | 34 | 94 | 0.731618 |
c4b06868e47596162a61d75f212de04777f1a9b9 | 160 | py | Python | git_management/clone.py | afsantaliestra/scripts | 06f9a1c269749570061a42c5c5b586944e8c9a45 | [
"MIT"
] | null | null | null | git_management/clone.py | afsantaliestra/scripts | 06f9a1c269749570061a42c5c5b586944e8c9a45 | [
"MIT"
] | null | null | null | git_management/clone.py | afsantaliestra/scripts | 06f9a1c269749570061a42c5c5b586944e8c9a45 | [
"MIT"
] | null | null | null | import os
# Read repository URLs, one per line, from list.txt and clone each.
filepath = 'list.txt'
with open(filepath) as fp:
    # walrus loop: readline() returns '' at EOF, which ends the loop
    while line := fp.readline():
        line = line.strip()
        # NOTE(review): interpolating a file-supplied line into a shell
        # command is a command-injection risk; prefer
        # subprocess.run(['git', 'clone', line], check=True).
        os.system(f'git clone {line}')
| 20 | 38 | 0.6 |
c4b069d181b9ce2a2a43be231c32efdbc6f68ff6 | 2,453 | py | Python | train.py | amansoni/sequential-decision-problem-algorithms | 73276dc954a8507f67f84d260b2923703b8add10 | [
"MIT"
] | null | null | null | train.py | amansoni/sequential-decision-problem-algorithms | 73276dc954a8507f67f84d260b2923703b8add10 | [
"MIT"
] | null | null | null | train.py | amansoni/sequential-decision-problem-algorithms | 73276dc954a8507f67f84d260b2923703b8add10 | [
"MIT"
] | null | null | null | import argparse
import os
import sys
parser = argparse.ArgumentParser(description="Run commands")
parser.add_argument('-w', '--num-workers', default=1, type=int,
help="Number of workers")
parser.add_argument('-r', '--remotes', default=None,
help='The address of pre-existing VNC servers and '
'rewarders to use (e.g. -r vnc://localhost:5900+15900,vnc://localhost:5901+15901).')
parser.add_argument('-e', '--env-id', type=str, default="PongDeterministic-v3",
help="Environment id")
parser.add_argument('-l', '--log-dir', type=str, default="/tmp/pong",
help="Log directory path")
if __name__ == "__main__":
run()
| 35.550725 | 109 | 0.591928 |
c4b0a273d439a49bd4401c9381668556a506e643 | 8,116 | py | Python | MoleculeACE/benchmark/evaluation/results.py | molML/MoleculeACE | e831d2371a9b89f4853a03d5c04cc4bf59f64ee0 | [
"MIT"
] | 9 | 2022-03-26T17:36:03.000Z | 2022-03-29T19:50:26.000Z | MoleculeACE/benchmark/evaluation/results.py | molML/MoleculeACE | e831d2371a9b89f4853a03d5c04cc4bf59f64ee0 | [
"MIT"
] | null | null | null | MoleculeACE/benchmark/evaluation/results.py | molML/MoleculeACE | e831d2371a9b89f4853a03d5c04cc4bf59f64ee0 | [
"MIT"
] | null | null | null | """
Class that holds the results: used for evaluating model performance on activity cliff compounds
Derek van Tilborg, Eindhoven University of Technology, March 2022
"""
import os
import numpy as np
from MoleculeACE.benchmark.utils.const import Algorithms
from .metrics import calc_rmse, calc_q2f3
| 48.891566 | 120 | 0.638369 |
c4b0cff3a089b5a105c23dc4c0935c7ecd2fb0ae | 70 | py | Python | checkout/orders/__init__.py | accelero-cloud/tutorials | 9a9580e60bc216bf45ec0011f6d9b6b14d5a8d03 | [
"Apache-2.0"
] | 2 | 2019-08-09T16:15:40.000Z | 2020-01-12T09:46:28.000Z | checkout/orders/__init__.py | accelero-cloud/tutorials | 9a9580e60bc216bf45ec0011f6d9b6b14d5a8d03 | [
"Apache-2.0"
] | 2 | 2021-03-31T18:48:41.000Z | 2021-12-13T19:49:46.000Z | checkout/orders/__init__.py | accelero-cloud/tutorials | 9a9580e60bc216bf45ec0011f6d9b6b14d5a8d03 | [
"Apache-2.0"
] | null | null | null | from checkout.orders.order_service import Order, AuthorisationRequest
| 35 | 69 | 0.885714 |
c4b0d7f117651d7395b65de65e764c7fea1c1e3d | 87 | py | Python | hiisi/__init__.py | ritvje/hiisi | 56f8abb3013296172c8c2919a33519856a903a81 | [
"MIT"
] | null | null | null | hiisi/__init__.py | ritvje/hiisi | 56f8abb3013296172c8c2919a33519856a903a81 | [
"MIT"
] | null | null | null | hiisi/__init__.py | ritvje/hiisi | 56f8abb3013296172c8c2919a33519856a903a81 | [
"MIT"
] | null | null | null | from .hiisi import HiisiHDF
from .odim import OdimPVOL, OdimCOMP
__version__ = "0.0.6"
| 21.75 | 36 | 0.770115 |
c4b163c2e1d8ffe2281a2938faf90b326b80b931 | 34,527 | py | Python | src/hangar/repository.py | jjmachan/hangar-py | c1cfa2f8f997d7d0f114e015aea333829e029451 | [
"Apache-2.0"
] | null | null | null | src/hangar/repository.py | jjmachan/hangar-py | c1cfa2f8f997d7d0f114e015aea333829e029451 | [
"Apache-2.0"
] | null | null | null | src/hangar/repository.py | jjmachan/hangar-py | c1cfa2f8f997d7d0f114e015aea333829e029451 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
import weakref
import warnings
from typing import Union, Optional, List
from .merger import select_merge_algorithm
from .constants import DIR_HANGAR
from .remotes import Remotes
from .context import Environments
from .diagnostics import ecosystem, integrity
from .records import heads, parsing, summarize, vcompat, commiting
from .checkout import ReaderCheckout, WriterCheckout
from .diff import DiffAndConflicts, ReaderUserDiff
from .utils import (
is_valid_directory_path,
is_suitable_user_key,
is_ascii,
folder_size,
format_bytes
)
def clone(self, user_name: str, user_email: str, remote_address: str,
          *, remove_old: bool = False) -> str:
    """Download a remote repository to the local disk.

    The clone method implemented here is very similar to a `git clone`
    operation. This method will pull all commit records, history, and data
    which are parents of the remote's `master` branch head commit. If a
    :class:`Repository` exists at the specified directory,
    the operation will fail.

    Parameters
    ----------
    user_name : str
        Name of the person who will make commits to the repository. This
        information is recorded permanently in the commit records.
    user_email : str
        Email address of the repository user. This information is recorded
        permanently in any commits created.
    remote_address : str
        location where the
        :class:`hangar.remote.server.HangarServer` process is
        running and accessible by the clone user.
    remove_old : bool, optional, kwarg only
        DANGER! DEVELOPMENT USE ONLY! If enabled, a
        :class:`hangar.repository.Repository` existing on disk at the same
        path as the requested clone location will be completely removed and
        replaced with the newly cloned repo. (the default is False, which
        will not modify any contents on disk and which will refuse to create
        a repository at a given location if one already exists there.)

    Returns
    -------
    str
        Name of the master branch for the newly cloned repository.
    """
    # A local repository must exist before any remote refs/data can be
    # stored; init also records the committer identity permanently.
    self.init(user_name=user_name, user_email=user_email, remove_old=remove_old)
    # Register the server as `origin` and fetch everything reachable
    # from its `master` branch.
    self._remote.add(name='origin', address=remote_address)
    branch = self._remote.fetch(remote='origin', branch='master')
    HEAD = heads.get_branch_head_commit(self._env.branchenv, branch_name=branch)
    # Point the local `master` branch at the fetched head commit.
    heads.set_branch_head_commit(self._env.branchenv, 'master', HEAD)
    # Open a write checkout and reset the staging area to the new HEAD;
    # any UserWarning emitted while doing so is suppressed since the
    # reset is intentional here.
    with warnings.catch_warnings(record=False):
        warnings.simplefilter('ignore', category=UserWarning)
        co = self.checkout(write=True, branch='master')
        co.reset_staging_area()
        co.close()
    return 'master'
def init(self,
         user_name: str,
         user_email: str,
         *,
         remove_old: bool = False) -> str:
    """Initialize a Hangar repository at the configured directory path.

    Must be called before any checkout can be performed.

    Parameters
    ----------
    user_name : str
        Name of the repository user account.
    user_email : str
        Email address of the repository user account.
    remove_old : bool, kwarg-only
        DEVELOPER USE ONLY -- remove and reinitialize a Hangar
        repository at the given path. Default = False.

    Returns
    -------
    str
        Full directory path where the Hangar repository was initialized
        on disk.
    """
    # Environment layer performs the actual on-disk setup; normalize the
    # resulting path object to a plain string for callers.
    return str(self._env.init_repo(user_name=user_name,
                                   user_email=user_email,
                                   remove_old=remove_old))
def log(self,
        branch: str = None,
        commit: str = None,
        *,
        return_contents: bool = False,
        show_time: bool = False,
        show_user: bool = False) -> Optional[dict]:
    """Display a pretty printed commit log graph to the terminal.

    .. note::

        For programmatic access, set ``return_contents`` to True to get
        the relevant commit specifications back as dictionary elements
        instead of printed output.

    Parameters
    ----------
    branch : str, optional
        Name of the branch to start the log process from.
        (Default value = None)
    commit : str, optional
        Commit hash to start the log process from. (Default value = None)
    return_contents : bool, optional, kwarg only
        If True, return the commit graph specifications in a dictionary
        suitable for programmatic access/evaluation.
    show_time : bool, optional, kwarg only
        If True and ``return_contents`` is False, show the time of each
        commit on the printed log graph.
    show_user : bool, optional, kwarg only
        If True and ``return_contents`` is False, show the committer of
        each commit on the printed log graph.

    Returns
    -------
    Optional[dict]
        Dict containing the commit ancestor graph and all specifications.
    """
    self.__verify_repo_initialized()
    # All rendering / collection logic lives in the summarize module.
    return summarize.log(branchenv=self._env.branchenv,
                         refenv=self._env.refenv,
                         branch=branch,
                         commit=commit,
                         return_contents=return_contents,
                         show_time=show_time,
                         show_user=show_user)
def summary(self, *, branch: str = '', commit: str = '') -> None:
    """Print a summary of the repository contents to the terminal.

    Parameters
    ----------
    branch : str, optional
        Specific branch name whose head commit will be used as the
        summary point. (Default value = '')
    commit : str, optional
        Specific commit hash which should be used as the summary point.
        (Default value = '')
    """
    self.__verify_repo_initialized()
    # summarize.summary returns a StringIO-like buffer; dump its text.
    buffer = summarize.summary(self._env, branch=branch, commit=commit)
    print(buffer.getvalue())
def _details(self, *, line_limit=100, line_length=100) -> None:  # pragma: no cover
    """DEVELOPER USE ONLY: Dump some details about the underlying db structure to disk.
    """
    def _dump(env):
        # One formatted details report per lmdb environment.
        print(summarize.details(
            env, line_limit=line_limit, line_length=line_length).getvalue())

    _dump(self._env.branchenv)
    _dump(self._env.refenv)
    _dump(self._env.hashenv)
    _dump(self._env.stageenv)
    _dump(self._env.stagehashenv)
    for commitenv in self._env.cmtenv.values():
        _dump(commitenv)
    return
def _ecosystem_details(self) -> dict:
    """DEVELOPER USE ONLY: log and return package versions on the system.
    """
    # Delegates entirely to the ecosystem module's version probe.
    return ecosystem.get_versions()
def diff(self, master: str, dev: str) -> DiffAndConflicts:
    """Calculate diff between master and dev branch/commits.

    Diff is calculated as if we are to merge "dev" into "master".

    Parameters
    ----------
    master: str
        branch name or commit hash digest to use as the "master" which
        changes made in "dev" are compared to.
    dev: str
        branch name or commit hash digest to use as the "dev"
        (ie. "feature") branch which changes have been made to
        which are to be compared to the contents of "master".

    Returns
    -------
    DiffAndConflicts
        Standard output diff structure.

    Raises
    ------
    ValueError
        If either ``master`` or ``dev`` is neither an existing branch
        name nor a commit hash present in the repository history.
    """
    current_branches = self.list_branches()

    def _resolve_to_digest(spec: str, label: str) -> str:
        # Branch names are standardized into their HEAD commit digest;
        # anything else must already be a commit hash known to history.
        if spec in current_branches:
            return heads.get_branch_head_commit(
                branchenv=self._env.branchenv, branch_name=spec)
        cmtExists = commiting.check_commit_hash_in_history(
            refenv=self._env.refenv, commit_hash=spec)
        if not cmtExists:
            raise ValueError(f'`{label}` {spec} is not valid branch/commit.')
        return spec

    masterHEAD = _resolve_to_digest(master, 'master')
    devHEAD = _resolve_to_digest(dev, 'dev')
    # create differ object and generate results...
    diff = ReaderUserDiff(commit_hash=masterHEAD,
                          branchenv=self._env.branchenv,
                          refenv=self._env.refenv)
    return diff.commit(dev_commit_hash=devHEAD)
def merge(self, message: str, master_branch: str, dev_branch: str) -> str:
    """Perform a merge of the changes made on two branches.

    Parameters
    ----------
    message: str
        Commit message to use for this merge.
    master_branch : str
        name of the master branch to merge into
    dev_branch : str
        name of the dev/feature branch to merge

    Returns
    -------
    str
        Hash of the commit which is written if possible.
    """
    self.__verify_repo_initialized()
    # Merge strategy (fast-forward vs. three-way) is selected internally.
    return select_merge_algorithm(
        message=message,
        branchenv=self._env.branchenv,
        stageenv=self._env.stageenv,
        refenv=self._env.refenv,
        stagehashenv=self._env.stagehashenv,
        master_branch=master_branch,
        dev_branch=dev_branch,
        repo_path=self._repo_path)
def create_branch(self, name: str, base_commit: str = None) -> heads.BranchHead:
    """Create a branch with the provided name from a certain commit.

    If no base commit hash is specified, the current writer branch
    ``HEAD`` commit is used as the ``base_commit`` hash for the branch.
    Creating a branch does not create a checkout object; to interact
    with the data, use the repository's checkout method.

    >>> from hangar import Repository
    >>> repo = Repository('foo/path/to/dir')
    >>> repo.create_branch('testbranch')
    BranchHead(name='testbranch', digest='b66b...a8cc')
    >>> repo.list_branches()
    ['master', 'testbranch']

    Parameters
    ----------
    name : str
        name to assign to the new branch
    base_commit : str, optional
        commit hash to start the branch root at. If not specified, the
        writer branch ``HEAD`` commit at the time of execution will be
        used. Defaults to None.

    Returns
    -------
    :class:`~.heads.BranchHead`
        NamedTuple[str, str] with fields for ``name`` and ``digest`` of
        the branch created (if the operation was successful).

    Raises
    ------
    ValueError
        If the branch name contains characters outside of alpha-numeric
        ascii characters and ".", "_", "-" (no whitespace), or is > 64
        characters, or if the branch already exists.
    RuntimeError
        If the repository does not have at-least one commit on the
        "default" (ie. ``master``) branch.
    """
    self.__verify_repo_initialized()
    # Reject anything that is not plain ascii or fails the user-key rules.
    if not (is_ascii(name) and is_suitable_user_key(name)):
        raise ValueError(
            f'Branch name provided: {name} invalid. Must contain only alpha-numeric '
            f'or "." "_" "-" ascii characters. And be <= 64 Characters') from None
    return heads.create_branch(
        branchenv=self._env.branchenv,
        name=name,
        base_commit=base_commit)
def remove_branch(self, name: str, *, force_delete: bool = False) -> heads.BranchHead:
    """Permanently delete a branch pointer from the repository history.

    Because a branch is just the name attached to the HEAD commit of a
    historical path, by default this method refuses (raises) unless that
    ``HEAD`` is reachable as an ancestor (or twin) of another branch
    which is still alive -- in which case all changes are already merged
    and recorded and the pointer can be dropped without risk to
    historical provenance.

    An un-merged branch may still be removed by explicitly passing the
    keyword-only argument ``force_delete=True``.

    >>> from hangar import Repository
    >>> repo = Repository('foo/path/to/dir')
    >>> repo.create_branch('first-testbranch')
    BranchHead(name='first-testbranch', digest='9785...56da')
    >>> repo.remove_branch('first-testbranch', force_delete=True)
    BranchHead(name='first-testbranch', digest='9785...56da')

    Note that while this method handles all safety checks and argument
    validation, *no commit refs along the history are deleted from the
    Hangar database*. Even if the latest commits leading up to a deleted
    branch ``HEAD`` become orphaned (unreachable), the records (and all
    data added in those commits) remain on disk. Should an accidental
    forced deletion occur, a new branch pointing at the same commit can
    be recreated via :meth:`create_branch` if the digest is known.

    Parameters
    ----------
    name : str
        name of the branch which should be deleted. This branch must
        exist, and cannot refer to a remote tracked branch
        (ie. origin/devbranch); see the exception descriptions for the
        other conditions determining validity of the argument.
    force_delete : bool, optional
        If True, remove the branch pointer even if the changes are
        un-merged in other branch histories. May result in orphaned
        commits which may be time-consuming to recover if needed.
        By default False.

    Returns
    -------
    :class:`~.heads.BranchHead`
        NamedTuple[str, str] with fields for `name` and `digest` of the
        branch pointer deleted.

    Raises
    ------
    ValueError
        If a branch with the provided name does not exist locally.
    PermissionError
        If removal would leave the repository with zero local branches;
        if a write enabled checkout is holding the writer-lock at the
        time of this call; or if the branch to be removed was the last
        used in a write-enabled checkout whose contents form the base of
        the staging area.
    RuntimeError
        If the branch has not been fully merged into other branch
        histories, and ``force_delete`` option is not ``True``.
    """
    self.__verify_repo_initialized()
    # All safety / reachability checks are handled by the heads module.
    return heads.remove_branch(branchenv=self._env.branchenv,
                               refenv=self._env.refenv,
                               name=name,
                               force_delete=force_delete)
def list_branches(self) -> List[str]:
    """List all branch names created in the repository.

    Returns
    -------
    List[str]
        the branch names recorded in the repository
    """
    self.__verify_repo_initialized()
    return heads.get_branch_names(self._env.branchenv)
def verify_repo_integrity(self) -> bool:
    """Verify the integrity of the repository data on disk.

    Runs a full cryptographic verification of repository contents in
    order to ensure the integrity of all data and history recorded on
    disk.

    .. note::

        This proof may take a significant amount of time for
        repositories which (1) store significant quantities of data on
        disk, or (2) have a very large number of commits in their
        history. Every piece of data in the repository's history must be
        read from disk, hashed, and compared to the expected value
        (each data piece is verified only once, regardless of how many
        commits reference it), and every commit ref must be
        decompressed, parsed, re-serialized and hashed for comparison --
        a non-trivial cost for histories with thousands of commits.
        Many additional checks are performed alongside these as part of
        the full verification run.

    Returns
    -------
    bool
        True if integrity verification is successful, otherwise False;
        in this case, a message describing the offending component will
        be printed to stdout.
    """
    self.__verify_repo_initialized()
    # Hold the writer lock for the duration so no process can mutate the
    # repository while it is being verified; always release it.
    lock_holder = 'VERIFY_PROCESS'
    heads.acquire_writer_lock(self._env.branchenv, lock_holder)
    try:
        integrity.run_verification(
            branchenv=self._env.branchenv,
            hashenv=self._env.hashenv,
            refenv=self._env.refenv,
            repo_path=self._env.repo_path)
    finally:
        heads.release_writer_lock(self._env.branchenv, lock_holder)
    return True
def force_release_writer_lock(self) -> bool:
    """Force release the lock left behind by an unclosed writer-checkout.

    .. warning::

        *NEVER USE THIS METHOD IF A WRITER PROCESS IS CURRENTLY ACTIVE.*
        At the time of writing, the implications of improper/malicious
        use are not understood, and there is a risk of undefined
        behavior or (potentially) data corruption.

    The responsibility to close a write-enabled checkout currently rests
    entirely on the user. If ``close()`` is not called before the
    program terminates, a new checkout with ``write=True`` will fail,
    and the lock can only be released via a call to this method.

    .. note::

        This entire mechanism is subject to review/replacement in the
        future.

    Returns
    -------
    bool
        if the operation was successful.
    """
    self.__verify_repo_initialized()
    # The sentinel value authorizes an unconditional lock release.
    sentinel = parsing.repo_writer_lock_force_release_sentinal()
    return heads.release_writer_lock(self._env.branchenv, sentinel)
| 41.300239 | 95 | 0.610363 |
c4b186ebba7523cfef5343184718edecec88a7e6 | 10,731 | py | Python | kronos/utils.py | jtaghiyar/kronos | 6cc3665f43b5868ad98def762c533eb74dd501e1 | [
"MIT"
] | 17 | 2016-01-10T23:54:06.000Z | 2021-01-30T09:36:19.000Z | kronos/utils.py | jtaghiyar/kronos | 6cc3665f43b5868ad98def762c533eb74dd501e1 | [
"MIT"
] | 3 | 2016-10-11T02:38:01.000Z | 2017-03-14T03:27:34.000Z | kronos/utils.py | jtaghiyar/kronos | 6cc3665f43b5868ad98def762c533eb74dd501e1 | [
"MIT"
] | 6 | 2015-12-10T21:52:31.000Z | 2019-10-07T18:57:57.000Z | '''
Created on Apr 16, 2014
@author: jtaghiyar
'''
import os
import subprocess as sub
from plumber import Plumber
from job_manager import LocalJobManager
from workflow_manager import WorkFlow
from helpers import trim, make_dir, export_to_environ
| 36.131313 | 96 | 0.620911 |
c4b26e3d04e87e26583a1a633be309fd89f11d9a | 3,047 | py | Python | tests/base.py | the-dotify-project/dotify | f4bbed4f847cdfd073886c384a2d881456a1f7d9 | [
"MIT"
] | 3 | 2021-05-18T12:04:46.000Z | 2021-12-16T14:35:15.000Z | tests/base.py | the-dotify-project/dotify | f4bbed4f847cdfd073886c384a2d881456a1f7d9 | [
"MIT"
] | 38 | 2021-05-13T21:24:20.000Z | 2022-03-14T19:22:27.000Z | tests/base.py | the-dotify-project/dotify | f4bbed4f847cdfd073886c384a2d881456a1f7d9 | [
"MIT"
] | null | null | null | from pathlib import Path
from re import sub
from shutil import rmtree
from unittest import TestCase
from dotify import Dotify, models
| 33.119565 | 83 | 0.643912 |
c4b380ac5b2bec0b07861a3d99e7430566f32546 | 2,724 | py | Python | odoo-13.0/venv/lib/python3.8/site-packages/stdnum/imo.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/venv/lib/python3.8/site-packages/stdnum/imo.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 2 | 2021-06-22T01:34:18.000Z | 2021-06-22T01:40:28.000Z | odoo-13.0/venv/lib/python3.8/site-packages/stdnum/imo.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean, isdigits
def compact(number):
    """Convert the number to the minimal representation.

    Strips the number of any valid separators, removes surrounding
    whitespace, and drops an optional 'IMO' prefix."""
    stripped = clean(number, ' ').upper().strip()
    return stripped[3:] if stripped.startswith('IMO') else stripped
def calc_check_digit(number):
    """Calculate the check digit for the number.

    The first six digits are weighted 7 down to 2 (left to right) and the
    weighted sum is taken modulo 10."""
    total = 0
    for position, digit in enumerate(number[:6]):
        total += int(digit) * (7 - position)
    return str(total % 10)
def validate(number):
    """Check if the number provided is valid. This checks the length and
    check digit."""
    number = compact(number)
    # Order of checks mirrors the exception hierarchy: format first,
    # then length, then checksum.
    if not isdigits(number):
        raise InvalidFormat()
    if len(number) != 7:
        raise InvalidLength()
    if number[-1] != calc_check_digit(number[:6]):
        raise InvalidChecksum()
    return number
def is_valid(number):
    """Check if the number provided is valid. This checks the length and
    check digit."""
    try:
        validate(number)
    except ValidationError:
        return False
    return True
def format(number):
    """Reformat the number to the standard presentation format."""
    # Standard presentation is the compact digits with an 'IMO ' prefix.
    return 'IMO %s' % compact(number)
| 31.674419 | 76 | 0.714391 |
c4b3b6d76efc3c8c72713052f1e8b243b1695f31 | 265 | py | Python | yodl/__init__.py | brunolange/yodl | d9e957cacf1391fce3dfe9ac24e4fb434d14d8b0 | [
"MIT"
] | null | null | null | yodl/__init__.py | brunolange/yodl | d9e957cacf1391fce3dfe9ac24e4fb434d14d8b0 | [
"MIT"
] | null | null | null | yodl/__init__.py | brunolange/yodl | d9e957cacf1391fce3dfe9ac24e4fb434d14d8b0 | [
"MIT"
] | null | null | null | """yodl!
yodl provides a class decorator to build django models
from YAML configuration files
"""
from .decorators import yodl
from .io import yodlify
__author__ = "Bruno Lange"
__email__ = "blangeram@gmail.com"
__license__ = "MIT"
__all__ = ["yodl", "yodlify"]
| 18.928571 | 54 | 0.743396 |
c4b45d589da887df80357b5a791263b44c35a390 | 6,010 | py | Python | main.py | g-w1/hermes | 4c7388c0d75187b79c0c27e4322aa9e79a44666c | [
"MIT"
] | null | null | null | main.py | g-w1/hermes | 4c7388c0d75187b79c0c27e4322aa9e79a44666c | [
"MIT"
] | null | null | null | main.py | g-w1/hermes | 4c7388c0d75187b79c0c27e4322aa9e79a44666c | [
"MIT"
] | null | null | null | """
Usage: hermes install [-dsvV] <pkg>...
hermes -h | --help
hermes --version
Options:
-d, --depends Require dependency installation
-h, --help Display usage and options
-s, --check-sigs Verify package GPG signatures
-v, --verify Verify package checksums
-V, --verbose Display debugging messages
--version Display version
"""
from configure import valid_hermes_config
from configure import valid_pkg_config
from docopt import docopt # MIT License
import os # Standard Library
import requests # Apache License v2.0
import sh # MIT License
import tarfile # Standard Library
# Script entry point: parse CLI args per the docopt usage string above.
# NOTE(review): this is Python 2 syntax (print statements). The functions
# populate_runtime_config() and main_installer() are not defined or
# imported in the visible portion of this file -- presumably defined
# later in the module; confirm before relying on this entry point.
if __name__ == '__main__':
# Parsed options dict keyed by flag/argument name (e.g. '<pkg>', 'install').
cli_args = docopt(__doc__, version='hermes v0.0.1')
print cli_args
# hermes_dir = os.path.dirname(sh.which('hermes'))
hermes_dir = 'hermes'
runtime_config = populate_runtime_config()
print runtime_config
# Placeholder for per-package configuration, populated elsewhere.
pkg_configs = dict()
if cli_args['install']:
print 'Installing ', str(cli_args['<pkg>'])
main_installer(cli_args['<pkg>'])
| 30.820513 | 79 | 0.632612 |
c4b535911ba95193b86d162ae29dd779c08ef75c | 26,047 | py | Python | userbot/plugins/quotes.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | null | null | null | userbot/plugins/quotes.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | 1 | 2022-01-09T11:35:06.000Z | 2022-01-09T11:35:06.000Z | userbot/plugins/quotes.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | null | null | null | import random
import requests
from FIREX.utils import admin_cmd, edit_or_reply, sudo_cmd
from userbot.cmdhelp import CmdHelp
LOVESTR = [
"The best and most beautiful things in this world cannot be seen or even heard, but must be felt with the heart.",
"You know you're in love when you can't fall asleep because reality is finally better than your dreams.",
"Love recognizes no barriers. It jumps hurdles, leaps fences, penetrates walls to arrive at its destination full of hope.",
"Being deeply loved by someone gives you strength, while loving someone deeply gives you courage.",
"The real lover is the man who can thrill you by kissing your forehead or smiling into your eyes or just staring into space.",
"I swear I couldn't love you more than I do right now, and yet I know I will tomorrow.",
"When I saw you I fell in love, and you smiled because you knew it.",
"In all the world, there is no heart for me like yours. / In all the world, there is no love for you like mine.",
"To love or have loved, that is enough. Ask nothing further. There is no other pearl to be found in the dark folds of life.",
"If you live to be a hundred, I want to live to be a hundred minus one day, so I never have to live without you.",
"Some love stories aren't epic novels. Some are short stories. But that doesn't make them any less filled with love.",
"As he read, I fell in love the way you fall asleep: slowly, and then all at once.",
"I've never had a moment's doubt. I love you. I believe in you completely. You are my dearest one. My reason for life.",
"Do I love you? My god, if your love were a grain of sand, mine would be a universe of beaches.",
"I am who I am because of you.",
"I just want you to know that you're very special... and the only reason I'm telling you is that I don't know if anyone else ever has.",
"Remember, we're madly in love, so it's all right to kiss me any time you feel like it.",
"I love you. I knew it the minute I met you.",
"I loved her against reason, against promise, against peace, against hope, against happiness, against all discouragement that could be.",
"I love you not because of who you are, but because of who I am when I am with you.",
]
DHOKA = [
"Humne Unse Wafa Ki, Aur Dil Bhi Gya Toot, Wo Bhi Chinaal Nikli, Uski Maa ki Chut.",
"Dabbe Me Dabba, Dabbe Me Cake ..Tu Chutiya Hai Zara Seesha To Dekh.",
"Kaam Se Kaam Rakhoge Toh Naam Hoga, Randi Log Ke Chakkkar Me Padoge to Naam Badnaam Hoga.",
"Usne Kaha- Mah Lyf maH Rule, Maine Kaha Bhag BSDK , Tujhy Paida Karna hi Teri Baap ki Sabse Badi Vul.",
"Humse Ulajhna Mat, BSDK Teri Hasi Mita Dunga, Muh Me Land Daal Ke..Sari Hosiyaari Gand Se Nikal Dunga.",
"Aur Sunau Bhosdiwalo ..Kya Haal Hai?..Tumhare Sakal Se Zayda Toh Tumhare Gand Laal Hai!!",
"Pata Nhi Kya Kashish Hai Tumhare Mohabbat Me,Jab Bhi Tumhe Yaad Karta Hu Mera Land Khada Ho Jata Hai.",
"Konsa Mohabbat Kounsi Story, Gand Faad Dunga Agr Bolne Aayi Sorry!",
"Naam Banta Hai Risk Se, Chutiya Banta Hai IshQ Se.",
"Sun Be, Ab Tujhy Mere Zindegi Me Ane ka Koi Haq Nhi,,Aur Tu 1 Number Ki Randi Hai Isme KOi Saq Nhi.",
"Beta Tu Chugli Karna Chor De , Hum Ungli Karna Chor Dengy.",
]
METOOSTR = [
"Me too thanks",
"Haha yes, me too",
"Same lol",
"Me irl",
"Same here",
"Haha yes",
"Me rn",
]
GDNOON = [
"`My wishes will always be with you, Morning wish to make you feel fresh, Afternoon wish to accompany you, Evening wish to refresh you, Night wish to comfort you with sleep, Good Afternoon Dear!`",
"`With a deep blue sky over my head and a relaxing wind around me, the only thing I am missing right now is the company of you. I wish you a refreshing afternoon!`",
"`The day has come a halt realizing that I am yet to wish you a great afternoon. My dear, if you thought you were forgotten, youre so wrong. Good afternoon!`",
"`Good afternoon! May the sweet peace be part of your heart today and always and there is life shining through your sigh. May you have much light and peace.`",
"`With you, every part of a day is beautiful. I live every day to love you more than yesterday. Wishing you an enjoyable afternoon my love!`",
"`This bright afternoon sun always reminds me of how you brighten my life with all the happiness. I miss you a lot this afternoon. Have a good time`!",
"`Nature looks quieter and more beautiful at this time of the day! You really dont want to miss the beauty of this time! Wishing you a happy afternoon!`",
"`What a wonderful afternoon to finish you day with! I hope youre having a great time sitting on your balcony, enjoying this afternoon beauty!`",
"`I wish I were with you this time of the day. We hardly have a beautiful afternoon like this nowadays. Wishing you a peaceful afternoon!`",
"`As you prepare yourself to wave goodbye to another wonderful day, I want you to know that, I am thinking of you all the time. Good afternoon!`",
"`This afternoon is here to calm your dog-tired mind after a hectic day. Enjoy the blessings it offers you and be thankful always. Good afternoon!`",
"`The gentle afternoon wind feels like a sweet hug from you. You are in my every thought in this wonderful afternoon. Hope you are enjoying the time!`",
"`Wishing an amazingly good afternoon to the most beautiful soul I have ever met. I hope you are having a good time relaxing and enjoying the beauty of this time!`",
"`Afternoon has come to indicate you, Half of your days work is over, Just another half a day to go, Be brisk and keep enjoying your works, Have a happy noon!`",
"`Mornings are for starting a new work, Afternoons are for remembering, Evenings are for refreshing, Nights are for relaxing, So remember people, who are remembering you, Have a happy noon!`",
"`If you feel tired and sleepy you could use a nap, you will see that it will help you recover your energy and feel much better to finish the day. Have a beautiful afternoon!`",
"`Time to remember sweet persons in your life, I know I will be first on the list, Thanks for that, Good afternoon my dear!`",
"`May this afternoon bring a lot of pleasant surprises for you and fills you heart with infinite joy. Wishing you a very warm and love filled afternoon!`",
"`Good, better, best. Never let it rest. Til your good is better and your better is best. Good Afternoon`",
"`May this beautiful afternoon fill your heart boundless happiness and gives you new hopes to start yours with. May you have lot of fun! Good afternoon dear!`",
"`As the blazing sun slowly starts making its way to the west, I want you to know that this beautiful afternoon is here to bless your life with success and peace. Good afternoon!`",
"`The deep blue sky of this bright afternoon reminds me of the deepness of your heart and the brightness of your soul. May you have a memorable afternoon!`",
"`Your presence could make this afternoon much more pleasurable for me. Your company is what I cherish all the time. Good afternoon!`",
"`A relaxing afternoon wind and the sweet pleasure of your company can make my day complete. Missing you so badly during this time of the day! Good afternoon!`",
"`Wishing you an afternoon experience so sweet and pleasant that feel thankful to be alive today. May you have the best afternoon of your life today!`",
"`My wishes will always be with you, Morning wish to make you feel fresh, Afternoon wish to accompany you, Evening wish to refresh you, Night wish to comfort you with sleep, Good afternoon dear!`",
"`Noon time its time to have a little break, Take time to breathe the warmth of the sun, Who is shining up in between the clouds, Good afternoon!`",
"`You are the cure that I need to take three times a day, in the morning, at the night and in the afternoon. I am missing you a lot right now. Good afternoon!`",
"`I want you when I wake up in the morning, I want you when I go to sleep at night and I want you when I relax under the sun in the afternoon!`",
"`I pray to god that he keeps me close to you so we can enjoy these beautiful afternoons together forever! Wishing you a good time this afternoon!`",
"`You are every bit of special to me just like a relaxing afternoon is special after a toiling noon. Thinking of my special one in this special time of the day!`",
"`May your Good afternoon be light, blessed, enlightened, productive and happy.`",
"`Thinking of you is my most favorite hobby every afternoon. Your love is all I desire in life. Wishing my beloved an amazing afternoon!`",
"`I have tasted things that are so sweet, heard words that are soothing to the soul, but comparing the joy that they both bring, Ill rather choose to see a smile from your cheeks. You are sweet. I love you.`",
"`How I wish the sun could obey me for a second, to stop its scorching ride on my angel. So sorry it will be hot there. Dont worry, the evening will soon come. I love you.`",
"`I want you when I wake up in the morning, I want you when I go to sleep at night and I want you when I relax under the sun in the afternoon!`",
"`With you every day is my lucky day. So lucky being your love and dont know what else to say. Morning night and noon, you make my day.`",
"`Your love is sweeter than what I read in romantic novels and fulfilling more than I see in epic films. I couldnt have been me, without you. Good afternoon honey, I love you!`",
"`No matter what time of the day it is, No matter what I am doing, No matter what is right and what is wrong, I still remember you like this time, Good Afternoon!`",
"`Things are changing. I see everything turning around for my favor. And the last time I checked, its courtesy of your love. 1000 kisses from me to you. I love you dearly and wishing you a very happy noon.`",
"`You are sometimes my greatest weakness, you are sometimes my biggest strength. I do not have a lot of words to say but let you make sure, you make my day, Good Afternoon!`",
"`Every afternoon is to remember the one whom my heart beats for. The one I live and sure can die for. Hope you doing good there my love. Missing your face.`",
"`My love, I hope you are doing well at work and that you remember that I will be waiting for you at home with my arms open to pamper you and give you all my love. I wish you a good afternoon!`",
"`Afternoons like this makes me think about you more. I desire so deeply to be with you in one of these afternoons just to tell you how much I love you. Good afternoon my love!`",
"`My heart craves for your company all the time. A beautiful afternoon like this can be made more enjoyable if you just decide to spend it with me. Good afternoon!`",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"`Get back here!`",
"`Not so fast...`",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"`Jokes on you, I'm everywhere`",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"`Go bother someone else, no-one here cares.`",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
'"Oh, look at me! I\'m so cool, I can run from a bot!" - this person',
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"eviral has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"eviral has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
eviralOSTR = [
"Hi !",
"Ello, gov'nor!",
"Whats crackin?",
"Howdy, howdy ,howdy!",
"hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"hello, sunshine!",
"`Hey, howdy, hi!`",
"Whats kickin, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"`Hey there, freshman!`",
"`I come in peace!`",
"`I come for peace!`",
"Ahoy, matey!",
"`Hi !`",
]
CONGRATULATION = [
"`Congratulations and BRAVO!`",
"`You did it! So proud of you!`",
"`This calls for celebrating! Congratulations!`",
"`I knew it was only a matter of time. Well done!`",
"`Congratulations on your well-deserved success.`",
"`Heartfelt congratulations to you.`",
"`Warmest congratulations on your achievement.`",
"`Congratulations and best wishes for your next adventure!`",
"`So pleased to see you accomplishing great things.`",
"`Feeling so much joy for you today. What an impressive achievement!`",
]
BYESTR = [
"`Nice talking with you`",
"`I've gotta go!`",
"`I've gotta run!`",
"`I've gotta split`",
"`I'm off!`",
"`Great to see you,bye`",
"`See you soon`",
"`Farewell!`",
]
GDNIGHT = [
"`Good night keep your dreams alive`",
"`Night, night, to a dear friend! May you sleep well!`",
"`May the night fill with stars for you. May counting every one, give you contentment!`",
"`Wishing you comfort, happiness, and a good nights sleep!`",
"`Now relax. The day is over. You did your best. And tomorrow youll do better. Good Night!`",
"`Good night to a friend who is the best! Get your forty winks!`",
"`May your pillow be soft, and your rest be long! Good night, friend!`",
"`Let there be no troubles, dear friend! Have a Good Night!`",
"`Rest soundly tonight, friend!`",
"`Have the best nights sleep, friend! Sleep well!`",
"`Have a very, good night, friend! You are wonderful!`",
"`Relaxation is in order for you! Good night, friend!`",
"`Good night. May you have sweet dreams tonight.`",
"`Sleep well, dear friend and have sweet dreams.`",
"`As we wait for a brand new day, good night and have beautiful dreams.`",
"`Dear friend, I wish you a night of peace and bliss. Good night.`",
"`Darkness cannot last forever. Keep the hope alive. Good night.`",
"`By hook or crook you shall have sweet dreams tonight. Have a good night, buddy!`",
"`Good night, my friend. I pray that the good Lord watches over you as you sleep. Sweet dreams.`",
"`Good night, friend! May you be filled with tranquility!`",
"`Wishing you a calm night, friend! I hope it is good!`",
"`Wishing you a night where you can recharge for tomorrow!`",
"`Slumber tonight, good friend, and feel well rested, tomorrow!`",
"`Wishing my good friend relief from a hard days work! Good Night!`",
"`Good night, friend! May you have silence for sleep!`",
"`Sleep tonight, friend and be well! Know that you have done your very best today, and that you will do your very best, tomorrow!`",
"`Friend, you do not hesitate to get things done! Take tonight to relax and do more, tomorrow!`",
"`Friend, I want to remind you that your strong mind has brought you peace, before. May it do that again, tonight! May you hold acknowledgment of this with you!`",
"`Wishing you a calm, night, friend! Hoping everything winds down to your liking and that the following day meets your standards!`",
"`May the darkness of the night cloak you in a sleep that is sound and good! Dear friend, may this feeling carry you through the next day!`",
"`Friend, may the quietude you experience tonight move you to have many more nights like it! May you find your peace and hold on to it!`",
"`May there be no activity for you tonight, friend! May the rest that you have coming to you arrive swiftly! May the activity that you do tomorrow match your pace and be all of your own making!`",
"`When the day is done, friend, may you know that you have done well! When you sleep tonight, friend, may you view all the you hope for, tomorrow!`",
"`When everything is brought to a standstill, friend, I hope that your thoughts are good, as you drift to sleep! May those thoughts remain with you, during all of your days!`",
"`Every day, you encourage me to do new things, friend! May tonights rest bring a new day that overflows with courage and exciting events!`",
]
GDMORNING = [
"`Life is full of uncertainties. But there will always be a sunrise after every sunset. Good morning!`",
"`It doesnt matter how bad was your yesterday. Today, you are going to make it a good one. Wishing you a good morning!`",
"`If you want to gain health and beauty, you should wake up early. Good morning!`",
"`May this morning offer you new hope for life! May you be happy and enjoy every moment of it. Good morning!`",
"`May the sun shower you with blessings and prosperity in the days ahead. Good morning!`",
"`Every sunrise marks the rise of life over death, hope over despair and happiness over suffering. Wishing you a very enjoyable morning today!`",
"`Wake up and make yourself a part of this beautiful morning. A beautiful world is waiting outside your door. Have an enjoyable time!`",
"`Welcome this beautiful morning with a smile on your face. I hope youll have a great day today. Wishing you a very good morning!`",
"`You have been blessed with yet another day. What a wonderful way of welcoming the blessing with such a beautiful morning! Good morning to you!`",
"`Waking up in such a beautiful morning is a guaranty for a day thats beyond amazing. I hope youll make the best of it. Good morning!`",
"`Nothing is more refreshing than a beautiful morning that calms your mind and gives you reasons to smile. Good morning! Wishing you a great day.`",
"`Another day has just started. Welcome the blessings of this beautiful morning. Rise and shine like you always do. Wishing you a wonderful morning!`",
"`Wake up like the sun every morning and light up the world your awesomeness. You have so many great things to achieve today. Good morning!`",
"`A new day has come with so many new opportunities for you. Grab them all and make the best out of your day. Heres me wishing you a good morning!`",
"`The darkness of night has ended. A new sun is up there to guide you towards a life so bright and blissful. Good morning dear!`",
"`Wake up, have your cup of morning tea and let the morning wind freshen you up like a happiness pill. Wishing you a good morning and a good day ahead!`",
"`Sunrises are the best; enjoy a cup of coffee or tea with yourself because this day is yours, good morning! Have a wonderful day ahead.`",
"`A bad day will always have a good morning, hope all your worries are gone and everything you wish could find a place. Good morning!`",
"`A great end may not be decided but a good creative beginning can be planned and achieved. Good morning, have a productive day!`",
"`Having a sweet morning, a cup of coffee, a day with your loved ones is what sets your Good Morning have a nice day!`",
"`Anything can go wrong in the day but the morning has to be beautiful, so I am making sure your morning starts beautiful. Good morning!`",
"`Open your eyes with a smile, pray and thank god that you are waking up to a new beginning. Good morning!`",
"`Morning is not only sunrise but A Beautiful Miracle of God that defeats the darkness and spread light. Good Morning.`",
"`Life never gives you a second chance. So, enjoy every bit of it. Why not start with this beautiful morning. Good Morning!`",
"`If you want to gain health and beauty, you should wake up early. Good Morning!`",
"`Birds are singing sweet melodies and a gentle breeze is blowing through the trees, what a perfect morning to wake you up. Good morning!`",
"`This morning is so relaxing and beautiful that I really dont want you to miss it in any way. So, wake up dear friend. A hearty good morning to you!`",
"`Mornings come with a blank canvas. Paint it as you like and call it a day. Wake up now and start creating your perfect day. Good morning!`",
"`Every morning brings you new hopes and new opportunities. Dont miss any one of them while youre sleeping. Good morning!`",
"`Start your day with solid determination and great attitude. Youre going to have a good day today. Good morning my friend!`",
"`Friendship is what makes life worth living. I want to thank you for being such a special friend of mine. Good morning to you!`",
"`A friend like you is pretty hard to come by in life. I must consider myself lucky enough to have you. Good morning. Wish you an amazing day ahead!`",
"`The more you count yourself as blessed, the more blessed you will be. Thank God for this beautiful morning and let friendship and love prevail this morning.`",
"`Wake up and sip a cup of loving friendship. Eat your heart out from a plate of hope. To top it up, a fork full of kindness and love. Enough for a happy good morning!`",
"`It is easy to imagine the world coming to an end. But it is difficult to imagine spending a day without my friends. Good morning.`",
]
CmdHelp("quotes").add_command(
"quote", None, "Sends a random mind-blowing quote"
).add_command("gdmng", None, "Sends a random Good Morning Quote").add_command(
"gdnyt", None, "Sends a random Good Night Quote"
).add_command(
"gdbye", None, "Sends a random Good Byee Quote"
).add_command(
"qhi", None, "Sends a random hello msg"
).add_command(
"congo", None, "Sends a random congratulations quote"
).add_command(
"chase", None, "Sends a random Chase quote"
).add_command(
"gdnoon", None, "Sends a random Good Afternoon quote"
).add_command(
"metoo", None, 'Sends a text saying "Mee too"'
).add_command(
"dhoka", None, "Sends a random Dhoka quote(katt gya bc)"
).add_command(
"love", None, "Sends a random love quote. (A stage before .dhoka)"
).add()
| 65.609572 | 214 | 0.702231 |
c4b5547f1e3ecbc952e52b926351b009c451edf6 | 22 | py | Python | celestial/client/system/__init__.py | ams-tech/celestial | 0c4c264563fe79d6838a1c40a1d114c1d6fcf23f | [
"MIT"
] | null | null | null | celestial/client/system/__init__.py | ams-tech/celestial | 0c4c264563fe79d6838a1c40a1d114c1d6fcf23f | [
"MIT"
] | null | null | null | celestial/client/system/__init__.py | ams-tech/celestial | 0c4c264563fe79d6838a1c40a1d114c1d6fcf23f | [
"MIT"
] | null | null | null | from . import cmdline
| 11 | 21 | 0.772727 |
c4b59ea674aa8a31f87633b437e5863be80f3ef3 | 4,089 | py | Python | tests/test_joints.py | slaclab/pystand | c0037d4af52cff98c7e758a7a0ff08156ade4646 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/test_joints.py | slaclab/pystand | c0037d4af52cff98c7e758a7a0ff08156ade4646 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/test_joints.py | slaclab/pystand | c0037d4af52cff98c7e758a7a0ff08156ade4646 | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-05-30T19:02:58.000Z | 2020-12-13T00:35:01.000Z | ############
# Standard #
############
import math
###############
# Third Party #
###############
import ophyd
import pytest
##########
# Module #
##########
from detrot import ConeJoint, AngledJoint, StandPoint, Point
from conftest import PseudoMotor
def test_cone_joint(pseudo_cone):
#Test Vertical
pseudo_cone.alpha = math.pi/2.
assert pytest.approx(pseudo_cone.joint.x) == 5
assert pytest.approx(pseudo_cone.joint.y) == 10
#Test Horizontal
pseudo_cone.alpha= 0
assert pseudo_cone.joint.x == 15
assert pseudo_cone.joint.y == 0
def test_cone_invert(pseudo_cone):
#Test 45
pseudo_cone.alpha = math.pi/4.
assert pseudo_cone.invert((13.07,9.07))[0] == pytest.approx(5,0.1)
assert pseudo_cone.invert((13.07,9.07))[1] == pytest.approx(10,0.1)
def test_angle_joint(pseudo_angle):
#Test Vertical
pseudo_angle.alpha = math.pi/2.
assert pytest.approx(pseudo_angle.joint.x) == 5
assert pytest.approx(pseudo_angle.joint.y) == 10
assert pytest.approx(pseudo_angle.joint.z) == 0
#Test Horizontal
pseudo_angle.alpha = 0
assert pytest.approx(pseudo_angle.joint.x) == 5
assert pytest.approx(pseudo_angle.joint.y) == 0
assert pytest.approx(pseudo_angle.joint.z) == 10
#Test no-slide
pseudo_angle.slide = None
assert pytest.approx(pseudo_angle.joint.x) == 0
assert pytest.approx(pseudo_angle.joint.y) == 0
assert pytest.approx(pseudo_angle.joint.z) == 10
def test_angle_invert(pseudo_angle):
#Test Vertical
pseudo_angle.alpha = math.pi/2.
assert pseudo_angle.invert((6,12))[0] == pytest.approx(5,0.1)
assert pseudo_angle.invert((6,12))[1] == pytest.approx(10,0.1)
#Test no-slide
pseudo_angle.slide = None
assert pseudo_angle.invert((6,12)) == pytest.approx(10,0.1)
def test_position(pseudo_cone):
pseudo_cone.alpha= 0
assert pseudo_cone.position == (16, 2, 3)
pseudo_cone.alpha = math.pi/2.
assert pseudo_cone.position.x == pytest.approx(6,0.1)
assert pseudo_cone.position.y == 12
assert pseudo_cone.position.z == 3
def test_displacement(pseudo_angle):
assert pseudo_angle.displacement == (5,10)
pseudo_angle.slide = None
assert pseudo_angle.displacement == 10
def test_set_joint(pseudo_angle):
#Vertical
pseudo_angle.alpha = math.pi/2.
pseudo_angle.set_joint((6,12))
assert pseudo_angle.displacement[0] == pytest.approx(5,0.1)
assert pseudo_angle.displacement[1] == pytest.approx(10,0.1)
#Test no-slide
pseudo_angle.slide = None
pseudo_angle.set_joint((6,12))
assert pseudo_angle.displacement == pytest.approx(10,0.1)
def test_model(pseudo_angle, pseudo_cone):
model = AngledJoint.model(pseudo_angle)
assert isinstance(model.slide, ophyd.SoftPositioner)
assert isinstance(model.lift, ophyd.SoftPositioner)
assert model.displacement == pseudo_angle.displacement
#Test no slide
pseudo_angle.slide = None
model = AngledJoint.model(pseudo_angle)
assert model.slide == None
assert isinstance(model.lift, ophyd.SoftPositioner)
assert model.displacement == pseudo_angle.displacement
#Test cone
model = ConeJoint.model(pseudo_cone)
assert isinstance(model.slide, ophyd.SoftPositioner)
assert isinstance(model.lift, ophyd.SoftPositioner)
assert model.displacement == pseudo_cone.displacement
def test_stop(pseudo_cone):
pseudo_cone.stop()
pseudo_cone.slide.stop_call.method.assert_called_with()
pseudo_cone.lift.stop_call.method.assert_called_with()
def test_cmp():
p1 = PseudoMotor(5)
p2 = PseudoMotor(10)
assert AngledJoint(p1,p2) == AngledJoint(p1, p2)
| 30.288889 | 71 | 0.682563 |
c4b707a809f0ff9f343cd22eb8b5bddf218a75e6 | 656 | py | Python | tests/test_utils.py | munirjojoverge/rl_AD_urban_baselines | c17cb97fb6a1edd3134b340194e82f4c3ca4f065 | [
"MIT"
] | 6 | 2019-02-05T08:17:29.000Z | 2022-03-22T12:47:53.000Z | tests/test_utils.py | munirjojoverge/rl_AD_urban_baselines | c17cb97fb6a1edd3134b340194e82f4c3ca4f065 | [
"MIT"
] | null | null | null | tests/test_utils.py | munirjojoverge/rl_AD_urban_baselines | c17cb97fb6a1edd3134b340194e82f4c3ca4f065 | [
"MIT"
] | 1 | 2019-09-08T14:15:56.000Z | 2019-09-08T14:15:56.000Z | import numpy as np
from urban_AD_env.utils import rotated_rectangles_intersect
| 50.461538 | 100 | 0.625 |
c4b756acdec987e0a8f79a384a24813dcb1b56b5 | 3,758 | py | Python | learning_journal/tests.py | hcodydibble/pyramid-learning-journal | eb7a59526885a420b6818fcb888497c21674cf76 | [
"MIT"
] | null | null | null | learning_journal/tests.py | hcodydibble/pyramid-learning-journal | eb7a59526885a420b6818fcb888497c21674cf76 | [
"MIT"
] | 3 | 2019-12-26T16:39:40.000Z | 2021-06-01T21:57:09.000Z | learning_journal/tests.py | hcodydibble/pyramid-learning-journal | eb7a59526885a420b6818fcb888497c21674cf76 | [
"MIT"
] | null | null | null | """Functions that test server functions."""
import pytest
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from datetime import datetime
from learning_journal.models import Entry
def test_list_view_returns_list_of_entries_in_dict(dummy_request):
"""Test for the list_view function."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
assert 'journals' in response
assert isinstance(response['journals'], list)
def test_adding_to_dummy_db_works(dummy_request):
"""Test that adding to dummy db works."""
assert len(dummy_request.dbsession.query(Entry).all()) == 0
test_entry = Entry(
title="Fake Title",
creation_date=datetime.now(),
body="The body lul"
)
dummy_request.dbsession.add(test_entry)
assert len(dummy_request.dbsession.query(Entry).all()) == 1
def test_list_view_returns_a_dict(dummy_request):
"""Function to test if list_view returns a dict."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
assert isinstance(response, dict)
def test_list_view_returns_proper_amount_of_content(dummy_request):
"""Home view response has content."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
query = dummy_request.dbsession.query(Entry).all()
assert len(response["journals"]) == len(query)
def test_about_view_returns_a_dict(dummy_request):
"""Test that about view returns dict."""
from learning_journal.views.default import about_view
response = about_view(dummy_request)
assert isinstance(response, dict)
def test_create_view_returns_a_dict(dummy_request):
"""Test that create view returns dict."""
from learning_journal.views.default import create_view
response = create_view(dummy_request)
assert isinstance(response, dict)
def test_detail_view_returns_post_detail(dummy_request):
"""Test that detail view returns post details."""
from learning_journal.views.default import detail_view
test_entry = Entry(
title="Fake Title",
creation_date=datetime.now(),
body="The body lul"
)
dummy_request.dbsession.add(test_entry)
dummy_request.matchdict['id'] = 1
response = detail_view(dummy_request)
assert response['post'].title == "Fake Title"
def test_create_view_get_empty_is_empty_dict(dummy_request):
"""Test that GET request on create view returns empty dict."""
from learning_journal.views.default import create_view
dummy_request.method = "GET"
response = create_view(dummy_request)
assert response == {}
def test_create_view_post_works(dummy_request):
"""Test that create view post creates new entry."""
from learning_journal.views.default import create_view
dummy_request.method = "POST"
test_post = {"title": "Test", "body": "This is a body."}
dummy_request.POST = test_post
response = create_view(dummy_request)
assert response.status_code == 302
def test_create_view_raises_bad_request(dummy_request):
"""Test that an incomplete post request returns HTTPBadRequest."""
from learning_journal.views.default import create_view
dummy_request.method = "POST"
test_post = {"title": "Test"}
dummy_request.POST = test_post
with pytest.raises(HTTPBadRequest):
create_view(dummy_request)
def test_new_entry_redirects_to_home_page(testapp, empty_db):
    """Submitting a new entry should redirect back to the home page."""
    entry = {"title": "Fake Title", "body": "The body lul"}
    result = testapp.post("/journal/new-entry", entry)
    assert result.location == "http://localhost/"
| 35.121495 | 77 | 0.735498 |
c4b79caceb9a36abb81dbce0bc005d6ef54fc982 | 977 | py | Python | hvad/exceptions.py | Kunpors/dr.pors- | e1b9727c96add31af9c2a1a4b27a058b506748a6 | [
"BSD-3-Clause"
] | 1 | 2020-05-24T16:10:16.000Z | 2020-05-24T16:10:16.000Z | hvad/exceptions.py | Kunpors/dr.pors- | e1b9727c96add31af9c2a1a4b27a058b506748a6 | [
"BSD-3-Clause"
] | null | null | null | hvad/exceptions.py | Kunpors/dr.pors- | e1b9727c96add31af9c2a1a4b27a058b506748a6 | [
"BSD-3-Clause"
] | 3 | 2019-10-12T15:14:31.000Z | 2021-12-13T13:25:12.000Z | """ Hvad-specific exceptions
Part of hvad public API.
"""
__all__ = ('WrongManager', )
| 33.689655 | 78 | 0.633572 |
c4b8e8abe6cf564f4eafc19481868c1a741b23e4 | 4,858 | py | Python | examples/ERP/classify_P300_bi.py | gcattan/pyRiemann-qiskit | a53f2f891f4d8726b97ed8f0baaf89f86b5ea731 | [
"BSD-3-Clause"
] | 7 | 2022-01-10T19:19:59.000Z | 2022-02-21T20:13:24.000Z | examples/ERP/classify_P300_bi.py | gcattan/pyRiemann-qiskit | a53f2f891f4d8726b97ed8f0baaf89f86b5ea731 | [
"BSD-3-Clause"
] | 28 | 2021-09-27T11:53:29.000Z | 2022-03-29T08:39:55.000Z | examples/ERP/classify_P300_bi.py | gcattan/pyRiemann-qiskit | a53f2f891f4d8726b97ed8f0baaf89f86b5ea731 | [
"BSD-3-Clause"
] | 2 | 2021-09-25T17:04:23.000Z | 2022-02-07T16:34:20.000Z | """
====================================================================
Classification of P300 datasets from MOABB
====================================================================
It demonstrates the QuantumClassifierWithDefaultRiemannianPipeline(). This
pipeline uses Riemannian Geometry, Tangent Space and a quantum SVM
classifier. MOABB is used to access many EEG datasets and also for the
evaluation and comparison with other classifiers.
In QuantumClassifierWithDefaultRiemannianPipeline():
If parameter "shots" is None then a classical SVM is used similar to the one
in scikit learn.
If "shots" is not None and IBM Qunatum token is provided with "q_account_token"
then a real Quantum computer will be used.
You also need to adjust the "n_components" in the PCA procedure to the number
of qubits supported by the real quantum computer you are going to use.
A list of real quantum computers is available in your IBM quantum account.
"""
# Author: Anton Andreev
# Modified from plot_classify_EEG_tangentspace.py of pyRiemann
# License: BSD (3-clause)
from pyriemann.estimation import XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from sklearn.pipeline import make_pipeline
from matplotlib import pyplot as plt
import warnings
import seaborn as sns
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from moabb import set_log_level
from moabb.datasets import bi2012
from moabb.evaluations import WithinSessionEvaluation
from moabb.paradigms import P300
from pyriemann_qiskit.classification import \
QuantumClassifierWithDefaultRiemannianPipeline
from sklearn.decomposition import PCA
print(__doc__)
##############################################################################
# getting rid of the warnings about the future
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore")
set_log_level("info")
##############################################################################
# Create Pipelines
# ----------------
#
# Pipelines must be a dict of sklearn pipeline transformer.
##############################################################################
# We have to do this because the classes are called 'Target' and 'NonTarget'
# but the evaluation function uses a LabelEncoder, transforming them
# to 0 and 1
labels_dict = {"Target": 1, "NonTarget": 0}
paradigm = P300(resample=128)
datasets = [bi2012()] # MOABB provides several other P300 datasets
# reduce the number of subjects, the Quantum pipeline takes a lot of time
# if executed on the entire dataset
n_subjects = 5
for dataset in datasets:
dataset.subject_list = dataset.subject_list[0:n_subjects]
overwrite = True # set to True if we want to overwrite cached results
pipelines = {}
# A Riemannian Quantum pipeline provided by pyRiemann-qiskit
# You can choose between classical SVM and Quantum SVM.
pipelines["RG+QuantumSVM"] = QuantumClassifierWithDefaultRiemannianPipeline(
shots=None, # 'None' forces classic SVM
nfilter=2, # default 2
# default n_components=10, a higher value renders better performance with
# the non-qunatum SVM version used in qiskit
# On a real Quantum computer (n_components = qubits)
dim_red=PCA(n_components=5),
# params={'q_account_token': '<IBM Quantum TOKEN>'}
)
# Here we provide a pipeline for comparison:
# This is a standard pipeline similar to
# QuantumClassifierWithDefaultRiemannianPipeline, but with LDA classifier
# instead.
pipelines["RG+LDA"] = make_pipeline(
# applies XDawn and calculates the covariance matrix, output it matrices
XdawnCovariances(
nfilter=2,
classes=[labels_dict["Target"]],
estimator="lwf",
xdawn_estimator="scm"
),
TangentSpace(),
PCA(n_components=10),
LDA(solver="lsqr", shrinkage="auto"), # you can use other classifiers
)
print("Total pipelines to evaluate: ", len(pipelines))
evaluation = WithinSessionEvaluation(
paradigm=paradigm,
datasets=datasets,
suffix="examples",
overwrite=overwrite
)
results = evaluation.process(pipelines)
print("Averaging the session performance:")
print(results.groupby('pipeline').mean('score')[['score', 'time']])
##############################################################################
# Plot Results
# ----------------
#
# Here we plot the results to compare the two pipelines
fig, ax = plt.subplots(facecolor="white", figsize=[8, 4])
sns.stripplot(
data=results,
y="score",
x="pipeline",
ax=ax,
jitter=True,
alpha=0.5,
zorder=1,
palette="Set1",
)
sns.pointplot(data=results,
y="score",
x="pipeline",
ax=ax, zorder=1,
palette="Set1")
ax.set_ylabel("ROC AUC")
ax.set_ylim(0.3, 1)
plt.show()
| 32.604027 | 79 | 0.675793 |
c4bbfc60d6e8dc47bfaa0bd4199a1ffa222feb43 | 281 | py | Python | copy-the-content-of-one-array-into-another-in-the-reverse-order.py | kRituraj/python-programming | 8671e976ac31f4dfa81960c5f601187ef60e23f5 | [
"MIT"
] | null | null | null | copy-the-content-of-one-array-into-another-in-the-reverse-order.py | kRituraj/python-programming | 8671e976ac31f4dfa81960c5f601187ef60e23f5 | [
"MIT"
] | null | null | null | copy-the-content-of-one-array-into-another-in-the-reverse-order.py | kRituraj/python-programming | 8671e976ac31f4dfa81960c5f601187ef60e23f5 | [
"MIT"
] | 1 | 2021-05-16T14:38:47.000Z | 2021-05-16T14:38:47.000Z | from array import *
MyArray = [None] * 20
MyArray1 = [None] * 20
i = 0
while(i < 20):
MyArray[i] = i + 1
i = i + 1
j = 0
i = 19
while(j < 20):
MyArray1[j] = MyArray[i]
j = j + 1
i = i - 1
i = 0
while(i < 20):
print(MyArray1[i],end = " ")
i = i + 1 | 12.217391 | 32 | 0.466192 |
c4bc61d832484a34ae928af094d25db9bd925929 | 6,262 | py | Python | Findclone/aiofindclone.py | vypivshiy/Findclone_api | 97ec5f33929b5cd3bdf670d829596749c3797dbc | [
"MIT"
] | 5 | 2020-11-16T11:41:05.000Z | 2021-09-09T22:54:37.000Z | Findclone/aiofindclone.py | vypivshiy/Findclone_api | 97ec5f33929b5cd3bdf670d829596749c3797dbc | [
"MIT"
] | 2 | 2021-05-02T21:34:47.000Z | 2021-08-21T23:30:44.000Z | Findclone/aiofindclone.py | vypivshiy/Findclone_api | 97ec5f33929b5cd3bdf670d829596749c3797dbc | [
"MIT"
] | 2 | 2021-04-27T01:14:15.000Z | 2021-09-30T06:44:40.000Z | from aiohttp import ClientSession, FormData
from Findclone import __version__
from .models import Account, Profiles, Histories, get_builder
from .utils import random_string, paint_boxes
from .exceptions import a_error_handler, FindcloneError
from io import BufferedReader, BytesIO
| 40.928105 | 115 | 0.569946 |
c4bdeaaafb20d5a42c4e3425ac5c7f0f3346483c | 3,764 | py | Python | src/program/consumers.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/program/consumers.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/program/consumers.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | from channels.generic.websocket import JsonWebsocketConsumer
from camps.models import Camp
from .models import (
Event,
EventInstance,
Favorite,
EventLocation,
EventType,
EventTrack,
Speaker
)
| 32.730435 | 76 | 0.4822 |
c4beaf1419ac6190fbeaa0573def91c010634c7c | 9,985 | py | Python | rrs/tools/rrs_maintainer_history.py | WindRiver-OpenSourceLabs/layerindex-web | a7820077b0a2c7ad36c934e7c1e11f19fb336081 | [
"MIT"
] | null | null | null | rrs/tools/rrs_maintainer_history.py | WindRiver-OpenSourceLabs/layerindex-web | a7820077b0a2c7ad36c934e7c1e11f19fb336081 | [
"MIT"
] | null | null | null | rrs/tools/rrs_maintainer_history.py | WindRiver-OpenSourceLabs/layerindex-web | a7820077b0a2c7ad36c934e7c1e11f19fb336081 | [
"MIT"
] | 1 | 2017-09-27T17:09:29.000Z | 2017-09-27T17:09:29.000Z | #!/usr/bin/env python3
# Standalone script which rebuilds the history of maintainership
#
# Copyright (C) 2015 Intel Corporation
# Author: Anibal Limon <anibal.limon@linux.intel.com>
#
# Licensed under the MIT license, see COPYING.MIT for details
import sys
import os.path
import optparse
import logging
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__))))
from common import common_setup, get_logger, DryRunRollbackException
common_setup()
from layerindex import utils, recipeparse
utils.setup_django()
from django.db import transaction
import settings
from layerindex.models import Recipe, LayerBranch, LayerItem
from rrs.models import MaintenancePlan, Maintainer, RecipeMaintainerHistory, RecipeMaintainer, RecipeMaintenanceLink
from django.core.exceptions import ObjectDoesNotExist
# FIXME we shouldn't be hardcoded to expect RECIPE_MAINTAINER to be set in this file,
# as it may be in the recipe in future
MAINTAINERS_INCLUDE_PATH = 'conf/distro/include/maintainers.inc'
"""
Try to get recipe maintainer from line, if not found return None
"""
"""
Get commit information from text.
Returns author_name, author_email, date and title.
"""
"""
Recreate Maintainership history from the beginning
"""
if __name__=="__main__":
    # Command-line entry point: parse options, configure logging, then run
    # the maintainership rebuild defined elsewhere in this file.
    parser = optparse.OptionParser(usage = """%prog [options]""")
    parser.add_option("-p", "--plan",
            help="Specify maintenance plan to operate on (default is all plans that have updates enabled)",
            action="store", dest="plan", default=None)
    parser.add_option("--fullreload",
            help="Reload upgrade data from scratch",
            action="store_true", dest="fullreload", default=False)
    parser.add_option("-d", "--debug",
            help = "Enable debug output",
            action="store_const", const=logging.DEBUG, dest="loglevel",
            default=logging.INFO)
    parser.add_option("--dry-run",
            help = "Do not write any data back to the database",
            action="store_true", dest="dry_run", default=False)

    logger = get_logger("MaintainerUpdate", settings)
    # NOTE(review): args beyond options are ignored by maintainer_history -- confirm.
    options, args = parser.parse_args(sys.argv)
    logger.setLevel(options.loglevel)
    maintainer_history(options, logger)
| 42.489362 | 141 | 0.591688 |
c4bf8ee657abc953898db54f30fd5e418bfece30 | 16,297 | py | Python | snakemake/persistence.py | scholer/snakemake | 99de496322f4813fea590ee50607be8042f176d5 | [
"MIT"
] | null | null | null | snakemake/persistence.py | scholer/snakemake | 99de496322f4813fea590ee50607be8042f176d5 | [
"MIT"
] | null | null | null | snakemake/persistence.py | scholer/snakemake | 99de496322f4813fea590ee50607be8042f176d5 | [
"MIT"
] | null | null | null | __author__ = "Johannes Kster"
__copyright__ = "Copyright 2015-2019, Johannes Kster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
import os
import shutil
import signal
import marshal
import pickle
import json
import time
from base64 import urlsafe_b64encode, b64encode
from functools import lru_cache, partial
from itertools import filterfalse, count
from pathlib import Path
from snakemake.logging import logger
from snakemake.jobs import jobfiles
from snakemake.utils import listfiles
    def version_changed(self, job, file=None):
        """Yields output files with changed versions, or a bool if *file* is given."""
        return _bool_or_gen(self._version_changed, job, file=file)
    def code_changed(self, job, file=None):
        """Yields output files with changed code, or a bool if *file* is given."""
        return _bool_or_gen(self._code_changed, job, file=file)
    def input_changed(self, job, file=None):
        """Yields output files with changed input, or a bool if *file* is given."""
        return _bool_or_gen(self._input_changed, job, file=file)
    def params_changed(self, job, file=None):
        """Yields output files with changed params, or a bool if *file* is given."""
        return _bool_or_gen(self._params_changed, job, file=file)
    def _record(self, subject, json_value, id):
        """Persist *json_value* as the JSON record for *id* under *subject*."""
        recpath = self._record_path(subject, id)
        # Record paths can be nested; create parent directories as needed.
        os.makedirs(os.path.dirname(recpath), exist_ok=True)
        with open(recpath, "w") as f:
            json.dump(json_value, f)
    def _delete_record(self, subject, id):
        """Delete the record for *id* under *subject*, pruning empty parent dirs."""
        try:
            recpath = self._record_path(subject, id)
            os.remove(recpath)
            # Prune now-empty intermediate directories below the subject root
            # (removedirs stops as soon as it hits a non-empty directory).
            recdirs = os.path.relpath(os.path.dirname(recpath), start=subject)
            if recdirs != ".":
                os.removedirs(recdirs)
        except OSError as e:
            if e.errno != 2:  # errno 2 == ENOENT: a missing record is fine
                raise e
c4bfcafbe1935fac111f0c9a05300ded2101080b | 1,418 | py | Python | Pasture_Growth_Modelling/initialisation_support/dryland_ibasal.py | Komanawa-Solutions-Ltd/SLMACC-2020-CSRA | 914b6912c5f5b522107aa9406fb3d823e61c2ebe | [
"Apache-2.0"
] | null | null | null | Pasture_Growth_Modelling/initialisation_support/dryland_ibasal.py | Komanawa-Solutions-Ltd/SLMACC-2020-CSRA | 914b6912c5f5b522107aa9406fb3d823e61c2ebe | [
"Apache-2.0"
] | null | null | null | Pasture_Growth_Modelling/initialisation_support/dryland_ibasal.py | Komanawa-Solutions-Ltd/SLMACC-2020-CSRA | 914b6912c5f5b522107aa9406fb3d823e61c2ebe | [
"Apache-2.0"
] | null | null | null | """
Author: Matt Hanson
Created: 23/11/2020 11:06 AM
"""
import ksl_env
# add basgra nz functions
ksl_env.add_basgra_nz_path()
from supporting_functions.plotting import plot_multiple_results
from check_basgra_python.support_for_tests import establish_org_input, get_lincoln_broadfield, get_woodward_weather, _clean_harvest
from input_output_keys import matrix_weather_keys_pet
from basgra_python import run_basgra_nz
if __name__ == '__main__':
ibasals = [0,0.1,0.15,.2,0.3]
data = {
'IBASAL:{}'.format(e): run_nonirr_lincoln_low_basil(e) for e in ibasals
}
plot_multiple_results(data, out_vars=['BASAL', 'DM', 'YIELD','per_paw'])
| 33.761905 | 131 | 0.722849 |
c4c0c23efe0af691e686cdc320886e050cb8e361 | 636 | py | Python | 0x05/solve/ex1-0x05.py | tuannm-1876/sec-exercises | d8ea08bc02003af3722e0553060ed370ed395b33 | [
"MIT"
] | null | null | null | 0x05/solve/ex1-0x05.py | tuannm-1876/sec-exercises | d8ea08bc02003af3722e0553060ed370ed395b33 | [
"MIT"
] | null | null | null | 0x05/solve/ex1-0x05.py | tuannm-1876/sec-exercises | d8ea08bc02003af3722e0553060ed370ed395b33 | [
"MIT"
] | null | null | null | import urllib
import urllib2
url = "http://ctfq.sweetduet.info:10080/~q6/"
if __name__ == "__main__":
main() | 30.285714 | 115 | 0.550314 |
c4c2b89beca88decbb59d2f0d2ce782d1dc5da96 | 2,100 | py | Python | gen_methods.py | mweeden2/desert_game | aaf561d11687455a23982771315b15a9bf5b1a86 | [
"MIT"
] | null | null | null | gen_methods.py | mweeden2/desert_game | aaf561d11687455a23982771315b15a9bf5b1a86 | [
"MIT"
] | null | null | null | gen_methods.py | mweeden2/desert_game | aaf561d11687455a23982771315b15a9bf5b1a86 | [
"MIT"
] | null | null | null | # created by Matt Weeden
# 7/8/16
import classes as c
| 38.181818 | 90 | 0.438571 |
c4c33c445b5f51d288c06db4115d54abafa3991d | 352 | py | Python | entity_resolution/interfaces/IRecord.py | GeoJamesJones/ArcGIS-Senzing-Prototype | ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b | [
"MIT"
] | null | null | null | entity_resolution/interfaces/IRecord.py | GeoJamesJones/ArcGIS-Senzing-Prototype | ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b | [
"MIT"
] | null | null | null | entity_resolution/interfaces/IRecord.py | GeoJamesJones/ArcGIS-Senzing-Prototype | ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import Dict, Any, List
| 18.526316 | 40 | 0.636364 |
c4c366fd382e30c8721cf1d840c44a8f46e40d4d | 822 | py | Python | mkt/stats/helpers.py | Joergen/zamboni | 20a0e22b75cf986aceeb8c4d8c25abb948d97096 | [
"BSD-3-Clause"
] | null | null | null | mkt/stats/helpers.py | Joergen/zamboni | 20a0e22b75cf986aceeb8c4d8c25abb948d97096 | [
"BSD-3-Clause"
] | null | null | null | mkt/stats/helpers.py | Joergen/zamboni | 20a0e22b75cf986aceeb8c4d8c25abb948d97096 | [
"BSD-3-Clause"
] | 1 | 2021-03-13T00:33:12.000Z | 2021-03-13T00:33:12.000Z | from django.utils.http import urlquote
from jingo import register
import jinja2
from access import acl
| 23.485714 | 75 | 0.717762 |
c4c4d67f988add89a513610e9e3367a81daf5283 | 593 | py | Python | code_examples/package_example/my_scripts/network/connect_telnet.py | natenka/natenka.github.io | 74c56be74f2c9b15a4c9b523a1622453ae2064af | [
"MIT"
] | 18 | 2017-02-19T15:58:54.000Z | 2022-02-13T22:15:19.000Z | code_examples/package_example/my_scripts/network/connect_telnet.py | natenka/natenka.github.io | 74c56be74f2c9b15a4c9b523a1622453ae2064af | [
"MIT"
] | 1 | 2020-02-24T23:14:15.000Z | 2020-02-24T23:14:15.000Z | code_examples/package_example/my_scripts/network/connect_telnet.py | natenka/natenka.github.io | 74c56be74f2c9b15a4c9b523a1622453ae2064af | [
"MIT"
] | 27 | 2017-05-03T15:38:41.000Z | 2022-02-08T02:53:38.000Z | import telnetlib
import time
| 21.178571 | 80 | 0.63575 |
c4c5720115308ec3559711c2319791d1086d71cd | 837 | py | Python | fullstack/migrations/0004_officeholder.py | TylerFisher/full-stack-react | dae5df2b85944e66a9ad0c64cad3e83b7cb1e173 | [
"MIT"
] | 9 | 2019-01-26T20:09:24.000Z | 2021-02-28T12:09:17.000Z | fullstack/migrations/0004_officeholder.py | dariyamizzou/full-stack-react | dae5df2b85944e66a9ad0c64cad3e83b7cb1e173 | [
"MIT"
] | 3 | 2020-02-11T23:49:18.000Z | 2021-06-10T21:13:36.000Z | fullstack/migrations/0004_officeholder.py | dariyamizzou/full-stack-react | dae5df2b85944e66a9ad0c64cad3e83b7cb1e173 | [
"MIT"
] | 1 | 2019-03-09T18:33:12.000Z | 2019-03-09T18:33:12.000Z | # Generated by Django 2.1.5 on 2019-01-27 22:45
from django.db import migrations, models
import django.db.models.deletion
import uuid
| 32.192308 | 114 | 0.612903 |
c4c60b936c323211b4cb0582eec9c25e08a076db | 8,765 | py | Python | tools/evaluate_2D.py | ZJULiHongxin/two-hand-pose-est | e531faacd9cdddcb716b614b832038d079b9663f | [
"MIT"
] | null | null | null | tools/evaluate_2D.py | ZJULiHongxin/two-hand-pose-est | e531faacd9cdddcb716b614b832038d079b9663f | [
"MIT"
] | null | null | null | tools/evaluate_2D.py | ZJULiHongxin/two-hand-pose-est | e531faacd9cdddcb716b614b832038d079b9663f | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import platform
import numpy as np
import time
import os
import torch
import torch.backends.cudnn as cudnn
import _init_paths
from config import cfg
from config import update_config
from utils.utils import get_model_summary
from ptflops import get_model_complexity_info
from fp16_utils.fp16util import network_to_half
from core.loss import BoneLengthLoss, JointAngleLoss, JointsMSELoss
import dataset
from dataset.build import trans
from models import A2JPoseNet
from utils.misc import plot_performance
import matplotlib
if platform.system() == 'Linux':
matplotlib.use('Agg')
else:
matplotlib.use('Tkagg')
# python evaluate_2D.py --cfg ../experiments/InterHand/exp_test.yaml --model_path ../output/InterHand/exp_test/model_best.pth.tar --gpu 3 --batch_size 32
main() | 35.48583 | 153 | 0.595893 |
c4c6cbab6dda38ae0a7e7c3ca88f87ebfef34635 | 4,323 | py | Python | skyportal/model_util.py | jadalilleboe/skyportal | 501b7a09e239f03213499df297620c6a8a214cd9 | [
"BSD-3-Clause"
] | 1 | 2022-01-14T10:32:04.000Z | 2022-01-14T10:32:04.000Z | skyportal/model_util.py | jadalilleboe/skyportal | 501b7a09e239f03213499df297620c6a8a214cd9 | [
"BSD-3-Clause"
] | null | null | null | skyportal/model_util.py | jadalilleboe/skyportal | 501b7a09e239f03213499df297620c6a8a214cd9 | [
"BSD-3-Clause"
] | 1 | 2021-11-28T21:00:02.000Z | 2021-11-28T21:00:02.000Z | from social_tornado.models import TornadoStorage
from skyportal.models import DBSession, ACL, Role, User, Token, Group
from skyportal.enum_types import LISTENER_CLASSES, sqla_enum_types
from baselayer.app.env import load_env
all_acl_ids = [
'Become user',
'Comment',
'Annotate',
'Manage users',
'Manage sources',
'Manage groups',
'Manage shifts',
'Manage allocations',
'Manage observing runs',
'Upload data',
'System admin',
'Post taxonomy',
'Delete taxonomy',
'Classify',
] + [c.get_acl_id() for c in LISTENER_CLASSES]
role_acls = {
'Super admin': all_acl_ids,
'Group admin': [
'Annotate',
'Comment',
'Manage shifts',
'Manage sources',
'Upload data',
'Post taxonomy',
'Manage users',
'Classify',
'Manage observing runs',
],
'Full user': [
'Annotate',
'Comment',
'Upload data',
'Classify',
'Manage observing runs',
],
'View only': [],
}
env, cfg = load_env()
def make_super_user(username):
    """Initializes a super user with full permissions."""
    setup_permissions()  # make sure permissions already exist
    # NOTE(review): add_user is defined elsewhere in this module; presumably
    # auth=True enables authentication for the account -- confirm.
    add_user(username, roles=['Super admin'], auth=True)
def provision_token():
    """Provision an initial administrative token, creating it if absent."""
    admin = add_user(
        'provisioned_admin',
        roles=['Super admin'],
        first_name="provisioned",
        last_name="admin",
    )
    token_name = 'Initial admin token'
    existing = (
        Token.query
        .filter(Token.created_by == admin)
        .filter(Token.name == token_name)
        .first()
    )
    if existing is not None:
        return existing
    # No token on record yet: mint one carrying the full ACL set.
    new_token_id = create_token(all_acl_ids, user_id=admin.id, name=token_name)
    return Token.query.get(new_token_id)
def provision_public_group():
    """If public group name is set in the config file, create it."""
    _, config = load_env()
    name = config['misc.public_group_name']
    if not name:
        return
    # Only create the group when it does not already exist.
    if Group.query.filter(Group.name == name).first() is None:
        session = DBSession()
        session.add(Group(name=name))
        session.commit()
def setup_permissions():
    """Create default ACLs/Roles needed by application.

    If a given ACL or Role already exists, it will be skipped."""
    session = DBSession()
    session.add_all([ACL.create_or_get(acl_id) for acl_id in all_acl_ids])
    session.commit()
    # Attach each role's ACL set, committing per role as the original did.
    for role_name, acl_ids in role_acls.items():
        role = Role.create_or_get(role_name)
        role.acls = [ACL.query.get(acl_id) for acl_id in acl_ids]
        session.add(role)
        session.commit()
| 27.188679 | 88 | 0.630118 |
c4c7aa58a5a7074c7dc6f26bfd9244c8183f237e | 7,909 | py | Python | pages/views.py | SmartDataWithR/CovidHelper | 21f8c3f3d81da0b5ec32b228c711e96f9d5c168e | [
"MIT"
] | null | null | null | pages/views.py | SmartDataWithR/CovidHelper | 21f8c3f3d81da0b5ec32b228c711e96f9d5c168e | [
"MIT"
] | 9 | 2020-03-27T10:33:35.000Z | 2022-03-12T00:20:47.000Z | pages/views.py | SmartDataWithR/CovidHelper | 21f8c3f3d81da0b5ec32b228c711e96f9d5c168e | [
"MIT"
] | null | null | null | from django.views.generic import TemplateView
from ipware import get_client_ip
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.conf import settings
from .forms import SearchForm
from users.models import CustomUser
import geopy
from geopy.distance import geodesic
import pandas as pd
import json
from django.utils.translation import gettext as _, activate
# required for IP to numeric
import socket
import struct
# import file for ip's to language mapping
df_ip_lang = pd.read_csv('pages/lng_map.csv', names=['ip_from', 'ip_to', 'country_code', 'country_name', 'lang_code'] )
def searchLocation(request):
    """Render the home page with a search form, bound to POST data if posted."""
    # NOTE(review): SearchForm is instantiated with the request object itself;
    # Django forms normally take a data mapping (or no args for an unbound
    # form). Looks like this should be SearchForm() -- confirm against the
    # SearchForm definition in .forms.
    form = SearchForm(request)
    print(form)  # debug output left in place; consider logging instead
    if request.method=='POST':
        form = SearchForm(request.POST)
    return render(request, 'pages/home.html', {'form': form})
def change_password(request):
    """Handle the password-change form: validate on POST, re-render otherwise."""
    if request.method != 'POST':
        # Initial GET: show an unbound form.
        blank_form = PasswordChangeForm(request.user)
        return render(request, 'account/password_set.html', {
            'form': blank_form
        })

    bound_form = PasswordChangeForm(request.user, request.POST)
    if bound_form.is_valid():
        user = bound_form.save()
        # Keep the user logged in after their password changes.
        update_session_auth_hash(request, user)
        messages.success(request, 'Your password was successfully updated!')
        return redirect('change_password')

    messages.error(request, 'Please correct the error below.')
    return render(request, 'account/password_set.html', {
        'form': bound_form
    })
def privacy(request):
    """Render the static privacy-policy page."""
    return render(request, 'pages/privacy.html')
def imprint(request):
    """Render the static imprint (legal notice) page."""
    return render(request, 'pages/imprint.html')
def terms(request):
    """Render the static terms-and-conditions page."""
    return render(request, 'pages/terms_conditions.html')
def cookie_policy(request):
    """Render the static cookie-policy page."""
    return render(request, 'pages/cookie_policy.html')
| 48.820988 | 449 | 0.677709 |
c4c7ad4c67500251c669c2383f921ba86cff8d33 | 1,568 | py | Python | image-classification/evaluate_classification.py | rush2406/vipriors-challenges-toolkit | ff2d6b944ff4aebb0d3ec9bb9fb8d8459850ccb6 | [
"BSD-3-Clause"
] | 56 | 2020-03-12T19:33:56.000Z | 2022-03-10T14:44:43.000Z | image-classification/evaluate_classification.py | rush2406/vipriors-challenges-toolkit | ff2d6b944ff4aebb0d3ec9bb9fb8d8459850ccb6 | [
"BSD-3-Clause"
] | 42 | 2020-04-12T10:13:24.000Z | 2021-10-11T11:27:24.000Z | image-classification/evaluate_classification.py | rush2406/vipriors-challenges-toolkit | ff2d6b944ff4aebb0d3ec9bb9fb8d8459850ccb6 | [
"BSD-3-Clause"
] | 20 | 2020-04-01T11:00:37.000Z | 2022-03-04T00:25:42.000Z | """
Use this script to evaluate your model. It stores metrics in the file
`scores.txt`.
Input:
predictions (str): filepath. Should be a file that matches the submission
format;
groundtruths (str): filepath. Should be an annotation file.
Usage:
evaluate_classification.py <groundtruths> <predictions> <output_dir>
"""
import numpy as np
import pandas as pd
import os
import sys
OUTPUT_FILE = 'scores.txt'
if __name__ == '__main__':
args = sys.argv[1:]
evaluate_from_files(args[0], args[1], args[2]) | 27.034483 | 81 | 0.64477 |
c4cf02c8517026bb6fefdd3d522f1e274f5ebb1b | 3,777 | py | Python | __init__.py | j0rd1smit/obsidian-albert-plugin | 9f17949bba638bd10e534fc50e89ca0bb63e0bb0 | [
"MIT"
] | 1 | 2021-10-29T20:05:23.000Z | 2021-10-29T20:05:23.000Z | __init__.py | j0rd1smit/obsidian-albert-plugin | 9f17949bba638bd10e534fc50e89ca0bb63e0bb0 | [
"MIT"
] | 2 | 2022-02-11T22:06:06.000Z | 2022-02-12T09:14:43.000Z | __init__.py | j0rd1smit/obsidian-albert-plugin | 9f17949bba638bd10e534fc50e89ca0bb63e0bb0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""A simple plugin that makes it possible to search your Obsidian vault.
This extension makes it possible to search your Obsidian vault. For more information please visit https://github.com/j0rd1smit/obsidian-albert-plugin.
Synopsis: ob <query>"""
from albert import *
import os
from time import sleep
from pathlib import Path
import sys
import json
__title__ = "Obsidian"
__version__ = "0.0.1"
__triggers__ = "ob "
__authors__ = "J0rd1smit"
__exec_deps__ = []
__py_deps__ = []
PATH_TO_CONFIG_FOLDER = Path.home() / ".config/albert/obsidian-plugin"
PATH_TO_CONFIG_DATA = PATH_TO_CONFIG_FOLDER / "config.json"
PATH_TO_ICON = os.path.dirname(__file__) + "/plugin.png"
iconPath = iconLookup("albert")
| 25.18 | 150 | 0.62457 |
c4d099c36b19642df7fc49addbd229bce868b35a | 5,433 | py | Python | day16.py | luciansmith/adventOfCode | f46ee22aa29b7c0bc1890717463f49664abe3399 | [
"MIT"
] | null | null | null | day16.py | luciansmith/adventOfCode | f46ee22aa29b7c0bc1890717463f49664abe3399 | [
"MIT"
] | null | null | null | day16.py | luciansmith/adventOfCode | f46ee22aa29b7c0bc1890717463f49664abe3399 | [
"MIT"
] | null | null | null | import numpy as np
import binascii
example = False
hexval = "020D74FCE27E600A78020200DC298F1070401C8EF1F21A4D6394F9F48F4C1C00E3003500C74602F0080B1720298C400B7002540095003DC00F601B98806351003D004F66011148039450025C00B2007024717AFB5FBC11A7E73AF60F660094E5793A4E811C0123CECED79104ECED791380069D2522B96A53A81286B18263F75A300526246F60094A6651429ADB3B0068937BCF31A009ADB4C289C9C66526014CB33CB81CB3649B849911803B2EB1327F3CFC60094B01CBB4B80351E66E26B2DD0530070401C82D182080803D1C627C330004320C43789C40192D002F93566A9AFE5967372B378001F525DDDCF0C010A00D440010E84D10A2D0803D1761045C9EA9D9802FE00ACF1448844E9C30078723101912594FEE9C9A548D57A5B8B04012F6002092845284D3301A8951C8C008973D30046136001B705A79BD400B9ECCFD30E3004E62BD56B004E465D911C8CBB2258B06009D802C00087C628C71C4001088C113E27C6B10064C01E86F042181002131EE26C5D20043E34C798246009E80293F9E530052A4910A7E87240195CC7C6340129A967EF9352CFDF0802059210972C977094281007664E206CD57292201349AA4943554D91C9CCBADB80232C6927DE5E92D7A10463005A4657D4597002BC9AF51A24A54B7B33A73E2CE005CBFB3B4A30052801F69DB4B08F3B6961024AD4B43E6B319AA020020F15E4B46E40282CCDBF8CA56802600084C788CB088401A8911C20ECC436C2401CED0048325CC7A7F8CAA912AC72B7024007F24B1F789C0F9EC8810090D801AB8803D11E34C3B00043E27C6989B2C52A01348E24B53531291C4FF4884C9C2C10401B8C9D2D875A0072E6FB75E92AC205CA0154CE7398FB0053DAC3F43295519C9AE080250E657410600BC9EAD9CA56001BF3CEF07A5194C013E00542462332DA4295680"
if (example):
# hexval = "D2FE28"
hexval = "38006F45291200"
hexval = "9C0141080250320F1802104A08"
#stole this bit from https://www.geeksforgeeks.org/python-ways-to-convert-hex-into-binary/ method 2.
n = int(hexval, 16)
binval = ''
while n > 0:
binval = str(n % 2) + binval
n = n >> 1
leading_zeros = '0' * (len(hexval)*4 - len(binval))
binval = leading_zeros + binval
print(binval)
packet = readOneOperation(binval, 0)
print(packet)
print(countVersions(packet))
print(calculateValue(packet))
| 37.468966 | 1,347 | 0.651574 |
c4d1976decbf19efd19f6e692e1b54b98d652ff7 | 2,422 | py | Python | List_5/Task_1/instructions.py | Szpila123/Advanced_python_course | cd45506dec621768d59b2245aa0ae30b758160cc | [
"MIT"
] | null | null | null | List_5/Task_1/instructions.py | Szpila123/Advanced_python_course | cd45506dec621768d59b2245aa0ae30b758160cc | [
"MIT"
] | null | null | null | List_5/Task_1/instructions.py | Szpila123/Advanced_python_course | cd45506dec621768d59b2245aa0ae30b758160cc | [
"MIT"
] | null | null | null | import expressions
import abc
import copy
| 28.833333 | 153 | 0.623865 |
c4d236eae71088db952059a4c21b0e805b6bad1c | 2,228 | py | Python | components/icdc-sheepdog/sheepdog/utils/parse.py | CBIIT/icdc-docker | 5dc78b96a8d885b3fa427c55b9cc19f4771910fa | [
"Apache-2.0"
] | 2 | 2019-06-10T15:30:51.000Z | 2020-01-18T23:24:13.000Z | components/icdc-sheepdog/sheepdog/utils/parse.py | CBIIT/icdc-docker | 5dc78b96a8d885b3fa427c55b9cc19f4771910fa | [
"Apache-2.0"
] | null | null | null | components/icdc-sheepdog/sheepdog/utils/parse.py | CBIIT/icdc-docker | 5dc78b96a8d885b3fa427c55b9cc19f4771910fa | [
"Apache-2.0"
] | 1 | 2022-03-31T09:52:46.000Z | 2022-03-31T09:52:46.000Z | """
TODO
"""
from collections import Counter
import simplejson
import yaml
import flask
from sheepdog.errors import (
UserError,
)
def oph_raise_for_duplicates(object_pairs):
    """
    Given a list of ordered pairs, construct a dict as with the normal JSON
    ``object_pairs_hook``, but raise ``ValueError`` if there are duplicate
    keys, with a message describing all violations.
    """
    counter = Counter(pair[0] for pair in object_pairs)
    # Bug fix: Counter.iteritems() is Python-2-only and raised AttributeError
    # on Python 3 whenever duplicates were present; use items() instead.
    duplicates = [key for key, count in counter.items() if count > 1]
    if duplicates:
        raise ValueError(
            'The document contains duplicate keys: {}'
            .format(','.join(duplicates))
        )
    # No duplicates, so dict() over the pairs is unambiguous.
    return dict(object_pairs)
def parse_json(raw):
    """
    Return a python representation of a JSON document.

    Args:
        raw (str): string of raw JSON content

    Raises:
        UserError: if any exception is raised parsing the JSON body

    .. note:: Uses :func:`oph_raise_for_duplicates` to reject duplicate keys.
    """
    try:
        return simplejson.loads(raw, object_pairs_hook=oph_raise_for_duplicates)
    except Exception as err:
        raise UserError('Unable to parse json: {}'.format(err))
def parse_request_json(expected_types=(dict, list)):
    """
    Parse the body of the current flask request as JSON.

    Args:
        expected_types (tuple): types the decoded document is allowed to be
            an instance of (a dict or a list by default).

    Returns:
        The decoded document.

    Raises:
        UserError: if any exception is raised parsing the JSON body, or if
            the result is not of one of the expected types.
    """
    body = parse_json(flask.request.get_data())
    if isinstance(body, expected_types):
        return body
    raise UserError('JSON parsed from request is an invalid type: {}'
                    .format(body.__class__.__name__))
def parse_request_yaml():
    """
    Parse the body of the current flask request as YAML.

    Returns:
        The decoded document.

    Raises:
        UserError: if any exception is raised while reading or parsing the
            YAML body.
    """
    try:
        document = yaml.safe_load(flask.request.get_data())
    except Exception as exc:
        raise UserError('Unable to parse yaml: {}'.format(exc))
    else:
        return document
| 26.52381 | 78 | 0.653501 |
c4d2c7e50ad2259a426f3b12ba7e12df56ef68ae | 298 | py | Python | frames/rocket/rocket_frames.py | rkinwork/dvmn_async-console-game | c8072a47a9cac1772c099436c91649ebf544dc70 | [
"Unlicense"
] | null | null | null | frames/rocket/rocket_frames.py | rkinwork/dvmn_async-console-game | c8072a47a9cac1772c099436c91649ebf544dc70 | [
"Unlicense"
] | null | null | null | frames/rocket/rocket_frames.py | rkinwork/dvmn_async-console-game | c8072a47a9cac1772c099436c91649ebf544dc70 | [
"Unlicense"
] | null | null | null | from pathlib import Path
def get_rockets_frames():
    """Load the rocket animation frames from files next to this module.

    Returns:
        tuple: frame texts, in animation order.
    """
    frames_dir = Path(__file__).resolve().parent
    frames = []
    for frame_name in ('rocket_frame_1.txt', 'rocket_frame_2.txt'):
        frames.append((frames_dir / frame_name).read_text())
    return tuple(frames)
| 29.8 | 114 | 0.728188 |
c4d337e94d586f1aff49cce9e64e842f4e6c17d0 | 1,128 | py | Python | JuHPLC/views.py | FZJ-INM5/JuHPLC | efaf9b8f5d7f0c9a8ad687d0f143e161f523db7c | [
"Unlicense"
] | 1 | 2018-09-24T06:59:11.000Z | 2018-09-24T06:59:11.000Z | JuHPLC/views.py | FZJ-INM5/JuHPLC | efaf9b8f5d7f0c9a8ad687d0f143e161f523db7c | [
"Unlicense"
] | 1 | 2021-02-05T22:23:57.000Z | 2021-02-05T22:23:57.000Z | JuHPLC/views.py | FZJ-INM5/JuHPLC | efaf9b8f5d7f0c9a8ad687d0f143e161f523db7c | [
"Unlicense"
] | null | null | null | from JuHPLC.Views.NewChromatogram import *
from JuHPLC.SerialCommunication.MicroControllerManager import MicroControllerManager
# Create your views here.
| 37.6 | 117 | 0.58422 |
c4d49dd9a5d7b29748f177746f4735919de54df6 | 3,829 | py | Python | sequana/datatools.py | vladsaveliev/sequana | f6ee7fa7fb47ec179ceedf24684ba861a244656d | [
"BSD-3-Clause"
] | null | null | null | sequana/datatools.py | vladsaveliev/sequana | f6ee7fa7fb47ec179ceedf24684ba861a244656d | [
"BSD-3-Clause"
] | null | null | null | sequana/datatools.py | vladsaveliev/sequana | f6ee7fa7fb47ec179ceedf24684ba861a244656d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Retrieve data from sequana library"""
import os
import easydev
import glob
import collections
def sequana_data(filename=None, where=None):
    """Return full path of a sequana resource data file.

    :param str filename: a valid filename to be found
    :param str where: one of the registered data directory (see below)
    :return: the path of file. See also here below in the case where
        filename is set to "*".

    .. code-block:: python

        from sequana import sequana_data
        filename = sequana_data("test.bam")

    Type the function name with "*" parameter to get a list of
    available files. With the where argument set, the function returns a
    list of files. Without the where argument, a dictionary is returned where
    keys correspond to the registered directories::

        filenames = sequana_data("*", where="images")

    Registered directories are:

        - data
        - testing
        - data/adapters
        - images

    .. note:: this does not handle wildcards. The * means retrieve all files.
    """
    # Resources live under <installed sequana package>/sequana/resources.
    sequana_path = easydev.get_package_location('sequana')
    sharedir = os.sep.join([sequana_path , "sequana", 'resources'])
    directories = ['data', 'testing', 'data/adapters', 'images', 'scripts']
    if filename == "*":
        # Listing mode: collect file names per sub-directory, skipping
        # Python artefacts.
        found = collections.defaultdict(list)
        if where is not None:
            directories = [where]
        for thisdir in directories:
            # NOTE(review): this loop rebinds the 'filename' parameter; safe
            # only because the original value ("*") is no longer needed here.
            for filename in glob.glob(sharedir + "/%s/*" % thisdir):
                filename = os.path.split(filename)[1]
                to_ignore = ["__init__.py", "__pycache__"]
                if filename.endswith('.pyc') or filename in to_ignore:
                    pass
                else:
                    found[thisdir].append(os.path.split(filename)[1])
        if where is not None:
            # Specific directory requested: return just its file list.
            return found[where]
        return found
    if filename is None:
        # Help mode: print every valid call, then fail on purpose so the
        # caller sees the list of valid names.
        for thisdir in directories:
            print('From %s directory:' % thisdir)
            for filename in glob.glob(sharedir + "/%s/*" % thisdir):
                filename = os.path.split(filename)[1]
                to_ignore = ["__init__.py", "__pycache__"]
                if filename.endswith('.pyc') or filename in to_ignore:
                    pass
                else:
                    print(' - sequana("%s", "%s")' % (os.path.split(filename)[1], thisdir))
        raise ValueError("Choose a valid file from the list above")
    # in the code one may use / or \
    if where:
        # Explicit directory: build the path directly (existence not checked
        # on this branch).
        filename = os.sep.join([sharedir, where, filename])
    else:
        # try to introspect the different directories
        # return filename if found otherwise raise error
        # (_get_valid_file is a helper defined elsewhere in this module.)
        for thisdir in directories:
            if _get_valid_file(filename, thisdir):
                return _get_valid_file(filename, thisdir)
        raise Exception("unknown file %s. Type sequana_data() to get a list of valid names" % filename)
    return filename
| 35.453704 | 103 | 0.604074 |
c4d694c5a7929d60a82f4266c36fe92a6487d6e2 | 910 | py | Python | example/models.py | nim65s/django-jugemaj | 771fb5ddb7ceaa3f6b8aa4178c95ab249a8ed406 | [
"BSD-2-Clause"
] | null | null | null | example/models.py | nim65s/django-jugemaj | 771fb5ddb7ceaa3f6b8aa4178c95ab249a8ed406 | [
"BSD-2-Clause"
] | 15 | 2017-06-08T08:12:36.000Z | 2022-03-21T20:03:02.000Z | example/models.py | nim65s/django-jugemaj | 771fb5ddb7ceaa3f6b8aa4178c95ab249a8ed406 | [
"BSD-2-Clause"
] | null | null | null | """Django models for the example app."""
from django.db import models
from wikidata.client import Client # type: ignore
LANGS = ["fr", "en"] # ordered list of langages to check on wikidata
| 30.333333 | 83 | 0.657143 |
c4d859124d413014c3e68fa4b2d4e0363025ecc6 | 79 | py | Python | build_tests/python_opencv.py | AustinSchuh/971-Robot-Code | 99abc66fd2d899c0bdab338dc6f57dc5def9be8d | [
"Apache-2.0"
] | 39 | 2021-06-18T03:22:30.000Z | 2022-03-21T15:23:43.000Z | build_tests/python_opencv.py | AustinSchuh/971-Robot-Code | 99abc66fd2d899c0bdab338dc6f57dc5def9be8d | [
"Apache-2.0"
] | 10 | 2021-06-18T03:22:19.000Z | 2022-03-18T22:14:15.000Z | build_tests/python_opencv.py | AustinSchuh/971-Robot-Code | 99abc66fd2d899c0bdab338dc6f57dc5def9be8d | [
"Apache-2.0"
] | 4 | 2021-08-19T19:20:04.000Z | 2022-03-08T07:33:18.000Z | #!/usr/bin/python3
import cv2
if __name__ == '__main__':
    # Smoke test: constructing a SIFT detector fails loudly if the cv2
    # extension module is missing or was built without SIFT support.
    cv2.SIFT_create()
| 11.285714 | 26 | 0.683544 |
c4d91f431add3c50639d5dcc111d7ed136e9e222 | 1,096 | py | Python | tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_print_report_notebook.py | yasuyuky/pytorch-pfn-extras | febea6ded644d3b7a099ac557f06567a04b3b838 | [
"MIT"
] | 243 | 2020-05-12T01:15:46.000Z | 2022-03-21T22:07:57.000Z | tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_print_report_notebook.py | yasuyuky/pytorch-pfn-extras | febea6ded644d3b7a099ac557f06567a04b3b838 | [
"MIT"
] | 495 | 2020-05-12T06:45:12.000Z | 2022-03-31T07:14:02.000Z | tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_print_report_notebook.py | yasuyuky/pytorch-pfn-extras | febea6ded644d3b7a099ac557f06567a04b3b838 | [
"MIT"
] | 37 | 2020-05-12T02:16:07.000Z | 2021-08-11T06:00:16.000Z | import io
import pytest
import pytorch_pfn_extras as ppe
from pytorch_pfn_extras.training.extensions import _ipython_module_available
from pytorch_pfn_extras.training.extensions.log_report import _pandas_available
if __name__ == '__main__':
    # Allow running this test module directly: -v prints test names,
    # -s disables pytest's stdout capture.
    pytest.main([__file__, '-v', '-s'])
| 29.621622 | 79 | 0.702555 |
c4d94b82e516bfce1860e1958db385ce1c937172 | 2,012 | py | Python | examples/python/00-list-devices.py | vishal-prgmr/tiscamera | 0960b3d0fc87e784e63d12aab6badccde566813d | [
"Apache-2.0"
] | null | null | null | examples/python/00-list-devices.py | vishal-prgmr/tiscamera | 0960b3d0fc87e784e63d12aab6badccde566813d | [
"Apache-2.0"
] | null | null | null | examples/python/00-list-devices.py | vishal-prgmr/tiscamera | 0960b3d0fc87e784e63d12aab6badccde566813d | [
"Apache-2.0"
] | 1 | 2019-07-15T07:51:04.000Z | 2019-07-15T07:51:04.000Z | #!/usr/bin/env python3
# Copyright 2017 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example will show you how to list information about the available devices
#
import sys
import gi
gi.require_version("Tcam", "0.1")
gi.require_version("Gst", "1.0")
from gi.repository import Tcam, Gst
def list_devices():
    """Print model, serial number and backend for every attached camera."""
    source = Gst.ElementFactory.make("tcambin")

    for serial in source.get_device_serials():
        # get_device_info returns a 4-tuple:
        # (ok, model, identifier, connection_type), where 'identifier' is
        # the backend's own device name and 'connection_type' names the
        # backend ('aravis', 'v4l2', 'libusb' or 'unknown').
        (ok, model,
         identifier, connection_type) = source.get_device_info(serial)

        # 'ok' is False only for non-existent serials, which should not
        # happen while iterating get_device_serials(); guard anyway.
        if ok:
            print("Model: {} Serial: {} Type: {}".format(model,
                                                         serial,
                                                         connection_type))


if __name__ == "__main__":
    Gst.init(sys.argv)  # GStreamer must be initialized before making elements
    list_devices()
| 31.4375 | 80 | 0.656064 |
c4d963d3c3b56154e5edbed53e3b10e518fd66e7 | 2,260 | py | Python | regression/sgd.py | sahitpj/MachineLearning | 2ce5a337ec432daff64a216df6847ef834bcb8d7 | [
"MIT"
] | 2 | 2019-01-23T15:51:29.000Z | 2019-02-01T16:50:33.000Z | regression/sgd.py | sahitpj/MachineLearning | 2ce5a337ec432daff64a216df6847ef834bcb8d7 | [
"MIT"
] | null | null | null | regression/sgd.py | sahitpj/MachineLearning | 2ce5a337ec432daff64a216df6847ef834bcb8d7 | [
"MIT"
] | null | null | null | from .linear_torch import TorchGradientDescentAutogradRegression
import torch, math, random | 33.235294 | 96 | 0.581858 |
c4daf560e5af757855362756ff7ad96e183b2138 | 11,798 | py | Python | pystratis/api/node/tests/test_node.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 8 | 2021-06-30T20:44:22.000Z | 2021-12-07T14:42:22.000Z | pystratis/api/node/tests/test_node.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 2 | 2021-07-01T11:50:18.000Z | 2022-01-25T18:39:49.000Z | pystratis/api/node/tests/test_node.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 4 | 2021-07-01T04:36:42.000Z | 2021-09-17T10:54:19.000Z | import pytest
import ast
from pytest_mock import MockerFixture
from pystratis.api.node import Node
from pystratis.api.node.responsemodels import *
from pystratis.api import FullNodeState, FeatureInitializationState, LogRule
from pystratis.core.networks import StraxMain, CirrusMain
| 34.296512 | 121 | 0.653416 |
c4dc19d5ba016b2467c209f8659fe6f4ccf38d21 | 1,719 | py | Python | imapper/pose/confidence.py | amonszpart/iMapper | f8a1f7972548fc3759fdb67bcd186aaf7ac3ce49 | [
"Apache-2.0"
] | 18 | 2019-11-29T14:51:30.000Z | 2022-03-26T17:04:04.000Z | imapper/pose/confidence.py | amonszpart/iMapper | f8a1f7972548fc3759fdb67bcd186aaf7ac3ce49 | [
"Apache-2.0"
] | null | null | null | imapper/pose/confidence.py | amonszpart/iMapper | f8a1f7972548fc3759fdb67bcd186aaf7ac3ce49 | [
"Apache-2.0"
] | 2 | 2019-08-02T14:59:23.000Z | 2019-10-27T14:23:27.000Z | import numpy as np
def get_conf_thresholded(conf, thresh_log_conf, dtype_np):
    """Squash an unnormalized confidence into [0, 1).

    Negative confidences map to 0; non-negative ones go through a steep
    logistic curve tuned so that a raw value of ~0.0019 maps to 0.5
    (see https://www.desmos.com/calculator/olqbvoffua).

    Args:
        conf (float or np.ndarray):
            Unnormalized confidence value(s).
        thresh_log_conf:
            Unused here; kept for call-site compatibility.
        dtype_np (type):
            Desired floating-point type of the result.

    Returns:
        Normalized confidence(s) with dtype ``dtype_np``.
    """
    zero = dtype_np(0.)
    one = dtype_np(1.)
    # Steep logistic: 1 / (1 + exp(-5000 * conf + 9.5)).
    squashed = one / (one + np.exp(dtype_np(-5000.) * conf + dtype_np(9.5)))
    return np.where(conf < zero, zero, squashed).astype(dtype_np)
def get_confs(query_2d_full, frame_id, thresh_log_conf, mx_conf, dtype_np):
    """Collect per-joint confidences of a skeleton for one frame.

    Args:
        query_2d_full:
            Skeleton-like object exposing ``poses``, ``confidence``,
            ``has_confidence`` and ``is_confidence_normalized``.
        frame_id (int):
            Frame id to look up.
        thresh_log_conf:
            Forwarded to ``get_conf_thresholded`` when the skeleton's
            confidences are not already normalized.
        mx_conf:
            Running maximum confidence, or None to disable tracking.
        dtype_np (type):
            Floating-point type of the result.

    Returns:
        ``confs`` alone when ``mx_conf`` is None, otherwise the tuple
        ``(confs, mx_conf)`` with the updated maximum.
    """
    n_joints = query_2d_full.poses.shape[-1]
    confs = np.zeros(n_joints, dtype=dtype_np)
    already_normalized = query_2d_full.is_confidence_normalized()

    if query_2d_full.has_confidence(frame_id):
        for joint_id, raw_conf in query_2d_full.confidence[frame_id].items():
            if already_normalized:
                normalized = dtype_np(raw_conf)
            else:
                normalized = get_conf_thresholded(raw_conf, thresh_log_conf,
                                                  dtype_np)
            if mx_conf is not None and mx_conf < normalized:
                mx_conf = dtype_np(normalized)
            confs[joint_id] = dtype_np(normalized)

    if mx_conf is None:
        return confs
    assert isinstance(mx_conf, dtype_np)
    return confs, mx_conf
c4dc5f0661f1b298f41b5c9d7c600c79ade6c896 | 3,028 | py | Python | notes/lessons/lesson_1/dog_breed.py | jpenna/course-v3 | 492823b8a5981306bb2f78951b24786ea8d70b83 | [
"Apache-2.0"
] | null | null | null | notes/lessons/lesson_1/dog_breed.py | jpenna/course-v3 | 492823b8a5981306bb2f78951b24786ea8d70b83 | [
"Apache-2.0"
] | null | null | null | notes/lessons/lesson_1/dog_breed.py | jpenna/course-v3 | 492823b8a5981306bb2f78951b24786ea8d70b83 | [
"Apache-2.0"
] | null | null | null | from fastai.vision import *
from fastai.metrics import error_rate
# First model using pet images
###########################
####### Get dataset #######
###########################
# Batch size
bs = 64
# help(untar_data)
# print(URLs.PETS)
# URLs.PETS = https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet
# Downloads the images from a URL and untar it. Retruns a `path` object
path = untar_data(URLs.PETS)
path.ls() # List content in path
path_anno = path/'annotations'
path_img = path/'images'
fnames = get_image_files(path_img) # Get image files in path
# fnames[:5]
np.random.seed(2)
pattern = r'/([^/]+)_\d+.jpg$'
# ImageDataBunch: all the data you need to create a model
# How to get the labels? Check models.md#Labels for a few examples
data = ImageDataBunch.from_name_re(
path_img,
fnames,
pattern,
ds_tfms=get_transforms(), # Transform images: crop, resize, padding
size=224,
bs=bs
)
# Same name length, sizes, pixel values...
data.normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(7,6))
# Check number of classes/labels
print(data.classes) # Print labels
len(data.classes) # Print count
print(data.c) # Print count
###########################
######## Training #########
###########################
# Create the training object
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
# Training model
learn.model
# Trains
learn.fit_one_cycle(4)
# Save result
learn.save('stage-1')
###########################
######## Results ##########
###########################
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
# Print top losses
interp.plot_top_losses(9, figsize=(15,11))
doc(interp.plot_top_losses)
# Print confusion matrix
interp.plot_confusion_matrix(figsize=(12,12), dpi=60)
# Show list of most confused categories
interp.most_confused(min_val=2)
###########################
######## 2nd Round ########
###########################
# Unfreeze to train more
learn.unfreeze()
learn.fit_one_cycle(1)
learn.load('stage-1')
# Prepare chart
learn.lr_find()
# Plot chart
learn.recorder.plot()
learn.unfreeze()
learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-4))
###########################
###### Change model #######
###########################
# Bigger images + smaller batch size
data = ImageDataBunch.from_name_re(path_img, fnames, pattern, ds_tfms=get_transforms(),
size=299, bs=bs//2).normalize(imagenet_stats)
# Use resnet50
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
# Plot
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(8)
learn.save('stage-1-50')
# Fine-tune
learn.unfreeze()
learn.fit_one_cycle(3, max_lr=slice(1e-6,1e-4))
# Use previous model if fine-tune did not help
learn.load('stage-1-50')
###########################
# Interpret results again #
###########################
interp = ClassificationInterpretation.from_learner(learn)
interp.most_confused(min_val=2)
| 22.264706 | 87 | 0.634412 |
c4df8a6e7325fdd395e4ab3a661e80f967f7ad2e | 10,343 | py | Python | Day 1/Demos/ili934xnew.py | thingslu/IoT-Bootcamp | f349af47e0edf716656232b4dbec57d4838263bb | [
"MIT"
] | null | null | null | Day 1/Demos/ili934xnew.py | thingslu/IoT-Bootcamp | f349af47e0edf716656232b4dbec57d4838263bb | [
"MIT"
] | null | null | null | Day 1/Demos/ili934xnew.py | thingslu/IoT-Bootcamp | f349af47e0edf716656232b4dbec57d4838263bb | [
"MIT"
] | null | null | null | """
Copyright (c) 2017 Jeffrey N. Magee
https://github.com/jeffmer/micropython-ili9341
Jan 6, 2018
MIT License
https://github.com/jeffmer/micropython-ili9341/blob/master/LICENSE
"""
# This is an adapted version of the ILI934X driver as below.
# It works with multiple fonts and also works with the esp32 H/W SPI implementation
# Also includes a word wrap print function
# Proportional fonts are generated by Peter Hinch's Font-to-py
# MIT License; Copyright (c) 2017 Jeffrey N. Magee
# This file is part of MicroPython ILI934X driver
# Copyright (c) 2016 - 2017 Radomir Dopieralski, Mika Tuupola
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# Project home:
# https://github.com/tuupola/micropython-ili934x
import time
import ustruct
import tt32
import framebuf
from micropython import const
# ILI934x command opcodes, written over SPI before their parameter bytes.

# Basic display / read commands
_RDDSDR = const(0x0f) # Read Display Self-Diagnostic Result
_SLPOUT = const(0x11) # Sleep Out
_GAMSET = const(0x26) # Gamma Set
_DISPOFF = const(0x28) # Display Off
_DISPON = const(0x29) # Display On

# Memory / addressing commands
_CASET = const(0x2a) # Column Address Set
_PASET = const(0x2b) # Page Address Set
_RAMWR = const(0x2c) # Memory Write
_RAMRD = const(0x2e) # Memory Read
_MADCTL = const(0x36) # Memory Access Control
_VSCRSADD = const(0x37) # Vertical Scrolling Start Address
_PIXSET = const(0x3a) # Pixel Format Set

# Extended power / timing / gamma commands
_PWCTRLA = const(0xcb) # Power Control A
_PWCRTLB = const(0xcf) # Power Control B
_DTCTRLA = const(0xe8) # Driver Timing Control A
_DTCTRLB = const(0xea) # Driver Timing Control B
_PWRONCTRL = const(0xed) # Power on Sequence Control
_PRCTRL = const(0xf7) # Pump Ratio Control
_PWCTRL1 = const(0xc0) # Power Control 1
_PWCTRL2 = const(0xc1) # Power Control 2
_VMCTRL1 = const(0xc5) # VCOM Control 1
_VMCTRL2 = const(0xc7) # VCOM Control 2
_FRMCTR1 = const(0xb1) # Frame Rate Control 1
_DISCTRL = const(0xb6) # Display Function Control
_ENA3G = const(0xf2) # Enable 3G
_PGAMCTRL = const(0xe0) # Positive Gamma Control
_NGAMCTRL = const(0xe1) # Negative Gamma Control
_CHUNK = const(1024) #maximum number of pixels per spi write | 34.708054 | 100 | 0.552258 |
c4dfc7b426b8d7320aaf8852d42369736bd0a993 | 7,391 | py | Python | code/workflow/run_hpc_het.py | vtphan/HeteroplasmyWorkflow | cb5229b5331ac0c2ccc7b3b75c30cc81a377779e | [
"MIT"
] | 1 | 2019-09-03T06:30:40.000Z | 2019-09-03T06:30:40.000Z | code/workflow/run_hpc_het.py | vtphan/HeteroplasmyWorkflow | cb5229b5331ac0c2ccc7b3b75c30cc81a377779e | [
"MIT"
] | null | null | null | code/workflow/run_hpc_het.py | vtphan/HeteroplasmyWorkflow | cb5229b5331ac0c2ccc7b3b75c30cc81a377779e | [
"MIT"
] | null | null | null | import subprocess
import os
import sys
import datetime
import random
from configparser import ConfigParser
from datetime import datetime
import s03_heteroplasmy_likelihood, s04_sort_candidates, s05_select_sites, s06_location_conservation
import multiprocessing
if __name__ == '__main__':
    # Exactly 12 positional arguments are required (argv[0] is the script).
    if len(sys.argv) != 13:
        print('Usage: python', sys.argv[0], 'ref', 'annotation', 'dist', 'read_file', 'output.html', 'random_id', 'READS_DIR', 'output_dir', 'log_file', 'alignment_quality', 'score_threshold', 'percentage_threshold')
        # BUG FIX: exit non-zero on a usage error so calling pipelines
        # detect the failure (previously exited with status 0).
        sys.exit(1)

    # Bundle the CLI arguments into a single params dict for process().
    params = {
        'ref': sys.argv[1],
        'annotation': sys.argv[2],
        'dist': sys.argv[3],
        'read_file': sys.argv[4],
        'out_html_name': sys.argv[5],
        'random_id': sys.argv[6],
        'READS_DIR': sys.argv[7],
        'OUTPUT_DIR': sys.argv[8],
        'LOG_FILE': sys.argv[9],
        'alignment_quality': sys.argv[10],
        'score_threshold': sys.argv[11],
        'percentage_threshold': sys.argv[12],
    }
    # process() is defined elsewhere in this module.
    process(params)
| 33.748858 | 216 | 0.576647 |
c4e1556612a7d26fd5a293cb1615148692fb9547 | 3,700 | py | Python | airflow/providers/siasg/dw/transfers/relatorio_para_mongo.py | CarlosAdp/airflow-providers-siasg | 9230e8b883797f70610a0ec11def4cb04ba4a4c2 | [
"MIT"
] | 1 | 2021-12-15T14:57:44.000Z | 2021-12-15T14:57:44.000Z | airflow/providers/siasg/dw/transfers/relatorio_para_mongo.py | CarlosAdp/airflow-providers-siasg | 9230e8b883797f70610a0ec11def4cb04ba4a4c2 | [
"MIT"
] | null | null | null | airflow/providers/siasg/dw/transfers/relatorio_para_mongo.py | CarlosAdp/airflow-providers-siasg | 9230e8b883797f70610a0ec11def4cb04ba4a4c2 | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import Any, List
import json
import tempfile
from airflow.models.baseoperator import BaseOperator
from airflow.providers.mongo.hooks.mongo import MongoHook
import pandas
from airflow.providers.siasg.dw.hooks.dw import DWSIASGHook
| 32.173913 | 78 | 0.638378 |
c4e242f124dafd4c1bd85becaad0bedaf0cf455b | 5,422 | py | Python | utils/extractor.py | nwoodward/twarc | 4c7ff253ffdfd46a493ca1fcd75e6446667ae832 | [
"MIT"
] | 20 | 2019-12-31T17:30:16.000Z | 2022-03-20T20:14:16.000Z | utils/extractor.py | nwoodward/twarc | 4c7ff253ffdfd46a493ca1fcd75e6446667ae832 | [
"MIT"
] | 2 | 2020-04-17T16:22:55.000Z | 2020-06-20T16:05:03.000Z | utils/extractor.py | nwoodward/twarc | 4c7ff253ffdfd46a493ca1fcd75e6446667ae832 | [
"MIT"
] | 6 | 2019-01-16T17:51:31.000Z | 2021-08-21T20:29:13.000Z | #!/usr/bin/env python3
from datetime import datetime
import json
import os
import re
import argparse
import csv
import copy
import sys
import gzip
strptime = datetime.strptime
def parse(args):
    """Scan all matching tweet files and write extracted attributes as CSV.

    Deduplicates tweets by id, applies the optional date-range and hashtag
    filters, and delegates row writing to extract().  Prints a summary and
    the largest tweet id seen when done.
    """
    with open(args.output, 'w+', encoding="utf-8") as output:
        csv_writer = csv.writer(output, dialect=args.dialect)
        # Header row: one column per requested attribute.
        csv_writer.writerow([a.title for a in args.attributes])
        count = 0
        tweets = set()
        # tweets_files (defined elsewhere in this module) presumably yields
        # (filename, opener) pairs; the opener is used like open/gzip.open.
        for filename, f in tweets_files(args.string, args.path):
            print("parsing", filename)
            with f(filename, 'rb') as data_file:
                # One JSON document per line.
                for line in data_file:
                    try:
                        json_object = json.loads(line.decode("utf-8"))
                    except ValueError:
                        print("Error in", filename, "entry incomplete.")
                        continue
                    #Check for duplicates
                    identity = json_object['id']
                    if identity in tweets:
                        continue
                    tweets.add(identity)
                    #Check for time restrictions.
                    if args.start or args.end:
                        tweet_time = strptime(json_object['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
                        if args.start and args.start > tweet_time:
                            continue
                        if args.end and args.end < tweet_time:
                            continue
                    #Check for hashtag (for-else: skip tweet if no match).
                    if args.hashtag:
                        for entity in json_object['entities']["hashtags"]:
                            if entity['text'].lower() == args.hashtag:
                                break
                        else:
                            continue
                    count += extract(json_object, args, csv_writer)
    print("Searched", len(tweets), "tweets and recorded", count, "items.")
    # NOTE(review): max() raises ValueError when no tweets were seen at all.
    print("largest id:", max(tweets))
def extract(json_object, args, csv_writer):
    """Write one CSV row per combination of extracted attribute values.

    Each attribute may yield several values; the output rows form the
    cartesian product of all attributes' value lists.  Attributes with no
    match, and None values, contribute the placeholder "NA".

    Args:
        json_object (dict): decoded tweet.
        args: namespace whose ``attributes`` holds objects with a
            ``getElement`` accessor returning a list of values.
        csv_writer: target ``csv.writer``-like object.

    Returns:
        int: number of rows written.
    """
    rows = [[]]
    for attribute in args.attributes:
        values = attribute.getElement(json_object)
        if len(values) == 0:
            # No match: extend every partial row with a placeholder.
            for partial in rows:
                partial.append("NA")
            continue
        # Branch every partial row once per value (cartesian expansion).
        expanded = []
        for value in values:
            cell = "NA" if value is None else value
            branched = copy.deepcopy(rows)
            for partial in branched:
                partial.append(cell)
            expanded.extend(branched)
        rows = expanded
    for row in rows:
        csv_writer.writerow(row)
    return len(rows)
if __name__ == "__main__":
    # Command-line interface; after parsing, arguments are normalized in
    # place on the namespace before handing off to parse().
    parser = argparse.ArgumentParser(description='Extracts attributes from tweets.')
    parser.add_argument("attributes", nargs='*', help="Attributes to search for. Attributes inside nested inside other attributes should be seperated by a colon. Example: user:screen_name, entities:hashtags:text.")
    parser.add_argument("-dialect", default="excel", help="Sets dialect for csv output. Defaults to excel. See python module csv.list_dialects()")
    parser.add_argument("-string", default="", help="Regular expression for files to parse. Defaults to empty string.")
    parser.add_argument("-path", default="./", help="Optional path to folder containing tweets. Defaults to current folder.")
    parser.add_argument("-output", default="output.csv", help="Optional file to output results. Defaults to output.csv.")
    parser.add_argument("-start", default="", help="Define start date for tweets. Format (mm:dd:yyyy)")
    parser.add_argument("-end", default="", help="Define end date for tweets. Format (mm:dd:yyyy)")
    parser.add_argument("-hashtag", default="", help="Define a hashtag that must be in parsed tweets.")
    args = parser.parse_args()
    # Ensure the folder path has a trailing slash for later concatenation.
    if not args.path.endswith("/"):
        args.path += "/"
    # Empty date strings become False so parse() can test truthiness.
    args.start = strptime(args.start, '%m:%d:%Y') if args.start else False
    args.end = strptime(args.end, '%m:%d:%Y') if args.end else False
    # attriObject is defined elsewhere in this module.
    args.attributes = [attriObject(i) for i in args.attributes]
    args.string = re.compile(args.string)
    args.hashtag = args.hashtag.lower()
    parse(args)
| 39.576642 | 214 | 0.563445 |
c4e35d0568d412ee0973197494e60b17e7aa1939 | 2,143 | py | Python | scripts/bsvDeps.py | mhrtmnn/BSVTools | a42b84de5f7a6bc501467a24d3b35e85f670e50c | [
"MIT"
] | 7 | 2021-02-02T01:33:26.000Z | 2022-02-19T18:20:57.000Z | scripts/bsvDeps.py | mhrtmnn/BSVTools | a42b84de5f7a6bc501467a24d3b35e85f670e50c | [
"MIT"
] | 11 | 2021-01-07T11:32:05.000Z | 2022-01-13T11:21:03.000Z | scripts/bsvDeps.py | mhrtmnn/BSVTools | a42b84de5f7a6bc501467a24d3b35e85f670e50c | [
"MIT"
] | 10 | 2020-11-12T21:00:54.000Z | 2021-12-07T22:03:46.000Z | #!/usr/bin/python3
import sys
import glob
import os
import re
if __name__ == '__main__':
main() | 31.057971 | 134 | 0.508166 |
c4e39bd0b50f48ac5a5b147ee2436c04ced95c90 | 23,756 | py | Python | neuralmonkey/decoders/beam_search_decoder.py | kasnerz/neuralmonkey | 3552c79a32d24017440e17a0545f33014b66bb12 | [
"BSD-3-Clause"
] | null | null | null | neuralmonkey/decoders/beam_search_decoder.py | kasnerz/neuralmonkey | 3552c79a32d24017440e17a0545f33014b66bb12 | [
"BSD-3-Clause"
] | null | null | null | neuralmonkey/decoders/beam_search_decoder.py | kasnerz/neuralmonkey | 3552c79a32d24017440e17a0545f33014b66bb12 | [
"BSD-3-Clause"
] | null | null | null | """Beam search decoder.
This module implements the beam search algorithm for autoregressive decoders.
As any autoregressive decoder, this decoder works dynamically, which means
it uses the ``tf.while_loop`` function conditioned on both maximum output
length and list of finished hypotheses.
The beam search decoder uses four data strcutures during the decoding process.
``SearchState``, ``SearchResults``, ``BeamSearchLoopState``, and
``BeamSearchOutput``. The purpose of these is described in their own docstring.
These structures help the decoder to keep track of the decoding, enabling it
to be called e.g. during ensembling, when the content of the structures can be
changed and then fed back to the model.
The implementation mimics the API of the ``AutoregressiveDecoder`` class. There
are functions that prepare and return values that are supplied to the
``tf.while_loop`` function.
"""
# pylint: disable=too-many-lines
# Maybe move the definitions of the named tuple structures to a separate file?
from typing import Any, Callable, List, NamedTuple
# pylint: disable=unused-import
from typing import Optional
# pylint: enable=unused-import
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decoders.autoregressive import (
AutoregressiveDecoder, LoopState)
from neuralmonkey.decorators import tensor
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.tf_utils import (
append_tensor, gather_flat, get_state_shape_invariants, partial_transpose,
get_shape_list)
from neuralmonkey.vocabulary import (
Vocabulary, END_TOKEN_INDEX, PAD_TOKEN_INDEX)
# Constant we use in place of the np.inf
INF = 1e9
    def loop_continue_criterion(self, *args) -> tf.Tensor:
        """Decide whether to break out of the while loop.

        The criterion for stopping the loop is that either all hypotheses are
        finished or a maximum number of steps has been reached. Here the number
        of steps is the number of steps of the underlying decoder minus one,
        because this function is evaluated after the decoder step has been
        called and its step has been incremented. This is caused by the fact
        that we call the decoder body function at the end of the beam body
        function. (And that, in turn, is to support ensembling.)

        Arguments:
            args: A ``BeamSearchLoopState`` instance.

        Returns:
            A scalar boolean ``Tensor``.
        """
        loop_state = BeamSearchLoopState(*args)
        # The decoder body already incremented the step counter, hence the
        # minus one to get the beam step that was just evaluated.
        beam_step = loop_state.decoder_loop_state.feedables.step - 1
        finished = loop_state.search_state.finished

        # Continue while under the step budget AND at least one hypothesis
        # is still unfinished.
        max_step_cond = tf.less(beam_step, self.max_steps)
        unfinished_cond = tf.logical_not(tf.reduce_all(finished))

        return tf.logical_and(max_step_cond, unfinished_cond)
    def decoding_loop(self) -> BeamSearchOutput:
        """Create the decoding loop.

        This function mimics the behavior of the ``decoding_loop`` method of
        the ``AutoregressiveDecoder``, except the initial loop state is created
        outside this method because it is accessed and fed during ensembling.

        TODO: The ``finalize_loop`` method and the handling of attention loop
        states might be implemented in the future.

        Returns:
            This method returns a populated ``BeamSearchOutput`` object.
        """
        # Iterate get_body() until loop_continue_criterion signals stop.
        # The shape invariants (computed by get_state_shape_invariants over
        # the whole nested state) presumably relax dimensions that grow
        # between iterations.
        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(),
            self.initial_loop_state,
            shape_invariants=tf.contrib.framework.nest.map_structure(
                get_state_shape_invariants, self.initial_loop_state))

        # TODO: return att_loop_states properly
        return BeamSearchOutput(
            last_search_step_output=final_loop_state.search_results,
            last_dec_loop_state=final_loop_state.decoder_loop_state,
            last_search_state=final_loop_state.search_state,
            attention_loop_states=[])
def get_body(self) -> Callable[[Any], BeamSearchLoopState]:
    """Return a body function for ``tf.while_loop``.

    Returns:
        A function that performs a single decoding step.
    """
    # Reuse the wrapped decoder's own step function (inference mode).
    decoder_body = self.parent_decoder.get_body(train_mode=False)

    # pylint: disable=too-many-locals
    def body(*args: Any) -> BeamSearchLoopState:
        """Execute a single beam search step.

        An implementation of the beam search algorithm, which works as
        follows:

        1. Create a valid ``logprobs`` tensor which contains distributions
           over the output tokens for each hypothesis in the beam. For
           finished hypotheses, the log probabilities of all tokens except
           the padding token are set to negative infinity.

        2. Expand the beam by appending every possible token to every
           existing hypothesis. Update the log probabilitiy sum of each
           hypothesis and its length (add one for unfinished hypotheses).
           For each hypothesis, compute the score using the length penalty
           term.

        3. Select the ``beam_size`` best hypotheses from the score pool.
           This is implemented by flattening the scores tensor and using
           the ``tf.nn.top_k`` function.

        4. Reconstruct the beam by gathering elements from the original
           data structures using the data indices computed in the previous
           step.

        5. Call the ``body`` function of the underlying decoder.

        6. Populate a new ``BeamSearchLoopState`` object with the selected
           values and with the newly obtained decoder loop state.

        Note that this function expects the decoder to be called at least
        once prior the first execution.

        Arguments:
            args: An instance of the ``BeamSearchLoopState`` structure.
                (see the docs for this module)

        Returns:
            A ``BeamSearchLoopState`` after one step of the decoding.
        """
        loop_state = BeamSearchLoopState(*args)
        dec_loop_state = loop_state.decoder_loop_state
        search_state = loop_state.search_state
        search_results = loop_state.search_results

        # mask the probabilities
        # shape(logprobs) = [batch, beam, vocabulary]
        logprobs = search_state.prev_logprobs

        finished_mask = tf.expand_dims(
            tf.to_float(search_state.finished), 2)
        unfinished_logprobs = (1. - finished_mask) * logprobs

        # For finished hypotheses, only PAD keeps probability mass
        # (logprob 0); every other token gets -INF.
        finished_row = tf.one_hot(
            PAD_TOKEN_INDEX,
            len(self.vocabulary),
            dtype=tf.float32,
            on_value=0.,
            off_value=-INF)

        finished_logprobs = finished_mask * finished_row
        logprobs = unfinished_logprobs + finished_logprobs

        # update hypothesis scores
        # shape(hyp_probs) = [batch, beam, vocabulary]
        hyp_probs = tf.expand_dims(search_state.logprob_sum, 2) + logprobs

        # update hypothesis lengths (finished hypotheses do not grow)
        hyp_lengths = search_state.lengths + 1 - tf.to_int32(
            search_state.finished)

        # shape(scores) = [batch, beam, vocabulary]
        scores = hyp_probs / tf.expand_dims(
            self._length_penalty(hyp_lengths), 2)

        # reshape to [batch, beam * vocabulary] for topk
        scores_flat = tf.reshape(
            scores, [-1, self.beam_size * len(self.vocabulary)])

        # shape(both) = [batch, beam]
        topk_scores, topk_indices = tf.nn.top_k(
            scores_flat, k=self.beam_size)

        topk_indices.set_shape([None, self.beam_size])
        topk_scores.set_shape([None, self.beam_size])

        # Decode the flat top-k index back into (source beam, token):
        # index = beam_id * vocabulary_size + word_id.
        next_word_ids = tf.mod(topk_indices, len(self.vocabulary))
        next_beam_ids = tf.div(topk_indices, len(self.vocabulary))

        # batch offset for tf.gather_nd
        batch_offset = tf.tile(
            tf.expand_dims(tf.range(self.batch_size), 1),
            [1, self.beam_size])

        batch_beam_ids = tf.stack([batch_offset, next_beam_ids], axis=2)

        # gather the topk logprob_sums
        next_beam_lengths = tf.gather_nd(hyp_lengths, batch_beam_ids)
        next_beam_logprob_sum = tf.gather_nd(
            tf.reshape(
                hyp_probs, [-1, self.beam_size * len(self.vocabulary)]),
            tf.stack([batch_offset, topk_indices], axis=2))

        # mark finished beams
        next_finished = tf.gather_nd(search_state.finished, batch_beam_ids)
        next_just_finished = tf.equal(next_word_ids, END_TOKEN_INDEX)
        next_finished = tf.logical_or(next_finished, next_just_finished)

        # we need to flatten the feedables for the parent_decoder
        next_feedables = tf.contrib.framework.nest.map_structure(
            lambda x: gather_flat(x, batch_beam_ids,
                                  self.batch_size, self.beam_size),
            dec_loop_state.feedables)

        # _replace returns a copy of the namedtuple with updated fields.
        next_feedables = next_feedables._replace(
            input_symbol=tf.reshape(next_word_ids, [-1]),
            finished=tf.reshape(next_finished, [-1]))

        # histories have shape [len, batch, ...]
        # NOTE(review): ``gather_fn`` is presumably a partial over
        # ``gather_flat`` defined elsewhere in this module -- confirm.
        next_histories = tf.contrib.framework.nest.map_structure(
            gather_fn, dec_loop_state.histories)

        dec_loop_state = dec_loop_state._replace(
            feedables=next_feedables,
            histories=next_histories)

        # CALL THE DECODER BODY FUNCTION
        next_loop_state = decoder_body(*dec_loop_state)

        next_search_state = SearchState(
            logprob_sum=next_beam_logprob_sum,
            prev_logprobs=tf.reshape(
                tf.nn.log_softmax(next_loop_state.feedables.prev_logits),
                [self.batch_size, self.beam_size, len(self.vocabulary)]),
            lengths=next_beam_lengths,
            finished=next_finished)

        # Reorder the accumulated token history to follow the surviving
        # beams, then transpose back to [len, batch, beam].
        next_token_ids = tf.transpose(search_results.token_ids, [1, 2, 0])
        next_token_ids = tf.gather_nd(next_token_ids, batch_beam_ids)
        next_token_ids = tf.transpose(next_token_ids, [2, 0, 1])

        next_output = SearchResults(
            scores=topk_scores,
            token_ids=append_tensor(next_token_ids, next_word_ids))

        return BeamSearchLoopState(
            search_state=next_search_state,
            search_results=next_output,
            decoder_loop_state=next_loop_state)
    # pylint: enable=too-many-locals

    return body
def _length_penalty(self, lengths: tf.Tensor) -> tf.Tensor:
    """Apply length penalty ("lp") term from Eq. 14.

    https://arxiv.org/pdf/1609.08144.pdf

    Arguments:
        lengths: A ``Tensor`` of lengths of the hypotheses in the beam.

    Returns:
        A float ``Tensor`` with the length penalties for each hypothesis
        in the beam.
    """
    # lp(Y) = ((5 + |Y|) / 6) ** alpha, with alpha the normalization
    # coefficient configured on the decoder.
    base = (5. + tf.to_float(lengths)) / 6.
    return base ** self.length_normalization
def expand_to_beam(self, val: tf.Tensor, dim: int = 0) -> tf.Tensor:
    """Copy a tensor along a new beam dimension.

    Arguments:
        val: The ``Tensor`` to expand.
        dim: The dimension along which to expand. Usually, the batch axis.

    Returns:
        The expanded tensor.
    """
    shape = get_shape_list(val)

    # Scalars have no batch axis to expand along; return them untouched.
    if val.shape.ndims == 0:
        return val

    # The expanded axis absorbs the beam copies: its size is multiplied
    # by beam_size after tiling and reshaping.
    shape[dim] *= self.beam_size

    multiples = [1] * (len(shape) + 1)
    multiples[dim + 1] = self.beam_size

    tiled = tf.tile(tf.expand_dims(val, 1), multiples)
    return tf.reshape(tiled, shape)
| 40.332767 | 79 | 0.641985 |
c4e3f0031795840237e6c42c72c6980d42b2396d | 926 | py | Python | setup.py | CBDRH/cdeid | bd3d94f98a61d876b5630ab80a655fd9ce46730c | [
"Apache-2.0"
] | null | null | null | setup.py | CBDRH/cdeid | bd3d94f98a61d876b5630ab80a655fd9ce46730c | [
"Apache-2.0"
] | null | null | null | setup.py | CBDRH/cdeid | bd3d94f98a61d876b5630ab80a655fd9ce46730c | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# The PyPI long description is taken verbatim from the README.
with open('README.md', "r") as readme_file:
    readme = readme_file.read()

# NOTE(review): the license text is read but never passed to setup();
# presumably it was meant to feed a ``license=`` argument -- confirm.
with open('LICENSE') as license_file:
    license_content = license_file.read()

setup(
    name='cdeid',
    version='0.1.2',
    author='Leibo Liu',
    author_email='liuleibo@gmail.com',
    description='A Customized De-identification framework',
    long_description_content_type='text/markdown',
    long_description=readme,
    url='https://github.com/CBDRH/cdeid',
    keywords=['DE-IDENTIFICATION', 'NLP'],
    install_requires=[
        'spaCy>=2.3.2',
        'stanza>=1.1.1',
        'flair==0.8',
        'mako>=1.1.3',
    ],
    packages=find_packages(exclude=('tests', 'docs')),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.7',
)
| 27.235294 | 61 | 0.610151 |
c4e5659911ab0a62801300dec90e8ce55a3d300b | 5,821 | py | Python | pylib/bbl_ingest.py | rafelafrance/sightings-database | cc4bea9d2754234c6a09732988aceeb929957e99 | [
"MIT"
] | 4 | 2018-10-19T15:04:21.000Z | 2021-07-26T12:07:01.000Z | pylib/bbl_ingest.py | rafelafrance/sightings-database | cc4bea9d2754234c6a09732988aceeb929957e99 | [
"MIT"
] | 1 | 2018-10-23T21:00:21.000Z | 2018-10-24T15:06:58.000Z | pylib/bbl_ingest.py | rafelafrance/sightings-database | cc4bea9d2754234c6a09732988aceeb929957e99 | [
"MIT"
] | 2 | 2018-08-08T17:08:57.000Z | 2019-04-04T15:13:55.000Z | """Ingest USGS Bird Banding Laboratory data."""
from pathlib import Path
import pandas as pd
from . import db, util
# Identifier for this dataset in the shared sightings database.
DATASET_ID = 'bbl'

# Raw BBL data layout: one directory per record type plus the scraped
# species lookup page.
RAW_DIR = Path('data') / 'raw' / DATASET_ID
BANDING = RAW_DIR / 'Banding'
ENCOUNTERS = RAW_DIR / 'Encounters'
RECAPTURES = RAW_DIR / 'Recaptures'
SPECIES = RAW_DIR / 'species.html'

# Place radii in metres, chosen by coordinate precision code.
# NOTE(review): 111.32 km is roughly one *degree* of latitude; the names
# say minutes (one arc-minute is ~1855 m) -- confirm the intended radii.
ONE_MIN = 111.32 * 1000
TEN_MIN = 111.32 * 1000 * 10
EXACT = 0
def ingest():
    """Ingest USGS Bird Banding Laboratory data."""
    # Start from a clean slate for this dataset before re-ingesting.
    db.delete_dataset_records(DATASET_ID)

    taxon_map = get_taxa()

    dataset_record = {
        'dataset_id': DATASET_ID,
        'title': 'Bird Banding Laboratory (BBL)',
        'version': '2020.0',
        'url': ('https://www.usgs.gov/centers/pwrc/science/'
                'bird-banding-laboratory'),
    }
    db.insert_dataset(dataset_record)

    # Thread the growing place-id cache through every ingestion phase so
    # identical coordinates are only inserted once.
    place_map = insert_banding_data({}, taxon_map)
    place_map = insert_encounter_data(
        ENCOUNTERS, place_map, taxon_map, 'encounter')
    insert_encounter_data(RECAPTURES, place_map, taxon_map, 'recapture')
def get_taxa():
    """Build a taxa table to link to our taxa.

    Maps the BBL four-digit species number (zero-padded string) to our
    internal taxon_id, keeping only species present in both sources.
    """
    # BBL species table scraped from the bundled HTML page.
    codes = pd.read_html(str(SPECIES))[0]
    codes = codes.rename(columns={
        'Scientific Name': 'sci_name',
        'Species Number': 'species_id'})
    codes = codes[codes['sci_name'].notna()]
    code_by_name = codes.set_index('sci_name')['species_id'].to_dict()

    sql = """SELECT taxon_id, sci_name FROM taxa WHERE "class"='aves';"""
    taxa = pd.read_sql(sql, db.connect())
    taxon_by_name = taxa.set_index('sci_name')['taxon_id'].to_dict()

    to_taxon_id = {}
    for sci_name, species_num in code_by_name.items():
        taxon_id = taxon_by_name.get(sci_name)
        if taxon_id:
            # BBL species IDs are zero-padded four-digit strings.
            to_taxon_id[str(species_num).zfill(4)] = taxon_id
    return to_taxon_id
def insert_banding_data(to_place_id, to_taxon_id):
    """Insert raw banding data."""
    util.log(f'Inserting {DATASET_ID} banding data')

    for path in sorted(BANDING.glob('*.csv')):
        util.log(f'File {path}')

        frame = read_csv(
            path, 'LON_DECIMAL_DEGREES', 'LAT_DECIMAL_DEGREES', 'banding')
        frame = filter_data(
            frame, to_taxon_id,
            'BANDING_DATE', 'SPECIES_ID', 'COORD_PRECISION')

        # insert_places returns the cache extended with any new places.
        to_place_id = insert_places(frame, to_place_id, 'COORD_PRECISION')

        event_fields = """ BAND_NUM BANDING_DATE TYPE """.split()
        insert_events(frame, event_fields)

        count_fields = """
            AGE_CODE SEX_CODE SPECIES_ID SPECIES_NAME TYPE """.split()
        insert_counts(frame, count_fields)

    return to_place_id
def insert_encounter_data(dir_, to_place_id, to_taxon_id, type_):
    """Insert raw encounter and recapture data."""
    util.log(f'Inserting {DATASET_ID} {type_} data')

    for path in sorted(dir_.glob('*.csv')):
        util.log(f'File {path}')

        # Encounter files use E_* (encounter) and B_* (banding) columns.
        frame = read_csv(
            path, 'E_LON_DECIMAL_DEGREES', 'E_LAT_DECIMAL_DEGREES', type_)
        frame = filter_data(
            frame, to_taxon_id,
            'ENCOUNTER_DATE', 'B_SPECIES_ID', 'E_COORD_PRECISION')

        # insert_places returns the cache extended with any new places.
        to_place_id = insert_places(frame, to_place_id, 'E_COORD_PRECISION')

        event_fields = """ BAND_NUM ENCOUNTER_DATE TYPE """.split()
        insert_events(frame, event_fields)

        count_fields = """
            B_AGE_CODE B_SEX_CODE B_SPECIES_ID B_SPECIES_NAME MIN_AGE_AT_ENC
            ORIGINAL_BAND TYPE """.split()
        insert_counts(frame, count_fields)

    return to_place_id
def read_csv(path, lng, lat, type_):
    """Read in a CSV file."""
    # Read every column as text; downstream filters expect strings.
    frame = pd.read_csv(path, dtype='unicode').fillna('')
    util.normalize_columns_names(frame)

    renames = {lng: 'lng', lat: 'lat'}
    frame = frame.rename(columns=renames)

    # Tag every row with the record type and the owning dataset.
    frame['TYPE'] = type_
    frame['dataset_id'] = DATASET_ID
    return frame
def filter_data(df, to_taxon_id, event_date, species_id, coord_precision):
    """Remove records that will not work for our analysis.

    Adds parsed ``date`` and mapped ``taxon_id`` columns to *df* in place
    and returns the subset of rows that survive filtering.
    """
    # Parse the event date; unparsable values become NaT and are dropped.
    df['date'] = pd.to_datetime(df[event_date], errors='coerce')

    # Link to our taxa; unmatched species IDs become NaN and are dropped.
    df['taxon_id'] = df[species_id].map(to_taxon_id)

    keep = df['date'].notna() & df['taxon_id'].notna()

    # Precision codes 12 and 72 (state / country) cover too big an area.
    keep &= ~df[coord_precision].isin(['12', '72'])

    return df.loc[keep]
def insert_places(df, to_place_id, coord_precision):
    """Insert place records.

    Deduplicates places by (lng, lat, radius), inserts only places not
    already in the ``to_place_id`` cache, stamps each row of *df* with its
    ``place_id``, and returns the extended cache.
    """
    util.filter_lng_lat(df, 'lng', 'lat')

    # Radius defaults to the coarsest bucket, then is tightened for the
    # more precise coordinate-precision codes.
    df['radius'] = TEN_MIN
    df.loc[df[coord_precision] == '0', 'radius'] = EXACT
    df.loc[df[coord_precision].isin(['1', '60']), 'radius'] = ONE_MIN

    # A place is identified by its coordinates and radius.
    df['place_key'] = tuple(zip(df.lng, df.lat, df.radius))
    places = df.drop_duplicates('place_key')

    # Only insert places we have not seen in earlier files.
    old_places = places['place_key'].isin(to_place_id)
    places = places[~old_places]

    places['place_id'] = db.create_ids(places, 'places')
    places['place_json'] = util.json_object(places, [coord_precision])

    places.loc[:, db.PLACE_FIELDS].to_sql(
        'places', db.connect(), if_exists='append', index=False)

    # Merge the freshly inserted ids into the cache, then resolve every
    # row's place_id (old and new) from it.
    new_place_ids = places.set_index('place_key')['place_id'].to_dict()
    to_place_id = {**to_place_id, **new_place_ids}
    df['place_id'] = df['place_key'].map(to_place_id)

    return to_place_id
def insert_events(df, event_json):
    """Insert event records.

    Mutates *df* in place (adds event columns) and appends the selected
    event fields to the ``events`` table.
    """
    # Primary keys are allocated up front by the db module.
    df['event_id'] = db.create_ids(df, 'events')

    # Split the parsed date into year and day-of-year columns.
    df['year'] = df['date'].dt.strftime('%Y')
    df['day'] = df['date'].dt.strftime('%j')

    # BBL records carry no start/end times for an event.
    df['started'] = None
    df['ended'] = None

    # Extra per-event attributes are preserved as a JSON blob.
    df['event_json'] = util.json_object(df, event_json)

    df.loc[:, db.EVENT_FIELDS].to_sql(
        'events', db.connect(), if_exists='append', index=False)
def insert_counts(df, count_json):
    """Insert count records.

    Mutates *df* in place (adds count columns) and appends the selected
    count fields to the ``counts`` table.
    """
    df['count_id'] = db.create_ids(df, 'counts')

    # Each row appears to represent a single banded bird, hence a fixed
    # count of one -- confirm against the raw BBL documentation.
    df['count'] = 1

    # Extra per-count attributes are preserved as a JSON blob.
    df['count_json'] = util.json_object(df, count_json)

    df.loc[:, db.COUNT_FIELDS].to_sql(
        'counts', db.connect(), if_exists='append', index=False)
if __name__ == '__main__':
ingest()
| 29.39899 | 77 | 0.641986 |
c4e58b861bb09eed1abcd83a5bccb57c68b95882 | 5,212 | py | Python | pyhdtoolkit/utils/cmdline.py | fsoubelet/PyhDToolk | aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66 | [
"MIT"
] | 5 | 2020-05-28T09:16:01.000Z | 2021-12-27T18:59:15.000Z | pyhdtoolkit/utils/cmdline.py | fsoubelet/PyhDToolk | aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66 | [
"MIT"
] | 71 | 2020-02-20T20:32:43.000Z | 2022-03-24T17:04:28.000Z | pyhdtoolkit/utils/cmdline.py | fsoubelet/PyhDToolk | aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66 | [
"MIT"
] | 2 | 2021-09-28T16:01:06.000Z | 2022-03-16T19:04:23.000Z | """
Module utils.cmdline
--------------------
Created on 2019.11.06
:author: Felix Soubelet (felix.soubelet@cern.ch)
Utility script to help run commands and access the commandline.
"""
import errno
import os
import signal
import subprocess
from typing import Mapping, Optional, Tuple
from loguru import logger
from pyhdtoolkit.utils.contexts import timeit
| 41.365079 | 110 | 0.620875 |
c4e5c92095ff07343908dc4ad5fe4a10a7b5ac03 | 498 | pyde | Python | sketches/noll/noll.pyde | kantel/processingpy | 74aae222e46f68d1c8f06307aaede3cdae65c8ec | [
"MIT"
] | 4 | 2018-06-03T02:11:46.000Z | 2021-08-18T19:55:15.000Z | sketches/noll/noll.pyde | kantel/processingpy | 74aae222e46f68d1c8f06307aaede3cdae65c8ec | [
"MIT"
] | null | null | null | sketches/noll/noll.pyde | kantel/processingpy | 74aae222e46f68d1c8f06307aaede3cdae65c8ec | [
"MIT"
] | 3 | 2019-12-23T19:12:51.000Z | 2021-04-30T14:00:31.000Z | from random import randint
margin = 5
| 22.636364 | 57 | 0.574297 |
c4e5fbaf6cd5fcd29917077a2405f0c945660808 | 992 | py | Python | minimalist_cms/cms_content/migrations/0004_auto_20190719_1242.py | wullerot/django-minimalist-cms | bd6795d9647f9db1d98e83398238c0e63aca3c1b | [
"MIT"
] | null | null | null | minimalist_cms/cms_content/migrations/0004_auto_20190719_1242.py | wullerot/django-minimalist-cms | bd6795d9647f9db1d98e83398238c0e63aca3c1b | [
"MIT"
] | null | null | null | minimalist_cms/cms_content/migrations/0004_auto_20190719_1242.py | wullerot/django-minimalist-cms | bd6795d9647f9db1d98e83398238c0e63aca3c1b | [
"MIT"
] | null | null | null | # Generated by Django 2.1.10 on 2019-07-19 12:42
from django.db import migrations, models
import django.db.models.deletion
| 33.066667 | 164 | 0.633065 |
c4e7ad2ce4410048f7e3d9df95d8ae13cd8fa8fc | 3,174 | py | Python | original-paas/copy_to_container/www/spdpaas/src/celeryApp/celeryConfig.py | yishan1331/docker-practice | 91a1a434cbffc33790678af5e09de310386812d1 | [
"MIT"
] | null | null | null | original-paas/copy_to_container/www/spdpaas/src/celeryApp/celeryConfig.py | yishan1331/docker-practice | 91a1a434cbffc33790678af5e09de310386812d1 | [
"MIT"
] | null | null | null | original-paas/copy_to_container/www/spdpaas/src/celeryApp/celeryConfig.py | yishan1331/docker-practice | 91a1a434cbffc33790678af5e09de310386812d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
==============================================================================
created : 02/08/2021
Last update: 02/08/2021
Developer: Yishan Tsai
Lite Version 1 @Yishan08032019
Filename: celeryconfig.py
Description: about celery configuration
==============================================================================
"""
from kombu import Queue | 41.220779 | 236 | 0.639887 |
c4e7db74b0a777f921aa87993b291e973a2d6ac3 | 1,652 | py | Python | toontown/coghq/LawbotHQExterior.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | 1 | 2020-09-27T22:12:47.000Z | 2020-09-27T22:12:47.000Z | toontown/coghq/LawbotHQExterior.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | null | null | null | toontown/coghq/LawbotHQExterior.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | 2 | 2020-09-26T20:37:18.000Z | 2020-11-15T20:55:33.000Z | from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from pandac.PandaModules import *
from toontown.battle import BattlePlace
from toontown.building import Elevator
from toontown.coghq import CogHQExterior
from toontown.dna.DNAParser import loadDNAFileAI
from libpandadna import DNAStorage
from toontown.hood import ZoneUtil
from toontown.toonbase import ToontownGlobals
| 41.3 | 79 | 0.72276 |
c4e813434f242369956777c090c26e6a052fa049 | 457 | py | Python | etsy_convos/convos/migrations/0005_convothread_last_message_at.py | jessehon/etsy-convos | 34631f49ec745fa11611af6122a7522adc16d241 | [
"BSD-3-Clause"
] | 2 | 2018-12-17T15:47:43.000Z | 2020-02-29T19:47:30.000Z | etsy_convos/convos/migrations/0005_convothread_last_message_at.py | jessehon/etsy-convos | 34631f49ec745fa11611af6122a7522adc16d241 | [
"BSD-3-Clause"
] | 2 | 2017-07-12T02:45:33.000Z | 2017-11-02T00:37:59.000Z | etsy_convos/convos/migrations/0005_convothread_last_message_at.py | jessehon/etsy-convos | 34631f49ec745fa11611af6122a7522adc16d241 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 22.85 | 94 | 0.630197 |
c4e9c3ba14a1425fb2bb7e78afee5ef252184d66 | 3,704 | py | Python | pycoax/examples/40_eab.py | lowobservable/coax | 9714fdfb418dff56357b9a35d2da3a91b8a60ffe | [
"0BSD"
] | 21 | 2020-05-11T19:46:29.000Z | 2022-02-09T01:32:41.000Z | pycoax/examples/40_eab.py | lowobservable/coax-interface | 614f8a5f448b1f7e8298ced2585c178f4d7f435d | [
"0BSD"
] | null | null | null | pycoax/examples/40_eab.py | lowobservable/coax-interface | 614f8a5f448b1f7e8298ced2585c178f4d7f435d | [
"0BSD"
] | 5 | 2020-07-20T08:05:10.000Z | 2022-01-30T13:57:05.000Z | #!/usr/bin/env python
import sys
from itertools import chain
from common import open_example_serial_interface
from coax import read_feature_ids, parse_features, Feature, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, EABWriteAlternate, EABLoadMask
# Demonstrates writing extended-attribute (EAB) formatted text to a 3270
# coax terminal.  NOTE(review): ``get_features`` and ``eab_alternate_zip``
# are referenced but not defined in this excerpt -- presumably helpers
# from the examples' shared code; confirm.
with open_example_serial_interface() as interface:
    features = get_features(interface)

    # The EAB demo requires a terminal with the EAB feature installed.
    if Feature.EAB not in features:
        sys.exit('No EAB feature found.')

    eab_address = features[Feature.EAB]

    print(f'EAB feature found at address {eab_address}')

    # Each section below positions the address counter, then writes a row
    # of terminal character codes (regen buffer), optionally interleaved
    # with per-cell extended attributes (EAB buffer).

    # Protected Normal
    interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(80)])
    regen_buffer = bytes.fromhex('e0 08 00 af 91 8e 93 84 82 93 84 83 00 ad 8e 91 8c 80 8b 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 09')
    interface.execute(WriteData(regen_buffer))

    # Protected Intense
    interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(160)])
    regen_buffer = bytes.fromhex('e8 08 00 af 91 8e 93 84 82 93 84 83 00 a8 8d 93 84 8d 92 84 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 09')
    interface.execute(WriteData(regen_buffer))

    # Normal EFA (extended field attributes via the EAB plane)
    interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(64)])
    regen_buffer = bytes.fromhex('e0 08 00 ad 8e 91 8c 80 8b 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
    eab_buffer = bytes.fromhex('00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
    interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))

    # Blink EFA
    interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(144)])
    regen_buffer = bytes.fromhex('e0 08 00 a1 8b 88 8d 8a 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
    eab_buffer = bytes.fromhex('40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
    interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))

    # Reverse EFA
    interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(224)])
    regen_buffer = bytes.fromhex('e0 08 00 b1 84 95 84 91 92 84 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
    eab_buffer = bytes.fromhex('80 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
    interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))

    # Underline EFA
    interface.execute([LoadAddressCounterHi(2), LoadAddressCounterLo(48)])
    regen_buffer = bytes.fromhex('e0 08 00 b4 8d 83 84 91 8b 88 8d 84 00 a4 a5 a0 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
    eab_buffer = bytes.fromhex('c0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
    interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
| 49.386667 | 185 | 0.718952 |
c4ea2eeeaf4f202bab8eda79b95fc4612a9dbdac | 404 | py | Python | ApkInstall/Case/TV/DeviceConnect.py | LiuTianen/PackManage | 4b067954cc223baa14569a6f1517954b9cdb968f | [
"MIT"
] | null | null | null | ApkInstall/Case/TV/DeviceConnect.py | LiuTianen/PackManage | 4b067954cc223baa14569a6f1517954b9cdb968f | [
"MIT"
] | null | null | null | ApkInstall/Case/TV/DeviceConnect.py | LiuTianen/PackManage | 4b067954cc223baa14569a6f1517954b9cdb968f | [
"MIT"
] | null | null | null | # coding=utf-8
from Base.DevicesList import devicesList as dl
from Base.Common import Common
if __name__ == '__main__':
DevicesConnect().deviceConnect()
| 22.444444 | 46 | 0.626238 |
c4ef333c1b8a1206f386a828b321c850713abce0 | 3,692 | py | Python | angr/storage/memory_mixins/hex_dumper_mixin.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 6,132 | 2015-08-06T23:24:47.000Z | 2022-03-31T21:49:34.000Z | angr/storage/memory_mixins/hex_dumper_mixin.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 2,272 | 2015-08-10T08:40:07.000Z | 2022-03-31T23:46:44.000Z | angr/storage/memory_mixins/hex_dumper_mixin.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 1,155 | 2015-08-06T23:37:39.000Z | 2022-03-31T05:54:11.000Z | import string
from ...errors import SimValueError
from . import MemoryMixin
| 46.734177 | 106 | 0.577194 |
c4ef979dcf699104f97d1724f4ddb27a38c7154c | 4,782 | py | Python | src/greplin/scales/formats.py | frenzymadness/scales | 0aced26eb050ceb98ee9d5d6cdca8db448666986 | [
"Apache-2.0"
] | 273 | 2015-01-01T19:04:25.000Z | 2022-02-13T18:16:28.000Z | src/greplin/scales/formats.py | frenzymadness/scales | 0aced26eb050ceb98ee9d5d6cdca8db448666986 | [
"Apache-2.0"
] | 14 | 2015-02-03T09:18:57.000Z | 2021-12-18T11:04:15.000Z | src/greplin/scales/formats.py | frenzymadness/scales | 0aced26eb050ceb98ee9d5d6cdca8db448666986 | [
"Apache-2.0"
] | 35 | 2015-01-20T20:04:30.000Z | 2020-12-30T20:43:04.000Z | # Copyright 2011 The scales Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatting methods for stats."""
from greplin import scales
import cgi
import six
import json
import operator
import re
# Comparison operators recognized in a stats query, keyed by their
# textual form.  '=' and '==' are synonyms for equality.
OPERATORS = {
    '>=': operator.ge,
    '>': operator.gt,
    '<': operator.lt,
    '<=': operator.le,
    '=': operator.eq,
    '==': operator.eq,
    '!=': operator.ne
}

# Splits a query such as "errors>=10" into ("errors", ">=", "10").
# Longer operators must come first in the alternation: regex alternation
# takes the leftmost matching branch, so with the raw dict ordering
# "a<=b" split as ('a', '<', '=b') and "x==3" as ('x', '=', '=3'),
# applying the wrong operator to a garbled value.  Sorting by length
# (descending, stable) guarantees '<=', '>=', '==' and '!=' win over
# their one-character prefixes.
OPERATOR = re.compile(
    '(%s)' % '|'.join(sorted(OPERATORS, key=len, reverse=True)))
def runQuery(statDict, query):
  """Filters for the given query.

  Returns a new dict containing only entries whose key equals the query's
  key part and whose value satisfies the query's optional comparison;
  nested stat containers are searched recursively.
  """
  # A query is either a bare key ("errors") or key-op-value ("errors>=10"),
  # which OPERATOR.split turns into 1 or 3 parts.
  parts = [x.strip() for x in OPERATOR.split(query)]
  assert len(parts) in (1, 3)
  queryKey = parts[0]

  result = {}
  for key, value in six.iteritems(statDict):
    if key == queryKey:
      if len(parts) == 3:
        op = OPERATORS[parts[1]]
        try:
          # Coerce the query value to the stat's own type so comparisons
          # are like-for-like.  NOTE(review): falsy stats (e.g. 0) skip
          # the coercion and compare against the raw string -- confirm
          # this is intended.
          queryValue = type(value)(parts[2]) if value else parts[2]
        except (TypeError, ValueError):
          # Uncoercible query value: treat as a non-match.
          continue
        if not op(value, queryValue):
          continue
      result[key] = value
    elif isinstance(value, scales.StatContainer) or isinstance(value, dict):
      # Recurse into nested containers; keep only non-empty sub-results.
      child = runQuery(value, query)
      if child:
        result[key] = child

  return result
def htmlHeader(output, path, serverName, query = None):
  """Writes an HTML header.

  Args:
    output: A file-like object with a ``write`` method.
    path: The stats sub-path being viewed; shown in the title unless it
        is empty or '/'.
    serverName: Human-readable server name for the title and heading.
    query: Optional filter query echoed back into the filter box.
  """
  if path and path != '/':
    output.write('<title>%s - Status: %s</title>' % (serverName, path))
  else:
    output.write('<title>%s - Status</title>' % serverName)

  output.write('''
<style>
body,td { font-family: monospace }
.level div {
  padding-bottom: 4px;
}
.level .level {
  margin-left: 2em;
  padding: 1px 0;
}
span { color: #090; vertical-align: top }
.key { color: black; font-weight: bold }
.int, .float { color: #00c }
</style>
''')

  output.write('<h1 style="margin: 0">Stats</h1>')
  output.write('<h3 style="margin: 3px 0 18px">%s</h3>' % serverName)

  # The query comes from the request and is echoed into an HTML attribute;
  # escape it so a crafted query cannot break out of the attribute and
  # inject markup (reflected XSS).
  safeQuery = ((query or '')
               .replace('&', '&amp;')
               .replace('<', '&lt;')
               .replace('>', '&gt;')
               .replace('"', '&quot;'))
  output.write(
      '<p><form action="#" method="GET">Filter: <input type="text" name="query" size="20" value="%s"></form></p>' %
      safeQuery)
def htmlFormat(output, pathParts = (), statDict = None, query = None):
  """Render the given stats (or the process-wide stats) as HTML."""
  # Fall back to the global stats registry when no dict is supplied.
  stats = statDict or scales.getStats()
  if query:
    stats = runQuery(stats, query)
  _htmlRenderDict(pathParts, stats, output)
def _htmlRenderDict(pathParts, statDict, output):
  """Render a dictionary as a table - recursing as necessary.

  Keys are sorted; nested dict-like values become nested <div> levels,
  collapsed stat containers become links, and leaf values are escaped
  and printed with a CSS class named after their Python type.
  """
  keys = list(statDict.keys())
  keys.sort()

  links = []

  output.write('<div class="level">')
  for key in keys:
    keyStr = cgi.escape(_utf8str(key))
    value = statDict[key]
    # Callable stats are evaluated at render time.
    if hasattr(value, '__call__'):
      value = value()

    # Anything dict-like is rendered as a nested level.
    if hasattr(value, 'keys'):
      valuePath = pathParts + (keyStr,)
      if isinstance(value, scales.StatContainer) and value.isCollapsed():
        # Collapsed containers render as a link instead of inline;
        # collected and emitted after the regular entries.
        link = '/status/' + '/'.join(valuePath)
        links.append('<div class="key"><a href="%s">%s</a></div>' % (link, keyStr))
      else:
        output.write('<div class="key">%s</div>' % keyStr)
        _htmlRenderDict(valuePath, value, output)
    else:
      # Leaf value: the type name doubles as the CSS class (.int, .float).
      output.write('<div><span class="key">%s</span> <span class="%s">%s</span></div>' %
                   (keyStr, type(value).__name__, cgi.escape(_utf8str(value)).replace('\n', '<br/>')))

  if links:
    for link in links:
      output.write(link)

  output.write('</div>')
def _utf8str(x):
  """Render ``x`` as a UTF-8 string (the native ``str`` on Python 3)."""
  if six.PY3:
    return str(x)
  # Python 2: normalize everything to a UTF-8 encoded byte string.
  if isinstance(x, six.text_type):
    return x.encode('utf-8')
  if isinstance(x, six.binary_type):
    return x
  return six.binary_type(x)
def jsonFormat(output, statDict = None, query = None, pretty = False):
  """Formats as JSON, writing to the given object.

  Args:
    output: A file-like object with a ``write`` method.
    statDict: Stats to serialize; defaults to the global registry.
    query: Optional filter applied via ``runQuery``.
    pretty: When true, indent the JSON output by two spaces.
  """
  statDict = statDict or scales.getStats()
  if query:
    statDict = runQuery(statDict, query)
  indent = 2 if pretty else None
  # At first, assume that strings are in UTF-8. If this fails -- if, for
  # example, we have crazy binary data -- then in order to get *something*
  # out, we assume ISO-8859-1, which maps each byte to a unicode code point.
  # NOTE(review): the ``encoding`` keyword only exists on Python 2's
  # json.dumps; on Python 3 this fallback would raise TypeError -- confirm
  # the Python 2 code path is the only one that can hit UnicodeDecodeError.
  try:
    serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent)
  except UnicodeDecodeError:
    serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent, encoding='iso-8859-1')
  output.write(serialized)
  output.write('\n')
| 30.075472 | 115 | 0.648055 |
c4f08226a69d6d813d6c2054e9344810e8a63453 | 5,911 | py | Python | src/weight_graph.py | kavdi/data-structures | cc0be35afa279b92178b1cbaf43a033176cac095 | [
"MIT"
] | null | null | null | src/weight_graph.py | kavdi/data-structures | cc0be35afa279b92178b1cbaf43a033176cac095 | [
"MIT"
] | null | null | null | src/weight_graph.py | kavdi/data-structures | cc0be35afa279b92178b1cbaf43a033176cac095 | [
"MIT"
] | 1 | 2017-11-21T23:39:44.000Z | 2017-11-21T23:39:44.000Z | """Implement a weighted graph."""
| 32.657459 | 79 | 0.552022 |
c4f11548c4eb952f26c0dd99e9fd5c3f5a59fc56 | 376 | py | Python | sqlitedb/settings.py | BelovN/orm | 3dc9633a703369b99069a64fab3795c233a665cc | [
"Apache-2.0"
] | 1 | 2021-02-10T13:49:36.000Z | 2021-02-10T13:49:36.000Z | sqlitedb/settings.py | BelovN/orm | 3dc9633a703369b99069a64fab3795c233a665cc | [
"Apache-2.0"
] | null | null | null | sqlitedb/settings.py | BelovN/orm | 3dc9633a703369b99069a64fab3795c233a665cc | [
"Apache-2.0"
] | null | null | null |
DB_NAME = 'database.db'
| 22.117647 | 56 | 0.672872 |
c4f2275c8950f55ba51fa504b3d7b04b4200e057 | 170 | py | Python | hon/commands/clean.py | swquinn/hon | 333332029ee884a8822d38024659d5d7da64ff1a | [
"MIT"
] | null | null | null | hon/commands/clean.py | swquinn/hon | 333332029ee884a8822d38024659d5d7da64ff1a | [
"MIT"
] | 14 | 2019-06-23T01:49:55.000Z | 2021-02-22T01:26:51.000Z | hon/commands/clean.py | swquinn/hon | 333332029ee884a8822d38024659d5d7da64ff1a | [
"MIT"
] | null | null | null | import click
from ..cli import with_context
| 18.888889 | 71 | 0.764706 |
c4f2f8270cc6cb9ae4cc9517cbe1d9549b6020f2 | 3,818 | py | Python | Kerning/Steal Kerning Groups from Font.py | justanotherfoundry/Glyphs-Scripts | f28aeab0224ae19ace4a86cf363e7990985199b7 | [
"Apache-2.0"
] | 283 | 2015-01-07T12:35:35.000Z | 2022-03-29T06:10:44.000Z | Kerning/Steal Kerning Groups from Font.py | justanotherfoundry/Glyphs-Scripts | f28aeab0224ae19ace4a86cf363e7990985199b7 | [
"Apache-2.0"
] | 203 | 2015-01-26T18:43:08.000Z | 2022-03-04T01:47:58.000Z | Kerning/Steal Kerning Groups from Font.py | justanotherfoundry/Glyphs-Scripts | f28aeab0224ae19ace4a86cf363e7990985199b7 | [
"Apache-2.0"
] | 96 | 2015-01-19T20:58:03.000Z | 2022-03-29T06:10:56.000Z | #MenuTitle: Steal Kerning Groups from Font
"""Copy kerning groups from one font to another."""
from __future__ import print_function
import vanilla
GroupsCopy()
| 36.018868 | 146 | 0.674175 |
c4f326034d179b78b8dc04c9d2ea83b5265251c0 | 12,584 | py | Python | eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/extdiff.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | null | null | null | eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/extdiff.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | null | null | null | eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/extdiff.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 1 | 2020-07-25T21:10:26.000Z | 2020-07-25T21:10:26.000Z | # extdiff.py - external diff program support for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to allow external programs to compare revisions
The extdiff Mercurial extension allows you to use external programs
to compare revisions, or revision with working directory. The external
diff programs are called with a configurable set of options and two
non-option arguments: paths to directories containing snapshots of
files to compare.
The extdiff extension also allows you to configure new diff commands, so
you do not need to type :hg:`extdiff -p kdiff3` always. ::
[extdiff]
# add new command that runs GNU diff(1) in 'context diff' mode
cdiff = gdiff -Nprc5
## or the old way:
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
# add new command called vdiff, runs kdiff3
vdiff = kdiff3
# add new command called meld, runs meld (no need to name twice)
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
# (see http://www.vim.org/scripts/script.php?script_id=102) Non
# English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
# your .vimrc
vimdiff = gvim -f "+next" \\
"+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
Tool arguments can include variables that are expanded at runtime::
$parent1, $plabel1 - filename, descriptive label of first parent
$child, $clabel - filename, descriptive label of child revision
$parent2, $plabel2 - filename, descriptive label of second parent
$root - repository root
$parent is an alias for $parent1.
The extdiff extension will look in your [diff-tools] and [merge-tools]
sections for diff tool arguments, when none are specified in [extdiff].
::
[extdiff]
kdiff3 =
[diff-tools]
kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
You can use -I/-X and list of file or directory names like normal
:hg:`diff` command. The extdiff extension makes snapshots of only
needed files, so running the external diff program will actually be
pretty fast (at least faster than having to compare the entire tree).
'''
from mercurial.i18n import _
from mercurial.node import short, nullid
from mercurial import scmutil, scmutil, util, commands, encoding
import os, shlex, shutil, tempfile, re
def snapshot(ui, repo, files, node, tmproot):
'''snapshot files as of some revision
if not using snapshot, -I/-X does not work and recursive diff
in tools like kdiff3 and meld displays too many files.'''
dirname = os.path.basename(repo.root)
if dirname == "":
dirname = "root"
if node is not None:
dirname = '%s.%s' % (dirname, short(node))
base = os.path.join(tmproot, dirname)
os.mkdir(base)
if node is not None:
ui.note(_('making snapshot of %d files from rev %s\n') %
(len(files), short(node)))
else:
ui.note(_('making snapshot of %d files from working directory\n') %
(len(files)))
wopener = scmutil.opener(base)
fns_and_mtime = []
ctx = repo[node]
for fn in files:
wfn = util.pconvert(fn)
if not wfn in ctx:
# File doesn't exist; could be a bogus modify
continue
ui.note(' %s\n' % wfn)
dest = os.path.join(base, wfn)
fctx = ctx[wfn]
data = repo.wwritedata(wfn, fctx.data())
if 'l' in fctx.flags():
wopener.symlink(data, wfn)
else:
wopener.write(wfn, data)
if 'x' in fctx.flags():
util.setflags(dest, False, True)
if node is None:
fns_and_mtime.append((dest, repo.wjoin(fn),
os.lstat(dest).st_mtime))
return dirname, fns_and_mtime
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
'''Do the actuall diff:
- copy to a temp structure if diffing 2 internal revisions
- copy to a temp structure if diffing working revision with
another one and more than 1 file is changed
- just invoke the diff for a single file in the working dir
'''
revs = opts.get('rev')
change = opts.get('change')
args = ' '.join(diffopts)
do3way = '$parent2' in args
if revs and change:
msg = _('cannot specify --rev and --change at the same time')
raise util.Abort(msg)
elif change:
node2 = scmutil.revsingle(repo, change, None).node()
node1a, node1b = repo.changelog.parents(node2)
else:
node1a, node2 = scmutil.revpair(repo, revs)
if not revs:
node1b = repo.dirstate.p2()
else:
node1b = nullid
# Disable 3-way merge if there is only one parent
if do3way:
if node1b == nullid:
do3way = False
matcher = scmutil.match(repo[node2], pats, opts)
mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
if do3way:
mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
else:
mod_b, add_b, rem_b = set(), set(), set()
modadd = mod_a | add_a | mod_b | add_b
common = modadd | rem_a | rem_b
if not common:
return 0
tmproot = tempfile.mkdtemp(prefix='extdiff.')
try:
# Always make a copy of node1a (and node1b, if applicable)
dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
rev1a = '@%d' % repo[node1a].rev()
if do3way:
dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
rev1b = '@%d' % repo[node1b].rev()
else:
dir1b = None
rev1b = ''
fns_and_mtime = []
# If node2 in not the wc or there is >1 change, copy it
dir2root = ''
rev2 = ''
if node2:
dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
rev2 = '@%d' % repo[node2].rev()
elif len(common) > 1:
#we only actually need to get the files to copy back to
#the working dir in this case (because the other cases
#are: diffing 2 revisions or single file -- in which case
#the file is already directly passed to the diff tool).
dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
else:
# This lets the diff tool open the changed file directly
dir2 = ''
dir2root = repo.root
label1a = rev1a
label1b = rev1b
label2 = rev2
# If only one change, diff the files instead of the directories
# Handle bogus modifies correctly by checking if the files exist
if len(common) == 1:
common_file = util.localpath(common.pop())
dir1a = os.path.join(tmproot, dir1a, common_file)
label1a = common_file + rev1a
if not os.path.isfile(dir1a):
dir1a = os.devnull
if do3way:
dir1b = os.path.join(tmproot, dir1b, common_file)
label1b = common_file + rev1b
if not os.path.isfile(dir1b):
dir1b = os.devnull
dir2 = os.path.join(dir2root, dir2, common_file)
label2 = common_file + rev2
# Function to quote file/dir names in the argument string.
# When not operating in 3-way mode, an empty string is
# returned for parent2
replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
plabel1=label1a, plabel2=label1b,
clabel=label2, child=dir2,
root=repo.root)
# Match parent2 first, so 'parent1?' will match both parent1 and parent
regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
if not do3way and not re.search(regex, args):
args += ' $parent1 $child'
args = re.sub(regex, quote, args)
cmdline = util.shellquote(diffcmd) + ' ' + args
ui.debug('running %r in %s\n' % (cmdline, tmproot))
util.system(cmdline, cwd=tmproot, out=ui.fout)
for copy_fn, working_fn, mtime in fns_and_mtime:
if os.lstat(copy_fn).st_mtime != mtime:
ui.debug('file changed while diffing. '
'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
util.copyfile(copy_fn, working_fn)
return 1
finally:
ui.note(_('cleaning up temp directory\n'))
shutil.rmtree(tmproot)
def extdiff(ui, repo, *pats, **opts):
'''use external program to diff repository (or selected files)
Show differences between revisions for the specified files, using
an external program. The default program used is diff, with
default options "-Npru".
To select a different program, use the -p/--program option. The
program will be passed the names of two directories to compare. To
pass additional options to the program, use -o/--option. These
will be passed before the names of the directories to compare.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.'''
program = opts.get('program')
option = opts.get('option')
if not program:
program = 'diff'
option = option or ['-Npru']
return dodiff(ui, repo, program, option, pats, opts)
cmdtable = {
"extdiff":
(extdiff,
[('p', 'program', '',
_('comparison program to run'), _('CMD')),
('o', 'option', [],
_('pass option to comparison program'), _('OPT')),
('r', 'rev', [],
_('revision'), _('REV')),
('c', 'change', '',
_('change made by revision'), _('REV')),
] + commands.walkopts,
_('hg extdiff [OPT]... [FILE]...')),
}
| 38.133333 | 79 | 0.603703 |
c4f3b9174179bea9c3e2586b2b624604e84c4eda | 2,352 | py | Python | scripts/idapython/idapy_detect_exitats.py | felkal/fuzzware | 905d9df84dd36bcc3805986064fb7ee4426792a7 | [
"Apache-2.0"
] | 106 | 2022-01-19T21:33:21.000Z | 2022-03-25T12:03:07.000Z | scripts/idapython/idapy_detect_exitats.py | felkal/fuzzware | 905d9df84dd36bcc3805986064fb7ee4426792a7 | [
"Apache-2.0"
] | 1 | 2022-03-29T11:43:40.000Z | 2022-03-29T11:43:40.000Z | scripts/idapython/idapy_detect_exitats.py | felkal/fuzzware | 905d9df84dd36bcc3805986064fb7ee4426792a7 | [
"Apache-2.0"
] | 22 | 2022-01-20T02:05:17.000Z | 2022-03-30T11:48:59.000Z | import idaapi
from idaapi import *
inifinite_loops = [
b"\x00\xbf\xfd\xe7", # loop: nop; b loop
b"\xfe\xe7", # loop: b loop
]
whitelist = [
"Reset_Handler",
"main"
]
dump_exit_ats()
| 35.104478 | 104 | 0.581633 |
c4f425e58c4e41f24eec096d7a9387bcf27859e2 | 3,619 | py | Python | 2020/src/day11.py | pantaryl/adventofcode | d53255c55be0447f3cef7bd93818c41b0eeae997 | [
"MIT"
] | 2 | 2021-12-01T05:23:25.000Z | 2021-12-11T05:58:00.000Z | 2020/src/day11.py | pantaryl/adventofcode | d53255c55be0447f3cef7bd93818c41b0eeae997 | [
"MIT"
] | null | null | null | 2020/src/day11.py | pantaryl/adventofcode | d53255c55be0447f3cef7bd93818c41b0eeae997 | [
"MIT"
] | null | null | null | from collections import defaultdict
from helpers import memoize
from copy import deepcopy
with open("../input/day11.txt", 'r') as inputFile:
data = [x.rstrip() for x in inputFile.readlines()]
#data = [int(x) for x in data]
map = {}
yMax = len(data)
xMax = len(data[0])
for y in range(yMax):
line = data[y]
for x in range(xMax):
map[(x, y)] = line[x]
# Part 1
oldMap = deepcopy(map)
for i in range(5000):
changed = False
newMap = deepcopy(oldMap)
for x in range(xMax):
for y in range(yMax):
if oldMap[(x, y)] == "L" and anyAdjacentOccupied(x, y, oldMap) is False:
newMap[(x, y)] = "#"
changed = True
elif oldMap[(x, y)] == "#" and alsoOccupied(x, y, oldMap):
newMap[(x, y)] = "L"
changed = True
if changed is False:
occupied = 0
for _, value in newMap.items():
occupied += 1 if value == "#" else 0
print(occupied)
break
else:
oldMap = newMap
#printMap(oldMap)
#print()
#print()
# Part 2
oldMap = deepcopy(map)
for i in range(500000):
changed = False
newMap = deepcopy(oldMap)
for x in range(xMax):
for y in range(yMax):
if oldMap[(x, y)] == "L" and anyAdjacentOccupied2(x, y, oldMap) is False:
newMap[(x, y)] = "#"
changed = True
elif oldMap[(x, y)] == "#" and alsoOccupied2(x, y, oldMap):
newMap[(x, y)] = "L"
changed = True
if changed is False:
occupied = 0
for _, value in newMap.items():
occupied += 1 if value == "#" else 0
print(occupied)
break
else:
oldMap = newMap
#printMap(oldMap)
#print()
#print()
#input() | 28.496063 | 85 | 0.481625 |
c4f49778e071131f827e01660acacad3ade6d9e2 | 3,235 | py | Python | jaqs/util/dtutil.py | WestXu/JAQS | 3c9389afab518f188b8628af72297d750c07dfb1 | [
"Apache-2.0"
] | 602 | 2017-11-21T00:39:40.000Z | 2022-03-16T06:13:08.000Z | jaqs/util/dtutil.py | WestXu/JAQS | 3c9389afab518f188b8628af72297d750c07dfb1 | [
"Apache-2.0"
] | 63 | 2017-12-08T08:21:16.000Z | 2020-03-07T13:57:35.000Z | jaqs/util/dtutil.py | WestXu/JAQS | 3c9389afab518f188b8628af72297d750c07dfb1 | [
"Apache-2.0"
] | 365 | 2017-11-21T01:38:36.000Z | 2022-03-30T15:55:30.000Z | # encoding: utf-8
import datetime
import numpy as np
import pandas as pd
def get_next_period_day(current, period, n=1, extra_offset=0):
"""
Get the n'th day in next period from current day.
Parameters
----------
current : int
Current date in format "%Y%m%d".
period : str
Interval between current and next. {'day', 'week', 'month'}
n : int
n times period.
extra_offset : int
n'th business day after next period.
Returns
-------
nxt : int
"""
current_dt = convert_int_to_datetime(current)
if period == 'day':
offset = pd.tseries.offsets.BDay() # move to next business day
# offset = offsets.Day
elif period == 'week':
offset = pd.tseries.offsets.Week(weekday=0) # move to next Monday
elif period == 'month':
offset = pd.tseries.offsets.BMonthBegin() # move to first business day of next month
# offset = offsets.MonthBegin
else:
raise NotImplementedError("Frequency as {} not support".format(period))
offset = offset * n
next_dt = current_dt + offset
if extra_offset:
next_dt = next_dt + extra_offset * pd.tseries.offsets.BDay()
nxt = convert_datetime_to_int(next_dt)
return nxt
def convert_int_to_datetime(dt):
"""Convert int date (%Y%m%d) to datetime.datetime object."""
if isinstance(dt, pd.Series):
dt = dt.astype(str)
elif isinstance(dt, int):
dt = str(dt)
return pd.to_datetime(dt, format="%Y%m%d")
def shift(date, n_weeks=0):
"""Shift date backward or forward for n weeks.
Parameters
----------
date : int or datetime
The date to be shifted.
n_weeks : int, optional
Positive for increasing date, negative for decreasing date.
Default 0 (no shift).
Returns
-------
res : int or datetime
"""
delta = pd.Timedelta(weeks=n_weeks)
is_int = isinstance(date, (int, np.integer))
if is_int:
dt = convert_int_to_datetime(date)
else:
dt = date
res = dt + delta
if is_int:
res = convert_datetime_to_int(res)
return res
| 24.884615 | 93 | 0.562597 |
c4f7c343c369e93c89a12020beddb2954ee8b536 | 485 | py | Python | api/pub/sensor/ds18b20.py | rtaft/pi-sensor-dashboard | e7f711e8ecd9e4c32976583c32dbc716b165d56a | [
"MIT"
] | null | null | null | api/pub/sensor/ds18b20.py | rtaft/pi-sensor-dashboard | e7f711e8ecd9e4c32976583c32dbc716b165d56a | [
"MIT"
] | null | null | null | api/pub/sensor/ds18b20.py | rtaft/pi-sensor-dashboard | e7f711e8ecd9e4c32976583c32dbc716b165d56a | [
"MIT"
] | null | null | null | import flask
from flask import request
import flask_restful as restful
from marshmallow import Schema, fields, validate
from api.helpers import success, created
from api.exceptions import NotFound
from sensors.ds18b20 import lookup
| 26.944444 | 60 | 0.756701 |
c4f823b53a35ff0fc520cfeb34cd4f27b3264abb | 2,877 | py | Python | accenv/lib/python3.4/site-packages/IPython/html/notebook/handlers.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
] | null | null | null | accenv/lib/python3.4/site-packages/IPython/html/notebook/handlers.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
] | null | null | null | accenv/lib/python3.4/site-packages/IPython/html/notebook/handlers.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
] | null | null | null | """Tornado handlers for the live notebook view.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from tornado import web
HTTPError = web.HTTPError
from ..base.handlers import IPythonHandler
from ..utils import url_path_join
#-----------------------------------------------------------------------------
# Handlers
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# URL to handler mappings
#-----------------------------------------------------------------------------
_notebook_id_regex = r"(?P<notebook_id>\w+-\w+-\w+-\w+-\w+)"
_notebook_name_regex = r"(?P<notebook_name>.+\.ipynb)"
default_handlers = [
(r"/new", NewHandler),
(r"/%s" % _notebook_id_regex, NamedNotebookHandler),
(r"/%s" % _notebook_name_regex, NotebookRedirectHandler),
(r"/%s/copy" % _notebook_id_regex, NotebookCopyHandler),
]
| 31.271739 | 88 | 0.534237 |
c4f84b78cd23bfc6c4e94d2d3b58c3a8e6dd5d94 | 34,172 | py | Python | deepchem/models/tensorgraph/tests/test_layers_eager.py | avimanyu786/deepchem | c5a7c6fff0597b5d896c865efdacec4fa75b00c6 | [
"MIT"
] | null | null | null | deepchem/models/tensorgraph/tests/test_layers_eager.py | avimanyu786/deepchem | c5a7c6fff0597b5d896c865efdacec4fa75b00c6 | [
"MIT"
] | null | null | null | deepchem/models/tensorgraph/tests/test_layers_eager.py | avimanyu786/deepchem | c5a7c6fff0597b5d896c865efdacec4fa75b00c6 | [
"MIT"
] | 1 | 2019-05-19T14:22:32.000Z | 2019-05-19T14:22:32.000Z | import deepchem as dc
import numpy as np
import tensorflow as tf
import deepchem.models.tensorgraph.layers as layers
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
| 37.264995 | 94 | 0.654747 |
c4f8a7e27a6b1a8b93095262140e88ebc073c0f4 | 790 | py | Python | py/py_0049_prime_permutations.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0049_prime_permutations.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0049_prime_permutations.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 49: Prime permutations
# https://projecteuler.net/problem=49
#
# The arithmetic sequence, 1487, 4817, 8147, in which each of the terms
# increases by 3330, is unusual in two ways: (i) each of the three terms are
# prime, and, (ii) each of the 4-digit numbers are permutations of one
# another. There are no arithmetic sequences made up of three 1-, 2-, or
# 3-digit primes, exhibiting this property, but there is one other 4-digit
# increasing sequence. What 12-digit number do you form by concatenating the
# three terms in this sequence?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 49
timed.caller(dummy, n, i, prob_id)
| 30.384615 | 77 | 0.711392 |
c4f93be850b5b4fb3f0bf11c18b271c4dd267dcc | 8,879 | py | Python | rqmonitor/cli.py | trodery/rqmonitor | 65831337591afe6887dec2dbb37a28d84f881f35 | [
"Apache-2.0"
] | null | null | null | rqmonitor/cli.py | trodery/rqmonitor | 65831337591afe6887dec2dbb37a28d84f881f35 | [
"Apache-2.0"
] | null | null | null | rqmonitor/cli.py | trodery/rqmonitor | 65831337591afe6887dec2dbb37a28d84f881f35 | [
"Apache-2.0"
] | null | null | null | """
This reference script has been taken from rq-dashboard with some modifications
"""
import importlib
import logging
import os
import sys
from urllib.parse import quote as urlquote, urlunparse
from redis.connection import (URL_QUERY_ARGUMENT_PARSERS,
UnixDomainSocketConnection,
SSLConnection)
from urllib.parse import urlparse, parse_qs, unquote
import click
from flask import Flask, Response, request
from rqmonitor.defaults import RQ_MONITOR_REDIS_URL, RQ_MONITOR_REFRESH_INTERVAL
from rqmonitor.version import VERSION
from rqmonitor.bp import monitor_blueprint
logger = logging.getLogger("werkzeug")
def add_basic_auth(blueprint, username, password, realm="RQ Monitor"):
"""Add HTTP Basic Auth to a blueprint.
Note this is only for casual use!
"""
def create_app_with_blueprint(config=None, username=None, password=None,
url_prefix='', blueprint=monitor_blueprint):
"""Return Flask app with default configuration and registered blueprint."""
app = Flask(__name__)
# Override with any settings in config file, if given.
if config:
app.config.from_object(importlib.import_module(config))
# Override from a configuration file in the env variable, if present.
if "RQ_MONITOR_SETTINGS" in os.environ:
app.config.from_envvar("RQ_MONITOR_SETTINGS")
# Optionally add basic auth to blueprint and register with app.
if username:
add_basic_auth(blueprint, username, password)
app.register_blueprint(blueprint, url_prefix=url_prefix)
return app
def check_url(url, decode_components=False):
"""
Taken from redis-py for basic check before passing URL to redis-py
Kept here to show error before launching app
For example::
redis://[[username]:[password]]@localhost:6379/0
rediss://[[username]:[password]]@localhost:6379/0
unix://[[username]:[password]]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- ```redis://``
<https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
normal TCP socket connection
- ```rediss://``
<https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates
a SSL wrapped TCP socket connection
- ``unix://`` creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
The ``decode_components`` argument allows this function to work with
percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
escapes will be replaced by their single-character equivalents after
the URL has been parsed. This only applies to the ``hostname``,
``path``, ``username`` and ``password`` components.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. The querystring
arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied
are parsed as float values. The arguments ``socket_keepalive`` and
``retry_on_timeout`` are parsed to boolean values that accept
True/False, Yes/No values to indicate state. Invalid types cause a
``UserWarning`` to be raised. In the case of conflicting arguments,
querystring arguments always win.
"""
url = urlparse(url)
url_options = {}
for name, value in (parse_qs(url.query)).items():
if value and len(value) > 0:
parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
if parser:
try:
url_options[name] = parser(value[0])
except (TypeError, ValueError):
logger.warning(UserWarning(
"Invalid value for `%s` in connection URL." % name
))
else:
url_options[name] = value[0]
if decode_components:
username = unquote(url.username) if url.username else None
password = unquote(url.password) if url.password else None
path = unquote(url.path) if url.path else None
hostname = unquote(url.hostname) if url.hostname else None
else:
username = url.username or None
password = url.password or None
path = url.path
hostname = url.hostname
# We only support redis://, rediss:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'username': username,
'password': password,
'path': path,
'connection_class': UnixDomainSocketConnection,
})
elif url.scheme in ('redis', 'rediss'):
url_options.update({
'host': hostname,
'port': int(url.port or 6379),
'username': username,
'password': password,
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and path:
try:
url_options['db'] = int(path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
url_options['connection_class'] = SSLConnection
else:
valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://'))
raise ValueError('Redis URL must specify one of the following '
'schemes (%s)' % valid_schemes)
return True
if __name__ == '__main__':
main() | 32.52381 | 94 | 0.64692 |
c4fab5fadfd556ed33d5c8cd6a4689c230aa1b08 | 54 | py | Python | src/client/__init__.py | kyehyukahn/scp-prototype | 4e92b47ab82068a154c407c22e8c396196a31942 | [
"Apache-2.0"
] | 1 | 2018-04-10T11:00:59.000Z | 2018-04-10T11:00:59.000Z | src/client/__init__.py | kyehyukahn/scp-prototype | 4e92b47ab82068a154c407c22e8c396196a31942 | [
"Apache-2.0"
] | null | null | null | src/client/__init__.py | kyehyukahn/scp-prototype | 4e92b47ab82068a154c407c22e8c396196a31942 | [
"Apache-2.0"
] | null | null | null | from .client import send_message, MessageInfo # noqa
| 27 | 53 | 0.796296 |
c4fb08b069b3dd12f5ffa2176644b9d7ad76a516 | 4,376 | py | Python | src/comparing_scoring_seasons.py | davgav123/Mining_NBA | e302246c26d373d4b5e0defed07d70a5d09242d9 | [
"MIT"
] | null | null | null | src/comparing_scoring_seasons.py | davgav123/Mining_NBA | e302246c26d373d4b5e0defed07d70a5d09242d9 | [
"MIT"
] | null | null | null | src/comparing_scoring_seasons.py | davgav123/Mining_NBA | e302246c26d373d4b5e0defed07d70a5d09242d9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from pathlib import Path
import pandas as pd
from numpy import around
if __name__ == "__main__":
# Harden's PPG is from 2018-19 season
# Bryant's PPG is from 2005-06 season
# Jordan's PPG is from 1986-87 season
per_game_df = pd.read_csv(Path('../data/compare_players_per_game.csv'))
per_48_df = pd.read_csv(Path('../data/compare_players_per_48.csv'))
per_100_df = pd.read_csv(Path('../data/compare_players_per_100_poss.csv'))
avg_TS_for_2018_19_season = 0.560 # source: https://www.basketball-reference.com/leagues/NBA_2019.html#all_misc_stats
avg_TS_for_2005_06_season = 0.536 # source: https://www.basketball-reference.com/leagues/NBA_2006.html#all_misc_stats
avg_TS_for_1986_87_season = 0.538 # source: https://www.basketball-reference.com/leagues/NBA_1987.html#all_misc_stats
# per game
per_game_harden = per_game_df[per_game_df['Player'] == 'James Harden']
per_game_bryant = per_game_df[per_game_df['Player'] == 'Kobe Bryant']
per_game_jordan = per_game_df[per_game_df['Player'] == 'Michael Jordan']
harden_ppg = per_game_harden['PTS'].values[0]
bryant_ppg = per_game_bryant['PTS'].values[0]
jordan_ppg = per_game_jordan['PTS'].values[0]
# shooting stats
harden_efg = per_game_harden['eFG%'].values[0]
bryant_efg = per_game_bryant['eFG%'].values[0]
jordan_efg = per_game_jordan['eFG%'].values[0]
harden_ts = per_game_harden['TS%'].values[0]
bryant_ts = per_game_bryant['TS%'].values[0]
jordan_ts = per_game_jordan['TS%'].values[0]
# number of games
harden_g = per_game_harden['G'].values[0]
bryant_g = per_game_bryant['G'].values[0]
jordan_g = per_game_jordan['G'].values[0]
# minutes per game
harden_mpg = per_game_harden['MP'].values[0]
bryant_mpg = per_game_bryant['MP'].values[0]
jordan_mpg = per_game_jordan['MP'].values[0]
# per 48
per_48_harden = per_48_df[per_48_df['Player'] == 'James Harden']
per_48_bryant = per_48_df[per_48_df['Player'] == 'Kobe Bryant']
per_48_jordan = per_48_df[per_48_df['Player'] == 'Michael Jordan']
harden_pp48 = per_48_harden['PTS'].values[0]
bryant_pp48 = per_48_bryant['PTS'].values[0]
jordan_pp48 = per_48_jordan['PTS'].values[0]
# per 100
per_100_harden = per_100_df[per_100_df['Player'] == 'James Harden']
per_100_bryant = per_100_df[per_100_df['Player'] == 'Kobe Bryant']
per_100_jordan = per_100_df[per_100_df['Player'] == 'Michael Jordan']
harden_pp100 = per_100_harden['PTS'].values[0]
bryant_pp100 = per_100_bryant['PTS'].values[0]
jordan_pp100 = per_100_jordan['PTS'].values[0]
print('James Harden in 2018-19: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(harden_g, harden_ppg, harden_efg, harden_ts, harden_mpg))
print('He was {} more efficient than the average player in was that season'
.format(around(harden_ts - avg_TS_for_2018_19_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(harden_pp48, harden_pp100))
print('\n------------------------------------------------------------------------------------------\n')
print('Kobe Bryant in 2005-06: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(bryant_g, bryant_ppg, bryant_efg, bryant_ts, bryant_mpg))
print('He was {} more efficient than the average player was in that season'
.format(around(bryant_ts - avg_TS_for_2005_06_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(bryant_pp48, bryant_pp100))
print('\n------------------------------------------------------------------------------------------\n')
print('Michael Jordan in 1986-87: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(jordan_g, jordan_ppg, jordan_efg, jordan_ts, jordan_mpg))
print('He was {} more efficient than the average player was in that season'
.format(around(jordan_ts - avg_TS_for_1986_87_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(jordan_pp48, jordan_pp100))
| 46.553191 | 121 | 0.64511 |
c4fb53e938362d1e973ac1420b0eecef8f5d6f2b | 5,939 | py | Python | modules/citymap.py | sebastianbernasek/dataincubator | de29e86c917f107650d03d3331109cb992a6881c | [
"FTL",
"CC-BY-4.0"
] | null | null | null | modules/citymap.py | sebastianbernasek/dataincubator | de29e86c917f107650d03d3331109cb992a6881c | [
"FTL",
"CC-BY-4.0"
] | null | null | null | modules/citymap.py | sebastianbernasek/dataincubator | de29e86c917f107650d03d3331109cb992a6881c | [
"FTL",
"CC-BY-4.0"
] | null | null | null | from os.path import join
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
from matplotlib.dates import YearLocator, DateFormatter
from matplotlib.animation import FuncAnimation
def initialize_city_limits(self, color='w', edgecolor='k', lw=.5, **kwargs):
""" Add city limits to axes. """
self.citylimits.plot(ax=self.ax, color=color, edgecolor=edgecolor, lw=lw, **kwargs)
def initialize_zip_codes(self, **kwargs):
""" Add zipcodes to axes. """
# build shader
shader = self.build_shader(0)
# shade zipcode polygons
shader.plot(column='VALUE', cmap=plt.cm.Greys, vmin=0, vmax=0, ax=self.ax, **kwargs)
self.shade(0)
# set date marker
if self.timeline:
self.tax.plot(self.timeseries.columns[0], .5, '.k', markersize=10)
| 30.45641 | 92 | 0.59606 |
c4fbbf35cb97942fd780038b58bdfd3ad398e637 | 248 | py | Python | w1data/metadata.py | swork/w1-datalogger | 26191d57ff1c05e5c6e9de90870c5c63916f9a8c | [
"MIT"
] | null | null | null | w1data/metadata.py | swork/w1-datalogger | 26191d57ff1c05e5c6e9de90870c5c63916f9a8c | [
"MIT"
] | null | null | null | w1data/metadata.py | swork/w1-datalogger | 26191d57ff1c05e5c6e9de90870c5c63916f9a8c | [
"MIT"
] | null | null | null | import logging, sys
logger = logging.getLogger(__name__)
| 31 | 76 | 0.741935 |
c4fbcb6844c497c33e0f139f7c92bb18735dd23a | 837 | py | Python | easyvista/setup.py | GreyNoise-Intelligence/insightconnect-plugins | 2ba3121d42fd96e1267bb095bc76b962678c1f56 | [
"MIT"
] | null | null | null | easyvista/setup.py | GreyNoise-Intelligence/insightconnect-plugins | 2ba3121d42fd96e1267bb095bc76b962678c1f56 | [
"MIT"
] | null | null | null | easyvista/setup.py | GreyNoise-Intelligence/insightconnect-plugins | 2ba3121d42fd96e1267bb095bc76b962678c1f56 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name="easyvista-rapid7-plugin",
version="1.0.0",
description="EasyVista Service Manager platform supports even the most complex requirements, while bringing a new level of simplicity, agility, and mobility required to make cloud based IT Service Management (ITSM) software easy to use and easy to deliver. Using the EasyVista plugin for Rapid7 InsightConnect, users can manage the creation, update, search and closure of incident, service request, problem or event tickets",
author="rapid7",
author_email="",
url="",
packages=find_packages(),
install_requires=['insightconnect-plugin-runtime'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/icon_easyvista']
)
| 55.8 | 431 | 0.741935 |