hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22f9996e35b6cbbaeea6e8c3929b7498dd603017 | 4,471 | py | Python | ppq/utils/round.py | xiguadong/ppq | 6c71adb3c2a8ca95967f101724b5e4b3e6f761ff | [
"Apache-2.0"
] | null | null | null | ppq/utils/round.py | xiguadong/ppq | 6c71adb3c2a8ca95967f101724b5e4b3e6f761ff | [
"Apache-2.0"
] | null | null | null | ppq/utils/round.py | xiguadong/ppq | 6c71adb3c2a8ca95967f101724b5e4b3e6f761ff | [
"Apache-2.0"
] | null | null | null | from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal
from math import ceil, floor, log2
from typing import Union
import torch
from ppq.core import RoundingPolicy
def ppq_numerical_round(value: float,
                        policy: RoundingPolicy=RoundingPolicy.ROUND_HALF_EVEN) -> int:
    """Round a single python float to an int under the requested policy.

    Reference: https://en.wikipedia.org/wiki/Rounding

    The half-way policies are built on the stdlib ``decimal`` rounding
    constants. Note the sign test in the HALF_UP / HALF_DOWN branches:
    it flips the decimal constant for negative inputs, so in this module
    ROUND_HALF_UP effectively means "ties towards +infinity" and
    ROUND_HALF_DOWN "ties towards -infinity".

    NOTE(review): ROUND_HALF_TOWARDS_ZERO / ROUND_HALF_FAR_FORM_ZERO are
    plain aliases of the two policies above, which does not match their
    names for negative ties (e.g. -2.5 under TOWARDS_ZERO yields -3, not
    -2) -- confirm this aliasing is intended.

    Args:
        value: the float to round.
        policy: one of the RoundingPolicy members handled below.
            Defaults to RoundingPolicy.ROUND_HALF_EVEN.

    Raises:
        ValueError: if the policy is not handled here.

    Returns:
        The rounded integer.
    """
    assert isinstance(value, float), 'numerical round only takes effect on float number.'
    if policy == RoundingPolicy.ROUND_HALF_EVEN:
        return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_EVEN))
    if policy == RoundingPolicy.ROUND_HALF_UP:
        # ties towards +infinity: away from zero when positive,
        # towards zero when negative
        mode = ROUND_HALF_UP if value > 0 else ROUND_HALF_DOWN
        return int(Decimal(value).quantize(exp=Decimal(1), rounding=mode))
    if policy == RoundingPolicy.ROUND_HALF_DOWN:
        # ties towards -infinity: mirror image of the branch above
        mode = ROUND_HALF_DOWN if value > 0 else ROUND_HALF_UP
        return int(Decimal(value).quantize(exp=Decimal(1), rounding=mode))
    if policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
        return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_DOWN)
    if policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
        return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_UP)
    if policy == RoundingPolicy.ROUND_TO_NEAR_INT:
        # float-arithmetic variant of "ties towards +infinity"
        return floor(value + 0.5) if value > 0 else ceil(value - 0.5)
    if policy == RoundingPolicy.ROUND_UP:
        return ceil(value)
    raise ValueError('Unexpected rounding policy found.')
def ppq_tensor_round(value: torch.Tensor,
                     policy:RoundingPolicy=RoundingPolicy.ROUND_HALF_EVEN) -> torch.Tensor:
    """Round a tensor elementwise under the requested rounding policy.

    Reference: https://en.wikipedia.org/wiki/Rounding

    Args:
        value: tensor to round (returned tensor keeps its dtype).
        policy: one of the RoundingPolicy members handled below.
            Defaults to RoundingPolicy.ROUND_HALF_EVEN.

    Raises:
        NotImplementedError: for ROUND_TO_NEAR_INT, which has no tensor
            implementation here.
        ValueError: for any unhandled policy.

    Returns:
        A rounded tensor.
    """
    assert isinstance(value, torch.Tensor), 'tensor round only takes effect on torch tensor.'
    if policy == RoundingPolicy.ROUND_HALF_EVEN:
        # torch's default rounding is round-half-to-even (banker's rounding);
        # try: print(torch.Tensor([1.5, 2.5, 3.5, 4.5]).round())
        # results have been observed to differ across torch versions.
        return value.round()
    if policy == RoundingPolicy.ROUND_UP:
        return value.ceil()
    if policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
        # shrink the magnitude by half before ceiling, then restore the sign
        return (value.abs() - 0.5).ceil() * value.sign()
    if policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
        return (value.abs() + 0.5).floor() * value.sign()
    if policy == RoundingPolicy.ROUND_HALF_DOWN:
        # ties towards -infinity
        return (value - 0.5).ceil()
    if policy == RoundingPolicy.ROUND_HALF_UP:
        # ties towards +infinity
        return (value + 0.5).floor()
    if policy == RoundingPolicy.ROUND_TO_NEAR_INT:
        raise NotImplementedError(f'Torch Tensor can not use this rounding policy({policy}) try ROUND_HALF_EVEN instead.')
    raise ValueError('Unexpected rounding policy found.')
| 45.622449 | 138 | 0.698054 |
22f9fe832c0a98e82946d0744a46553bfba443ca | 11,944 | py | Python | python/repair/train.py | maropu/scavenger | 03a935968f4aa507d4d98c8ca528195b770757d9 | [
"Apache-2.0"
] | null | null | null | python/repair/train.py | maropu/scavenger | 03a935968f4aa507d4d98c8ca528195b770757d9 | [
"Apache-2.0"
] | 2 | 2019-12-22T13:29:07.000Z | 2020-01-07T11:55:41.000Z | python/repair/train.py | maropu/scavenger | 03a935968f4aa507d4d98c8ca528195b770757d9 | [
"Apache-2.0"
] | 1 | 2020-10-26T20:07:28.000Z | 2020-10-26T20:07:28.000Z | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import time
import numpy as np # type: ignore[import]
import pandas as pd # type: ignore[import]
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple
from repair.utils import elapsed_time, get_option_value, setup_logger
# Module-level logger shared by this module's helpers (project utility).
_logger = setup_logger()
# List of internal configurations
_option = namedtuple('_option', 'key default_value type_class validator err_msg')
_opt_boosting_type = \
_option('model.lgb.boosting_type', 'gbdt', str,
lambda v: v in ['gbdt', 'dart', 'goss', 'rf'], "`{}` should be in ['gbdt', 'dart', 'goss', 'rf']")
_opt_class_weight = \
_option('model.lgb.class_weight', 'balanced', str, None, None)
_opt_learning_rate = \
_option('model.lgb.learning_rate', 0.01, float,
lambda v: v > 0.0, '`{}` should be positive')
_opt_max_depth = \
_option('model.lgb.max_depth', 7, int, None, None)
_opt_max_bin = \
_option('model.lgb.max_bin', 255, int, None, None)
_opt_reg_alpha = \
_option('model.lgb.reg_alpha', 0.0, float,
lambda v: v >= 0.0, '`{}` should be greater than or equal to 0.0')
_opt_min_split_gain = \
_option('model.lgb.min_split_gain', 0.0, float,
lambda v: v >= 0.0, '`{}` should be greater than or equal to 0.0')
_opt_n_estimators = \
_option('model.lgb.n_estimators', 300, int,
lambda v: v > 0, '`{}` should be positive')
_opt_importance_type = \
_option('model.lgb.importance_type', 'gain', str,
lambda v: v in ['split', 'gain'], "`{}` should be in ['split', 'gain']")
_opt_n_splits = \
_option('model.cv.n_splits', 3, int,
lambda v: v >= 3, '`{}` should be greater than 2')
_opt_timeout = \
_option('model.hp.timeout', 0, int, None, None)
_opt_max_evals = \
_option('model.hp.max_evals', 100000000, int,
lambda v: v > 0, '`{}` should be positive')
_opt_no_progress_loss = \
_option('model.hp.no_progress_loss', 50, int,
lambda v: v > 0, '`{}` should be positive')
train_option_keys = [
_opt_boosting_type.key,
_opt_class_weight.key,
_opt_learning_rate.key,
_opt_max_depth.key,
_opt_max_bin.key,
_opt_reg_alpha.key,
_opt_min_split_gain.key,
_opt_n_estimators.key,
_opt_importance_type.key,
_opt_n_splits.key,
_opt_timeout.key,
_opt_max_evals.key,
_opt_no_progress_loss.key
]
| 40.62585 | 120 | 0.654638 |
22fa19437d01af6a56a8a1b30127d97248a1bdcd | 519 | py | Python | howl/roomsensor/urls.py | volzotan/django-howl | 3b11c530da95d152844934da09592619b3d4497f | [
"MIT"
] | null | null | null | howl/roomsensor/urls.py | volzotan/django-howl | 3b11c530da95d152844934da09592619b3d4497f | [
"MIT"
] | null | null | null | howl/roomsensor/urls.py | volzotan/django-howl | 3b11c530da95d152844934da09592619b3d4497f | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, url
from roomsensor import views
# URL routes for the roomsensor app. Uses the old-style Django
# `patterns('' , ...)` syntax -- NOTE(review): `patterns` was removed in
# Django 1.10; migrate to a plain list of url()/path() entries on upgrade.
urlpatterns = patterns('',
    url(r'^$', views.index, name='roomsensor'),
    # ex: /roomsensor/name/
    url(r'^(?P<roomsensor_name>\w+)/$', views.display, name='roomsensor_display'),
    # single sensor reading for the named sensor
    url(r'^(?P<roomsensor_name>\w+)/read/$', views.read, name='roomsensor_read'),
    # JSON data for graph creation
    url(r'^(?P<roomsensor_name>\w+)/rawdata/(?P<datapoints>\d+)/(?P<compression_factor>\d+)/$', views.rawdata, name='roomsensor_rawdata'),
)
22fadcf738c9cad6b1e0cd6d9126f92326318681 | 1,088 | py | Python | main.py | vu-telab/DAKOTA-moga-post-processing-tool | 2f41561bd8ca44c693e5994f7f68a1edc1a82361 | [
"MIT"
] | null | null | null | main.py | vu-telab/DAKOTA-moga-post-processing-tool | 2f41561bd8ca44c693e5994f7f68a1edc1a82361 | [
"MIT"
] | 4 | 2017-02-06T18:20:25.000Z | 2017-02-06T20:50:34.000Z | main.py | caseynbrock/DAKOTA-moga-post-processing-tool | 2f41561bd8ca44c693e5994f7f68a1edc1a82361 | [
"MIT"
] | null | null | null | # main.py
#
# currently just an example script I use to test my optimization_results module
#
# WARNING: design point numbers 0-indexed in pandas database, but
# eval_id column is the original 1-indexed value given by DAKOTA
import optimization_results as optr
# Script entry point. NOTE(review): `main` is not defined anywhere in this
# chunk -- presumably defined elsewhere in the full file; confirm before
# running this module directly.
if __name__=='__main__':
    main()
| 35.096774 | 97 | 0.596507 |
22fbfb719886ba73384d6d380084bceb6dabf90b | 2,127 | py | Python | Topaz/Core.py | Rhodolite/Gem.py.UnitTest | eaa8b6855bcfbb12f67e7eb146928814543ef9d4 | [
"MIT"
] | null | null | null | Topaz/Core.py | Rhodolite/Gem.py.UnitTest | eaa8b6855bcfbb12f67e7eb146928814543ef9d4 | [
"MIT"
] | null | null | null | Topaz/Core.py | Rhodolite/Gem.py.UnitTest | eaa8b6855bcfbb12f67e7eb146928814543ef9d4 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2017 Joy Diamond. All rights reserved.
#
| 35.45 | 97 | 0.661965 |
22fc97fb3dafaa3d0c68a5549bbe8a39af3d15d4 | 7,031 | py | Python | app.py | kosovojs/wikibooster | 70a9d9d7bf41be9fa5e58d40fba216d9b6df008d | [
"MIT"
] | null | null | null | app.py | kosovojs/wikibooster | 70a9d9d7bf41be9fa5e58d40fba216d9b6df008d | [
"MIT"
] | 17 | 2019-07-08T15:32:18.000Z | 2021-01-03T10:30:55.000Z | app.py | kosovojs/wikibooster | 70a9d9d7bf41be9fa5e58d40fba216d9b6df008d | [
"MIT"
] | 1 | 2019-08-28T21:23:48.000Z | 2019-08-28T21:23:48.000Z | import flask
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS, cross_origin
from flask import render_template
import mwoauth
import requests_oauthlib
import os
import yaml
import mwapi
from tasks.main import Tasks
from save import Save
from db import DB
from typo.fix import TypoFix
# Flask app serving the pre-built React frontend from ./frontend/build.
app = Flask(__name__, static_folder="./frontend/build/static", template_folder="./frontend/build")
#app = Flask(__name__)
CORS(app)  # allow cross-origin requests (e.g. from a frontend dev server)
user_agent = 'WikiBooster'
__dir__ = os.path.dirname(__file__)
# Load deployment settings into Flask's config. The with-block closes the
# handle when done; the original opened the file and leaked the handle for
# the lifetime of the process.
with open(os.path.join(__dir__, 'config.yaml')) as configFile:
    app.config.update(yaml.safe_load(configFile))
#http://127.0.0.1:5000/task/lvwiki/1/Helna Mrnija
#
# Development entry point (Flask debug server; not for production use).
if __name__ == '__main__':
	app.run(debug=True)
22fcb38b78558c9add6900dca954fd92ecf359b7 | 1,483 | py | Python | pre_embed.py | shelleyyyyu/few_shot | 0fe54444e820fe3201927e6363682913b6d61028 | [
"Apache-2.0"
] | 253 | 2018-08-29T18:59:00.000Z | 2022-03-15T04:53:47.000Z | pre_embed.py | shelleyyyyu/few_shot | 0fe54444e820fe3201927e6363682913b6d61028 | [
"Apache-2.0"
] | 18 | 2018-10-24T09:49:44.000Z | 2022-03-31T14:39:37.000Z | pre_embed.py | shelleyyyyu/few_shot | 0fe54444e820fe3201927e6363682913b6d61028 | [
"Apache-2.0"
] | 38 | 2018-10-17T07:43:25.000Z | 2022-03-05T12:20:33.000Z | import numpy as np
from collections import defaultdict, Counter
import random
import json
from tqdm import tqdm
# Script entry point. NOTE(review): `transX` is not defined in this chunk;
# judging by the imports it presumably builds TransE-style embeddings for
# the 'Wiki' dataset -- confirm against the full file.
if __name__ == '__main__':
    transX('Wiki')
22fd80b994ca4f5c482661c444d74e7a50232ab0 | 7,673 | py | Python | botc/gamemodes/troublebrewing/FortuneTeller.py | Xinverse/BOTC-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2020-06-21T17:20:17.000Z | 2020-06-21T17:20:17.000Z | botc/gamemodes/troublebrewing/FortuneTeller.py | BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2020-07-07T03:47:44.000Z | 2020-07-07T03:47:44.000Z | botc/gamemodes/troublebrewing/FortuneTeller.py | BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2022-02-18T00:42:19.000Z | 2022-02-18T00:42:19.000Z | """Contains the Fortune Teller Character class"""
import json
import random
import discord
import datetime
from botc import Action, ActionTypes, Townsfolk, Character, Storyteller, RedHerring, \
RecurringAction, Category, StatusList
from botc.BOTCUtils import GameLogic
from ._utils import TroubleBrewing, TBRole
import globvars
# Fortune Teller specific strings, keyed by the lower-cased TBRole value.
with open('botc/gamemodes/troublebrewing/character_text.json') as json_file:
    character_text = json.load(json_file)[TBRole.fortuneteller.value.lower()]
# Bot-wide cosmetic strings (the butterfly emoji/decoration).
with open('botutils/bot_text.json') as json_file:
    bot_text = json.load(json_file)
    butterfly = bot_text["esthetics"]["butterfly"]
# Shared gameplay text: nightly prompt, yes/no labels, copyright footer,
# and the good/evil alignment image links.
with open('botc/game_text.json') as json_file:
    strings = json.load(json_file)
    fortune_teller_nightly = strings["gameplay"]["fortune_teller_nightly"]
    copyrights_str = strings["misc"]["copyrights"]
    yes = strings["gameplay"]["yes"]
    no = strings["gameplay"]["no"]
    good_link = strings["images"]["good"]
    evil_link = strings["images"]["evil"]
| 41.032086 | 115 | 0.644598 |
22fdcdf03da29d4d6e3f5e50e7e03925c3c15cdd | 10,849 | py | Python | src/schmetterling/build/tests/test_maven.py | bjuvensjo/schmetterling | 0cdbfe4f379a081d9d4711dd21866b90983365cf | [
"Apache-2.0"
] | null | null | null | src/schmetterling/build/tests/test_maven.py | bjuvensjo/schmetterling | 0cdbfe4f379a081d9d4711dd21866b90983365cf | [
"Apache-2.0"
] | null | null | null | src/schmetterling/build/tests/test_maven.py | bjuvensjo/schmetterling | 0cdbfe4f379a081d9d4711dd21866b90983365cf | [
"Apache-2.0"
] | null | null | null | from unittest.mock import call, MagicMock, patch
from schmetterling.build.maven import build_multi_modules
from schmetterling.build.maven import create_build_result
from schmetterling.build.maven import create_command
from schmetterling.build.maven import create_multi_modules
from schmetterling.build.maven import create_state
from schmetterling.build.maven import get_maven_infos
from schmetterling.build.maven import get_maven_repos
from schmetterling.build.maven import get_multi_modules
from schmetterling.build.state import BuildState, Build
from schmetterling.setup.state import Repo
| 33.381538 | 107 | 0.461702 |
22fe0847296c50b27120f9c55084e9eba84b2a5a | 1,753 | py | Python | Copados y Clases/Mastermind_DEBUG.py | FdelMazo/7540rw-Algo1 | 8900604873195df9e902ead6bcb67723a8b654c8 | [
"MIT"
] | 1 | 2021-11-20T18:41:34.000Z | 2021-11-20T18:41:34.000Z | Copados y Clases/Mastermind_DEBUG.py | FdelMazo/7540rw-Algo1 | 8900604873195df9e902ead6bcb67723a8b654c8 | [
"MIT"
] | null | null | null | Copados y Clases/Mastermind_DEBUG.py | FdelMazo/7540rw-Algo1 | 8900604873195df9e902ead6bcb67723a8b654c8 | [
"MIT"
] | null | null | null | #Sacar las lineas con DEBUG para que el juego funcione
import random
DIGITOS = 4  # length of the secret code; all its digits are distinct
def mastermind():
    """Main loop of the Mastermind game (prompts are in Spanish)."""
    print("Bienvenido al Mastermind!")
    print("Instrucciones: Tenes que adivinar un codigo de {} digitos distintos. Tu cantidad de aciertos son los numeros que estan correctamente posicionados, tu cantidad de coincidencias son los numeros bien elegidos pero mal posicionados. Suerte!".format(DIGITOS))
    secret = elegir_codigo()
    quit_word = "Me retiro"
    attempts = 1
    guess = input("Que codigo propones? (o pone 'Me retiro') ")
    # keep asking until the player either cracks the code or gives up
    while guess not in (secret, quit_word):
        attempts += 1
        hits, near_misses = analizar_propuesta(guess, secret)
        print("Tu propuesta ({}) tiene {} aciertos y {} coincidencias.".format(guess, hits, near_misses))
        guess = input("Propone otro codigo: ")
    if guess == quit_word:
        print("El codigo era: {}".format(secret))
    else:
        print("Ganaste! Ganaste en {} intentos".format(attempts))
def elegir_codigo():
    """Pick a random secret code of DIGITOS distinct digits.

    The [DEBUG] prints are intentional (this is the debug build; the file
    header says to strip them for normal play). Note that only the first
    candidate of each position is printed -- re-draws for duplicates are
    silent, which keeps the random.choice call sequence unchanged.
    """
    pool = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
    code = ""
    for _ in range(DIGITOS):
        pick = random.choice(pool)
        print("[DEBUG] candidato:", pick)
        # re-draw until the candidate is not already used
        while pick in code:
            pick = random.choice(pool)
        code += pick
        print("[DEBUG] el codigo va siendo", code)
    return code
def analizar_propuesta(propuesta, codigo):
    """Score a guess against the secret code.

    Args:
        propuesta: the player's guess (string of digits, at least as long
            as `codigo`).
        codigo: the secret code (string of distinct digits).

    Returns:
        A tuple `(aciertos, coincidencias)`: count of exact-position
        matches, and count of digits present in the code but at the
        wrong position.
    """
    aciertos = 0
    coincidencias = 0
    # Iterate over the secret's own length instead of the module-level
    # DIGITOS constant: identical behaviour in the game (codes are always
    # DIGITOS long) but the helper now works for codes of any length and
    # no longer depends on a hidden global.
    for i in range(len(codigo)):
        if propuesta[i] == codigo[i]:
            aciertos += 1
        elif propuesta[i] in codigo:
            coincidencias += 1
    return aciertos, coincidencias
mastermind() | 37.297872 | 263 | 0.697091 |
22feb380588bd77256d844c8ff999d4f5568fa43 | 1,499 | py | Python | setup.py | ovnicraft/runa | 4834b7467314c51c3e8e010b47a10bdfae597a5b | [
"MIT"
] | 5 | 2018-02-02T13:12:55.000Z | 2019-12-21T04:21:10.000Z | setup.py | ovnicraft/runa | 4834b7467314c51c3e8e010b47a10bdfae597a5b | [
"MIT"
] | 1 | 2017-12-18T15:49:13.000Z | 2017-12-18T15:49:13.000Z | setup.py | ovnicraft/runa | 4834b7467314c51c3e8e010b47a10bdfae597a5b | [
"MIT"
] | 1 | 2020-03-17T03:50:19.000Z | 2020-03-17T03:50:19.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["Click>=6.0", "suds2==0.7.1"]
setup_requirements = [
# TODO(ovnicraft): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name="runa",
version="0.2.10",
description="Librera para uso de WS del Bus Gubernamental de Ecuador",
long_description=readme + "\n\n" + history,
author="Cristian Salamea",
author_email="cristian.salamea@gmail.com",
url="https://github.com/ovnicraft/runa",
packages=find_packages(include=["runa"]),
entry_points={"console_scripts": ["runa=runa.cli:main"]},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="runa webservices ecuador bgs",
classifiers=[
"Development Status :: 3 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
test_suite="tests",
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| 28.826923 | 79 | 0.662442 |
22ffabcfd90f7354812821f61ad46409c8d4a120 | 15,233 | py | Python | PyPortal_User_Interface/code.py | RichardA1/Adafruit_Learning_System_Guides | 7d06d8a126f357a431384c3af73339cb46f44c19 | [
"MIT"
] | 1 | 2022-01-31T21:55:48.000Z | 2022-01-31T21:55:48.000Z | PyPortal_User_Interface/code.py | aadisalimani/Adafruit_Learning_System_Guides | 1b18cfcd6d426bf018545fd7b4102a8196c11c16 | [
"MIT"
] | null | null | null | PyPortal_User_Interface/code.py | aadisalimani/Adafruit_Learning_System_Guides | 1b18cfcd6d426bf018545fd7b4102a8196c11c16 | [
"MIT"
] | null | null | null | import time
import board
import displayio
import busio
from analogio import AnalogIn
import neopixel
import adafruit_adt7410
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text.label import Label
from adafruit_button import Button
import adafruit_touchscreen
from adafruit_pyportal import PyPortal
# ------------- Inputs and Outputs Setup ------------- #
# init. the temperature sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
adt = adafruit_adt7410.ADT7410(i2c_bus, address=0x48)
adt.high_resolution = True
# init. the light sensor
light_sensor = AnalogIn(board.LIGHT)
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=1)
WHITE = 0xffffff
RED = 0xff0000
YELLOW = 0xffff00
GREEN = 0x00ff00
BLUE = 0x0000ff
PURPLE = 0xff00ff
BLACK = 0x000000
# ---------- Sound Effects ------------- #
soundDemo = '/sounds/sound.wav'
soundBeep = '/sounds/beep.wav'
soundTab = '/sounds/tab.wav'
# ------------- Other Helper Functions------------- #
# Helper for cycling through a number set of 1 to x.
# ------------- Screen Setup ------------- #
pyportal = PyPortal()
display = board.DISPLAY
display.rotation = 270
# Backlight function
# Value between 0 and 1 where 0 is OFF, 0.5 is 50% and 1 is 100% brightness.
# Set the Backlight
set_backlight(0.3)
# Touchscreen setup
# ------Rotate 270:
screen_width = 240
screen_height = 320
ts = adafruit_touchscreen.Touchscreen(board.TOUCH_YD, board.TOUCH_YU,
board.TOUCH_XR, board.TOUCH_XL,
calibration=((5200, 59000),
(5800, 57000)),
size=(screen_width, screen_height))
# ------------- Display Groups ------------- #
splash = displayio.Group(max_size=15) # The Main Display Group
view1 = displayio.Group(max_size=15) # Group for View 1 objects
view2 = displayio.Group(max_size=15) # Group for View 2 objects
view3 = displayio.Group(max_size=15) # Group for View 3 objects
# ------------- Setup for Images ------------- #
# Display an image until the loop starts
pyportal.set_background('/images/loading.bmp')
bg_group = displayio.Group(max_size=1)
splash.append(bg_group)
icon_group = displayio.Group(max_size=1)
icon_group.x = 180
icon_group.y = 120
icon_group.scale = 1
view2.append(icon_group)
# This will handel switching Images and Icons
def set_image(group, filename):
    """Swap the image shown by *group* (used for icons and backgrounds).

    Pops whatever sprite the group currently holds, then loads *filename*
    as an on-disk bitmap and appends it as a fresh TileGrid. Passing a
    falsy *filename* simply clears the group.

    NOTE(review): the bitmap file handle is never closed on purpose --
    displayio.OnDiskBitmap presumably streams pixels from it while the
    image is displayed; confirm before adding a close().
    """
    print("Set image to ", filename)
    if group:
        group.pop()
    if not filename:
        # no image requested -- leave the group empty
        return
    bitmap_file = open(filename, "rb")
    bitmap = displayio.OnDiskBitmap(bitmap_file)
    try:
        sprite = displayio.TileGrid(bitmap, pixel_shader=displayio.ColorConverter())
    except TypeError:
        # older displayio builds require an explicit position argument
        sprite = displayio.TileGrid(bitmap, pixel_shader=displayio.ColorConverter(),
                                    position=(0, 0))
    group.append(sprite)
set_image(bg_group, "/images/BGimage.bmp")
# ---------- Text Boxes ------------- #
# Set the font and preload letters
font = bitmap_font.load_font("/fonts/Helvetica-Bold-16.bdf")
font.load_glyphs(b'abcdefghjiklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890- ()')
# Default Label styling:
TABS_X = 5
TABS_Y = 50
# Text Label Objects
feed1_label = Label(font, text="Text Wondow 1", color=0xE39300, max_glyphs=200)
feed1_label.x = TABS_X
feed1_label.y = TABS_Y
view1.append(feed1_label)
feed2_label = Label(font, text="Text Wondow 2", color=0xFFFFFF, max_glyphs=200)
feed2_label.x = TABS_X
feed2_label.y = TABS_Y
view2.append(feed2_label)
sensors_label = Label(font, text="Data View", color=0x03AD31, max_glyphs=200)
sensors_label.x = TABS_X
sensors_label.y = TABS_Y
view3.append(sensors_label)
sensor_data = Label(font, text="Data View", color=0x03AD31, max_glyphs=100)
sensor_data.x = TABS_X+15
sensor_data.y = 170
view3.append(sensor_data)
text_hight = Label(font, text="M", color=0x03AD31, max_glyphs=10)
# return a reformatted string with word wrapping using PyPortal.wrap_nicely
# ---------- Display Buttons ------------- #
# Default button styling:
BUTTON_HEIGHT = 40
BUTTON_WIDTH = 80
# We want three buttons across the top of the screen
TAPS_HEIGHT = 40
TAPS_WIDTH = int(screen_width/3)
TAPS_Y = 0
# We want two big buttons at the bottom of the screen
BIG_BUTTON_HEIGHT = int(screen_height/3.2)
BIG_BUTTON_WIDTH = int(screen_width/2)
BIG_BUTTON_Y = int(screen_height-BIG_BUTTON_HEIGHT)
# This group will make it easy for us to read a button press later.
buttons = []
# Main User Interface Buttons
button_view1 = Button(x=0, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View1", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view1) # adding this button to the buttons group
button_view2 = Button(x=TAPS_WIDTH, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View2", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view2) # adding this button to the buttons group
button_view3 = Button(x=TAPS_WIDTH*2, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View3", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view3) # adding this button to the buttons group
button_switch = Button(x=0, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Switch", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_switch) # adding this button to the buttons group
button_2 = Button(x=BIG_BUTTON_WIDTH, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Button", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_2) # adding this button to the buttons group
# Add all of the main buttons to the spalsh Group
for b in buttons:
splash.append(b.group)
# Make a button to change the icon image on view2
button_icon = Button(x=150, y=60,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Icon", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_icon) # adding this button to the buttons group
# Add this button to view2 Group
view2.append(button_icon.group)
# Make a button to play a sound on view2
button_sound = Button(x=150, y=170,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Sound", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_sound) # adding this button to the buttons group
# Add this button to view2 Group
view3.append(button_sound.group)
#pylint: disable=global-statement
#pylint: enable=global-statement
# Set veriables and startup states
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
hideLayer(view2)
hideLayer(view3)
view_live = 1
icon = 1
icon_name = "Ruby"
button_mode = 1
switch_state = 0
button_switch.label = "OFF"
button_switch.selected = True
# Update out Labels with display text.
text_box(feed1_label, TABS_Y,
"The text on this screen is wrapped so that all of it fits nicely into a \
text box that is ### x ###.", 30)
text_box(feed1_label, TABS_Y,
'The text on this screen is wrapped so that all of it fits nicely into a \
text box that is {} x {}.'
.format(feed1_label.bounding_box[2], feed1_label.bounding_box[3]*2), 30)
text_box(feed2_label, TABS_Y, 'Tap on the Icon button to meet a new friend.', 18)
text_box(sensors_label, TABS_Y,
"This screen can display sensor readings and tap Sound to play a WAV file.", 28)
board.DISPLAY.show(splash)
# ------------- Code Loop ------------- #
while True:
touch = ts.touch_point
light = light_sensor.value
tempC = round(adt.temperature)
tempF = tempC * 1.8 + 32
sensor_data.text = 'Touch: {}\nLight: {}\n Temp: {}F'.format(touch, light, tempF)
# ------------- Handle Button Press Detection ------------- #
if touch: # Only do this if the screen is touched
# loop with buttons using enumerate() to number each button group as i
for i, b in enumerate(buttons):
if b.contains(touch): # Test each button to see if it was pressed
print('button%d pressed' % i)
if i == 0 and view_live != 1: # only if view1 is visable
pyportal.play_file(soundTab)
switch_view(1)
while ts.touch_point:
pass
if i == 1 and view_live != 2: # only if view2 is visable
pyportal.play_file(soundTab)
switch_view(2)
while ts.touch_point:
pass
if i == 2 and view_live != 3: # only if view3 is visable
pyportal.play_file(soundTab)
switch_view(3)
while ts.touch_point:
pass
if i == 3:
pyportal.play_file(soundBeep)
# Toggle switch button type
if switch_state == 0:
switch_state = 1
b.label = "ON"
b.selected = False
pixel.fill(WHITE)
print("Swich ON")
else:
switch_state = 0
b.label = "OFF"
b.selected = True
pixel.fill(BLACK)
print("Swich OFF")
# for debounce
while ts.touch_point:
pass
print("Swich Pressed")
if i == 4:
pyportal.play_file(soundBeep)
# Momentary button type
b.selected = True
print('Button Pressed')
button_mode = numberUP(button_mode, 5)
if button_mode == 1:
pixel.fill(RED)
elif button_mode == 2:
pixel.fill(YELLOW)
elif button_mode == 3:
pixel.fill(GREEN)
elif button_mode == 4:
pixel.fill(BLUE)
elif button_mode == 5:
pixel.fill(PURPLE)
switch_state = 1
button_switch.label = "ON"
button_switch.selected = False
# for debounce
while ts.touch_point:
pass
print("Button released")
b.selected = False
if i == 5 and view_live == 2: # only if view2 is visable
pyportal.play_file(soundBeep)
b.selected = True
while ts.touch_point:
pass
print("Icon Button Pressed")
icon = numberUP(icon, 3)
if icon == 1:
icon_name = "Ruby"
elif icon == 2:
icon_name = "Gus"
elif icon == 3:
icon_name = "Billie"
b.selected = False
text_box(feed2_label, TABS_Y,
"Every time you tap the Icon button the icon image will \
change. Say hi to {}!".format(icon_name), 18)
set_image(icon_group, "/images/"+icon_name+".bmp")
if i == 6 and view_live == 3: # only if view3 is visable
b.selected = True
while ts.touch_point:
pass
print("Sound Button Pressed")
pyportal.play_file(soundDemo)
b.selected = False
| 35.508159 | 89 | 0.594433 |
fe00cf45d1015948865b349bcd27a15e243e3e66 | 7,741 | py | Python | btse_futures/order.py | yottatix/btse-python | 1c5019d0a68dff797afc70c4cc32c1950c28af4e | [
"MIT"
] | null | null | null | btse_futures/order.py | yottatix/btse-python | 1c5019d0a68dff797afc70c4cc32c1950c28af4e | [
"MIT"
] | null | null | null | btse_futures/order.py | yottatix/btse-python | 1c5019d0a68dff797afc70c4cc32c1950c28af4e | [
"MIT"
] | null | null | null | import json
from btse_futures.constants import OrderType, Side, TimeInForce
| 33.510823 | 274 | 0.612324 |
fe00feaeeab5dd9b94bc8b6fc0a0dcbedc801a5d | 2,037 | py | Python | tests/mock_responses.py | md-reddevil/blinkpy | 3c7892385352079227c6251eb88257870bea0bb3 | [
"MIT"
] | null | null | null | tests/mock_responses.py | md-reddevil/blinkpy | 3c7892385352079227c6251eb88257870bea0bb3 | [
"MIT"
] | null | null | null | tests/mock_responses.py | md-reddevil/blinkpy | 3c7892385352079227c6251eb88257870bea0bb3 | [
"MIT"
] | null | null | null | """Simple mock responses definitions."""
from blinkpy.helpers.util import BlinkURLHandler
import blinkpy.helpers.constants as const
# Canned payload for the mocked /login endpoint. The nested
# 'authtoken' value is the token that authorized GET requests must
# present in their TOKEN_AUTH header (see mocked_session_send below).
LOGIN_RESPONSE = {
    'region': {'mock': 'Test'},
    'networks': {
        '1234': {'name': 'test', 'onboarded': True}
    },
    'authtoken': {'authtoken': 'foobar123', 'message': 'auth'}
}
def mocked_session_send(*args, **kwargs):
    """Stand-in for ``requests.Session.send``: routes on method and URL.

    The first positional argument is the prepared request; the response
    body and status code are chosen from its method, URL and headers.
    """
    request = args[0]
    url = request.url
    headers = request.headers
    method = request.method

    if method == 'GET':
        # GETs must carry the token issued by the mocked login.
        expected_token = LOGIN_RESPONSE['authtoken']['authtoken']
        if headers['TOKEN_AUTH'] != expected_token:
            status, response = 400, {'message': 'Not Authorized', 'code': 400}
        elif url == 'use_bad_response':
            status, response = 200, {'foo': 'bar'}
        elif url == 'reauth':
            status, response = 777, {'message': 'REAUTH', 'code': 777}
        else:
            status, response = 200, {'test': 'foo'}
    elif method == 'POST':
        if url in (const.LOGIN_URL, const.LOGIN_BACKUP_URL):
            status, response = 200, LOGIN_RESPONSE
        elif url == 'http://wrong.url/' or url is None:
            status, response = 404, {'message': 'Error', 'code': 404}
        else:
            status, response = 200, {'message': 'foo', 'code': 200}

    return MockResponse(response, status)
| 28.291667 | 65 | 0.573883 |
fe01b90ce53e119b08e13770e4500dbf262d962f | 2,061 | py | Python | fits_tools.py | steveschulze/Photometry | 3bc4ce457a270962321176d0e3e288b5a96cd34b | [
"BSD-2-Clause"
] | 6 | 2020-03-05T20:58:35.000Z | 2022-02-13T20:18:46.000Z | fits_tools.py | steveschulze/Photometry | 3bc4ce457a270962321176d0e3e288b5a96cd34b | [
"BSD-2-Clause"
] | 1 | 2020-03-10T00:03:46.000Z | 2020-03-10T00:03:46.000Z | fits_tools.py | steveschulze/Photometry | 3bc4ce457a270962321176d0e3e288b5a96cd34b | [
"BSD-2-Clause"
] | 1 | 2020-11-26T10:38:47.000Z | 2020-11-26T10:38:47.000Z | from astropy import coordinates as coord
from astropy import wcs
from astropy.io import fits
from astropy import units as u
from misc import bcolors
import numpy as np
import os
def convert_hms_dd(RA, DEC):
	'''
	Convert equatorial coordinates to decimal degrees.

	RA, DEC : str
		Right ascension and declination. Both must either contain ':'
		(sexagesimal HH:MM:SS / DD:MM:SS) or contain '.' (already in
		decimal degrees).

	Returns the tuple (RA_dd, Dec_dd) as floats in degrees; exits the
	process when neither recognised format is used.
	'''
	if (':' in RA) and (':' in DEC):
		# Sexagesimal input: astropy handles the hour/degree conversion.
		Coord_dd = coord.SkyCoord(RA, DEC, unit=(u.hour, u.degree), frame='icrs')
		RA_dd = Coord_dd.ra.deg
		Dec_dd = Coord_dd.dec.deg
	elif (not (':' in RA) and not (':' in DEC)) and (('.' in RA) and ('.' in DEC)):
		# Already decimal degrees: plain float conversion is enough.
		RA_dd, Dec_dd = float(RA), float(DEC)
	else:
		# BUGFIX: 'sys' was used here without being imported anywhere in
		# this module, so the error path itself crashed with a NameError
		# instead of printing the message and exiting cleanly.
		import sys
		print(bcolors.FAIL + 'Coordinates have wrong format.' + bcolors.ENDC)
		sys.exit()
	return RA_dd, Dec_dd
def get_header(FILE, KEYWORD):
	'''
	Read the value of a single keyword from a FITS file header.
	'''
	return fits.getheader(FILE)[KEYWORD]
def pix2arcsec(FITS):
	'''
	Return the median pixel scale of a FITS image in arcsec/pixel.

	When the file has more than one HDU the primary and first extension
	headers are combined before building the WCS; otherwise the default
	header is used. The projection-plane pixel scales (degrees/pixel)
	are converted to arcseconds and their median is returned.
	'''
	# BUGFIX: the HDU list was previously opened and never closed
	# (resource/file-handle leak); the context manager releases it.
	with fits.open(FITS) as hdu:
		if len(hdu) > 1:
			header = fits.getheader(FITS, 0)
			header += fits.getheader(FITS, 1)
		else:
			header = fits.getheader(FITS)
	hdu_wcs = wcs.WCS(header)
	# proj_plane_pixel_scales() yields degrees/pixel; *3600 -> arcsec.
	return np.median(wcs.utils.proj_plane_pixel_scales(hdu_wcs)) * 3600
def sky2xy(FITS, RA=False, DEC=False, CAT=None):
	'''
	Convert sky coordinates to pixel coordinates using the external
	WCSTools programme ``sky2xy``.

	Either a single position (RA/DEC, decimal degrees) or a whole
	catalogue file (CAT; RA and DEC expected in the first two columns)
	is transformed against the WCS of FITS.

	Returns a 1-d numpy array (x, y) for a single on-image position
	(None when the position is off image), or a 2-d array of (x, y)
	rows for a catalogue.
	'''
	if CAT is None:  # idiom fix: compare against None with 'is'
		# BUGFIX: the original '!= False' test rejected the legitimate
		# coordinate value 0.0 (0.0 == False in Python); identity checks
		# only exclude the actual default sentinel.
		if RA is not False and DEC is not False:
			# 'grep -v off' drops positions that fall off the image.
			cmd = ('sky2xy %s %s %s | grep -v off' % (FITS, RA, DEC))
			program_call = os.popen(cmd)
			xy = []
			for line in program_call:
				# Last two whitespace-separated fields are x and y.
				xy = np.array(line.strip().split()[-2:]).astype(float)
			if len(xy) > 0:
				return xy
	else:
		# Reduce the catalogue to its first two columns (RA, DEC) in a
		# temporary '.reg' file, then transform all positions at once.
		cmd = ("more %s | awk '{print $1,$2}' > %s" % (CAT, CAT.replace(CAT.split('.')[-1], 'reg')))
		os.system(cmd)
		cmd = ("sky2xy %s @%s | grep -v off | awk '{print $5, $6}'" % (FITS, CAT.replace(CAT.split('.')[-1], 'reg')))
		cat = os.popen(cmd)
		xy = []
		for line in cat:
			xy.append(list(map(float, line.replace('\n', '').split())))
		return np.array(xy)
def xy2sky(FITSFILE, X, Y):
	'''
	Transform the pixel position (X, Y) of FITSFILE to sky coordinates
	via the external WCSTools programme ``xy2sky``.

	Returns a list of [RA, DEC] string pairs, one per output line.
	'''
	output = os.popen('xy2sky %s %s %s' % (FITSFILE, X, Y))
	return [line.strip().split()[:2] for line in output]
| 21.247423 | 111 | 0.622028 |
fe028f3f35a9ad5d36908ec80630b139c6300e3c | 2,155 | py | Python | test_stbp_snn_eval.py | neurom-iot/n3ml | 39c6b50661f293d58b4b37ef613643860724bb24 | [
"MIT"
] | 11 | 2019-03-15T17:20:54.000Z | 2022-03-01T08:25:36.000Z | test_stbp_snn_eval.py | neurom-iot/n3ml | 39c6b50661f293d58b4b37ef613643860724bb24 | [
"MIT"
] | 7 | 2019-03-15T16:02:51.000Z | 2021-12-03T08:17:06.000Z | test_stbp_snn_eval.py | neurom-iot/n3ml | 39c6b50661f293d58b4b37ef613643860724bb24 | [
"MIT"
] | 9 | 2019-10-14T12:38:19.000Z | 2021-12-02T04:49:28.000Z | import argparse
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from n3ml.model import DynamicModel_STBP_SNN
if __name__ == '__main__':
    # CLI entry point: collect data/checkpoint paths and SNN time steps,
    # then hand the parsed namespace to the application driver.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', default='data')
    parser.add_argument('--batch_size', default=100, type=int)
    parser.add_argument('--num_steps', default=15, type=int)
    parser.add_argument('--pretrained', default='pretrained/stbp_dynamic_acc_9897.pt')
    # NOTE(review): 'app' is not defined in this excerpt -- presumably a
    # driver function defined earlier in the full source; confirm there.
    app(parser.parse_args())
fe02b43015e3d0762066c7be3eb1af3c04bff4d4 | 2,757 | py | Python | section_07_(files)/read_csv.py | govex/python-lessons | e692f48b6db008a45df0b941dee1e580f5a6c800 | [
"MIT"
] | 5 | 2019-10-25T20:47:22.000Z | 2021-12-07T06:37:22.000Z | section_07_(files)/read_csv.py | govex/python-lessons | e692f48b6db008a45df0b941dee1e580f5a6c800 | [
"MIT"
] | null | null | null | section_07_(files)/read_csv.py | govex/python-lessons | e692f48b6db008a45df0b941dee1e580f5a6c800 | [
"MIT"
] | 1 | 2021-07-20T18:56:15.000Z | 2021-07-20T18:56:15.000Z | # If you're new to file handling, be sure to check out with_open.py first!
# You'll also want to check out read_text.py before this example. This one is a bit more advanced.
# Parse the CSV by hand for teaching purposes. NOTE(review): splitting on
# bare "," breaks on quoted fields containing commas, and split("\n")
# leaves a trailing empty entry if the file ends with a newline -- the
# stdlib 'csv' module handles both; fine for this tutorial's input.
with open('read_csv.csv', 'r') as states_file:
    # Instead of leaving the file contents as a string, we're splitting the file into a list at every new line, and we save that list into the variable states
    states = states_file.read().split("\n")

# Since this is a spreadsheet in comma separated values (CSV) format, we can think of states as a list of rows.
# But we'll need to split the columns into a list as well!
for index, state in enumerate(states):
    states[index] = state.split(",")
# Now we have a nested list with all of the information!
# Our file looks like this:
# State, Population Estimate, Percent of Total population
# California, 38332521, 11.91%
# Texas, 26448193, 8.04%
# ...
# Our header row is at state[0], so we can use that to display the information in a prettier way.
# Print each state as a small labelled record, pairing every data column
# with its header-row name from states[0].
for state in states[1:]: # We use [1:] so we skip the header row.
    # state[0] is the first column in the row, which contains the name of the state.
    print("\n---{0}---".format(state[0]))
    for index, info in enumerate(state[1:]): # We use [1:] so we don't repeat the state name.
        # index+1 realigns with the header because of the [1:] slice above.
        print("{0}:\t{1}".format(states[0][index+1], info))
# states is the full list of all of the states. It's a nested list. The outer list contains the rows, each inner list contains the columns in that row.
# states[0] refers to the header row of the list
# So states[0][0] would refer to "State", states[0][1] would refer to "Population Estimate", and states[0][2] would refer to "Percent of total population"
# state is one state within states. state is also a list, containing the name, population, and percentage of that particular state.
# So the first time through the loop, state[0] would refer to "California", state[1] would refer to 38332521, and state[2] would refer to 11.91%
# Since state is being create by the for loop in line 24, it gets a new value each time through.
# We're using enumerate to get the index (slicing number) of the column we're on, along with the information.
# That way we can pair the column name with the information, as shown in line 30.
# NOTE: Since we're slicing from [1:] in line 29, we need to increase the index by + 1, otherwise our headers will be off by one.
# Sample output:
# ---"California"---
# "Population Estimate": 38332521
# "Percent of Total population": "11.91%"
# ---"Texas"---
# "Population Estimate": 26448193
# "Percent of Total population": "8.04%"
# ---"New York"---
# "Population Estimate": 19651127
# "Percent of Total population": "6.19%"
| 48.368421 | 158 | 0.692057 |
fe03d9810588ad4d8d061ca21558f5e026141e64 | 2,334 | py | Python | kaggle_melanoma/schedulers.py | tinve/kaggle_melanoma | 6d2d16d62a394fd9cc2498bdf1a19ce60fe047eb | [
"MIT"
] | 8 | 2020-06-01T10:42:40.000Z | 2022-02-17T08:42:49.000Z | kaggle_melanoma/schedulers.py | tinve/kaggle_melanoma | 6d2d16d62a394fd9cc2498bdf1a19ce60fe047eb | [
"MIT"
] | null | null | null | kaggle_melanoma/schedulers.py | tinve/kaggle_melanoma | 6d2d16d62a394fd9cc2498bdf1a19ce60fe047eb | [
"MIT"
] | 2 | 2020-06-08T22:34:38.000Z | 2022-02-24T03:15:59.000Z | import math
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
func_zoo = {
"cosine_decay": lambda epoch, step, len_epoch, total_epoch: 0.5
* (math.cos(step * math.pi / (total_epoch * len_epoch)) + 1)
}
| 35.363636 | 114 | 0.641388 |
fe04e111d5ba3ee739293195694259fc26b56d25 | 30 | py | Python | data/data/__init__.py | PumpkinYing/GAT | 723a20fcd9f915123d46ef4ef03eeadb6910635a | [
"MIT"
] | null | null | null | data/data/__init__.py | PumpkinYing/GAT | 723a20fcd9f915123d46ef4ef03eeadb6910635a | [
"MIT"
] | null | null | null | data/data/__init__.py | PumpkinYing/GAT | 723a20fcd9f915123d46ef4ef03eeadb6910635a | [
"MIT"
] | null | null | null | from .dataset import load_data | 30 | 30 | 0.866667 |
fe056ef418d151035d2b9bd419b580cf756d0fd1 | 1,099 | py | Python | utils.py | federicosapienza/InboxNotionTelegramBot | 031d5e78cd352dfb692b93f3e0b421695f1dc18e | [
"MIT"
] | null | null | null | utils.py | federicosapienza/InboxNotionTelegramBot | 031d5e78cd352dfb692b93f3e0b421695f1dc18e | [
"MIT"
] | null | null | null | utils.py | federicosapienza/InboxNotionTelegramBot | 031d5e78cd352dfb692b93f3e0b421695f1dc18e | [
"MIT"
] | null | null | null | import json
import logging
logger = logging.getLogger(__name__)
# Configuration is read once at import time; a missing file or missing
# key aborts startup immediately (fail fast, before the bot runs).
with open('configuration.json') as f:
    config = json.load(f)

TELEGRAM_TOKEN = config["telegram-bot-token"]
NOTION_TOKEN = config["notion-token"]
NOTION_TABLE_URL = config["inbox_table"]["table_url"]
def check_allowed_user(user_id):
    """
    Return True when *user_id* matches the single Telegram user allowed
    in the configuration, False otherwise.

    :param user_id: telegram user id (int or str; compared as str)
    """
    return str(user_id) == config["allowed_user_id"]
def restrict_action(handled_action):
    """
    Wrapper for creating a private bot
    :param handled_action: the action to perform
    """
    # NOTE(review): the wrapped inner function is missing from this
    # excerpt; 'check_private' is presumably defined in the full source
    # (likely gating 'handled_action' behind check_allowed_user) --
    # confirm against the original module.
    return check_private
| 27.475 | 107 | 0.674249 |
fe05b5a6d987129895e699ef1d4e1c22d1bf1542 | 472 | py | Python | enaml/core/byteplay/__init__.py | timgates42/enaml | 054efe6a4047d84f2fff718d656a64a2363884dc | [
"BSD-3-Clause-Clear"
] | null | null | null | enaml/core/byteplay/__init__.py | timgates42/enaml | 054efe6a4047d84f2fff718d656a64a2363884dc | [
"BSD-3-Clause-Clear"
] | null | null | null | enaml/core/byteplay/__init__.py | timgates42/enaml | 054efe6a4047d84f2fff718d656a64a2363884dc | [
"BSD-3-Clause-Clear"
] | null | null | null | #------------------------------------------------------------------------------
# Copyright (c) 2013-2018, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from ...compat import USE_WORDCODE
if USE_WORDCODE:
from .wbyteplay import *
else:
from .byteplay3 import *
| 33.714286 | 79 | 0.493644 |
fe068879b9f1513a9f5e49e88200ed64c8fa16f1 | 12,623 | py | Python | cassiopeia/datastores/riotapi/match.py | artemigkh/cassiopeia | fa78cb8f86ea21857916a707d04de6a05498033e | [
"MIT"
] | 1 | 2021-09-07T05:26:21.000Z | 2021-09-07T05:26:21.000Z | cassiopeia/datastores/riotapi/match.py | artemigkh/cassiopeia | fa78cb8f86ea21857916a707d04de6a05498033e | [
"MIT"
] | null | null | null | cassiopeia/datastores/riotapi/match.py | artemigkh/cassiopeia | fa78cb8f86ea21857916a707d04de6a05498033e | [
"MIT"
] | 1 | 2016-10-20T11:54:20.000Z | 2016-10-20T11:54:20.000Z | from time import time
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator, Union
import arrow
import datetime
import math
from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query
from .common import RiotAPIService, APINotFoundError
from ...data import Platform, Season, Queue, SEASON_IDS, QUEUE_IDS
from ...dto.match import MatchDto, MatchListDto, TimelineDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
| 45.735507 | 178 | 0.614751 |
fe073352dbed399802293822986fcaea27535a33 | 10,374 | py | Python | Lib/site-packages/hackedit/vendor/jedi/cache.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 1 | 2017-08-19T08:13:28.000Z | 2017-08-19T08:13:28.000Z | node_modules/nuclide/pkg/nuclide-python-rpc/VendorLib/jedi/cache.py | kevingatera/kgatewebapp | f0dbc50b7af2736e1f6c6f96f0a26fc7ff69db20 | [
"Unlicense"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/hackedit/vendor/jedi/cache.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | """
This caching is very important for speed and memory optimizations. There's
nothing really spectacular, just some decorators. The following cache types are
available:
- module caching (`load_parser` and `save_parser`), which uses pickle and is
really important to assure low load times of modules like ``numpy``.
- ``time_cache`` can be used to cache something for just a limited time span,
which can be useful if there's user interaction and the user cannot react
faster than a certain time.
This module is one of the reasons why |jedi| is not thread-safe. As you can see
there are global variables, which are holding the cache information. Some of
these variables are being cleaned after every API usage.
"""
import time
import os
import sys
import json
import hashlib
import gc
import inspect
import shutil
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from jedi import settings
from jedi import common
from jedi import debug
_time_caches = {}
# for fast_parser, should not be deleted
parser_cache = {}
def clear_time_caches(delete_all=False):
    """Drop cached entries accumulated during completions.

    :param delete_all: When true, wipe every time cache completely and
        also empty the parser cache, which is normally kept for faster
        reparsing.
    """
    global _time_caches

    if delete_all:
        # Full reset: every time cache plus the parser cache.
        for cache in _time_caches.values():
            cache.clear()
        parser_cache.clear()
        return

    # Default path: only remove entries whose expiry time has passed.
    for cache in _time_caches.values():
        # Iterate over a snapshot so deletion during the walk is safe.
        for key, (expiry, _value) in list(cache.items()):
            if expiry < time.time():
                del cache[key]
def time_cache(time_add_setting):
    """
    Decorator factory for time-limited caching.

    Call it with the name of a settings attribute holding the cache
    lifetime, then apply the result to a function whose first callable
    argument yields the cache key. The wrapped function only runs when
    the key is absent; entries expire after the configured time span.
    """
    # NOTE(review): the inner decorator '_temp' is missing from this
    # excerpt -- it is defined here in the full source.
    return _temp
def underscore_memoization(func):
    """
    Decorator for methods::
        class A(object):
            def x(self):
                if self._x:
                    self._x = 10
                return self._x
    Becomes::
        class A(object):
            @underscore_memoization
            def x(self):
                return 10
    A now has an attribute ``_x`` written by this decorator.
    """
    # Cache attribute name: the method name prefixed with an underscore.
    name = '_' + func.__name__
    # NOTE(review): the inner 'wrapper' function is missing from this
    # excerpt -- it is defined here in the full source.
    return wrapper
def memoize_method(method):
    """A normal memoize function."""
    # NOTE(review): the inner 'wrapper' function is missing from this
    # excerpt -- it is defined here in the full source.
    return wrapper
def memoize_function(obj):
    """ A normal memoize function for memoizing free functions. """
    # The cache dict is attached to the function object itself.
    cache = obj.cache = {}
    # NOTE(review): the inner 'memoizer' function is missing from this
    # excerpt -- it is defined here in the full source.
    return memoizer
def _invalidate_star_import_cache_module(module, only_main=False):
    """ Important if some new modules are being reparsed """
    # 'only_main' is accepted but unused in this body.
    try:
        # Unpacking also validates the (timestamp, modules) entry shape;
        # a missing entry falls through to the no-op branch.
        t, modules = _time_caches['star_import_cache_validity'][module]
    except KeyError:
        pass
    else:
        # Entry exists: drop the cached star-import validity for module.
        del _time_caches['star_import_cache_validity'][module]
def invalidate_star_import_cache(path):
    """Invalidate the star-import cache of the parser cached for *path*.

    Does nothing when no parser is cached under that path.
    """
    cached = parser_cache.get(path)
    if cached is not None:
        _invalidate_star_import_cache_module(cached.parser.module)
def load_parser(path):
    """
    Returns the module or None, if it fails.

    Lookup order: the in-memory parser_cache (when the cached entry is
    at least as new as the file on disk), then the filesystem pickle
    cache when enabled. A stale in-memory entry is invalidated and None
    is returned (the caller is expected to reparse).
    """
    # Modification time of the file; None for path-less (virtual) input.
    p_time = os.path.getmtime(path) if path else None
    try:
        parser_cache_item = parser_cache[path]
        if not path or p_time <= parser_cache_item.change_time:
            # Cache hit and still fresh (or no file to compare against).
            return parser_cache_item.parser
        else:
            # In case there is already a module cached and this module
            # has to be reparsed, we also need to invalidate the import
            # caches.
            _invalidate_star_import_cache_module(parser_cache_item.parser.module)
            # Falls through: returns None so the caller reparses.
    except KeyError:
        if settings.use_filesystem_cache:
            return ParserPickling.load_parser(path, p_time)
# is a singleton
ParserPickling = ParserPickling()
| 29.724928 | 88 | 0.618662 |
fe07b0d65355435bfe80638b0233d70fcb2d730a | 6,277 | py | Python | sandia_hand/ros/sandia_hand_teleop/simple_grasp/simple_grasp.py | adarshrs/Drone-Simulator-for-ROS-Kinetic | a44eef1bcaacc55539325bba663f0c8abfd7c75b | [
"MIT"
] | null | null | null | sandia_hand/ros/sandia_hand_teleop/simple_grasp/simple_grasp.py | adarshrs/Drone-Simulator-for-ROS-Kinetic | a44eef1bcaacc55539325bba663f0c8abfd7c75b | [
"MIT"
] | null | null | null | sandia_hand/ros/sandia_hand_teleop/simple_grasp/simple_grasp.py | adarshrs/Drone-Simulator-for-ROS-Kinetic | a44eef1bcaacc55539325bba663f0c8abfd7c75b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Software License Agreement (Apache License)
#
# Copyright 2013 Open Source Robotics Foundation
# Author: Morgan Quigley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import roslib; roslib.load_manifest('sandia_hand_teleop')
import rospy
import sys
from sandia_hand_msgs.srv import SimpleGraspSrv, SimpleGraspSrvResponse, SimpleGraspWithSlew, SimpleGraspWithSlewResponse
from sandia_hand_msgs.msg import SimpleGrasp
from osrf_msgs.msg import JointCommands
g_jc_pub = None
g_jc = JointCommands()
g_prev_jc_target = JointCommands()
if __name__ == '__main__':
    # ROS node setup (Python 2 source: note the print statement below).
    rospy.init_node('simple_grasp')
    # One joint command slot per finger joint: 4 fingers x 3 joints.
    g_jc.name = ["f0_j0", "f0_j1", "f0_j2",
                 "f1_j0", "f1_j1", "f1_j2",
                 "f2_j0", "f2_j1", "f2_j2",
                 "f3_j0", "f3_j1", "f3_j2"]
    # Start with all 12 joints commanded to zero.
    g_jc.position = [0] * 12
    g_prev_jc_target.position = [0] * 12
    g_jc_pub = rospy.Publisher('joint_commands', JointCommands, queue_size=1) # same namespace
    # Two grasp services (plain and slew-limited) plus a topic interface.
    g_jc_srv = rospy.Service('simple_grasp', SimpleGraspSrv, grasp_srv)
    g_sgws_srv = rospy.Service('simple_grasp_with_slew', SimpleGraspWithSlew, grasp_slew_srv)
    g_jc_sub = rospy.Subscriber('simple_grasp', SimpleGrasp, grasp_cb)
    print "simple grasp service is now running."
    # Block and process callbacks until the node is shut down.
    rospy.spin()
fe07d62ba16713663bde826dc0ce1fe3d2c478fc | 1,680 | py | Python | ui/ui_prestamo_libros.py | edzzn/Manejo_Liberia | c735d35b32fc53839acfc48d4e088e69983edf16 | [
"MIT"
] | null | null | null | ui/ui_prestamo_libros.py | edzzn/Manejo_Liberia | c735d35b32fc53839acfc48d4e088e69983edf16 | [
"MIT"
] | null | null | null | ui/ui_prestamo_libros.py | edzzn/Manejo_Liberia | c735d35b32fc53839acfc48d4e088e69983edf16 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'PrestamoDeLibros.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| 31.111111 | 79 | 0.689881 |
fe099e17f120425cb619611e6ff40d2da802127d | 3,572 | py | Python | src/zope/app/content/__init__.py | zopefoundation/zope.app.content | d4c0276ff90bceed2156d808ab6b42b85d7b3810 | [
"ZPL-2.1"
] | null | null | null | src/zope/app/content/__init__.py | zopefoundation/zope.app.content | d4c0276ff90bceed2156d808ab6b42b85d7b3810 | [
"ZPL-2.1"
] | 1 | 2017-04-22T19:53:21.000Z | 2017-04-23T16:44:58.000Z | src/zope/app/content/__init__.py | zopefoundation/zope.app.content | d4c0276ff90bceed2156d808ab6b42b85d7b3810 | [
"ZPL-2.1"
] | 1 | 2015-04-03T07:35:01.000Z | 2015-04-03T07:35:01.000Z | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Content Type convenience lookup functions."""
from zope.interface import provider
from zope.interface import providedBy
from zope.schema.interfaces import IVocabularyFactory
from zope.app.content.interfaces import IContentType
from zope.componentvocabulary.vocabulary import UtilityVocabulary
from zope.security.proxy import removeSecurityProxy
def queryType(object, interface):
    """Return the most specific interface of *object* that itself
    provides *interface*.

    The object's interface resolution order (``__iro__``) is walked from
    most to least specific; the first interface that is provided by
    *interface* (e.g. an ``IContentType``-marked interface) is returned.
    ``None`` is returned when no provided interface qualifies.
    """
    # Strip any security proxy first: a proxied object would hide the
    # real interface declarations from introspection.
    unproxied = removeSecurityProxy(object)
    for candidate in providedBy(unproxied).__iro__:
        if interface.providedBy(candidate):
            return candidate
    return None
def queryContentType(object):
    """Return the interface of *object* that is marked as a content type.

    Convenience shortcut for :func:`queryType` with
    :class:`zope.app.content.interfaces.IContentType`; returns ``None``
    when the object provides no content-type interface.
    """
    return queryType(object, IContentType)
fe0a261cca22dd0888b296d89b5ce6c47723b470 | 4,569 | py | Python | python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py | yschiebelhut/ewm-cloud-robotics | bdf3a6c13850d266b70168912494300c32d4d803 | [
"Apache-2.0"
] | null | null | null | python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py | yschiebelhut/ewm-cloud-robotics | bdf3a6c13850d266b70168912494300c32d4d803 | [
"Apache-2.0"
] | null | null | null | python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py | yschiebelhut/ewm-cloud-robotics | bdf3a6c13850d266b70168912494300c32d4d803 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""Run the SAP EWM robot configurator."""
import sys
import signal
import traceback
import logging
import time
from robcoewmrobotconfigurator.ewm_robot_sync import EWMRobotSync
from robcoewmrobotconfigurator.robotconfigcontroller import RobotConfigurationController
from robcoewmrobotconfigurator.robco_robot_api import RobCoRobotAPI
_LOGGER = logging.getLogger(__name__)
def run_robotconfigurator():
    """Run one instance of the robot configurator.

    Wires the two Kubernetes CR watchers to the EWM syncer, starts them,
    then loops (refreshing the OAuth token and re-raising watcher-thread
    exceptions) until shutdown; watchers and the thread pool are always
    stopped in the finally block.
    """
    # Register handler to control main loop
    # NOTE(review): MainLoopController is not defined in this excerpt --
    # presumably defined/imported in the full source.
    loop_control = MainLoopController()
    # Create CR watcher instances
    k8s_rb = RobCoRobotAPI()
    k8s_rc = RobotConfigurationController()
    # Create EWM robot syncer instance
    robotsync = EWMRobotSync(k8s_rc)
    # Register callback functions
    k8s_rb.register_callback('ConfigurationController', ['ADDED'], k8s_rc.robco_robot_cb)
    k8s_rc.register_callback(
        'EWMRobotSync', ['ADDED', 'MODIFIED', 'REPROCESS'], robotsync.robotconfiguration_cb)
    # Start
    k8s_rb.run()
    k8s_rc.run(reprocess=True)

    _LOGGER.info('SAP EWM Robot Configurator started')

    try:
        # Looping while K8S watchers are running
        while loop_control.shutdown is False:
            # Refresh bearer token when using OAuth
            if robotsync.odataconfig.authorization == robotsync.odataconfig.AUTH_OAUTH:
                robotsync.odatahandler.refresh_access_token()
            # Check if K8S CR handler exception occured
            # (watcher threads stash exceptions; re-raise them here so
            # the process fails loudly instead of running degraded)
            for k, exc in k8s_rb.thread_exceptions.items():
                _LOGGER.error(
                    'Uncovered exception in "%s" thread of RobCoRobotAPI. Raising it in main '
                    'thread', k)
                raise exc
            for k, exc in k8s_rc.thread_exceptions.items():
                _LOGGER.error(
                    'Uncovered exception in "%s" thread of RobotConfigurationController. Raising '
                    'it in main thread', k)
                raise exc
            # Sleep maximum 1.0 second
            loop_control.sleep(1.0)
    except KeyboardInterrupt:
        _LOGGER.info('Keyboard interrupt - terminating')
    except SystemExit:
        _LOGGER.info('System exit - terminating')
    finally:
        # Stop K8S CR watchers
        _LOGGER.info('Stopping K8S CR watchers')
        k8s_rb.stop_watcher()
        k8s_rc.stop_watcher()
        # Shutdown threadpool executor
        robotsync.executor.shutdown()
if __name__ == '__main__':
    # Create root logger if running as main program
    ROOT_LOGGER = logging.getLogger()
    ROOT_LOGGER.setLevel(logging.INFO)
    # Create console handler and set level to info
    CH = logging.StreamHandler()
    CH.setLevel(logging.INFO)
    # Create formatter
    FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Add formatter to ch
    CH.setFormatter(FORMATTER)
    # Add ch to logger
    ROOT_LOGGER.addHandler(CH)
    # Run robot master
    try:
        run_robotconfigurator()
    except Exception:  # pylint: disable=broad-except
        # Deliberate broad catch at the top-level process boundary:
        # log the full traceback, then exit with a descriptive message.
        EXC_INFO = sys.exc_info()
        _LOGGER.critical(
            'Unexpected error "%s" - "%s" - TRACEBACK: %s', EXC_INFO[0], EXC_INFO[1],
            traceback.format_exception(*EXC_INFO))
        sys.exit('Application terminated with exception: "{}" - "{}"'.format(
            EXC_INFO[0], EXC_INFO[1]))
fe0a42ffd316cd292e323db6162852aaf54d8093 | 37 | py | Python | website/addons/forward/views/__init__.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | website/addons/forward/views/__init__.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | website/addons/forward/views/__init__.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 1 | 2019-07-16T00:14:49.000Z | 2019-07-16T00:14:49.000Z | from . import config, widget # noqa
| 18.5 | 36 | 0.702703 |
fe0ae5c8386d6c3d6f937a81ff9888fef7e3e87d | 215 | py | Python | hwtest/automated/usb3_test.py | crvallance/wlanpi-hwtest | 8858ef6e8fa78767238b968b121b4d5ab2155701 | [
"MIT"
] | null | null | null | hwtest/automated/usb3_test.py | crvallance/wlanpi-hwtest | 8858ef6e8fa78767238b968b121b4d5ab2155701 | [
"MIT"
] | null | null | null | hwtest/automated/usb3_test.py | crvallance/wlanpi-hwtest | 8858ef6e8fa78767238b968b121b4d5ab2155701 | [
"MIT"
] | null | null | null | from hwtest.shell_utils import run_command
def test_linux_usb3hub():
    """
    Verify the Linux Foundation USB 3.0 root hub (ID 1d6b:0003) is
    present in the `lsusb` output.
    """
    lsusb_output = run_command(["lsusb"])
    assert "1d6b:0003" in lsusb_output
fe0d4c9278280b1296bb8358bef8f6502e5d0540 | 82,820 | py | Python | ninjabackend.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | ninjabackend.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | ninjabackend.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import backends
import environment, mesonlib
import build
import mlog
import dependencies
from mesonlib import File
from meson_install import InstallData
from build import InvalidArguments
from coredata import MesonException
import os, sys, pickle, re
import subprocess, shutil
# Platform-dependent shell invocation settings: cmd.exe quotes with
# double quotes and needs commands wrapped in 'cmd /c', while POSIX
# shells use single quotes and no wrapper.
if mesonlib.is_windows():
    quote_char = '"'
    execute_wrapper = 'cmd /c'
else:
    quote_char = "'"
    execute_wrapper = ''
| 45.630854 | 176 | 0.589942 |
fe0def896199f4c5334b061cf90749e18fdcc0bd | 223 | py | Python | tests/strategies/common/test_cputime.py | y-tetsu/othello | 73eabfe22d6b44bbfa0b436e6287e3e7356620f4 | [
"MIT"
] | 10 | 2020-07-24T22:04:51.000Z | 2022-03-25T06:09:48.000Z | tests/strategies/common/test_cputime.py | y-tetsu/othello | 73eabfe22d6b44bbfa0b436e6287e3e7356620f4 | [
"MIT"
] | 12 | 2021-04-30T09:53:18.000Z | 2022-02-25T04:16:02.000Z | tests/strategies/common/test_cputime.py | y-tetsu/othello | 73eabfe22d6b44bbfa0b436e6287e3e7356620f4 | [
"MIT"
] | 1 | 2021-11-25T13:12:32.000Z | 2021-11-25T13:12:32.000Z | """Tests of cputime.py
"""
import unittest
from reversi.strategies.common import CPU_TIME
| 15.928571 | 46 | 0.690583 |
fe0ede7a40a877fbc5bae0945b61462c0561098f | 5,249 | py | Python | experiments/cifar10_recon.py | coopersigrist/RecurrentNeuralSystem- | bd5bb680ec7f2166547709195f7bb3cd52cca5e8 | [
"MIT"
] | 3 | 2021-03-03T20:08:34.000Z | 2021-03-19T15:27:58.000Z | experiments/cifar10_recon.py | coopersigrist/RecurrentNeuralSystem- | bd5bb680ec7f2166547709195f7bb3cd52cca5e8 | [
"MIT"
] | null | null | null | experiments/cifar10_recon.py | coopersigrist/RecurrentNeuralSystem- | bd5bb680ec7f2166547709195f7bb3cd52cca5e8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""ReNS experiments - CIFAR10
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1byZ4xTfCK2x1Rhkxpl-Vv4sqA-bo4bis
# SETUP
"""
#@title Insatlling Pyorch
# !pip install torch
# !pip install torchvision
#@title Import Dependencies
import numpy as np
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from tqdm import tqdm
from typing import Optional, Union, Tuple, List, Sequence, Iterable
import math
from scipy.spatial.distance import euclidean
from torch.nn.modules.utils import _pair
from torchvision import models
from sklearn.metrics import jaccard_score
import matplotlib.pyplot as plt
from models.models import RegularAutoEncoder, ModulatedAutoEncoder, PseudoRecAutoEncoder
"""# TRAINING"""
# Training hyperparameters
batch_size = 32
num_epochs = 5
# Convert images to tensors and normalize each RGB channel with mean/std 0.5
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Load CIFAR10 data.  (Original comment said MNIST; this is CIFAR10.)
train_data = dsets.CIFAR10(root = './data', train = True,
                           transform = transform, download = True)
test_data = dsets.CIFAR10(root = './data', train = False,
                          transform = transform)
train_gen = torch.utils.data.DataLoader(dataset = train_data,
                                        batch_size = batch_size,
                                        shuffle = True)
test_gen = torch.utils.data.DataLoader(dataset = test_data,
                                       batch_size = batch_size,
                                       shuffle = False)
# Size of the bottleneck ("reflexor") layer shared by all three autoencoders
reflexor_size = 500
image_size = 32
channels = 3
# net = recurrentLayer(784, 784, 10, 5, 10, 0)
# The three models under comparison: plain, modulated, and pseudo-recurrent
# autoencoders, all mapping flattened images (3*32*32) to themselves
net1 = RegularAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
net2 = ModulatedAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
net3 = PseudoRecAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
lr = .0001 # size of step
loss_function = nn.MSELoss()
# Unnormalize the image to display it
# Commented out IPython magic to ensure Python compatibility.
train_losses = [[],[],[]]
test_losses = [[],[],[]]
real_imgs = [[],[],[]]
reconstructed_imgs = [[],[],[]]
param_counts = np.ones(3)
steps = [[],[],[]]
for num, net in enumerate([net1, net2, net3]):
optimizer = torch.optim.Adam( net.parameters(), lr=lr)
param_counts[num] = (sum(p.numel() for p in net.parameters() if p.requires_grad))
for epoch in range(num_epochs):
for i ,(images,labels) in enumerate(train_gen):
#images = Variable(images.view(-1,28*28))
labels = Variable(images.view(-1,3 * image_size ** 2))
optimizer.zero_grad()
outputs = net(images)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 300 == 0:
temp_loss = loss.item()
print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_data)//batch_size, temp_loss))
dupe = Variable(outputs[0].data, requires_grad=False)
# plt.imshow(img_fix(images[0]))
# plt.show()
# plt.imshow(img_fix(dupe.view(3, image_size, image_size)))
# plt.show()
train_losses[num].append(temp_loss)
steps[num].append((50000 * epoch) + ((i + 1) * batch_size))
real_imgs[num].append(img_fix(images[0]))
reconstructed_imgs[num].append(img_fix(dupe.view(3, image_size, image_size)))
# Test Data
score = 0
total = 0
for images,labels in test_gen:
#images = Variable(images.view(-1,784))
output = net(images)
score += loss_function(output, images.view(-1, 3 * image_size ** 2)).item()
test_losses[num].append((score))
plt.plot(steps[0], train_losses[0], label= "Baseline")
plt.plot(steps[1], train_losses[1], label= "Modulated")
plt.plot(steps[2], train_losses[2], label= "Recurrent with Modulation")
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.legend()
plt.show()
plt.plot(steps[0], test_losses[0], label= "Baseline")
plt.plot(steps[1], test_losses[1], label= "Modulated")
plt.plot(steps[2], test_losses[2], label= "Recurrent with Modulation")
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Testing loss history')
plt.legend()
plt.show()
for num,count in enumerate(param_counts):
param_counts[num] /= 1000
plt.bar(["Base", "Modulated", "ReNS"], param_counts)
plt.xlabel('Model')
plt.ylabel('# of thousands of Parameters')
plt.show()
from mpl_toolkits.axes_grid1 import ImageGrid

# For each model, show the sampled originals (top row) above their
# reconstructions (bottom row) in a single image grid
num_samples = len(real_imgs[0])
for model_idx in [0, 1, 2]:
    fig = plt.figure(figsize=(20., 20.))
    axes = ImageGrid(
        fig,
        111,  # same placement as subplot(111)
        nrows_ncols=(2, num_samples),  # originals over reconstructions
        axes_pad=0.1,  # pad between axes, in inches
    )
    panels = real_imgs[model_idx] + reconstructed_imgs[model_idx]
    # Iterating over the grid yields its Axes in row-major order
    for ax, im in zip(axes, panels):
        ax.imshow(im)
        ax.axis("off")
    plt.show()
| 29.994286 | 98 | 0.649076 |
fe0f496060ed3aa777376eab607ac140da6babfa | 1,400 | py | Python | horizon/forms/__init__.py | ameoba/horizon | ff9e367c98a8bb79f10914abffaaa04b0a461819 | [
"Apache-2.0"
] | 2 | 2019-12-29T09:20:13.000Z | 2020-01-01T13:12:34.000Z | horizon/forms/__init__.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | [
"Apache-2.0"
] | 10 | 2015-02-19T20:27:04.000Z | 2017-05-15T15:04:32.000Z | horizon/forms/__init__.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | [
"Apache-2.0"
] | 4 | 2015-05-05T08:17:28.000Z | 2020-02-05T10:47:06.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# FIXME(gabriel): Legacy imports for API compatibility.
from django.forms import * # noqa
from django.forms import widgets
# Convenience imports for public API components.
from horizon.forms.base import DateForm # noqa
from horizon.forms.base import SelfHandlingForm # noqa
from horizon.forms.base import SelfHandlingMixin # noqa
from horizon.forms.fields import DynamicChoiceField # noqa
from horizon.forms.fields import DynamicTypedChoiceField # noqa
from horizon.forms.views import ModalFormMixin # noqa
from horizon.forms.views import ModalFormView # noqa
assert widgets
assert SelfHandlingMixin
assert SelfHandlingForm
assert DateForm
assert ModalFormView
assert ModalFormMixin
assert DynamicTypedChoiceField
assert DynamicChoiceField
| 36.842105 | 78 | 0.784286 |
fe10f333391851cb33d5c6c2715480481922b0d0 | 2,993 | py | Python | heat/tests/test_rpc_listener_client.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 1 | 2015-12-18T21:46:55.000Z | 2015-12-18T21:46:55.000Z | heat/tests/test_rpc_listener_client.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | heat/tests/test_rpc_listener_client.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 3 | 2018-07-19T17:43:37.000Z | 2019-11-15T22:13:30.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import oslo_messaging as messaging
from heat.rpc import api as rpc_api
from heat.rpc import listener_client as rpc_client
from heat.tests import common
| 42.15493 | 74 | 0.687604 |
fe116c1174a46647c502098395333cc909588b1c | 684 | py | Python | amadeus/travel/trip_parser_jobs/_status.py | akshitsingla/amadeus-python | d8f3595e556b674998156f98d8a318045bb4c21c | [
"MIT"
] | 125 | 2018-04-09T07:27:24.000Z | 2022-02-22T11:45:20.000Z | amadeus/travel/trip_parser_jobs/_status.py | akshitsingla/amadeus-python | d8f3595e556b674998156f98d8a318045bb4c21c | [
"MIT"
] | 58 | 2018-03-29T14:58:01.000Z | 2022-03-17T10:18:07.000Z | amadeus/travel/trip_parser_jobs/_status.py | akshitsingla/amadeus-python | d8f3595e556b674998156f98d8a318045bb4c21c | [
"MIT"
] | 58 | 2018-04-06T10:56:20.000Z | 2022-03-04T01:23:24.000Z | from amadeus.client.decorator import Decorator
| 28.5 | 76 | 0.627193 |
fe12421e5a8c03bfd1fbb0c021c5255e880a14d5 | 7,737 | py | Python | tools/third_party/iniconfig/testing/test_iniconfig.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 2,479 | 2018-05-28T14:51:29.000Z | 2022-03-30T14:41:18.000Z | tools/third_party/iniconfig/testing/test_iniconfig.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 7,642 | 2018-05-28T09:38:03.000Z | 2022-03-31T20:55:48.000Z | tools/third_party/iniconfig/testing/test_iniconfig.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 1,303 | 2018-05-29T14:50:02.000Z | 2022-03-30T17:30:42.000Z | import py
import pytest
from iniconfig import IniConfig, ParseError, __all__ as ALL
from iniconfig import iscommentline
from textwrap import dedent
check_tokens = {
'section': (
'[section]',
[(0, 'section', None, None)]
),
'value': (
'value = 1',
[(0, None, 'value', '1')]
),
'value in section': (
'[section]\nvalue=1',
[(0, 'section', None, None), (1, 'section', 'value', '1')]
),
'value with continuation': (
'names =\n Alice\n Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'value with aligned continuation': (
'names = Alice\n'
' Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'blank line': (
'[section]\n\nvalue=1',
[(0, 'section', None, None), (2, 'section', 'value', '1')]
),
'comment': (
'# comment',
[]
),
'comment on value': (
'value = 1',
[(0, None, 'value', '1')]
),
'comment on section': (
'[section] #comment',
[(0, 'section', None, None)]
),
'comment2': (
'; comment',
[]
),
'comment2 on section': (
'[section] ;comment',
[(0, 'section', None, None)]
),
'pseudo section syntax in value': (
'name = value []',
[(0, None, 'name', 'value []')]
),
'assignment in value': (
'value = x = 3',
[(0, None, 'value', 'x = 3')]
),
'use of colon for name-values': (
'name: y',
[(0, None, 'name', 'y')]
),
'use of colon without space': (
'value:y=5',
[(0, None, 'value', 'y=5')]
),
'equality gets precedence': (
'value=xyz:5',
[(0, None, 'value', 'xyz:5')]
),
}
def parse(input):
    """Tokenize *input* with ``IniConfig._parse`` without touching disk."""
    # _parse only reads self.path (for error messages), so a bare instance
    # created without running __init__ is sufficient for testing
    cfg = object.__new__(IniConfig)
    cfg.path = "sample"
    lines = input.splitlines(True)
    return cfg._parse(lines)
def parse_a_error(input):
    """Parse *input*, asserting it raises ParseError; return the ExceptionInfo."""
    # pytest.raises is the same callable as the legacy py.test.raises alias
    return pytest.raises(ParseError, parse, input)
def test_tokenize(input, expected):
    # Presumably parametrized over check_tokens (decorator not visible here):
    # each entry maps raw ini text to its expected token tuples
    assert parse(input) == expected
def test_parse_empty():
    """An empty document produces no tokens and no sections."""
    assert not parse("")
    empty = IniConfig("sample", "")
    assert not empty.sections
def test_ParseError():
    # lineno is stored zero-based but rendered one-based in str()
    err = ParseError("filename", 0, "hello")
    assert str(err) == "filename:1: hello"
def test_continuation_needs_perceeding_token():
    # An indented continuation line with no preceding entry fails on line 0
    assert parse_a_error(' Foo').value.lineno == 0
def test_continuation_cant_be_after_section():
    # A continuation directly after a section header (line 1) is invalid
    assert parse_a_error('[section]\n Foo').value.lineno == 1
def test_section_cant_be_empty():
    # "[]" has no section name and must be rejected on line 0
    assert parse_a_error('[]').value.lineno == 0
| 24.561905 | 73 | 0.586145 |
fe13276650bb177fc42299abc71b473c1a0414dc | 3,586 | py | Python | jskparser/jskparser/util.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 15 | 2015-12-15T18:33:50.000Z | 2021-09-29T11:48:54.000Z | jskparser/jskparser/util.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 11 | 2015-11-16T22:14:58.000Z | 2021-09-23T05:28:40.000Z | jskparser/jskparser/util.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 8 | 2015-11-16T21:50:08.000Z | 2021-03-23T15:15:34.000Z | import os
from subprocess import call
from . import glob2
pwd = os.path.dirname(__file__)
"""
handling javajskparser AST
"""
| 54.333333 | 209 | 0.663971 |
fe133101724c39453da53bbd1a90715fd62fd7e1 | 24,301 | py | Python | fiftyone/core/patches.py | SNeugber/fiftyone | a50be47bbbf189e4bbdcd631b93c4c9cbf41c6b7 | [
"Apache-2.0"
] | null | null | null | fiftyone/core/patches.py | SNeugber/fiftyone | a50be47bbbf189e4bbdcd631b93c4c9cbf41c6b7 | [
"Apache-2.0"
] | null | null | null | fiftyone/core/patches.py | SNeugber/fiftyone | a50be47bbbf189e4bbdcd631b93c4c9cbf41c6b7 | [
"Apache-2.0"
] | null | null | null | """
Patches views.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import eta.core.utils as etau
import fiftyone.core.aggregations as foa
import fiftyone.core.dataset as fod
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
import fiftyone.core.sample as fos
import fiftyone.core.view as fov
_SINGLE_TYPES_MAP = {
fol.Detections: fol.Detection,
fol.Polylines: fol.Polyline,
}
_PATCHES_TYPES = (fol.Detections, fol.Polylines)
_NO_MATCH_ID = ""
def save(self, fields=None):
    """Overwrites the object patches in the source dataset with the
    contents of the view.
    If this view contains any additional fields that were not extracted
    from the source dataset, these fields are not saved.
    .. warning::
        This will permanently delete any omitted, filtered, or otherwise
        modified patches from the source dataset.
    Args:
        fields (None): an optional field or list of fields to save. If
            specified, only these fields are overwritten
    """
    # Normalize a single field name to a list
    if etau.is_str(fields):
        fields = [fields]
    super().save(fields=fields)
    # Only label fields that were extracted from the source dataset can be
    # synced back; silently drop any others
    if fields is None:
        fields = self._label_fields
    else:
        fields = [l for l in fields if l in self._label_fields]
    #
    # IMPORTANT: we sync the contents of `_patches_dataset`, not `self`
    # here because the `save()` call above updated the dataset, which means
    # this view may no longer have the same contents (e.g., if `skip()` is
    # involved)
    #
    # NOTE(review): `_sync_source_root` is defined outside this chunk -- confirm
    self._sync_source_root(fields)
def reload(self):
    """Reloads the view from the source dataset on disk, regenerating the
    backing patches dataset.
    """
    self._root_dataset.reload()
    #
    # Regenerate the patches dataset
    #
    # This assumes that calling `load_view()` when the current patches
    # dataset has been deleted will cause a new one to be generated
    #
    self._patches_dataset.delete()
    _view = self._patches_stage.load_view(self._source_collection)
    self._patches_dataset = _view._patches_dataset
def _sync_source_sample(self, sample):
    """Syncs every label field of the given patch sample back to the source
    dataset, one field at a time.
    """
    for field in self._label_fields:
        self._sync_source_sample_field(sample, field)
class PatchesView(_PatchesView):
    """A :class:`fiftyone.core.view.DatasetView` of patches from a
    :class:`fiftyone.core.dataset.Dataset`.
    Patches views contain an ordered collection of patch samples, each of which
    contains a subset of a sample of the parent dataset corresponding to a
    single object or logical grouping of objects.
    Patches retrieved from patches views are returned as :class:`PatchView`
    objects.
    Args:
        source_collection: the
            :class:`fiftyone.core.collections.SampleCollection` from which this
            view was created
        patches_stage: the :class:`fiftyone.core.stages.ToPatches` stage that
            defines how the patches were extracted
        patches_dataset: the :class:`fiftyone.core.dataset.Dataset` that serves
            the patches in this view
    """
    # Samples served from this view are wrapped in PatchView
    _SAMPLE_CLS = PatchView
def make_patches_dataset(
    sample_collection, field, keep_label_lists=False, name=None
):
    """Creates a dataset that contains one sample per object patch in the
    specified field of the collection.
    Fields other than ``field`` and the default sample fields will not be
    included in the returned dataset. A ``sample_id`` field will be added that
    records the sample ID from which each patch was taken.
    Args:
        sample_collection: a
            :class:`fiftyone.core.collections.SampleCollection`
        field: the patches field, which must be of type
            :class:`fiftyone.core.labels.Detections` or
            :class:`fiftyone.core.labels.Polylines`
        keep_label_lists (False): whether to store the patches in label list
            fields of the same type as the input collection rather than using
            their single label variants
        name (None): a name for the returned dataset
    Returns:
        a :class:`fiftyone.core.dataset.Dataset`
    """
    # Patches keep the list label type (e.g. Detections) only when requested;
    # otherwise each patch stores the single-label variant (e.g. Detection)
    if keep_label_lists:
        field_type = sample_collection._get_label_field_type(field)
    else:
        field_type = _get_single_label_field_type(sample_collection, field)
    # Build an empty image dataset with the patches schema
    dataset = fod.Dataset(name, _patches=True)
    dataset.media_type = fom.IMAGE
    dataset.add_sample_field(
        "sample_id", fof.ObjectIdField, db_field="_sample_id"
    )
    dataset.add_sample_field(
        field, fof.EmbeddedDocumentField, embedded_doc_type=field_type
    )
    # Materialize one sample per patch into the new dataset
    patches_view = _make_patches_view(
        sample_collection, field, keep_label_lists=keep_label_lists
    )
    _write_samples(dataset, patches_view)
    return dataset
def _get_single_label_field_type(sample_collection, field):
    """Return the single-label class corresponding to *field*'s list type.
    Raises:
        ValueError: if the field's label type has no single-label variant
    """
    list_type = sample_collection._get_label_field_type(field)
    try:
        return _SINGLE_TYPES_MAP[list_type]
    except KeyError:
        raise ValueError("Unsupported label field type %s" % list_type)
def make_evaluation_dataset(sample_collection, eval_key, name=None):
    """Creates a dataset based on the results of the evaluation with the given
    key that contains one sample for each true positive, false positive, and
    false negative example in the input collection, respectively.
    True positive examples will result in samples with both their ground truth
    and predicted fields populated, while false positive/negative examples will
    only have one of their corresponding predicted/ground truth fields
    populated, respectively.
    If multiple predictions are matched to a ground truth object (e.g., if the
    evaluation protocol includes a crowd attribute), then all matched
    predictions will be stored in the single sample along with the ground truth
    object.
    The returned dataset will also have top-level ``type`` and ``iou`` fields
    populated based on the evaluation results for that example, as well as a
    ``sample_id`` field recording the sample ID of the example, and a ``crowd``
    field if the evaluation protocol defines a crowd attribute.
    .. note::
        The returned dataset will contain patches for the contents of the input
        collection, which may differ from the view on which the ``eval_key``
        evaluation was performed. This may exclude some labels that were
        evaluated and/or include labels that were not evaluated.
        If you would like to see patches for the exact view on which an
        evaluation was performed, first call
        :meth:`load_evaluation_view() <fiftyone.core.collections.SampleCollection.load_evaluation_view>`
        to load the view and then convert to patches.
    Args:
        sample_collection: a
            :class:`fiftyone.core.collections.SampleCollection`
        eval_key: an evaluation key that corresponds to the evaluation of
            ground truth/predicted fields that are of type
            :class:`fiftyone.core.labels.Detections` or
            :class:`fiftyone.core.labels.Polylines`
        name (None): a name for the returned dataset
    Returns:
        a :class:`fiftyone.core.dataset.Dataset`
    """
    # Parse evaluation info
    eval_info = sample_collection.get_evaluation_info(eval_key)
    pred_field = eval_info.config.pred_field
    gt_field = eval_info.config.gt_field
    # Not every evaluation protocol defines a crowd attribute
    if hasattr(eval_info.config, "iscrowd"):
        crowd_attr = eval_info.config.iscrowd
    else:
        crowd_attr = None
    pred_type = sample_collection._get_label_field_type(pred_field)
    gt_type = sample_collection._get_label_field_type(gt_field)
    # Setup dataset with correct schema
    dataset = fod.Dataset(name, _patches=True)
    dataset.media_type = fom.IMAGE
    dataset.add_sample_field(
        pred_field, fof.EmbeddedDocumentField, embedded_doc_type=pred_type
    )
    dataset.add_sample_field(
        gt_field, fof.EmbeddedDocumentField, embedded_doc_type=gt_type
    )
    dataset.add_sample_field(
        "sample_id", fof.ObjectIdField, db_field="_sample_id"
    )
    dataset.add_sample_field("type", fof.StringField)
    dataset.add_sample_field("iou", fof.FloatField)
    if crowd_attr is not None:
        dataset.add_sample_field("crowd", fof.BooleanField)
    # Add ground truth patches (true positives and false negatives)
    gt_view = _make_eval_view(
        sample_collection, eval_key, gt_field, crowd_attr=crowd_attr
    )
    _write_samples(dataset, gt_view)
    # Merge matched predictions into their ground truth samples
    _merge_matched_labels(dataset, sample_collection, eval_key, pred_field)
    # Add unmatched predictions (false positives) as new samples
    unmatched_pred_view = _make_eval_view(
        sample_collection, eval_key, pred_field, skip_matched=True
    )
    _add_samples(dataset, unmatched_pred_view)
    return dataset
def _make_patches_view(sample_collection, field, keep_label_lists=False):
    """Returns a view with one (patch) sample per label in ``field``.
    Each patch sample keeps only the default sample fields plus the label,
    records its source sample's ID in ``_sample_id``, and adopts the label's
    ID as its own ``_id``.
    """
    if sample_collection._is_frames:
        raise ValueError(
            "Creating patches views into frame views is not yet supported"
        )
    if sample_collection._is_frame_field(field):
        raise ValueError(
            "Frame label patches cannot be directly extracted; you must first "
            "convert your video dataset to frames via `to_frames()`"
        )
    # Only list-type label fields (Detections, Polylines) can be unwound
    label_type = sample_collection._get_label_field_type(field)
    if issubclass(label_type, _PATCHES_TYPES):
        list_field = field + "." + label_type._LABEL_LIST_FIELD
    else:
        raise ValueError(
            "Invalid label field type %s. Extracting patches is only "
            "supported for the following types: %s"
            % (label_type, _PATCHES_TYPES)
        )
    pipeline = [
        # Keep only default sample fields + the label list of interest
        {
            "$project": {
                "_id": True,
                "_sample_id": "$_id",
                "_media_type": True,
                "filepath": True,
                "metadata": True,
                "tags": True,
                field + "._cls": True,
                list_field: True,
            }
        },
        # One document per label in the list
        {"$unwind": "$" + list_field},
        {"$set": {"_rand": {"$rand": {}}}},
        # Each patch sample takes its label's ID
        {"$set": {"_id": "$" + list_field + "._id"}},
    ]
    # Store the label either back in a one-element list field or flattened
    # into its single-label variant
    if keep_label_lists:
        pipeline.append({"$set": {list_field: ["$" + list_field]}})
    else:
        pipeline.append({"$set": {field: "$" + list_field}})
    return sample_collection.mongo(pipeline)
def _make_eval_view(
    sample_collection, eval_key, field, skip_matched=False, crowd_attr=None
):
    """Returns a patches view for ``field`` annotated with the ``eval_key``
    evaluation results.
    Populates top-level ``type`` and ``iou`` fields from the per-label
    evaluation attributes, optionally drops labels that were matched
    (``skip_matched``), and optionally extracts a boolean ``crowd`` field
    from ``crowd_attr``.
    """
    # Per-label attribute paths written by the evaluation
    eval_type = field + "." + eval_key
    eval_id = field + "." + eval_key + "_id"
    eval_iou = field + "." + eval_key + "_iou"
    view = _make_patches_view(sample_collection, field)
    if skip_matched:
        # Keep only labels whose match ID is missing or the no-match sentinel
        view = view.mongo(
            [
                {
                    "$match": {
                        "$expr": {
                            "$or": [
                                {"$eq": ["$" + eval_id, _NO_MATCH_ID]},
                                {"$not": {"$gt": ["$" + eval_id, None]}},
                            ]
                        }
                    }
                }
            ]
        )
    # Surface the evaluation type (tp/fp/fn) and IoU as top-level fields
    view = view.mongo(
        [{"$set": {"type": "$" + eval_type, "iou": "$" + eval_iou}}]
    )
    if crowd_attr is not None:
        # The crowd flag may live directly on the label or in its (legacy)
        # Attributes container; prefer the direct attribute
        crowd_path1 = "$" + field + "." + crowd_attr
        # @todo remove Attributes usage
        crowd_path2 = "$" + field + ".attributes." + crowd_attr + ".value"
        view = view.mongo(
            [
                {
                    "$set": {
                        "crowd": {
                            "$cond": {
                                "if": {"$gt": [crowd_path1, None]},
                                "then": {"$toBool": crowd_path1},
                                "else": {
                                    "$cond": {
                                        "if": {"$gt": [crowd_path2, None]},
                                        "then": {"$toBool": crowd_path2},
                                        "else": None,
                                    }
                                },
                            }
                        }
                    }
                }
            ]
        )
    # Re-wrap each single label in its list-label container
    return _upgrade_labels(view, field)
def _upgrade_labels(view, field):
    """Re-wrap the single label stored in ``field`` into its list-label
    container (e.g. a Detection becomes a one-element Detections).
    """
    label_cls = view._get_label_field_type(field)
    scratch = "_" + field
    # Stash the single label in a scratch field, then rebuild `field` as a
    # list-label document holding that one label
    stages = [
        {"$set": {scratch: "$" + field}},
        {"$unset": field},
        {
            "$set": {
                field: {
                    "_cls": label_cls.__name__,
                    label_cls._LABEL_LIST_FIELD: ["$" + scratch],
                }
            }
        },
        {"$unset": scratch},
    ]
    return view.mongo(stages)
def _merge_matched_labels(dataset, src_collection, eval_key, field):
    """Merges all predicted labels in ``field`` that were matched by the
    ``eval_key`` evaluation into their corresponding ground truth samples in
    ``dataset``, grouped by match ID.
    """
    field_type = src_collection._get_label_field_type(field)
    list_field = field + "." + field_type._LABEL_LIST_FIELD
    # Per-label path of the match ID written by the evaluation
    eval_id = eval_key + "_id"
    eval_field = list_field + "." + eval_id
    pipeline = src_collection._pipeline(detach_frames=True)
    pipeline.extend(
        [
            {"$project": {list_field: True}},
            # One document per predicted label
            {"$unwind": "$" + list_field},
            # Keep only labels with a real (non-sentinel) match ID
            {
                "$match": {
                    "$expr": {
                        "$and": [
                            {"$gt": ["$" + eval_field, None]},
                            {"$ne": ["$" + eval_field, _NO_MATCH_ID]},
                        ]
                    }
                }
            },
            # Group all predictions matched to the same ground truth label;
            # the group ID is the match ID, which equals the ID of the
            # corresponding ground truth patch sample in `dataset`
            {
                "$group": {
                    "_id": {"$toObjectId": "$" + eval_field},
                    "_labels": {"$push": "$" + list_field},
                }
            },
            # Rebuild a list-label document from the grouped predictions
            {
                "$project": {
                    field: {
                        "_cls": field_type.__name__,
                        field_type._LABEL_LIST_FIELD: "$_labels",
                    }
                },
            },
            # Merge into the existing patch samples by ID; predictions with
            # no corresponding ground truth sample are discarded
            {
                "$merge": {
                    "into": dataset._sample_collection_name,
                    "on": "_id",
                    "whenMatched": "merge",
                    "whenNotMatched": "discard",
                }
            },
        ]
    )
    src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
def _write_samples(dataset, src_collection):
    """Materialize the samples of *src_collection* into *dataset* via $out,
    replacing the destination collection's contents.
    """
    stages = src_collection._pipeline(detach_frames=True)
    stages += [{"$out": dataset._sample_collection_name}]
    src_collection._dataset._aggregate(pipeline=stages, attach_frames=False)
def _add_samples(dataset, src_collection):
    """Insert the samples of *src_collection* into *dataset*, leaving any
    existing samples with the same IDs untouched.
    """
    merge_stage = {
        "$merge": {
            "into": dataset._sample_collection_name,
            "on": "_id",
            "whenMatched": "keepExisting",
            "whenNotMatched": "insert",
        }
    }
    stages = src_collection._pipeline(detach_frames=True)
    stages.append(merge_stage)
    src_collection._dataset._aggregate(pipeline=stages, attach_frames=False)
| 32.186755 | 104 | 0.607876 |
fe13f782ba0630659072cb056a27d408b76a7090 | 1,973 | py | Python | {{cookiecutter.repo_name}}/setup.py | ocesaulo/cookiecutter-ocn_sci | d41e826f56ba67cfde878ffc8188d497214a5f5b | [
"MIT"
] | null | null | null | {{cookiecutter.repo_name}}/setup.py | ocesaulo/cookiecutter-ocn_sci | d41e826f56ba67cfde878ffc8188d497214a5f5b | [
"MIT"
] | null | null | null | {{cookiecutter.repo_name}}/setup.py | ocesaulo/cookiecutter-ocn_sci | d41e826f56ba67cfde878ffc8188d497214a5f5b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
{%- set license_classifiers = {
'MIT license': 'License :: OSI Approved :: MIT License',
'BSD license': 'License :: OSI Approved :: BSD License',
'ISC license': 'License :: OSI Approved :: ISC License (ISCL)',
'Apache Software License 2.0': 'License :: OSI Approved :: Apache Software License',
'GNU General Public License v3': 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
} %}
# get the dependencies and installs
# NOTE(review): `path` and `here` are not defined in this file as rendered --
# this expects something like `from os import path` and
# `here = path.abspath(path.dirname(__file__))` earlier in the template; confirm.
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')
# Plain requirements install normally; VCS requirements (git+...) become
# dependency links with the "git+" prefix stripped
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]
# NOTE(review): the trailing comma makes this a one-element *tuple*, not a
# list -- likely unintended.  Also, setup() below passes `test_requirements`
# (singular "test"), which is never defined; one of the two names is a typo.
tests_requirements = ['pytest'],
setup_requirements = ['pytest-runner']
# NOTE(review): `requirements` is defined but never passed to setup()
requirements = [
    # package requirements go here
]
setup(
    name='{{ cookiecutter.repo_name }}',
    # NOTE(review): `__version__` is not defined in this file as rendered
    version=__version__,
    description="{{ cookiecutter.project_short_description }}",
    long_description=readme,
    author="{{ cookiecutter.full_name.replace('\"', '\\\"') }}",
    author_email='{{ cookiecutter.email }}',
    url='https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}',
    packages=find_packages(include=['{{ cookiecutter.repo_name }}'],
                           exclude=('docs', 'tests*',)),
    {%- if cookiecutter.open_source_license in license_classifiers %}
    license="{{ cookiecutter.open_source_license }}",
    {%- endif %}
    install_requires=install_requires,
    dependency_links=dependency_links,
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    keywords='{{ cookiecutter.repo_name }}',
    classifiers=[
        'Programming Language :: Python :: 3.6',
    ]
)
fe14a23d28223212d47c4b4e15846d9b001de45c | 6,153 | py | Python | src/zope/app/debug/debug.py | zopefoundation/zope.app.debug | 4f31e98f6a633f089bf132dd55cb3ead0270887b | [
"ZPL-2.1"
] | null | null | null | src/zope/app/debug/debug.py | zopefoundation/zope.app.debug | 4f31e98f6a633f089bf132dd55cb3ead0270887b | [
"ZPL-2.1"
] | 2 | 2017-05-08T10:46:20.000Z | 2021-02-02T07:16:49.000Z | src/zope/app/debug/debug.py | zopefoundation/zope.app.debug | 4f31e98f6a633f089bf132dd55cb3ead0270887b | [
"ZPL-2.1"
] | 1 | 2015-04-03T07:36:10.000Z | 2015-04-03T07:36:10.000Z | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Code to initialize the application server
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'
import base64
import time
import sys
from pdb import Pdb
from io import BytesIO
from zope.publisher.publish import publish as _publish, debug_call
from zope.publisher.browser import TestRequest, setDefaultSkin
from zope.app.publication.browser import BrowserPublication
from zope.app.appsetup import config, database
try:
from time import process_time as time_process_time # pragma: PY3
except ImportError:
from time import clock as time_process_time # pragma: PY2
try:
import urllib.parse as urllib # pragma: PY3
except ImportError:
import urllib # pragma: PY2
try:
text_type = unicode # pragma: PY2
except NameError:
text_type = str # pragma: PY3
| 29.868932 | 78 | 0.57988 |
fe1507ff94aad4e4172a286172e136314812d8b6 | 1,855 | py | Python | transfer_learning.py | terryli710/SIIM-ACR-Pneumothorax-Classification | 8b278a9885b71c919d7064b2df42863b53f7adf3 | [
"MIT"
] | null | null | null | transfer_learning.py | terryli710/SIIM-ACR-Pneumothorax-Classification | 8b278a9885b71c919d7064b2df42863b53f7adf3 | [
"MIT"
] | null | null | null | transfer_learning.py | terryli710/SIIM-ACR-Pneumothorax-Classification | 8b278a9885b71c919d7064b2df42863b53f7adf3 | [
"MIT"
] | 1 | 2020-05-14T06:16:12.000Z | 2020-05-14T06:16:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 22:42:54 2020
@author: mike
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import VGG16
from tensorflow.keras import layers
from sklearn.preprocessing import OneHotEncoder
from skimage.transform import resize
import matplotlib.pyplot as plt
# Each row of train_data is [label, 1024*1024 flattened grayscale pixels]
train_data = np.load("train_data.npy")
# Resize each 1024x1024 X-ray to 204x204 and replicate the grayscale channel
# three times so the images match VGG16's expected 3-channel input
x_data = np.zeros((210,204,204,3))
y_data = np.zeros(210)
for i in range(210):
    img = train_data[i,1:].reshape(1024,1024)
    img_resized = resize(img,(204,204))
    y_data[i] = train_data[i,0]
    x_data[i,:,:,0] = img_resized.astype(int)
    x_data[i,:,:,1] = img_resized.astype(int)
    x_data[i,:,:,2] = img_resized.astype(int)
x_train, x_test, y_train, y_test = train_test_split(
    x_data, y_data, test_size=0.2, random_state=42)
# One-hot encode the binary labels for the 2-unit softmax head
y_train = OneHotEncoder().fit_transform(y_train.reshape(-1,1)).toarray()
y_test = OneHotEncoder().fit_transform(y_test.reshape(-1,1)).toarray()
# Frozen VGG16 backbone; ImageNet weights loaded from a local .h5 file, no
# classification top
base_model = VGG16(include_top=False, weights='vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                   input_shape=(204, 204, 3))
base_model.trainable = False
# Small trainable classification head on top of the frozen features
inputs = tf.keras.Input(shape=(204, 204, 3))
x = base_model(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(256, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
outputs = tf.keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
model.summary()
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),loss="binary_crossentropy",metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=16, epochs=5)
pred = model.predict(x_train)
# Evaluate on the held-out split; evaluate() returns [loss, accuracy]
score = model.evaluate(x_test, y_test, verbose=0)
print(score[0],score[1])
fe15525a101c45bc65c1049e9b6ece9e4cd29f69 | 2,158 | py | Python | core/tests/test_polyflow/test_workflows/test_hyperband.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | core/tests/test_polyflow/test_workflows/test_hyperband.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | core/tests/test_polyflow/test_workflows/test_hyperband.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from marshmallow.exceptions import ValidationError
from tests.utils import BaseTestCase, assert_equal_dict
from polyaxon.polyflow.matrix import V1Hyperband
from polyaxon.polyflow.optimization import V1Optimization, V1OptimizationMetric
| 35.377049 | 79 | 0.672845 |
fe1823b5cc5e17b94ed66896e05441088fc1ee56 | 1,503 | py | Python | Class Work oop.py | fatimatswanya/fatimaCSC102 | cab70bd696d39a9e16bcb57e0180e872be4f49bc | [
"MIT"
] | null | null | null | Class Work oop.py | fatimatswanya/fatimaCSC102 | cab70bd696d39a9e16bcb57e0180e872be4f49bc | [
"MIT"
] | null | null | null | Class Work oop.py | fatimatswanya/fatimaCSC102 | cab70bd696d39a9e16bcb57e0180e872be4f49bc | [
"MIT"
] | null | null | null |
# Demo of the Student class defined earlier in this file: construct a
# student, then exercise the getter/setter pair.
# NOTE(review): "studendt1" looks like a typo for "student1".
studendt1 = Student('James Kaka', '021074', 'M','Amethyst','16', '49')
print(studendt1.getName())
studendt1.setName('James Gaga')  # mutate the name, then print to confirm
print(studendt1.getName())
Student.PAUNanthem() | 26.368421 | 93 | 0.632069 |
fe185aaa73619017a36f547b25642264993ebd15 | 1,820 | py | Python | clickhouse_sqlalchemy/drivers/reflection.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | [
"MIT"
] | null | null | null | clickhouse_sqlalchemy/drivers/reflection.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | [
"MIT"
] | null | null | null | clickhouse_sqlalchemy/drivers/reflection.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | [
"MIT"
] | null | null | null | from sqlalchemy.engine import reflection
from clickhouse_sqlalchemy import Table, engines
| 33.703704 | 78 | 0.621978 |
fe18f53bb174876b9174543e0887f93aad3f8c21 | 6,686 | py | Python | tests/test_disque.py | abdul-khalid/pydisque | a9b5caa6dac0621a0174d168f4a04c88d0e2f8b5 | [
"MIT"
] | 1 | 2019-02-28T09:48:22.000Z | 2019-02-28T09:48:22.000Z | tests/test_disque.py | abdul-khalid/pydisque | a9b5caa6dac0621a0174d168f4a04c88d0e2f8b5 | [
"MIT"
] | null | null | null | tests/test_disque.py | abdul-khalid/pydisque | a9b5caa6dac0621a0174d168f4a04c88d0e2f8b5 | [
"MIT"
] | null | null | null | """
Unit Tests for the pydisque module.
Currently, most of these tests require a fresh instance of
Disque to be valid and pass.
"""
import unittest
import json
import time
import random
import six
from pydisque.client import Client
from redis.exceptions import ResponseError
# Allow running this test module directly; test runners import it instead.
if __name__ == '__main__':
    unittest.main()
| 28.695279 | 76 | 0.588095 |
fe190819e431106bd53c08a681b3911ad9502e88 | 6,289 | py | Python | src/runner.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | [
"MIT"
] | 1 | 2020-11-17T16:09:13.000Z | 2020-11-17T16:09:13.000Z | src/runner.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | [
"MIT"
] | null | null | null | src/runner.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | [
"MIT"
] | 4 | 2019-07-05T02:03:02.000Z | 2022-01-21T22:12:16.000Z | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ runner.py ]
# Synopsis [ main program that runs the 'Naive Bayes' and 'Decision Tree' training / testing ]
# Author [ Ting-Wei Liu (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import csv
import argparse
import numpy as np
from data_loader import data_loader
from classifiers import naive_bayes_runner
from classifiers import decision_tree_runner
##################
# CONFIGURATIONS #
##################
##################
# ERROR HANDLING #
##################
#################
# OUTPUT WRITER #
#################
########
# MAIN #
########
"""
main function
"""
if __name__ == '__main__':
main()
| 38.582822 | 151 | 0.690253 |
fe195c652a959304ac79843bfd7f33439351fd89 | 7,393 | py | Python | igibson/metrics/agent.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | igibson/metrics/agent.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | igibson/metrics/agent.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | import copy
import numpy as np
import pybullet as p
from igibson.metrics.metric_base import MetricBase
| 38.305699 | 116 | 0.581631 |
fe1a8e41b9a6dd96ffc12066b0bee8e9c0b3b6b6 | 438 | py | Python | fontslice/__init__.py | Arahabica/font-subset-css | 393b9a452af49c2168c7a9f84983e4170937ea67 | [
"MIT"
] | null | null | null | fontslice/__init__.py | Arahabica/font-subset-css | 393b9a452af49c2168c7a9f84983e4170937ea67 | [
"MIT"
] | null | null | null | fontslice/__init__.py | Arahabica/font-subset-css | 393b9a452af49c2168c7a9f84983e4170937ea67 | [
"MIT"
] | null | null | null | import sys
from .main import (
_chunk_list,
_get_unicode_range_hash,
convert_unicode_range,
get_120_unicode_ranges,
get_unicode_ranges_from_text,
generate_css,
main,
)
# Public API of the package: mirrors the names imported from .main above.
# The two leading-underscore helpers are re-exported deliberately — without
# __all__ a star-import would hide them.
__all__ = [
    "_chunk_list",
    "_get_unicode_range_hash",
    "convert_unicode_range",
    "get_120_unicode_ranges",
    "get_unicode_ranges_from_text",
    "generate_css",
    "main",
]
# Allow `python -m`-style direct execution; propagate main()'s exit code.
if __name__ == "__main__":
    sys.exit(main())
| 17.52 | 35 | 0.687215 |
fe1c00d5c2481798d64766027364e0e668d8c7bc | 59,866 | py | Python | src/ttkbootstrap/dialogs/dialogs.py | MrJaatt/ttkbootstrap | 4e837d64859e5a230ef0500faddbb2c384f5b9d4 | [
"MIT"
] | 1 | 2022-01-28T09:37:32.000Z | 2022-01-28T09:37:32.000Z | src/ttkbootstrap/dialogs/dialogs.py | MrJaatt/ttkbootstrap | 4e837d64859e5a230ef0500faddbb2c384f5b9d4 | [
"MIT"
] | null | null | null | src/ttkbootstrap/dialogs/dialogs.py | MrJaatt/ttkbootstrap | 4e837d64859e5a230ef0500faddbb2c384f5b9d4 | [
"MIT"
] | null | null | null | """
This module contains various dialog base classes that can be
used to create custom dialogs for the end user.
These classes serve as the basis for the pre-defined static helper
methods in the `Messagebox` and `Querybox` container classes.
"""
import calendar
import textwrap
from datetime import datetime
from tkinter import font
import ttkbootstrap as ttk
from ttkbootstrap import utility
from ttkbootstrap.icons import Icon
from ttkbootstrap.constants import *
from tkinter import BaseWidget
from ttkbootstrap.localization import MessageCatalog
| 33.65149 | 105 | 0.568219 |
a3a6ae7f4fab920589a878c0b0e9e7fa6a88c26a | 2,504 | py | Python | Google-Play-Store-App-Rating/code.py | venky4121994/ga-learner-dsmp-repo | 1bef03489931eece0d5ecb9ce0501dfeb558dc59 | [
"MIT"
] | null | null | null | Google-Play-Store-App-Rating/code.py | venky4121994/ga-learner-dsmp-repo | 1bef03489931eece0d5ecb9ce0501dfeb558dc59 | [
"MIT"
] | null | null | null | Google-Play-Store-App-Rating/code.py | venky4121994/ga-learner-dsmp-repo | 1bef03489931eece0d5ecb9ce0501dfeb558dc59 | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
# Load the Google Play dataset; `path` is provided by the grading harness.
data = pd.read_csv(path)
data.hist(['Rating'])
# Ratings are on a 1-5 scale; anything above 5 is a data-entry error.
data = data[data['Rating']<=5]
data.hist(['Rating'])
#Code ends here
# --------------
# code starts here
# Per-column count of missing values.
total_null = data.isnull().sum()
# isnull().count() is the total number of rows per column, so this is the
# missing-value fraction.
percent_null = (total_null/data.isnull().count())
missing_data = pd.concat([total_null,percent_null],keys=['Total','Percent'],axis=1)
print(missing_data)
# Drop rows with any missing value, then recompute to confirm nothing is left.
data.dropna(inplace=True)
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())
missing_data_1 = pd.concat([total_null_1,percent_null_1],keys=['Total','Percent'],axis=1)
print(missing_data_1)
# code ends here
# --------------
#Code starts here
plt.figure(figsize=(10,20))
# Box plot of Rating per app Category; rotate x labels so they stay readable.
catplot = sns.catplot(x = "Category", y = "Rating", data=data, kind="box",height=10)
catplot.set_xticklabels(rotation=90)
plt.title('Rating vs Category [BoxPlot]',size = 20)
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
print(data['Installs'])
# 'Installs' arrives as strings like "10,000+"; strip the formatting
# characters literally. regex=False is essential: under the historical
# regex=True default '+' is an invalid pattern ("nothing to repeat") and
# ',' would be treated as a regex as well.
data['Installs'] = data['Installs'].str.replace('+','',regex=False)
data['Installs'] = data['Installs'].str.replace(',','',regex=False)
data['Installs'] = data['Installs'].astype('int32')
# Label-encode the install buckets so they can sit on a numeric axis.
le = LabelEncoder()
data['Installs'] = le.fit_transform(data['Installs'])
graph = sns.regplot(data['Installs'],data['Rating'],data=data)
graph.set_title('Rating vs Installs [Boxplot]')
plt.show()
#Code ends here
# --------------
#Code starts here
print(data['Price'].value_counts())
# Strip the leading dollar sign literally. regex=False matters: with the
# old regex=True default, '$' is an end-of-string anchor, the replacement
# silently removes nothing, and the astype() below fails on "$4.99".
data['Price'] = data['Price'].str.replace('$','',regex=False)
data['Price'] = data['Price'].astype('float32')
graph2 = sns.regplot(data['Price'],data['Rating'],data=data)
graph2.set_title('Rating vs Price [RegPlot]')
#Code ends here
# --------------
#Code starts here
print(len(data['Genres'].unique()), "genres")
# Keep only the primary genre (text before the first ';').
data['Genres'] = data['Genres'].str.split(';').str[0]
# Mean rating per genre.
gr_mean = data[['Genres','Rating']].groupby(['Genres'],as_index=False).mean()
print(gr_mean.describe())
gr_mean=gr_mean.sort_values('Rating')
# NOTE(review): the lowest-rated genre is printed twice — the second print
# was probably meant to be gr_mean.tail(1) (the best genre); confirm intent.
print(gr_mean.head(1))
print(gr_mean.head(1))
#Code ends here
# --------------
#Code starts here
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
# Days since the most recently updated app in the dataset (0 = newest).
data['Last Updated Days'] = (data['Last Updated'].max()-data['Last Updated']).dt.days
plt.figure(figsize = (10,10))
sns.regplot(x="Last Updated Days", y="Rating",color='lightpink',data=data)
plt.title('Rating vs Last Updated [Regplot]',size =20)
#Code ends here
| 25.55102 | 89 | 0.680112 |
a3a6e52033cd00d1b8f29b49e45d1f519baff3e9 | 6,597 | py | Python | converters/brat2iob.py | Banguiskode/nerds | 366420b2ec57bf790562de62a79f4973cbd6b3ed | [
"BSD-3-Clause"
] | 15 | 2019-12-05T18:40:22.000Z | 2021-02-20T05:34:50.000Z | converters/brat2iob.py | Banguiskode/nerds | 366420b2ec57bf790562de62a79f4973cbd6b3ed | [
"BSD-3-Clause"
] | null | null | null | converters/brat2iob.py | Banguiskode/nerds | 366420b2ec57bf790562de62a79f4973cbd6b3ed | [
"BSD-3-Clause"
] | 4 | 2019-12-30T13:03:05.000Z | 2021-02-16T13:08:09.000Z | import argparse
import operator
import os
import re
import shutil
import spacy
import tempfile
from nerds.utils import spans_to_tokens, get_logger
def segment_text_to_sentences(text_file, sentence_splitter):
    """ Segment text into sentences. Text is provided by BRAT in .txt
        file.

        Args:
            text_file (str): the full path to the BRAT .txt file.
            sentence_splitter (spacy LM): SpaCy EN language model.

        Returns:
            sentences (list((int, int, str))): list of sentence spans.
                Spans are triples of (start_offset, end_offset, text),
                where offset is relative to the text.
    """
    sentences = []
    # the context manager guarantees the handle is closed even if the
    # splitter raises (the original leaked it in that case)
    with open(text_file, "r") as ftext:
        for line in ftext:
            splits = sentence_splitter(line.strip())
            for sent in splits.sents:
                sentences.append((sent.start_char, sent.end_char, sent.text))
    return sentences
def parse_text_annotations(ann_file):
    """ Parses BRAT annotations provided in the .ann file and converts them
        to annotation spans of (start_position, end_position, entity_class).

        Args:
            ann_file (str): full path to the BRAT .ann file.

        Returns:
            annotations (list((int, int, str))): list of annotation spans.
                Spans are triples of (start_offset, end_offset, entity_class)
                where offset is relative to the text.
    """
    annots = []
    # the context manager guarantees the handle is closed even if a line
    # is malformed and int() raises (the original leaked it in that case)
    with open(ann_file, "r") as fann:
        for line in fann:
            cols = re.split(r"\s+", line.strip())
            # only text-bound annotations ("T" lines) carry entity spans;
            # relation/attribute/note lines are skipped
            if not cols[0].startswith("T"):
                continue
            annots.append((int(cols[2]), int(cols[3]), cols[1]))
    return annots
def apply_annotations(sentences, annotations, tokenizer):
    """ Apply annotation spans to the sentence spans to create a list of tokens
        and tags.

        Args:
            sentences (list((int, int, str))): list of sentence spans.
            annotations (list((int, int, str))): list of annotation spans.
            tokenizer (spacy LM): SpaCy EN language model.

        Returns:
            tokens_tags_list (list(list((str, str)))): one list of
                (token, tag) pairs per sentence.
    """
    tokens_tags_list = []
    for sent_start, sent_end, sent_text in sentences:
        # keep only annotations that fall entirely inside this sentence
        sent_annots = [a for a in annotations if a[0] >= sent_start and a[1] <= sent_end]
        # convert document offsets to sentence offsets
        sent_annots = [(s[0] - sent_start, s[1] - sent_start, s[2]) for s in sent_annots]
        tokens, tags = spans_to_tokens(sent_text, sent_annots, tokenizer)
        # materialize the pairs: the bare zip iterator the original appended
        # could only be consumed once, making the result single-use
        tokens_tags_list.append(list(zip(tokens, tags)))
    return tokens_tags_list
def convert_brat_to_iob(input_dir, output_file, nlp):
    """ Convenience Convertor function.

        Args:
            input_dir (str): the directory where the BRAT .txt and .ann files
                are located.
            output_file (str): the full path name of file to write output in
                IOB format to.
            nlp (SpaCy LM): reference to the SpaCy EN model.

        Returns:
            None.
    """
    # the context manager guarantees the output file is flushed and closed
    # even if one of the input pairs fails to parse
    with open(output_file, "w") as fout:
        for text_file in os.listdir(input_dir):
            # only process .txt and .ann pairs in specified directory
            if not text_file.endswith(".txt"):
                continue
            annot_file = text_file[:-4] + ".ann"
            if not os.path.exists(os.path.join(input_dir, annot_file)):
                # do not process file if no corresponding .ann file
                continue
            # process file pair
            logger.info("Processing file: {:s}".format(text_file))
            sentences = segment_text_to_sentences(os.path.join(input_dir, text_file), nlp)
            annotations = parse_text_annotations(os.path.join(input_dir, annot_file))
            tokens_tags_list = apply_annotations(sentences, annotations, nlp)
            # CoNLL-style output: token<TAB>tag per line, blank line
            # between sentences
            for tokens_tags in tokens_tags_list:
                for token, tag in tokens_tags:
                    fout.write("{:s}\t{:s}\n".format(token, tag))
                fout.write("\n")
def do_self_test(nlp):
    """ Simple self-test with small dataset to prove that this works okay. """
    text = "Pierre Vinken, 61 years old, will join the board as a nonexecutive director, Nov. 29. Mr. Vinken is chairman of Elsevier N.V., the Dutch publishing group."
    annotations = [
        "T1 PER 0 13 Pierre Vinken",
        "T2 PER 86 96 Mr. Vinken",
        "T3 DATE 15 27 61 years old",
        "T4 DATE 77 84 Nov. 29",
        "T5 ORG 112 125 Elsevier N.V.",
        "T6 NORP 131 136 Dutch"
    ]
    input_dir = tempfile.mkdtemp(dir="/tmp")
    try:
        # write the scratch .txt/.ann pair; with-blocks replace the
        # open/close pairs that leaked on error in the original
        with open(os.path.join(input_dir, "test.txt"), "w") as ftext:
            ftext.write(text)
        with open(os.path.join(input_dir, "test.ann"), "w") as fann:
            for line in annotations:
                fann.write(line + "\n")
        output_file = os.path.join(input_dir, "test.iob")
        convert_brat_to_iob(input_dir, output_file, nlp)
        with open(output_file, "r") as fout:
            for line in fout:
                # logger.warn is a deprecated alias for warning
                logger.warning(line.strip())
    finally:
        # remove the scratch directory even if the conversion failed
        shutil.rmtree(input_dir)
################################ main ################################
#
# usage: brat2iob.py [-h] [-i INPUT_DIR] [-o OUTPUT_FILE] [-t]
# Script to convert BRAT annotations to IOB (NERDS) format.
# optional arguments:
# -h, --help show this help message and exit
# -i INPUT_DIR, --input_dir INPUT_DIR
# Directory to store BRAT .txt and .ann files.
# -o OUTPUT_FILE, --output_file OUTPUT_FILE
# Output file to write IOB output to.
# -t, --test Runs self test.
######################################################################
# Command-line interface (see the usage banner above).
parser = argparse.ArgumentParser(
    description="Script to convert BRAT annotations to IOB (NERDS) format.")
parser.add_argument("-i", "--input_dir", help="Directory to store BRAT .txt and .ann files.")
parser.add_argument("-o", "--output_file", help="Output file to write IOB output to.")
parser.add_argument("-t", "--test", help="Runs self test.", action="store_true")
args = parser.parse_args()
logger = get_logger()
input_dir = args.input_dir
output_file = args.output_file
self_test = args.test
# NOTE(review): the "en" shortcut requires a linked spaCy 2.x model; spaCy 3
# needs the full package name (e.g. "en_core_web_sm") — confirm the pinned
# spaCy version before changing.
nlp = spacy.load("en")
# Either run the built-in smoke test or convert the given directory.
if self_test:
    logger.info("Executing self test...")
    do_self_test(nlp)
else:
    logger.info("Reading BRAT .txt and .ann files from: {:s}".format(input_dir))
    logger.info("Writing IOB tokens/tags to file: {:s}".format(output_file))
    convert_brat_to_iob(input_dir, output_file, nlp)
| 36.854749 | 167 | 0.618463 |
a3a738f0c10019d9229ed8e9b93898831920170d | 2,503 | py | Python | kraken/lib/util.py | zjsteyn/kraken | eaa9f4290db5425ddf80d0aebfa3944713558ab5 | [
"Apache-2.0"
] | 1 | 2022-02-03T14:41:58.000Z | 2022-02-03T14:41:58.000Z | kraken/lib/util.py | ephenum/kraken | 47be8f7ddcb7c7ad63bfc5636df1976a4e84a5f0 | [
"Apache-2.0"
] | null | null | null | kraken/lib/util.py | ephenum/kraken | 47be8f7ddcb7c7ad63bfc5636df1976a4e84a5f0 | [
"Apache-2.0"
] | 1 | 2022-01-19T10:53:20.000Z | 2022-01-19T10:53:20.000Z | """
Ocropus's magic PIL-numpy array conversion routines. They express slightly
different behavior from PIL.Image.toarray().
"""
import unicodedata
import numpy as np
from PIL import Image
__all__ = ['pil2array', 'array2pil']
def is_bitonal(im: Image.Image) -> bool:
    """
    Tests a PIL.Image for bitonality.

    Args:
        im (PIL.Image.Image): Image to test

    Returns:
        True if the image contains only two different color values. False
        otherwise.
    """
    # getcolors(2) returns None when the image has more than 2 distinct
    # colors; binding the result once avoids the second full image scan
    # the original performed by calling getcolors(2) twice.
    colors = im.getcolors(2)
    return colors is not None and len(colors) == 2
def is_printable(char: str) -> bool:
    """
    Determines if a code point is printable/visible when printed.

    Args:
        char (str): Input code point.

    Returns:
        True if printable, False otherwise.
    """
    # General categories that render visibly: letters, numbers,
    # punctuation and symbols (everything else — marks, separators,
    # controls — is considered invisible).
    visible_categories = frozenset((
        'LC', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',         # letters
        'Nd', 'Nl', 'No',                           # numbers
        'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',   # punctuation
        'Sc', 'Sk', 'Sm', 'So',                     # symbols
    ))
    return unicodedata.category(char) in visible_categories
def make_printable(char: str) -> str:
    """
    Return a printable representation for a Unicode code point.

    Args:
        char (str): Input code point.

    Returns:
        The code point itself when it is already printable, its hex value
        when it is a control/surrogate/private-use character, and its
        Unicode name otherwise (combining marks, whitespace, ...).
    """
    # Empty input and already-visible characters pass through unchanged.
    if not char or is_printable(char):
        return char
    # Control (Cc), surrogate (Cs) and private-use (Co) characters have no
    # name, so fall back to their hex code point.
    if unicodedata.category(char) in ('Cc', 'Cs', 'Co'):
        return '0x{:x}'.format(ord(char))
    # Everything else (marks, separators, ...) is spelled out by name.
    return unicodedata.name(char)
| 27.811111 | 77 | 0.582901 |
a3a7f40bcb06653665d3b8d30577d4282cd0f05f | 2,877 | py | Python | analysis/calculate_holding_amount.py | hao44le/ico_top_holder_analysis | aeeab01c90e4446b424c52c33a68ccb814123121 | [
"MIT"
] | 538 | 2018-07-04T21:14:52.000Z | 2022-03-26T15:16:08.000Z | analysis/calculate_holding_amount.py | hao44le/ico_top_holder_analysis | aeeab01c90e4446b424c52c33a68ccb814123121 | [
"MIT"
] | 4 | 2018-07-08T22:11:32.000Z | 2021-12-13T19:48:38.000Z | analysis/calculate_holding_amount.py | hao44le/ico_top_holder_analysis | aeeab01c90e4446b424c52c33a68ccb814123121 | [
"MIT"
] | 52 | 2018-07-05T12:07:37.000Z | 2021-04-05T23:34:20.000Z | import sys
sys.path.insert(0,'..')
from data.whale_data import exchnage_accounts
from data.html_helper import check_if_address_name_exists
from data.whale_eth_tx_data import *
from data.whale_token_tx_data import identify_investor_type_token
# Labels used to classify how an address behaves with respect to exchanges.
holding_account = "holding_account"
deposit_account = 'deposit_account'
withdraw_account = "withdraw_account"
# Transaction direction tags.
in_type = "IN"
out_type = "OUT"
# Seed the per-address classification map: every known exchange address is
# pre-tagged. NOTE(review): `exchange_type` comes from the star import of
# data.whale_eth_tx_data above — confirm.
all_acc_types = dict()
for acc in exchnage_accounts:
    all_acc_types[acc] = exchange_type
| 29.96875 | 91 | 0.642336 |
a3a80291d5fdb7e2a418a7fbbb6542744e0db4d2 | 66,926 | py | Python | textbox/trainer/trainer.py | JBoRu/TextBox-1 | 0dcbaa153acc507e3d55075312d7ca5d23146e03 | [
"MIT"
] | 1 | 2021-08-12T01:08:09.000Z | 2021-08-12T01:08:09.000Z | textbox/trainer/trainer.py | JBoRu/TextBox-1 | 0dcbaa153acc507e3d55075312d7ca5d23146e03 | [
"MIT"
] | null | null | null | textbox/trainer/trainer.py | JBoRu/TextBox-1 | 0dcbaa153acc507e3d55075312d7ca5d23146e03 | [
"MIT"
] | null | null | null | # @Time : 2020/11/14
# @Author : Junyi Li, Gaole He
# @Email : lijunyi@ruc.edu.cn
# UPDATE:
# @Time : 2020/12/2, 2020/11/27, 2020/12/3, 2020/12/26
# @Author : Jinhao Jiang, Xiaoxuan Hu, Tianyi Tang, Jinhao Jiang
# @Email : jiangjinhao@std.uestc.edu.cn, huxiaoxuan@ruc.edu.cn, steventang@ruc.edu.cn, jiangjinhao@std.uestc.edu.cn
r"""
textbox.trainer.trainer
################################
"""
import os
import torch
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from torch.utils.data import DataLoader
from time import time
from logging import getLogger
from textbox.module.Optimizer.optim import ScheduledOptim
from textbox.evaluator import NgramEvaluator, TranslationEvaluator, SummarizationEvaluator
from textbox.utils import ensure_dir, early_stopping
| 45.997251 | 146 | 0.621836 |
a3a8234ec61d7794c6426793212657ac24a62f4a | 649 | py | Python | rsserpent/plugins/builtin/__init__.py | EurusEurus/RSSerpent | fd7aaf67b80b2b48c14b1a3efe733374b0012338 | [
"MIT"
] | null | null | null | rsserpent/plugins/builtin/__init__.py | EurusEurus/RSSerpent | fd7aaf67b80b2b48c14b1a3efe733374b0012338 | [
"MIT"
] | null | null | null | rsserpent/plugins/builtin/__init__.py | EurusEurus/RSSerpent | fd7aaf67b80b2b48c14b1a3efe733374b0012338 | [
"MIT"
] | null | null | null | from ...models import Persona, Plugin
from . import example, example_cache, example_ratelimit, example_with_args
# Plugin manifest consumed by the RSSerpent core: identity/authorship plus
# the route table mapping each example path to its provider.
plugin = Plugin(
    name="rsserpent-plugin-builtin",
    author=Persona(
        name="queensferryme",
        link="https://github.com/queensferryme",
        email="queensferry.me@gmail.com",
    ),
    repository="https://github.com/RSSerpent/RSSerpent",
    prefix="/_",
    routers={
        example.path: example.provider,
        example_cache.path: example_cache.provider,
        example_ratelimit.path: example_ratelimit.provider,
        example_with_args.path: example_with_args.provider,
    },
)
# Only the manifest itself is part of the public API.
__all__ = ("plugin",)
| 28.217391 | 74 | 0.682589 |
a3a86ac522e7ca59c54af2df1492f75fd0ad7b3e | 2,859 | py | Python | data_processing/process_xls.py | luisroel91/libdib_assesment | c969cfecbce1243b457961ffafe5caaea7bb5149 | [
"MIT"
] | null | null | null | data_processing/process_xls.py | luisroel91/libdib_assesment | c969cfecbce1243b457961ffafe5caaea7bb5149 | [
"MIT"
] | null | null | null | data_processing/process_xls.py | luisroel91/libdib_assesment | c969cfecbce1243b457961ffafe5caaea7bb5149 | [
"MIT"
] | null | null | null | import pandas as pd
# Define our header
# Column layout shared by both spreadsheets: one year row with male and
# female income statistics (counts plus medians in current and 2019 dollars).
col_names = [
    "year",
    "num_males_with_income",
    "male_median_income_curr_dollars",
    "male_median_income_2019_dollars",
    "num_females_with_income",
    "female_median_income_curr_dollars",
    "female_median_income_2019_dollars",
]
# Load Asian census data XLS, skipping all headers
dfa = pd.read_excel(
    r'p08a.xlsx',
    skiprows=8,
    # Make sure PD doesn't use header row for our DF
    header=None,
    # Define col names
    names=col_names,
)
# Load White census data XLS, skipping all headers
dfw = pd.read_excel(
    r'p08w.xlsx',
    skiprows=8,
    # Make sure PD doesn't use header row for our DF
    header=None,
    # Define col names
    names=col_names
)
# Splinter off rows into age group DFs for both sets of data
# .copy() detaches each age-group slice from its parent frame: the
# .insert() calls that follow would otherwise write into a view of
# dfa/dfw, raising pandas' SettingWithCopyWarning with no guarantee the
# writes take effect.
dfa1524 = dfa.iloc[:20].copy()
dfa2534 = dfa.iloc[25:45].copy()
dfa3544 = dfa.iloc[50:70].copy()
dfa4554 = dfa.iloc[75:95].copy()
dfa5564 = dfa.iloc[100:120].copy()
dfa6574 = dfa.iloc[125:145].copy()
dfa75 = dfa.iloc[150:170].copy()
dfw1524 = dfw.iloc[:20].copy()
dfw2534 = dfw.iloc[25:45].copy()
dfw3544 = dfw.iloc[50:70].copy()
dfw4554 = dfw.iloc[75:95].copy()
dfw5564 = dfw.iloc[100:120].copy()
dfw6574 = dfw.iloc[125:145].copy()
dfw75 = dfw.iloc[150:170].copy()
# Add Age Range col to each DF (inserted at position 0 so it leads the row)
dfa1524.insert(0, 'age_range', '15-24')
dfa2534.insert(0, 'age_range', '25-34')
dfa3544.insert(0, 'age_range', '35-44')
dfa4554.insert(0, 'age_range', '45-54')
dfa5564.insert(0, 'age_range', '55-64')
dfa6574.insert(0, 'age_range', '65-74')
dfa75.insert(0, 'age_range', 'Over 75')
dfw1524.insert(0, 'age_range', '15-24')
dfw2534.insert(0, 'age_range', '25-34')
dfw3544.insert(0, 'age_range', '35-44')
dfw4554.insert(0, 'age_range', '45-54')
dfw5564.insert(0, 'age_range', '55-64')
dfw6574.insert(0, 'age_range', '65-74')
dfw75.insert(0, 'age_range', 'Over 75')
# Stack cleaned DF's vertically
dfa = pd.concat([
    dfa1524,
    dfa2534,
    dfa3544,
    dfa4554,
    dfa5564,
    dfa6574,
    dfa75
], axis=0)
dfw = pd.concat([
    dfw1524,
    dfw2534,
    dfw3544,
    dfw4554,
    dfw5564,
    dfw6574,
    dfw75
], axis=0)
# Add Race col
dfa.insert(0, 'race', 'asian')
dfw.insert(0, 'race', 'white')
# Clean garbage chars in Year col using regex: strips footnote markers of
# the form " (12)" that Census appends to some year labels.
dfa['year'] = dfa['year'].replace(to_replace=r'(\s\(\d+\))', value='', regex=True)
dfw['year'] = dfw['year'].replace(to_replace=r'(\s\(\d+\))', value='', regex=True)
# Stack our cleaned + normalized data into a single DF
df = pd.concat([
    dfa,
    dfw
], axis=0)
# Convert the DF col types to conform to our CensusRecord model
df = df.astype({
    "race": str,
    "age_range": str,
    "year": int,
    "num_males_with_income": int,
    "male_median_income_curr_dollars": float,
    "male_median_income_2019_dollars": float,
    "num_females_with_income": int,
    "female_median_income_curr_dollars": float,
    "female_median_income_2019_dollars": float,
})
df.to_pickle("./res.pkl")
| 24.646552 | 82 | 0.671913 |
a3aa7d175c4008d278417caf82ba36b9fb655fda | 520 | py | Python | Section_1/Exercise_16.py | Szymon-Budziak/WDI_exercises_solutions | 51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460 | [
"MIT"
] | null | null | null | Section_1/Exercise_16.py | Szymon-Budziak/WDI_exercises_solutions | 51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460 | [
"MIT"
] | null | null | null | Section_1/Exercise_16.py | Szymon-Budziak/WDI_exercises_solutions | 51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460 | [
"MIT"
] | 1 | 2021-11-21T09:38:33.000Z | 2021-11-21T09:38:33.000Z | """
Dany jest ciąg określony wzorem: A[n+1] = (A[n] % 2) * (3 * A[n] + 1) + (1 - A[n] % 2) * A[n] / 2.
Startując z dowolnej liczby naturalnej > 1 ciąg ten osiąga wartość 1. Napisać program, który
znajdzie wyraz początkowy z przedziału 2-10000, dla którego wartość 1 jest osiągalna po największej
liczbie kroków.
"""
# Collatz-style recurrence from the docstring above:
#   next = 3*a + 1 when a is odd, a / 2 when a is even.
# Fixes over the original version:
#   * integer arithmetic (// instead of /) keeps every value exact,
#   * the record-setting starting term is tracked — the exercise asks for
#     the starting term, which the original never remembered,
#   * the redundant trailing `a0 += 1` (immediately overwritten by the
#     for loop) is removed.
best_start = 2  # starting term with the longest trajectory so far
m = 1           # number of steps of that longest trajectory
for start in range(2, 10000):
    a0 = start
    n = 0  # steps needed for this starting term to reach 1
    while a0 != 1:
        if a0 % 2:
            a0 = 3 * a0 + 1
        else:
            a0 //= 2
        n += 1
    if n > m:
        m = n
        best_start = start
print(best_start)  # the starting term the exercise asks for
print(m)           # how many steps it needs to reach 1
| 27.368421 | 98 | 0.542308 |
a3ac4915a74b531c1dc0b8afb60e2d05592076cd | 61,910 | py | Python | SysPy_ver/funcs/_var_declaration.py | evlog/SysPy | d1ee6e2ca60492d20339c0016a9c24d027170553 | [
"CNRI-Python"
] | 4 | 2017-12-28T14:00:16.000Z | 2021-01-21T08:53:14.000Z | SysPy_ver/funcs/_var_declaration.py | evlog/SysPy | d1ee6e2ca60492d20339c0016a9c24d027170553 | [
"CNRI-Python"
] | 1 | 2018-07-31T16:27:00.000Z | 2018-07-31T16:27:37.000Z | SysPy_ver/funcs/_var_declaration.py | evlog/SysPy | d1ee6e2ca60492d20339c0016a9c24d027170553 | [
"CNRI-Python"
] | 2 | 2015-10-12T09:13:13.000Z | 2020-01-06T12:22:55.000Z | """
*****************************************************************************
*
H E A D E R I N F O R M A T I O N *
*
*****************************************************************************
Project Name: SysPy (System Python)
http://cgi.di.uoa.gr/~evlog/syspy.html
File Name: _var_declaration.py
Created by: Evangelos Logaras
*****************************************************************************
*
C O P Y R I G H T N O T I C E *
*
*****************************************************************************
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2.1 of the License, a copy of which is available from
http://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
*****************************************************************************
*
D E S C R I P T I O N *
*
*****************************************************************************
Variable declaration when a variable assignment is tracked.
"""
from pdb import *
def var_declaration(assign_lines_count, token_struct, assign_lines, signals, process_vars):
    """Emit VHDL variable/type declarations for Python variables tracked in a process.

    Parameters:
        assign_lines_count -- int, index into ``assign_lines`` of the current line;
                              used as the starting point when scanning backwards
                              for the process sensitivity list
        token_struct       -- token tuple (not used inside this function)
        assign_lines       -- list of [kind, vhdl_text, ...] entries; the VHDL
                              output text at index 1 is appended to in place
        signals            -- list of signal-descriptor dicts with keys 'N' (name),
                              'T' (type code: 'b', "int", "arrb", "arri", 's'),
                              'L' (length/range info), 'D' (declaration kind) and
                              an optional 'V' (initial value)
        process_vars       -- list of [kind, payload] records naming the variables
                              seen on the left/right side of tracked assignments

    Pass 1 removes duplicate registrations from ``process_vars``; pass 2 emits a
    VHDL ``variable`` (and, for arrays/state types, a ``type``) declaration for
    every tracked name whose matching signal is declared as a variable
    (``signals[i]['D'] == 'v'``), splicing the text in after the enclosing
    process sensitivity list.

    NOTE(review): Python 2 only -- relies on ``dict.has_key()`` and on sniffing
    runtime types via ``obj.__doc__`` (builtin docstrings start with "int",
    "list", "str"); ``isinstance`` would be the robust replacement.
    NOTE(review): ``signals_intr`` and ``args`` are read but never defined in
    this function -- presumably module globals, possibly typos for ``signals``;
    confirm against the rest of the file.
    """
    # Python's variable declerations
    #----------------------------------------------------------------------------------------------------------------------------------
    count0 = 0          # counter used to generate unique array type names (type0, type1, ...)
    count1 = 0          # counter used to generate unique state type names (state_type0, ...)
    process_vars_d = [] # de-duplicated copy of process_vars built by pass 1
    vars0 = []          # variable names extracted from the current process_vars record
    var0 = ''
    var1 = ''
    #----------------------------------------------------------------------------------------------------------------------------------
    print("process_vars:", process_vars)
    # Erasing duplicated registrations in "process_vars[]"
    #----------------------------------------------------------------------------------------------------------------------------------
    for i in range(len(process_vars)):
        vars0 = []
        #flag_process_vars = 0
        # Extract the variable name(s) carried by this record into vars0.
        # "..._var0/_var1/_var2" style records carry extra index-variable names
        # at fixed payload positions, so up to three names may be collected.
        if ((process_vars[i][0] == "name_left") or (process_vars[i][0] == "name_right")):
            var0 = process_vars[i][1].replace('=', '')
            var0 = var0.replace('! ', '')
            var0 = var0.replace('>', '')
            var0 = var0.replace('<', '')
            var0 = var0.replace(' ', '')
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_binary_slice"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_binary_slice_var0"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][1]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_binary_slice_var1"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][2]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_binary_slice_var01"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][1]
            vars0.append(var0)
            var0 = process_vars[i][1][2]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_item"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_item_var"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][1]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_item"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_item_var0"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][1]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_item_var1"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][2]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_item_var01"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][1]
            vars0.append(var0)
            var0 = process_vars[i][1][2]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_slice"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_slice_var0"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][1]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_slice_var1"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][2]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_slice_var2"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][3]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_slice_var01"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][1]
            vars0.append(var0)
            var0 = process_vars[i][1][2]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_slice_var02"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][1]
            vars0.append(var0)
            var0 = process_vars[i][1][3]
            vars0.append(var0)
        elif (process_vars[i][0] == "name_right_array_binary_slice_var12"):
            var0 = process_vars[i][1][0]
            vars0.append(var0)
            var0 = process_vars[i][1][2]
            vars0.append(var0)
            var0 = process_vars[i][1][3]
            vars0.append(var0)
        # flag_process_vars is a bitmask recording which entries of vars0 are
        # already present in process_vars_d: bit0 -> vars0[0], bit1 -> vars0[1],
        # bit2 -> vars0[2].
        flag_process_vars = 0
        for n in range(0, len(vars0)):
            for j in range(len(process_vars_d)):
                # Recover the primary name stored in each already-registered record
                # so it can be compared against vars0[n].
                if ((process_vars_d[j][0] == "name_left") or (process_vars_d[j][0] == "name_right")):
                    var1 = process_vars_d[j][1].replace('=', '')
                    var1 = var1.replace('! ', '')
                    var1 = var1.replace('>', '')
                    var1 = var1.replace('<', '')
                    var1 = var1.replace(' ', '')
                elif (process_vars_d[j][0] == "name_right_binary_slice"):
                    var1 = process_vars_d[j][1][0]
                elif (process_vars_d[j][0] == "name_right_binary_slice_var0"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_binary_slice_var1"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_binary_slice_var01"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_item"):
                    var1 = process_vars_d[j][1][0]
                elif (process_vars_d[j][0] == "name_right_item_var"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_item"):
                    var1 = process_vars_d[j][1][0]
                elif (process_vars_d[j][0] == "name_right_array_binary_item_var0"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_item_var1"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_item_var01"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_slice"):
                    var1 = process_vars_d[j][1][0]
                elif (process_vars_d[j][0] == "name_right_array_binary_slice_var0"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_slice_var1"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_slice_var2"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_slice_var01"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_slice_var02"):
                    var1 = process_vars_d[j][1]
                elif (process_vars_d[j][0] == "name_right_array_binary_slice_var12"):
                    var1 = process_vars_d[j][1]
                if (vars0[n] == var1):
                    if (n == 0):
                        flag_process_vars += 1
                    if (n == 1):
                        flag_process_vars += 2
                    if (n == 2):
                        flag_process_vars += 4
        # Register only the payload entries not already seen; the bitmask value
        # selects which subset of the record's names still needs registering.
        if ((process_vars[i][0] == "name_left") or (process_vars[i][0] == "name_right")):
            if (flag_process_vars == 0):
                process_vars_d.append(process_vars[i])
        elif (process_vars[i][0] == "name_right_binary_slice"):
            if (flag_process_vars == 0):
                process_vars_d.append(process_vars[i])
        elif (process_vars[i][0] == "name_right_binary_slice_var0"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][0]])
                process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][1]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][1]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][0]])
            elif (flag_process_vars == 3):
                pass
        elif (process_vars[i][0] == "name_right_binary_slice_var1"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][0]])
                process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][2]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][2]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][0]])
            elif (flag_process_vars == 4):
                pass
        elif (process_vars[i][0] == "name_right_binary_slice_var01"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 3):
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 4):
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
            elif (flag_process_vars == 5):
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
            elif (flag_process_vars == 6):
                process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
            elif (flag_process_vars == 7):
                pass
        elif (process_vars[i][0] == "name_right_item"):
            if (flag_process_vars == 0):
                process_vars_d.append(process_vars[i])
        elif (process_vars[i][0] == "name_right_item_var"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_item_var", process_vars[i][1][0]])
                process_vars_d.append(["name_right_item_var", process_vars[i][1][1]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_item_var", process_vars[i][1][1]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_item_var", process_vars[i][1][0]])
            elif (flag_process_vars == 3):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_item"):
            if (flag_process_vars == 0):
                process_vars_d.append(process_vars[i])
        elif (process_vars[i][0] == "name_right_array_binary_item_var0"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][1]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][1]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][0]])
            elif (flag_process_vars == 3):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_item_var1"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][2]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][2]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][0]])
            elif (flag_process_vars == 3):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_item_var01"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 3):
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 4):
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
            elif (flag_process_vars == 5):
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
            elif (flag_process_vars == 6):
                process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
            elif (flag_process_vars == 7):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_slice"):
            if (flag_process_vars == 0):
                process_vars_d.append(process_vars[i])
        elif (process_vars[i][0] == "name_right_array_binary_slice_var0"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][1]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][1]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][0]])
            elif (flag_process_vars == 3):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_slice_var1"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][2]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][2]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][0]])
            elif (flag_process_vars == 3):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_slice_var2"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][3]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][3]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][0]])
            elif (flag_process_vars == 3):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_slice_var01"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 3):
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
            elif (flag_process_vars == 4):
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
            elif (flag_process_vars == 5):
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
            elif (flag_process_vars == 6):
                process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
            elif (flag_process_vars == 7):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_slice_var02"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
            elif (flag_process_vars == 3):
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
            elif (flag_process_vars == 4):
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
            elif (flag_process_vars == 5):
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
            elif (flag_process_vars == 6):
                process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
            elif (flag_process_vars == 7):
                pass
        elif (process_vars[i][0] == "name_right_array_binary_slice_var12"):
            if (flag_process_vars == 0):
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
            elif (flag_process_vars == 1):
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
            elif (flag_process_vars == 2):
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
            elif (flag_process_vars == 3):
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
            elif (flag_process_vars == 4):
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
            elif (flag_process_vars == 5):
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
            elif (flag_process_vars == 6):
                process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
            elif (flag_process_vars == 7):
                pass
    # Pass 2: work with the de-duplicated list only.
    process_vars = process_vars_d
    #----------------------------------------------------------------------------------------------------------------------------------
    j = assign_lines_count
    # For each surviving record, recover the plain variable name t and, if it
    # matches a signal declared as a variable, emit the VHDL declaration text.
    for m in range(0, len(process_vars)):
        if ((process_vars[m][0] == "name_left") or (process_vars[m][0] == "name_right")):
            t = process_vars[m][1].replace('=', '')
            t = t.replace(' ', '')
        elif (process_vars[m][0] == "name_right_binary_slice"):
            t = process_vars[m][1][0]
        elif (process_vars[m][0] == "name_right_binary_slice_var0"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_binary_slice_var1"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_binary_slice_var01"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_item"):
            t = process_vars[m][1][0]
        elif (process_vars[m][0] == "name_right_item_var"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_item"):
            t = process_vars[m][1][0]
        elif (process_vars[m][0] == "name_right_array_binary_item_var0"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_item_var1"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_item_var01"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_slice"):
            t = process_vars[m][1][0]
        elif (process_vars[m][0] == "name_right_array_binary_slice_var0"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_slice_var1"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_slice_var2"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_slice_var01"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_slice_var02"):
            t = process_vars[m][1]
        elif (process_vars[m][0] == "name_right_array_binary_slice_var12"):
            t = process_vars[m][1]
        for i in range (0, len(signals)):
            if (t == signals[i]['N']):
                if (signals[i]['D'] == 'v'):
                    # Type sniffing via builtin docstrings: int.__doc__ starts with
                    # "int", list.__doc__ with "list", str.__doc__ with "str".
                    L = signals[i]['L'].__doc__
                    n = signals[i]['N'].__doc__
                    if (m == 0):
                        # First matched variable: scan backwards through assign_lines
                        # for the process sensitivity list, splice in a "-- Variables"
                        # header and the first declaration right after it.
                        sp = ''
                        while 1:
                            if (assign_lines[j][0] == "process_sens_list"):
                                assign_lines[j][0] = assign_lines[j][0] + "_var"
                                for k in range(0, assign_lines[j][4]):
                                    sp = sp + ' '
                                assign_lines[j][1] = assign_lines[j][1].replace("begin", '')
                                assign_lines[j][1] = assign_lines[j][1] + "\n\n" + sp + "-- Variables"
                                assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "-------------------------------------------------------------------"
                                if (signals[i]['T'] == 'b'):
                                    if (L.find("int") == 0):
                                        if (n.find("list") == 0):
                                            # NOTE(review): signals_intr is not defined in this
                                            # function -- likely meant signals; confirm.
                                            for k in range(len(signals_intr[i]['N'])):
                                                if (signals[i].has_key('V') == False):
                                                    assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic;\n"
                                                elif (signals[i].has_key('V') == True):
                                                    assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic := '" + signals[i]['V'] + "';\n"
                                        elif (n.find("str") == 0):
                                            if (signals[i].has_key('V') == False):
                                                assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic;\n"
                                            elif (signals[i].has_key('V') == True):
                                                assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic := '" + signals[i]['V'] + "';\n"
                                    elif (L.find("list") == 0):
                                        if (n.find("list") == 0):
                                            for k in range(len(signals[i]['N'])):
                                                if (signals[i].has_key('V') == False):
                                                    if (signals[i]['L'][0] > signals[i]['L'][1]):
                                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
                                                    else:
                                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
                                                elif (signals[i].has_key('V') == True):
                                                    if (signals_intr[i]['L'][0] > signals_intr[i]['L'][1]):
                                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
                                                    else:
                                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
                                        elif (n.find("str") == 0):
                                            if (signals[i].has_key('V') == False):
                                                if (signals[i]['L'][0] > signals[i]['L'][1]):
                                                    assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
                                                else:
                                                    assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
                                            elif (signals[i].has_key('V') == True):
                                                if (signals[i]['L'][0] > signals[i]['L'][1]):
                                                    assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
                                                else:
                                                    assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
                                break
                            elif (signals[i]['T'] == "int"):
                                if (n.find("str") == 0):
                                    if (signals[i].has_key('V') == False):
                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
                                    elif (signals[i].has_key('V') == True):
                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
                                elif (n.find("list") == 0):
                                    for k in range(len(signals[i]['N'])):
                                        if (signals[i].has_key('V') == False):
                                            assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
                                        elif (signals_intr[i].has_key('V') == True):
                                            assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
                                break
                            elif (signals[i]['T'] == "arrb"):
                                # Array-of-std_logic_vector: declare a helper array type
                                # first, then the variable of that type.
                                if (n.find("str") == 0):
                                    if (signals[i]['L'][1][0] > signals[i]['L'][1][1]):
                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " downto " + str(signals_intr[i]['L'][1][1]) + ");\n"
                                    elif (signals[i]['L'][1][0] < signals[i]['L'][1][1]):
                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " to " + str(signals_intr[i]['L'][1][1]) + ");\n"
                                    if (signals[i].has_key('V') == False):
                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ";\n"
                                    elif (signals[i].has_key('V') == True):
                                        v = signals[i]['V'].__doc__
                                        # NOTE(review): emits ": \"...\"" rather than VHDL's
                                        # ":=" initializer syntax -- confirm intended output.
                                        if (v.find("str") == 0):
                                            assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": \"" + signals[i]['V'] + "\";\n"
                                        elif(v.find("list") == 0):
                                            assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": {"
                                            for k in range(0, (signals[i]['L'][0][1] + 1)):
                                                if (k == signals[i]['L'][0][1]):
                                                    assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\"};\n"
                                                elif (k != signals[i]['L'][0][1]):
                                                    assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\", "
                                    count0 = count0 + 1
                                break
                            elif (signals[i]['T'] == "arri"):
                                if (n.find("str") == 0):
                                    assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of integer range " + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ";\n"
                                    if (signals[i].has_key('V') == False):
                                        assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ";\n"
                                    elif (signals[i].has_key('V') == True):
                                        v = signals[i]['V'].__doc__
                                        if (v.find("str") == 0):
                                            assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": " + str(signals[i]['V']) + ";\n"
                                        elif(v.find("list") == 0):
                                            assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": {"
                                            for k in range(0, (signals_intr[i]['L'][0][1] + 1)):
                                                if (k == signals[i]['L'][0][1]):
                                                    assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + "};\n"
                                                # NOTE(review): compares j, not the loop index k --
                                                # looks like a typo; confirm before changing.
                                                elif (j != signals[i]['L'][0][1]):
                                                    assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
                                    count0 = count0 + 1
                                break
                            elif (signals[i]['T'] == 's'):
                                # Enumerated state type for FSM state variables.
                                v = signals[i]['V'].__doc__
                                assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type state_type" + str(count1) + " is ("
                                if (v.find("str") == 0):
                                    assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'] + ");\n"
                                elif (v.find("list") == 0):
                                    for k in range(len(signals[i]['V'])):
                                        if (k == (len(signals[i]['V']) - 1)):
                                            assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ");\n"
                                        else:
                                            assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
                                # NOTE(review): args is not defined in this function --
                                # presumably a global, possibly meant signals; confirm.
                                assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "signal " + args[i]['N'] + ": state_type" + str(count1) + ";\n"
                                count1 = count1 + 1
                                break
                            elif (j == 0):
                                break
                            j = j - 1
                    elif (m != 0):
                        # Subsequent variables append after the header inserted by the
                        # m == 0 pass.
                        # NOTE(review): sp and j are reused from the m == 0 iteration;
                        # sp is unbound if the first match occurs with m != 0.
                        if (signals[i]['T'] == 'b'):
                            if (L.find("int") == 0):
                                if (n.find("list") == 0):
                                    for k in range(len(signals_intr[i]['N'])):
                                        if (signals[i].has_key('V') == False):
                                            assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic;\n"
                                        elif (signals[i].has_key('V') == True):
                                            assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic := '" + signals[i]['V'] + "';\n"
                                elif (n.find("str") == 0):
                                    if (signals[i].has_key('V') == False):
                                        assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic;\n"
                                    elif (signals[i].has_key('V') == True):
                                        assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic := '" + signals[i]['V'] + "';\n"
                            elif (L.find("list") == 0):
                                if (n.find("list") == 0):
                                    for k in range(len(signals[i]['N'])):
                                        if (signals[i].has_key('V') == False):
                                            if (signals[i]['L'][0] > signals[i]['L'][1]):
                                                assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
                                            else:
                                                assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
                                        elif (signals[i].has_key('V') == True):
                                            if (signals_intr[i]['L'][0] > signals_intr[i]['L'][1]):
                                                assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
                                            else:
                                                assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
                                elif (n.find("str") == 0):
                                    if (signals[i].has_key('V') == False):
                                        if (signals[i]['L'][0] > signals[i]['L'][1]):
                                            assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
                                        else:
                                            assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
                                    elif (signals[i].has_key('V') == True):
                                        if (signals[i]['L'][0] > signals[i]['L'][1]):
                                            assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
                                        else:
                                            assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
                        elif (signals[i]['T'] == "int"):
                            if (n.find("str") == 0):
                                if (signals[i].has_key('V') == False):
                                    assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
                                elif (signals[i].has_key('V') == True):
                                    assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
                            elif (n.find("list") == 0):
                                for k in range(len(signals[i]['N'])):
                                    if (signals[i].has_key('V') == False):
                                        assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
                                    elif (signals_intr[i].has_key('V') == True):
                                        assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
                        elif (signals[i]['T'] == "arrb"):
                            # Same shape as the m == 0 branch but with "typev" type names.
                            if (n.find("str") == 0):
                                if (signals[i]['L'][1][0] > signals[i]['L'][1][1]):
                                    assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals[i]['L'][1][0]) + " downto " + str(signals[i]['L'][1][1]) + ");\n"
                                elif (signals[i]['L'][1][0] < signals[i]['L'][1][1]):
                                    assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " to " + str(signals_intr[i]['L'][1][1]) + ");\n"
                                if (signals[i].has_key('V') == False):
                                    assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ";\n"
                                elif (signals[i].has_key('V') == True):
                                    v = signals[i]['V'].__doc__
                                    if (v.find("str") == 0):
                                        assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": \"" + signals[i]['V'] + "\";\n"
                                    elif(v.find("list") == 0):
                                        assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": {"
                                        for k in range(0, (signals[i]['L'][0][1] + 1)):
                                            if (k == signals[i]['L'][0][1]):
                                                assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\"};\n"
                                            elif (k != signals[i]['L'][0][1]):
                                                assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\", "
                                count0 = count0 + 1
                        elif (signals[i]['T'] == "arri"):
                            if (n.find("str") == 0):
                                assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of integer range " + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ";\n"
                                if (signals[i].has_key('V') == False):
                                    assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ";\n"
                                elif (signals[i].has_key('V') == True):
                                    v = signals[i]['V'].__doc__
                                    if (v.find("str") == 0):
                                        assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": " + str(signals[i]['V']) + ";\n"
                                    elif(v.find("list") == 0):
                                        assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": {"
                                        for k in range(0, (signals[i]['L'][0][1] + 1)):
                                            if (k == signals[i]['L'][0][1]):
                                                assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + "};\n"
                                            # NOTE(review): compares j, not the loop index k --
                                            # looks like a typo; confirm before changing.
                                            elif (j != signals[i]['L'][0][1]):
                                                assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + ", "
                                count0 = count0 + 1
                        elif (signals[i]['T'] == 's'):
                            v = signals[i]['V'].__doc__
                            assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type state_typev" + str(count1) + " is ("
                            if (v.find("str") == 0):
                                assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'] + ");\n"
                            elif (v.find("list") == 0):
                                for k in range(len(signals[i]['V'])):
                                    if (k == (len(signals[i]['V']) - 1)):
                                        assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ");\n"
                                    else:
                                        assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
                            assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "signal " + args[i]['N'] + ": state_typev" + str(count1) + ";\n"
                            count1 = count1 + 1
    # Close the "-- Variables" section and restore the process "begin" that was
    # stripped off when the sensitivity list was found.
    if (len(process_vars) > 0):
        assign_lines[j][1] = assign_lines[j][1] + sp + "-------------------------------------------------------------------"
        assign_lines[j][1] = assign_lines[j][1] + "\n\n" + sp + "begin\n\n"
| 85.866852 | 356 | 0.37351 |
a3ac7f877a13b1b1d1be58575a8e398e8062fac9 | 190 | py | Python | Giraffe/Functions.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | Giraffe/Functions.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | Giraffe/Functions.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | 1 | 2022-01-14T17:12:44.000Z | 2022-01-14T17:12:44.000Z |
say_hi("Mike", "35")
result = cube(4) # variable
print(result)
| 11.875 | 47 | 0.6 |
a3aceb33684c4eb53e7c078943f4c37d7dd1af91 | 4,321 | py | Python | airspace_surgery.py | wipfli/airspaces | c2e01615fa6a065895ed04b8f342a38732e9196b | [
"Apache-2.0"
] | 1 | 2021-12-28T23:40:51.000Z | 2021-12-28T23:40:51.000Z | airspace_surgery.py | wipfli/airspaces | c2e01615fa6a065895ed04b8f342a38732e9196b | [
"Apache-2.0"
] | 1 | 2021-01-30T13:15:14.000Z | 2021-02-07T14:50:27.000Z | airspace_surgery.py | wipfli/aviation | c2e01615fa6a065895ed04b8f342a38732e9196b | [
"Apache-2.0"
] | null | null | null | import glob
import json
path_in = './airspaces/'
path_out = './airspaces_processed/'
filenames = [path.split('/')[-1] for path in glob.glob(path_in + '*')]
remove = {
'france_fr.geojson': [
314327,
314187,
314360,
314359,
314362,
314361,
314364,
314363,
314333,
314329,
314331,
],
'germany_de.geojson': [
307563,
307638,
307639,
307640,
]
}
replacements = {
'france_fr.geojson': [
['Bale10 119.35', 'Bale 10 TMA 130.9'],
['Bale1 119.35', 'Bale 1 TMA 130.9'],
['Bale2 119.35', 'Bale 2 TMA 130.9'],
['Bale3 119.35', 'Bale 3 TMA 130.9'],
['Bale4 119.35', 'Bale 4 TMA 130.9'],
['Bale5 119.35', 'Bale 5 TMA 130.9'],
['Bale5 119.35', 'Bale 5 TMA 130.9'],
['Bale6 119.35', 'Bale 6 TMA 130.9'],
['Bale7 119.35', 'Bale 7 TMA 130.9'],
['Bale8 119.35', 'Bale 8 TMA 130.9'],
['Bale9 119.35', 'Bale 9 TMA 130.9'],
['Bale AZ4T1 134.67', 'Bale T1 TMA HX 134.68'],
['Bale AZ4T2 134.67', 'Bale T2 TMA HX 134.68'],
['Bale AZ4T3 134.67', 'Bale T3 TMA HX 134.68'],
['CTR BALE', 'Bale CTR 118.3']
],
'switzerland_ch.geojson': [
['ZURICH 10 TMA 118.1', 'ZURICH 10 TMA 124.7'],
['ZURICH 11 TMA 118.1', 'ZURICH 11 TMA 124.7'],
['ZURICH 12 TMA 118.1', 'ZURICH 12 TMA 124.7'],
['ZURICH 13 TMA 118.1', 'ZURICH 13 TMA 124.7'],
['ZURICH 14 TMA 118.1', 'ZURICH 14 TMA HX 127.755'],
['ZURICH 15 TMA 118.1', 'ZURICH 15 TMA HX 127.755'],
['ZURICH 1 TMA 118.1', 'ZURICH 1 TMA 124.7'],
['ZURICH 2 CTR 118.1', 'ZURICH 2 CTR HX 118.975'],
['ZURICH 2 TMA 118.1', 'ZURICH 2 TMA 124.7'],
['ZURICH 3 TMA 118.1', 'ZURICH 3 TMA 124.7'],
['ZURICH 4A TMA 118.1', 'ZURICH 4A TMA 124.7'],
['ZURICH 4B TMA 118.1', 'ZURICH 4B TMA 124.7'],
['ZURICH 4C TMA 118.1', 'ZURICH 4C TMA 124.7'],
['ZURICH 5 TMA 118.1', 'ZURICH 5 TMA 124.7'],
['ZURICH 6 TMA 118.1', 'ZURICH 6 TMA 124.7'],
['ZURICH 7 TMA 118.1', 'ZURICH 7 TMA 124.7'],
['ZURICH 8 TMA 118.1', 'ZURICH 8 TMA 124.7'],
['ZURICH 9 TMA 118.1', 'ZURICH 9 TMA 124.7'],
['BERN 1 TMA 121.025', 'BERN 1 TMA HX 127.325'],
['BERN 2 TMA 121.025', 'BERN 2 TMA HX 127.325'],
['BERN CTR 121.025', 'BERN CTR HX 121.025'],
['EMMEN 1 CTR 120.425', 'EMMEN 1 CTR HX 120.425'],
['EMMEN 1 TMA 120.425', 'EMMEN 1 TMA HX 134.130'],
['EMMEN 2 CTR 120.425', 'EMMEN 2 CTR HX 120.425'],
['EMMEN 2 TMA 120.425', 'EMMEN 2 TMA HX 134.130'],
['EMMEN 3 TMA 120.425', 'EMMEN 3 TMA HX 134.130'],
['EMMEN 4 TMA 120.425', 'EMMEN 4 TMA HX 134.130'],
['EMMEN 5 TMA 120.425', 'EMMEN 5 TMA HX 134.130'],
['EMMEN 6 TMA 120.425', 'EMMEN 6 TMA HX 134.130'],
]
}
for filename in filenames:
print(filename)
with open(path_in + filename) as f:
data = json.load(f)
if filename in replacements:
targets = [r[0] for r in replacements[filename]]
for feature in data['features']:
if feature['properties']['N'] in targets:
print('replace ' + feature['properties']['N'] + '...')
feature['properties']['N'] = next(x for x in replacements[filename] if x[0] == feature['properties']['N'])[1]
if filename in remove:
features_out = [f for f in data['features'] if int(f['properties']['ID']) not in remove[filename]]
else:
features_out = data['features']
print('removed ' + str(len(data['features']) - len(features_out)) + ' features')
geojson = {
'type': 'FeatureCollection',
'features': features_out
}
print('write ' + filename + '...')
with open(path_out + filename, 'w') as f:
json.dump(geojson, f)
all_features = []
for filename in filenames:
print('read ' + filename + '...')
with open(path_out + filename) as f:
all_features += json.load(f)['features']
print('write airspaces.geojson...')
with open('airspaces.geojson', 'w') as f:
json.dump({
'type': 'FeatureCollection',
'features': all_features
}, f)
print('done')
| 34.023622 | 125 | 0.532053 |
a3ad80bfdfa53d706abcbf25b9e00b65302a112a | 1,480 | py | Python | AndroidSpider/spider_main.py | lidenghong1/SmallReptileTraining | a1bfb81c9969edfb7554acc50370c0cb036da690 | [
"MIT"
] | 1 | 2018-05-10T01:52:37.000Z | 2018-05-10T01:52:37.000Z | AndroidSpider/spider_main.py | lidenghong1/SmallReptileTraining | a1bfb81c9969edfb7554acc50370c0cb036da690 | [
"MIT"
] | null | null | null | AndroidSpider/spider_main.py | lidenghong1/SmallReptileTraining | a1bfb81c9969edfb7554acc50370c0cb036da690 | [
"MIT"
] | null | null | null | from AndroidSpider import url_manager, html_downloader, html_parser, html_output
'''
Android HTML tab
Extra module:
BeautifulSoup
'''
if __name__ == "__main__":
rootUrl = "http://baike.baidu.com/item/Android"
objSpider = SpiderMain()
objSpider.craw(rootUrl)
| 36.097561 | 141 | 0.597297 |
a3ae0fed36bd78447d3c9b110c995da7eb0ec44e | 517 | py | Python | trompace/mutations/__init__.py | trompamusic/ce-queries-template | cc5ae69d0e76623bfd72e9453f569f6624bf7c3b | [
"Apache-2.0"
] | 1 | 2020-06-18T15:43:18.000Z | 2020-06-18T15:43:18.000Z | trompace/mutations/__init__.py | trompamusic/ce-queries-template | cc5ae69d0e76623bfd72e9453f569f6624bf7c3b | [
"Apache-2.0"
] | 60 | 2019-12-17T11:08:28.000Z | 2021-03-02T16:19:41.000Z | trompace/mutations/__init__.py | trompamusic/trompace-client | cc5ae69d0e76623bfd72e9453f569f6624bf7c3b | [
"Apache-2.0"
] | null | null | null | MUTATION = '''mutation {{
{mutation}
}}'''
def _verify_additional_type(additionaltype):
"""Check that the input to additionaltype is a list of strings.
If it is empty, raise ValueError
If it is a string, convert it to a list of strings."""
if additionaltype is None:
return None
if isinstance(additionaltype, str):
additionaltype = [additionaltype]
if len(additionaltype) == 0:
raise ValueError("additionaltype must be a non-empty list")
return additionaltype
| 28.722222 | 67 | 0.68472 |
a3ae4f1aada9f0b92aa00f9f17807bd4f8c072c1 | 951 | py | Python | Web_App/infrastructure/infra.py | CapitalOneDevExchangeHackathon/Financial-Fitness | 54a2203d6b3d96687d822247b040613b644874f2 | [
"MIT"
] | null | null | null | Web_App/infrastructure/infra.py | CapitalOneDevExchangeHackathon/Financial-Fitness | 54a2203d6b3d96687d822247b040613b644874f2 | [
"MIT"
] | null | null | null | Web_App/infrastructure/infra.py | CapitalOneDevExchangeHackathon/Financial-Fitness | 54a2203d6b3d96687d822247b040613b644874f2 | [
"MIT"
] | null | null | null | import boto
import boto3
from config import Config
dynamodb = boto3.resource('dynamodb',
aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY,
region_name=Config.REGION)
table = dynamodb.Table('user_details')
tables = boto3.resource('dynamodb', aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY, region_name=Config.REGION).Table('user_details')
print(tables.creation_date_time)
if __name__ == "__main__":
main()
| 22.116279 | 119 | 0.589905 |
a3b0b5f68e1084bc860c329219fb7ebd7ec06dcc | 70 | py | Python | numberTheory/natural.py | ndarwin314/symbolicPy | ce2e48bf1557b5995db6c324ada9fbd4767df1e3 | [
"MIT"
] | null | null | null | numberTheory/natural.py | ndarwin314/symbolicPy | ce2e48bf1557b5995db6c324ada9fbd4767df1e3 | [
"MIT"
] | null | null | null | numberTheory/natural.py | ndarwin314/symbolicPy | ce2e48bf1557b5995db6c324ada9fbd4767df1e3 | [
"MIT"
] | null | null | null | # TODO: implement algorithms in c++ or something to make them fast
| 23.333333 | 67 | 0.728571 |
a3b0debd51a02674a2485fcb5fa43dc82bc97eff | 2,751 | py | Python | SelfTests.py | TeaPackCZ/RobotZed | 7ac8bfb14a6c2e5887f8fed299ad87b384701c54 | [
"MIT"
] | null | null | null | SelfTests.py | TeaPackCZ/RobotZed | 7ac8bfb14a6c2e5887f8fed299ad87b384701c54 | [
"MIT"
] | null | null | null | SelfTests.py | TeaPackCZ/RobotZed | 7ac8bfb14a6c2e5887f8fed299ad87b384701c54 | [
"MIT"
] | null | null | null | import os
import unittest
from Logger import Logger
from gpsNavigation import gpsModule,gpsPoint
if __name__ == '__main__':
unittest.main()
| 31.988372 | 66 | 0.624137 |
a3b19235edf240100e043436d336caa4a2f88321 | 1,986 | py | Python | manga_py/parser.py | Abijithkrishna/manga-py | 03b142ecb944ef37a36e5095ffa580209021e3b0 | [
"MIT"
] | null | null | null | manga_py/parser.py | Abijithkrishna/manga-py | 03b142ecb944ef37a36e5095ffa580209021e3b0 | [
"MIT"
] | null | null | null | manga_py/parser.py | Abijithkrishna/manga-py | 03b142ecb944ef37a36e5095ffa580209021e3b0 | [
"MIT"
] | null | null | null | from logging import warning
from requests import get
from .info import Info
from .provider import Provider
from .providers import get_provider
| 29.205882 | 74 | 0.618832 |
a3b256695d6b1472ade6817590ffa769163e8848 | 487 | py | Python | src/villages/migrations/0008_auto_20161228_2209.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/villages/migrations/0008_auto_20161228_2209.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/villages/migrations/0008_auto_20161228_2209.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-28 22:09
from django.db import migrations, models
import django.db.models.deletion
| 22.136364 | 98 | 0.620123 |
a3b29bffdf2e36c45f804f1c4fc3a56bbdcb9b59 | 1,127 | py | Python | customers/views.py | sindhumadhadi09/CustomerMgmt | db8b27ad6ceb8050843dc33509dc2b6c2ed2c1e2 | [
"MIT"
] | null | null | null | customers/views.py | sindhumadhadi09/CustomerMgmt | db8b27ad6ceb8050843dc33509dc2b6c2ed2c1e2 | [
"MIT"
] | null | null | null | customers/views.py | sindhumadhadi09/CustomerMgmt | db8b27ad6ceb8050843dc33509dc2b6c2ed2c1e2 | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Customer
def add_customer(request):
customer = Customer()
customer.customer_firstname = request.POST['fname']
customer.customer_lastname = request.POST['lname']
customer.customer_address = request.POST['address']
customer.customer_city = request.POST['city']
customer.customer_zipcode = request.POST['zip']
customer.customer_state = request.POST['state']
customer.save()
return HttpResponseRedirect(reverse('customers:index'))
def delete_customer(request, customer_id):
p = Customer.objects.get(pk=customer_id)
p.delete()
return HttpResponseRedirect(reverse('customers:index')) | 34.151515 | 59 | 0.759539 |
a3b2f8e0ee7fa10fe388b6e668b6e1e8224ddcfe | 1,531 | py | Python | salt/ext/tornado/test/import_test.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | salt/ext/tornado/test/import_test.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | salt/ext/tornado/test/import_test.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | # flake8: noqa
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from salt.ext.tornado.test.util import unittest
| 31.244898 | 73 | 0.666884 |
a3b315d5551d6efa8a8b5d2f47e368467747b831 | 3,512 | py | Python | butterfree/configs/db/metastore_config.py | fossabot/butterfree | 8a7da8c540b51c6560b2825cb926c40a351f202b | [
"Apache-2.0"
] | null | null | null | butterfree/configs/db/metastore_config.py | fossabot/butterfree | 8a7da8c540b51c6560b2825cb926c40a351f202b | [
"Apache-2.0"
] | null | null | null | butterfree/configs/db/metastore_config.py | fossabot/butterfree | 8a7da8c540b51c6560b2825cb926c40a351f202b | [
"Apache-2.0"
] | null | null | null | """Holds configurations to read and write with Spark to AWS S3."""
import os
from typing import Any, Dict, List, Optional
from pyspark.sql import DataFrame
from butterfree.configs import environment
from butterfree.configs.db import AbstractWriteConfig
from butterfree.dataframe_service import extract_partition_values
def get_path_with_partitions(self, key: str, dataframe: DataFrame) -> List:
"""Get options for AWS S3 from partitioned parquet file.
Options will be a dictionary with the write and read configuration for
Spark to AWS S3.
Args:
key: path to save data into AWS S3 bucket.
dataframe: spark dataframe containing data from a feature set.
Returns:
A list of string for file-system backed data sources.
"""
path_list = []
dataframe_values = extract_partition_values(
dataframe, partition_columns=["year", "month", "day"]
)
for row in dataframe_values:
path_list.append(
f"{self.file_system}://{self.path}/{key}/year={row['year']}/"
f"month={row['month']}/day={row['day']}"
)
return path_list
def translate(self, schema: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Translate feature set spark schema to the corresponding database."""
pass
| 29.024793 | 82 | 0.611902 |
a3b384657bc7cd2ab9ee0a1d8b09ee80039ad894 | 2,401 | py | Python | examples/2-objects.py | johanngan/special_relativity | cd372c7460d2c0d4040c81bc1bd0090086dba735 | [
"MIT"
] | 4 | 2020-08-19T04:56:40.000Z | 2022-02-07T22:09:45.000Z | examples/2-objects.py | johanngan/special_relativity | cd372c7460d2c0d4040c81bc1bd0090086dba735 | [
"MIT"
] | null | null | null | examples/2-objects.py | johanngan/special_relativity | cd372c7460d2c0d4040c81bc1bd0090086dba735 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
sys.path.append('..')
import specrel.geom as geom
import specrel.spacetime.physical as phy
import specrel.visualize as vis
# Shared parameters
include_grid = True
include_legend = True
tlim = (0, 2)
xlim = (-2, 2)
# A stationary point object
stationary = phy.MovingObject(0, draw_options={'label': '$v = 0$'})
## Alternate:
# direction = (1, 0)
# point = (0, 0)
# stationary = geom.Line(direction, point, draw_options={'label': '$v = 0$'})
title='Stationary object'
p = vis.stplot(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
p.save('2-objects_stationary_point.png')
p.show()
# A stationary point object, animated
anim = vis.stanimate(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
anim.save('2-objects_stationary_point_anim.mp4')
anim.show()
# A stationary point object, animated with worldline
anim = vis.stanimate_with_worldline(stationary, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper right')
anim.save('2-objects_stationary_point_anim_worldline.mp4')
anim.show()
# A bunch of moving point objects, animated
moving = phy.MovingObject(0, velocity=1/2,
draw_options={'color': 'red', 'label': '$v = c/2$'})
light = phy.MovingObject(0, velocity=1,
draw_options={'color': 'gold', 'label': '$v = c$'})
ftl = phy.MovingObject(0, velocity=3/2,
draw_options={'color': 'cyan', 'label': '$v = 3c/2$'})
objects = geom.Collection([stationary, moving, light, ftl])
title = 'Various objects'
anim = vis.stanimate_with_worldline(objects, title=title,
current_time_color='magenta', tlim=tlim, xlim=xlim, grid=include_grid,
legend=include_legend, legend_loc='upper left')
anim.save('2-objects_moving_points.mp4')
anim.show()
# A moving meterstick
meterstick = phy.MovingObject(-1/2, length=1, velocity=1/2,
draw_options={'label': 'Meterstick'})
# # Alternate:
# direction = (1, 1/2)
# left = geom.Line(direction, (0, -1/2))
# right = geom.Line(direction, (0, 1/2))
# meterstick = geom.Ribbon(left, right, draw_options={'label': 'Meterstick'})
title = 'Moving meterstick ($v = c/2$)'
anim = vis.stanimate_with_worldline(meterstick, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper left')
anim.save('2-objects_moving_meterstick.mp4')
anim.show()
| 34.797101 | 77 | 0.7197 |
a3b459175d9e5a84e03ca2cd0f4e7e7f14be6f69 | 3,101 | py | Python | firmware/modulator.py | mfkiwl/OpenXcvr | 9bea6efd03cd246f16982f0fadafed684ac5ce1c | [
"MIT"
] | 14 | 2020-02-16T15:36:31.000Z | 2022-03-27T02:24:40.000Z | firmware/modulator.py | mfkiwl/OpenXcvr | 9bea6efd03cd246f16982f0fadafed684ac5ce1c | [
"MIT"
] | 1 | 2020-11-23T16:16:33.000Z | 2020-11-23T16:16:33.000Z | firmware/modulator.py | mfkiwl/OpenXcvr | 9bea6efd03cd246f16982f0fadafed684ac5ce1c | [
"MIT"
] | 4 | 2021-03-29T16:55:03.000Z | 2022-01-23T16:43:59.000Z | from baremetal import *
from math import pi, sin, cos
import sys
from scale import scale
from settings import *
from ssb import ssb_polar
import numpy as np
from matplotlib import pyplot as plt
if __name__ == "__main__" and "sim" in sys.argv:
#mode am stim am
stimulus=(
np.sin(np.arange(1000)*2.0*pi*0.02)*1023+
np.sin(np.arange(1000)*2.0*pi*0.03)*1023
)
#test_modulator(stimulus, FM)
#test_modulator(stimulus, FM)
#test_modulator(stimulus, NBFM)
test_modulator(stimulus, USB)
| 29.533333 | 117 | 0.633022 |
a3b4e8143896f099b74b0a3738681f49e357493f | 4,049 | py | Python | tests/sentry/auth/test_helper.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/auth/test_helper.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/auth/test_helper.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from six.moves.urllib.parse import urlencode
from django.test import RequestFactory
from django.contrib.auth.models import AnonymousUser
from sentry.auth.helper import handle_new_user
from sentry.models import AuthProvider, InviteStatus, OrganizationMember
from sentry.testutils import TestCase
from sentry.utils.compat import mock
| 39.31068 | 100 | 0.674241 |
a3b4f00010ceb5e0331d09eb4a19ef587eba8526 | 348 | py | Python | groundstation/broadcast_events/__init__.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | 26 | 2015-06-18T20:17:07.000Z | 2019-09-26T09:55:35.000Z | groundstation/broadcast_events/__init__.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | null | null | null | groundstation/broadcast_events/__init__.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | 5 | 2015-07-20T01:52:47.000Z | 2017-01-08T09:54:07.000Z | from broadcast_ping import BroadcastPing
EVENT_TYPES = {
"PING": BroadcastPing,
}
| 23.2 | 47 | 0.732759 |
a3b55358fffe0e7cc61738673a1b1895170d48c3 | 9,891 | py | Python | mbta_python/__init__.py | dougzor/mbta_python | f277f48f8bf8048cb5c9c6307e672c37292e57f7 | [
"MIT"
] | null | null | null | mbta_python/__init__.py | dougzor/mbta_python | f277f48f8bf8048cb5c9c6307e672c37292e57f7 | [
"MIT"
] | null | null | null | mbta_python/__init__.py | dougzor/mbta_python | f277f48f8bf8048cb5c9c6307e672c37292e57f7 | [
"MIT"
] | null | null | null | import datetime
import requests
from mbta_python.models import Stop, Direction, Schedule, Mode, \
TripSchedule, Alert, StopWithMode, Prediction
HOST = "http://realtime.mbta.com/developer/api/v2"
| 37.324528 | 83 | 0.586897 |
a3b57d8c1a4088165ce4f67e6fb27850615f9653 | 4,583 | py | Python | density_model_torch_custom.py | piotrwinkler/breast_density_classifier | 4d47dd98bb0a839cea8b9aef242f5af5db84f06f | [
"BSD-2-Clause"
] | null | null | null | density_model_torch_custom.py | piotrwinkler/breast_density_classifier | 4d47dd98bb0a839cea8b9aef242f5af5db84f06f | [
"BSD-2-Clause"
] | null | null | null | density_model_torch_custom.py | piotrwinkler/breast_density_classifier | 4d47dd98bb0a839cea8b9aef242f5af5db84f06f | [
"BSD-2-Clause"
] | null | null | null | import argparse
import glob
import os
import numpy as np
import torch
from sklearn.metrics import accuracy_score
import models_torch as models
import utils
EXPERIMENT_DATA_DIR = "/tmp/mgr"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run Inference')
parser.add_argument('model_type')
parser.add_argument('--bins-histogram', default=50)
parser.add_argument('--model-path', default=None)
parser.add_argument('--device-type', default="cpu")
# parser.add_argument('--image-path', default="images/")
args = parser.parse_args()
parameters_ = {
"model_type": args.model_type,
"bins_histogram": args.bins_histogram,
"model_path": args.model_path,
"device_type": args.device_type,
# "image_path": args.image_path,
}
if parameters_["model_path"] is None:
if args.model_type == "histogram":
parameters_["model_path"] = "saved_models/BreastDensity_BaselineHistogramModel/model.p"
if args.model_type == "cnn":
parameters_["model_path"] = "saved_models/BreastDensity_BaselineBreastModel/model.p"
predicted_values = []
real_values = []
predicted_values_two_classes = []
real_values_two_classes = []
two_classes_mapping = {1: 0, 2: 0, 3: 1, 4: 1}
for dir in glob.glob(f"{EXPERIMENT_DATA_DIR}/*/"):
parameters_["image_path"] = dir
predicted_density = inference(parameters_)
with open(os.path.join(dir, "density.txt")) as file:
real_density = int(file.read())
print(f"Predicted density: {predicted_density}")
print(f"Real density: {real_density}\n")
print(f"Predicted density (2 cls): {two_classes_mapping[predicted_density]}")
print(f"Real density (2 cls): {two_classes_mapping[real_density]}\n")
predicted_values.append(predicted_density)
real_values.append(real_density)
predicted_values_two_classes.append(two_classes_mapping[predicted_density])
real_values_two_classes.append(two_classes_mapping[real_density])
print(f"Total accuracy: {accuracy_score(real_values, predicted_values)}")
print(f"Total accuracy two classes: {accuracy_score(real_values_two_classes, predicted_values_two_classes)}")
"""
python density_model_torch_custom.py histogram
python density_model_torch_custom.py cnn
"""
| 37.565574 | 113 | 0.669212 |
a3b664d11a53af7fe489af747c1768858a1613a2 | 4,878 | py | Python | esmvaltool/diag_scripts/ensclus/ens_anom.py | yifatdzigan/ESMValTool | 83320b0e0b24ddde965599961bb80428e180a731 | [
"Apache-2.0"
] | 148 | 2017-02-07T13:16:03.000Z | 2022-03-26T02:21:56.000Z | esmvaltool/diag_scripts/ensclus/ens_anom.py | yifatdzigan/ESMValTool | 83320b0e0b24ddde965599961bb80428e180a731 | [
"Apache-2.0"
] | 2,026 | 2017-02-03T12:57:13.000Z | 2022-03-31T15:11:51.000Z | esmvaltool/diag_scripts/ensclus/ens_anom.py | yifatdzigan/ESMValTool | 83320b0e0b24ddde965599961bb80428e180a731 | [
"Apache-2.0"
] | 113 | 2017-01-27T13:10:19.000Z | 2022-02-03T13:42:11.000Z | """Computation of ensemble anomalies based on a desired value."""
import os
import numpy as np
from scipy import stats
# User-defined packages
from read_netcdf import read_iris, save_n_2d_fields
from sel_season_area import sel_area, sel_season
def ens_anom(filenames, dir_output, name_outputs, varname, numens, season,
area, extreme):
"""Ensemble anomalies.
Computation of the ensemble anomalies based on the desired value
from the input variable (it can be the percentile, mean, maximum, standard
deviation or trend)
OUTPUT: NetCDF files of ensemble mean of climatology, selected value and
anomaly maps.
"""
print('The name of the output files will be <variable>_{0}.txt'
.format(name_outputs))
print('Number of ensemble members: {0}'.format(numens))
outfiles = []
# Reading the netCDF file of 3Dfield, for all the ensemble members
var_ens = []
for ens in range(numens):
ifile = filenames[ens]
# print('ENSEMBLE MEMBER %s' %ens)
var, varunits, lat, lon, dates, _ = read_iris(ifile)
# Convertion from kg m-2 s-1 to mm/day
if varunits == 'kg m-2 s-1':
var = var * 86400 # there are 86400 seconds in a day
varunits = 'mm/day'
# Selecting a season (DJF,DJFM,NDJFM,JJA)
var_season, _ = sel_season(var, dates, season)
# Selecting only [latS-latN, lonW-lonE] box region
var_area, lat_area, lon_area = sel_area(lat, lon, var_season, area)
var_ens.append(var_area)
if varunits == 'kg m-2 s-1':
print('\nPrecipitation rate units were converted from kg m-2 s-1 '
'to mm/day')
print('The variable is {0} ({1})'.format(varname, varunits))
print('Original var shape: (time x lat x lon)={0}'.format(var.shape))
print('var shape after selecting season {0} and area {1}: '
'(time x lat x lon)={2}'.format(season, area, var_area.shape))
if extreme == 'mean':
# Compute the time mean over the entire period, for each ens member
varextreme_ens = [np.nanmean(var_ens[i], axis=0)
for i in range(numens)]
elif len(extreme.split("_")) == 2:
# Compute the chosen percentile over the period, for each ens member
quant = int(extreme.partition("th")[0])
varextreme_ens = [np.nanpercentile(var_ens[i], quant, axis=0)
for i in range(numens)]
elif extreme == 'maximum':
# Compute the maximum value over the period, for each ensemble member
varextreme_ens = [np.nanmax(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'std':
# Compute the standard deviation over the period, for each ens member
varextreme_ens = [np.nanstd(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'trend':
# Compute the linear trend over the period, for each ensemble member
trendmap = np.empty((var_ens[0].shape[1], var_ens[0].shape[2]))
trendmap_ens = []
for i in range(numens):
for jla in range(var_ens[0].shape[1]):
for jlo in range(var_ens[0].shape[2]):
slope, _, _, _, _ = \
stats.linregress(range(var_ens[0].shape[0]),
var_ens[i][:, jla, jlo])
trendmap[jla, jlo] = slope
trendmap_ens.append(trendmap.copy())
varextreme_ens = trendmap_ens
varextreme_ens_np = np.array(varextreme_ens)
print('Anomalies are computed with respect to the {0}'.format(extreme))
# Compute and save the anomalies with respect to the ensemble
ens_anomalies = varextreme_ens_np - np.nanmean(varextreme_ens_np, axis=0)
varsave = 'ens_anomalies'
ofile = os.path.join(dir_output, 'ens_anomalies_{0}.nc'
.format(name_outputs))
# print(ofile)
print('ens_anomalies shape: (numens x lat x lon)={0}'
.format(ens_anomalies.shape))
save_n_2d_fields(lat_area, lon_area, ens_anomalies, varsave,
varunits, ofile)
outfiles.append(ofile)
# Compute and save the climatology
vartimemean_ens = [np.mean(var_ens[i], axis=0) for i in range(numens)]
ens_climatologies = np.array(vartimemean_ens)
varsave = 'ens_climatologies'
ofile = os.path.join(dir_output, 'ens_climatologies_{0}.nc'
.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_climatologies, varsave,
varunits, ofile)
outfiles.append(ofile)
ens_extreme = varextreme_ens_np
varsave = 'ens_extreme'
ofile = os.path.join(dir_output, 'ens_extreme_{0}.nc'.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_extreme, varsave,
varunits, ofile)
outfiles.append(ofile)
return outfiles
| 40.65 | 79 | 0.630381 |
a3b714ec9b000678e3e81df98484d9da903f0406 | 24,074 | py | Python | pytition/petition/models.py | Te-k/Pytition | 16ebce01b491b72ed387709d9b705f7cb0d5476f | [
"BSD-3-Clause"
] | null | null | null | pytition/petition/models.py | Te-k/Pytition | 16ebce01b491b72ed387709d9b705f7cb0d5476f | [
"BSD-3-Clause"
] | null | null | null | pytition/petition/models.py | Te-k/Pytition | 16ebce01b491b72ed387709d9b705f7cb0d5476f | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.utils.html import mark_safe, strip_tags
from django.utils.text import slugify
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.exceptions import ValidationError
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.hashers import get_hasher
from django.db import transaction
from django.urls import reverse
from django.db.models import Q
from tinymce import models as tinymce_models
from colorfield.fields import ColorField
import html
class Permission(models.Model):
organization = models.ForeignKey(Organization, on_delete=models.CASCADE,
verbose_name=ugettext_lazy("Organization related to these permissions"))
can_add_members = models.BooleanField(default=False)
can_remove_members = models.BooleanField(default=False)
can_create_petitions = models.BooleanField(default=False)
can_modify_petitions = models.BooleanField(default=False)
can_delete_petitions = models.BooleanField(default=False)
can_create_templates = models.BooleanField(default=False)
can_modify_templates = models.BooleanField(default=False)
can_delete_templates = models.BooleanField(default=False)
can_view_signatures = models.BooleanField(default=False)
can_modify_signatures = models.BooleanField(default=False)
can_delete_signatures = models.BooleanField(default=False)
can_modify_permissions = models.BooleanField(default=False)
class PytitionUser(models.Model):
petitions = models.ManyToManyField(Petition, blank=True)
organizations = models.ManyToManyField(Organization, related_name="members", blank=True)
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="pytitionuser")
permissions = models.ManyToManyField(Permission, related_name="user", blank=True)
invitations = models.ManyToManyField(Organization, related_name="invited", blank=True)
petition_templates = models.ManyToManyField(PetitionTemplate, blank=True, through='TemplateOwnership',
through_fields=['user', 'template'],
verbose_name=ugettext_lazy("Petition templates"))
default_template = models.ForeignKey(PetitionTemplate, blank=True, null=True, related_name='+',
verbose_name=ugettext_lazy("Default petition template"), to_field='id',
on_delete=models.SET_NULL)
def __str__(self):
return self.get_full_name
def __repr__(self):
return self.get_full_name
class TemplateOwnership(models.Model):
user = models.ForeignKey(PytitionUser, blank=True, null=True, on_delete=models.CASCADE)
organization = models.ForeignKey(Organization, blank=True, null=True, on_delete=models.CASCADE)
template = models.ForeignKey(PetitionTemplate, to_field='id', on_delete=models.CASCADE)
#class Meta:
# unique_together = (("user", "template"), ("organization", "template"))
| 40.734349 | 124 | 0.662748 |
a3b72847ef50516acce4d8d4114c3432f306c66d | 4,026 | py | Python | bin/socialhistory.py | JohnShullTopDev/generating-traning-data-for-healthcare-machine-learningcare- | d0ffb26e1b99204a796df905b50c8caf01417f69 | [
"Apache-2.0"
] | 1 | 2019-11-11T11:21:08.000Z | 2019-11-11T11:21:08.000Z | bin/socialhistory.py | JohnShullTopDev/generating-traning-data-for-healthcare-machine-learningcare- | d0ffb26e1b99204a796df905b50c8caf01417f69 | [
"Apache-2.0"
] | null | null | null | bin/socialhistory.py | JohnShullTopDev/generating-traning-data-for-healthcare-machine-learningcare- | d0ffb26e1b99204a796df905b50c8caf01417f69 | [
"Apache-2.0"
] | 1 | 2020-01-28T03:48:14.000Z | 2020-01-28T03:48:14.000Z | import csv
from testdata import SOCIALHISTORY_FILE
from testdata import rndDate
from patient import Patient
SMOKINGCODES = {
'428041000124106': 'Current some day smoker',
'266919005' : 'Never smoker',
'449868002' : 'Current every day smoker',
'266927001' : 'Unknown if ever smoked',
'8517006' : 'Former smoker'
}
| 35.946429 | 92 | 0.435171 |
a3b8b5beaa0f8d8ecd98462fe75b978547dc1472 | 4,248 | py | Python | Python X/Dictionaries in python.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | [
"MIT"
] | null | null | null | Python X/Dictionaries in python.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | [
"MIT"
] | null | null | null | Python X/Dictionaries in python.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | [
"MIT"
] | null | null | null | {
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# dictionaries, look-up tables & key-value pairs\n",
"# d = {} OR d = dict()\n",
"#e.g. d = {\"George\": 24, \"Tom\": 32}\n",
"\n",
"d = {}\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"d[\"George\"] = 24"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"d[\"Tom\"] = 32\n",
"d[\"Jenny\"] = 16"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'George': 24, 'Tom': 32, 'Jenny': 16}\n"
]
}
],
"source": [
"print(d)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'Jenny' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-0bdfff196d23>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mJenny\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'Jenny' is not defined"
]
}
],
"source": [
"print(d[Jenny])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"32\n"
]
}
],
"source": [
"print(d[\"Tom\"])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"d[\"Jenny\"] = 20"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"20\n"
]
}
],
"source": [
"print(d[\"Jenny\"])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# keys are strings or numbers \n",
"\n",
"d[10] = 100"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"100\n"
]
}
],
"source": [
"print(d[10])"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# how to iterate over key-value pairs"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"key:\n",
"George\n",
"value:\n",
"24\n",
"\n",
"key:\n",
"Tom\n",
"value:\n",
"32\n",
"\n",
"key:\n",
"Jenny\n",
"value:\n",
"20\n",
"\n",
"key:\n",
"10\n",
"value:\n",
"100\n",
"\n"
]
}
],
"source": [
" for key, value in d.items():\n",
" print(\"key:\")\n",
" print(key)\n",
" print(\"value:\")\n",
" print(value)\n",
" print(\"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 18.88 | 354 | 0.439266 |
a3b9cafed89d7582e18fd4f82c78858c2882f5b3 | 1,453 | py | Python | lib/spack/spack/test/cache_fetch.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | lib/spack/spack/test/cache_fetch.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | lib/spack/spack/test/cache_fetch.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
from llnl.util.filesystem import mkdirp, touch
import spack.config
from spack.fetch_strategy import CacheURLFetchStrategy, NoCacheError
from spack.stage import Stage
| 35.439024 | 76 | 0.705437 |
a3bac2f51025032288427c9fc39e3497207cc25d | 2,201 | py | Python | temp_range_sql.py | hanhanwu/Hanhan-Spark-Python | a04c33100742acffa2ad11d1937ea05c44688427 | [
"MIT"
] | 45 | 2016-03-18T07:57:53.000Z | 2022-03-20T07:14:15.000Z | temp_range_sql.py | hanhanwu/Hanhan-Spark-Python | a04c33100742acffa2ad11d1937ea05c44688427 | [
"MIT"
] | null | null | null | temp_range_sql.py | hanhanwu/Hanhan-Spark-Python | a04c33100742acffa2ad11d1937ea05c44688427 | [
"MIT"
] | 16 | 2016-07-07T16:47:46.000Z | 2020-05-04T17:38:40.000Z | __author__ = 'hanhanw'
import sys
from pyspark import SparkConf, SparkContext
from pyspark.sql.context import SQLContext
from pyspark.sql.types import StructType, StructField, StringType, DoubleType
conf = SparkConf().setAppName("temp range sql")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
assert sc.version >= '1.5.1'
inputs1 = sys.argv[1]
output = sys.argv[2]
if __name__ == "__main__":
main()
| 31.898551 | 117 | 0.698319 |
a3bafb776906d3ce50f018766ee8f4cea08b123b | 1,059 | py | Python | container/pyf/graphqltypes/Event.py | Pompino/react-components-23KB | 3201a417c5160e1b77f29fc1eac74ae9dc10d6ad | [
"MIT"
] | 2 | 2021-10-30T18:18:33.000Z | 2021-12-01T10:21:28.000Z | container/pyf/graphqltypes/Event.py | Pompino/react-components-23KB | 3201a417c5160e1b77f29fc1eac74ae9dc10d6ad | [
"MIT"
] | null | null | null | container/pyf/graphqltypes/Event.py | Pompino/react-components-23KB | 3201a417c5160e1b77f29fc1eac74ae9dc10d6ad | [
"MIT"
] | null | null | null | from typing_extensions import Required
#from sqlalchemy.sql.sqltypes import Boolean
from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int
from models.EventsRelated.EventModel import EventModel
from graphqltypes.Utils import extractSession
| 32.090909 | 90 | 0.700661 |
a3bca9436abafd191ec47379ebb1db10a4043237 | 11,326 | py | Python | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 3 | 2018-01-29T14:16:02.000Z | 2019-02-05T21:33:05.000Z | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 4 | 2021-03-11T04:02:00.000Z | 2022-03-27T08:31:56.000Z | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2 | 2019-12-05T17:24:36.000Z | 2021-11-22T21:21:32.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.styles.colors import Color, BLACK, WHITE
from openpyxl.utils.units import (
pixels_to_EMU,
EMU_to_pixels,
short_color,
)
from openpyxl.compat import deprecated
from openpyxl.xml.functions import Element, SubElement, tostring
from openpyxl.xml.constants import (
DRAWING_NS,
SHEET_DRAWING_NS,
CHART_NS,
CHART_DRAWING_NS,
PKG_REL_NS
)
from openpyxl.compat.strings import safe_string
| 27.160671 | 113 | 0.607099 |
a3bd2daadf5e4d9e5163b4a0fc7578b8fb655779 | 3,118 | py | Python | scripts/VCF/FILTER/subset_vcf.py | elowy01/igsr_analysis | ffea4885227c2299f886a4f41e70b6e1f6bb43da | [
"Apache-2.0"
] | 3 | 2018-04-20T15:04:34.000Z | 2022-03-30T06:36:02.000Z | scripts/VCF/FILTER/subset_vcf.py | elowy01/igsr_analysis | ffea4885227c2299f886a4f41e70b6e1f6bb43da | [
"Apache-2.0"
] | 7 | 2019-06-06T09:22:20.000Z | 2021-11-23T17:41:52.000Z | scripts/VCF/FILTER/subset_vcf.py | elowy01/igsr_analysis | ffea4885227c2299f886a4f41e70b6e1f6bb43da | [
"Apache-2.0"
] | 5 | 2017-11-02T11:17:35.000Z | 2021-12-11T19:34:09.000Z |
from VcfQC import VcfQC
from ReseqTrackDB import File
from ReseqTrackDB import ReseqTrackDB
import argparse
import os
import logging
import datetime
#get command line arguments
parser = argparse.ArgumentParser(description='Script to subset a VCF by excluding the variants within the regions defined by a BED file')
'''
Reseqtrack DB connection parameters
'''
parser.add_argument('--hostname', type=str, required=True, help='Hostname for ReseqTrack DB' )
parser.add_argument('--username', type=str, required=True, help='User for ReseqTrack DB' )
parser.add_argument('--port', type=int, required=True, help='Port number in the ReseqTrack DB' )
parser.add_argument('--pwd', type=str, help='PWD for the ReseqTrack DB' )
parser.add_argument('--db', type=str, required=True, help='DB name in the ReseqTrack DB' )
parser.add_argument('--type', type=str, required=True, help='Type of the new VCF file' )
parser.add_argument('--vcftools_folder', type=str, required=True, help='Folder containing the VCFtools binary' )
parser.add_argument('--bgzip_folder', type=str, required=True, help='Folder containing the bgzip binary')
parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' )
parser.add_argument('--bed', type=str, required=True, help='BED file containing the coordinates to exclude' )
parser.add_argument('--outsuffix', type=str, required=True, help='Suffix for vcf output file. i.e. no_cms or no_offtarget' )
parser.add_argument('--outdir', type=str, required=True, help='Directory used to put the output files.' )
args = parser.parse_args()
if __name__ == '__main__':
if os.path.isdir(args.outdir) == False:
raise Exception("Output dir does not exist: %s"%args.outdir)
hostname=args.hostname
username=args.username
db=args.db
port=args.port
pwd=args.pwd
reseqdb = ReseqTrackDB(host=hostname,user=username,port=port,pwd=pwd,db=db)
file=reseqdb.fetch_file_by_filename(args.filename)
#constructing the out filename
now = datetime.datetime.now().strftime('%Y%m%d')
bits= os.path.basename(file.name).split('.')
outprefix=bits[0]+"."+bits[1]+"."+args.outsuffix+"."+now
log_filename="subset_vcf_%s.log"% outprefix
logger = logging.getLogger("subset_vcf")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(log_filename)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add handler to logger object
logger.addHandler(fh)
logger.info("Program started")
vcfQC = VcfQC(vcf=file.path,bgzip_folder=args.bgzip_folder,vcftools_folder=args.vcftools_folder)
vcffile=vcfQC.subset_vcf(bed=args.bed,outprefix=outprefix,outdir=args.outdir,create_index=True)
f=File(path=vcffile,type=args.type,host_id=1,withdrawn=0)
f.store(reseqdb,do_md5=True)
logger.info("Done!.")
| 41.026316 | 275 | 0.735407 |
a3bea3b575a46a0bd0557e3e985c4141109eee00 | 266 | py | Python | controllers/restart.py | Acidburn0zzz/helloworld | 9d88357658c55dadf9d4c6f923b63e8cb6207f75 | [
"MIT"
] | null | null | null | controllers/restart.py | Acidburn0zzz/helloworld | 9d88357658c55dadf9d4c6f923b63e8cb6207f75 | [
"MIT"
] | null | null | null | controllers/restart.py | Acidburn0zzz/helloworld | 9d88357658c55dadf9d4c6f923b63e8cb6207f75 | [
"MIT"
] | null | null | null | import os
from base import BaseHandler
| 22.166667 | 67 | 0.725564 |
a3bef41781bb732a7cb06f991f90aba75666a0ca | 4,276 | py | Python | nova/tests/unit/conductor/tasks/test_migrate.py | badock/nova-tidb | 4c4591f2cd887fdc22828e12f0c297c051bbd912 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/conductor/tasks/test_migrate.py | badock/nova-tidb | 4c4591f2cd887fdc22828e12f0c297c051bbd912 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/conductor/tasks/test_migrate.py | badock/nova-tidb | 4c4591f2cd887fdc22828e12f0c297c051bbd912 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.compute import rpcapi as compute_rpcapi
from nova.conductor.tasks import migrate
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit.conductor.test_conductor import FakeContext
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
| 47.511111 | 79 | 0.667212 |
a3bf6d02c2f4e332e2c37541b89b9a4e5f82ec94 | 97 | py | Python | CH7_GitCmdAndCtrl/modules/environment.py | maxmac12/BlackHatPython | 60044c65ffc2f1216cbf92c2ec850a4e2e9ca5bf | [
"MIT"
] | null | null | null | CH7_GitCmdAndCtrl/modules/environment.py | maxmac12/BlackHatPython | 60044c65ffc2f1216cbf92c2ec850a4e2e9ca5bf | [
"MIT"
] | null | null | null | CH7_GitCmdAndCtrl/modules/environment.py | maxmac12/BlackHatPython | 60044c65ffc2f1216cbf92c2ec850a4e2e9ca5bf | [
"MIT"
] | null | null | null | import os
| 16.166667 | 39 | 0.639175 |
a3c068d2dc2c438793e5de5d6de56af20454dc8f | 507 | py | Python | diskcatalog/core/views.py | rywjhzd/Cataloging-and-Visualizing-Cradles-of-Planet-Formation | 6d59ea9d9a07630721e19c554651bae2775962ac | [
"MIT"
] | null | null | null | diskcatalog/core/views.py | rywjhzd/Cataloging-and-Visualizing-Cradles-of-Planet-Formation | 6d59ea9d9a07630721e19c554651bae2775962ac | [
"MIT"
] | null | null | null | diskcatalog/core/views.py | rywjhzd/Cataloging-and-Visualizing-Cradles-of-Planet-Formation | 6d59ea9d9a07630721e19c554651bae2775962ac | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import Disk
import os
#def index(request):
# module_dir = os.path.dirname(__file__)
# file_path = os.path.join(module_dir, 'data.txt')
# disk_list = open(file_path , 'r')
# data = data_file.read()
# context = {'disk_list': data}
# return render(request, 'index.html', context)
| 25.35 | 53 | 0.672584 |
a3c17e6746a0528783d5b0c338fdad4e4910e00a | 1,976 | py | Python | misc/python/materialize/checks/insert_select.py | guswynn/materialize | f433173ed71f511d91311769ec58c2d427dd6c3b | [
"MIT"
] | null | null | null | misc/python/materialize/checks/insert_select.py | guswynn/materialize | f433173ed71f511d91311769ec58c2d427dd6c3b | [
"MIT"
] | 157 | 2021-12-28T19:17:45.000Z | 2022-03-31T17:44:27.000Z | misc/python/materialize/checks/insert_select.py | guswynn/materialize | f433173ed71f511d91311769ec58c2d427dd6c3b | [
"MIT"
] | null | null | null | # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from textwrap import dedent
from typing import List
from materialize.checks.actions import Testdrive
from materialize.checks.checks import Check
| 34.666667 | 119 | 0.598684 |
a3c289b2ddb7ec4ef9412f5ae94e7553200e0202 | 4,668 | py | Python | mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py | garlicbutter/Jonathan-Tom | c1696f0a94da46911b3566a3d4f49791e877373f | [
"MIT"
] | 2 | 2021-10-05T04:31:19.000Z | 2021-10-05T04:31:26.000Z | mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py | garlicbutter/Tom-Jonathan | c1696f0a94da46911b3566a3d4f49791e877373f | [
"MIT"
] | null | null | null | mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py | garlicbutter/Tom-Jonathan | c1696f0a94da46911b3566a3d4f49791e877373f | [
"MIT"
] | null | null | null | import numpy as np
import mujoco_py as mj
from mujoco_py_renderer import SimulationError, XMLError, MujocoPyRenderer
from mujoco_py import (MjSim, load_model_from_xml,functions,
load_model_from_path, MjSimState,
ignore_mujoco_warnings,
load_model_from_mjb)
from matplotlib import pyplot as plt
import time
xml = """
<mujoco model="example">
<compiler coordinate="global"/>
<default>
<geom rgba=".8 .6 .4 1"/>
</default>
<asset>
<texture type="skybox" builtin="gradient" rgb1="1 1 1" rgb2=".6 .8 1"
width="256" height="256"/>
</asset>
<worldbody>
<light pos="0 1 1" dir="0 -1 -1" diffuse="1 1 1"/>
<geom name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="10 10 10" type="plane"/>
<body>
<site name="world" size="0.1" pos="0 0 0" />
<geom name="first_pole" type="capsule" fromto="0 0 0 0 0 0.5" size="0.04"/>
<joint name='a' type="hinge" pos="0 0 0" axis="0 0 1" />
<body name="second_pole">
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0 0 0.5 0.5 0 0.5" size="0.04" name="second_pole"/>
<joint name='b' type="hinge" pos="0 0 0.5" axis="0 1 0"/>
<body name='third_pole'>
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0.5 0 0.5 1 0 0.5" size="0.04" name="third_pole"/>
<joint name='c' type="hinge" pos="0.5 0 0.5" axis="0 1 0"/>
<site name="target" size="0.1" pos="1 0 0.5" />
<body name="mass">
<inertial pos="1 0 0.5" mass="1e-2" diaginertia="1e-008 1e-008 1e-008" />
<geom type="sphere" pos="1 0 0.5" size="0.2" name="mass"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
<motor joint="c"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
viewer = MujocoPyRenderer(sim)
sim.reset()
# After reset jacobians are all zeros
sim.forward()
target_jacp = np.zeros(3 * sim.model.nv)
target_jacr= np.zeros(3 * sim.model.nv)
F=np.array([0,0,-9.81*1e-2,0,0,0]).T
#np.testing.assert_allclose(target_jacp, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
#sim.forward()
K_diag=2000
C_diag=100
A_diag=1e-3
K=np.identity(3)*K_diag
C=np.identity(3)*C_diag
A=np.identity(3)*A_diag
#K_diag=0.3
#C_diag=0.05
for i in range(3):
K[i, i]=K_diag
C[i,i]=C_diag
A[i, i] = A_diag
x_intial=sim.data.site_xpos[1]
print(x_intial)
x_desired=np.array([0,1,0.3])
v_intial=sim.data.site_xvelp[1]
v_desired=np.array([0,0,0])
a_desired=np.array([0,0,0])
a_intial=np.array([0,0,0])
dt=sim.model.opt.timestep
#sim.data.get_site_jacp('target', jacp=target_jacp)
# Should be unchanged after steps (zero action)
graph=[]
for _ in range(100000):
F[:3]=np.dot(K,x_desired-x_intial)+np.dot(C,v_desired-v_intial)+np.dot(A,a_desired-a_intial)
H = np.zeros(sim.model.nv* sim.model.nv)
functions.mj_fullM(sim.model, H, sim.data.qM)
sim.data.get_site_jacp('target', jacp=target_jacp)
sim.data.get_site_jacr('target', jacr=target_jacr)
J_L = target_jacp.reshape((3, sim.model.nv))
J_A = target_jacr.reshape((3, sim.model.nv))
J = np.concatenate((J_L, J_A), axis=0)
H_L =np.dot(np.linalg.pinv(J_L.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J_L)))
H_all=np.dot(np.linalg.pinv(J.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J)))
#F_a=np.dot(A,0.3-sim.data.qacc)
#action = np.dot(J_L.T, np.dot(H_L, F[:3]))+sim.data.qfrc_bias
action = sim.data.qfrc_bias+np.dot(H.reshape(3,3),np.dot(J_L.T,F[:3]))
#print(action)
#action = np.dot(J.T, F)
sim.data.ctrl[:] = action
sim.step()
sim.forward()
#print(np.max(action))
#print(sim.data.qacc)
viewer.render()
x_intial = sim.data.site_xpos[1]
a_intial=(v_intial-sim.data.site_xvelp[1])/dt
print(a_intial)
v_intial = sim.data.site_xvelp[1]
normal=np.linalg.norm(x_intial-x_desired)
#print(normal)
if normal<0.1:
print("in")
if x_desired[0]==0:
x_desired = np.array([-1, 0, 0.5])
elif x_desired[0]==1:
x_desired = np.array([0, 1, 0.3])
elif x_desired[0] == -1:
x_desired = np.array([1, 0, 0.5])
graph.append(np.abs(x_intial-x_desired))
# sim.forward()
print("the desired is {} and the intial is{}".format(x_desired,x_intial))
plt.plot(graph)
plt.show() | 29.923077 | 105 | 0.610111 |
a3c2ca7e8eeb8a5b7daf690508f0da4c87ebd47d | 3,323 | py | Python | evaluation/wordpress/pull_docker_images_from_private_registry.py | seveirbian/gear-old | 8d3529a9bf42e652a9d7475c9d14e9a6afc69a76 | [
"Apache-2.0"
] | null | null | null | evaluation/wordpress/pull_docker_images_from_private_registry.py | seveirbian/gear-old | 8d3529a9bf42e652a9d7475c9d14e9a6afc69a76 | [
"Apache-2.0"
] | null | null | null | evaluation/wordpress/pull_docker_images_from_private_registry.py | seveirbian/gear-old | 8d3529a9bf42e652a9d7475c9d14e9a6afc69a76 | [
"Apache-2.0"
] | null | null | null | import sys
# package need to be installed, pip install docker
import docker
import time
import yaml
import os
import xlwt
auto = False
private_registry = "202.114.10.146:9999/"
# result
result = [["tag", "finishTime", "size", "data"], ]
def get_net_data():
netCard = "/proc/net/dev"
fd = open(netCard, "r")
for line in fd.readlines():
if line.find("enp0s3") >= 0:
field = line.split()
data = float(field[1]) / 1024.0 / 1024.0
fd.close()
return data
if __name__ == "__main__":
if len(sys.argv) == 2:
auto = True
generator = Generator(os.path.split(os.path.realpath(__file__))[0]+"/image_versions.yaml")
images = generator.generateFromProfile()
puller = Puller(images)
puller.pull()
# create a workbook sheet
workbook = xlwt.Workbook()
sheet = workbook.add_sheet("run_time")
for row in range(len(result)):
for column in range(len(result[row])):
sheet.write(row, column, result[row][column])
workbook.save(os.path.split(os.path.realpath(__file__))[0]+"/pull.xls") | 27.46281 | 101 | 0.550707 |
a3c4634520b2ba72e01bed684e08b442a5657f9b | 385 | py | Python | jiminy/envs/vnc_wog.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | 3 | 2020-03-16T13:50:40.000Z | 2021-06-09T05:26:13.000Z | jiminy/envs/vnc_wog.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | null | null | null | jiminy/envs/vnc_wog.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | null | null | null | from jiminy.envs import vnc_env
from jiminy.spaces import VNCActionSpace
| 35 | 75 | 0.703896 |
a3c726cfaf4ab3b53d1df8bd6d6c24aef693e3ab | 5,066 | py | Python | fedml_api/standalone/federated_sgan/fedssgan_api.py | arj119/FedML | 5b7c098659f3e61f9e44583965300d8d0829f7a8 | [
"Apache-2.0"
] | null | null | null | fedml_api/standalone/federated_sgan/fedssgan_api.py | arj119/FedML | 5b7c098659f3e61f9e44583965300d8d0829f7a8 | [
"Apache-2.0"
] | null | null | null | fedml_api/standalone/federated_sgan/fedssgan_api.py | arj119/FedML | 5b7c098659f3e61f9e44583965300d8d0829f7a8 | [
"Apache-2.0"
] | null | null | null | import copy
import logging
import random
from typing import List, Tuple
import numpy as np
import torch
import wandb
from torch.utils.data import ConcatDataset
from fedml_api.standalone.fedavg.my_model_trainer import MyModelTrainer
from fedml_api.standalone.federated_sgan.ac_gan_model_trainer import ACGANModelTrainer
from fedml_api.standalone.federated_sgan.client import FedSSGANClient
from fedml_api.standalone.federated_sgan.model_trainer import FedSSGANModelTrainer
from fedml_api.standalone.utils.HeterogeneousModelBaseTrainerAPI import HeterogeneousModelBaseTrainerAPI
| 44.831858 | 129 | 0.627319 |
a3c78b4ed55d10de069695bce6f3d899ee02cc99 | 20,932 | py | Python | pytorch-word2vec-master/csv.py | arjun-sai-krishnan/tamil-morpho-embeddings | a33bcb427d635dba3b1857f26ea7ab287e1a44c5 | [
"MIT"
] | 2 | 2021-04-11T18:25:16.000Z | 2022-03-16T03:48:52.000Z | pytorch-word2vec-master/csv.py | arjun-sai-krishnan/tamil-morpho-embeddings | a33bcb427d635dba3b1857f26ea7ab287e1a44c5 | [
"MIT"
] | null | null | null | pytorch-word2vec-master/csv.py | arjun-sai-krishnan/tamil-morpho-embeddings | a33bcb427d635dba3b1857f26ea7ab287e1a44c5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
from collections import Counter
import pdb
import pickle
import re
import sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.multiprocessing as mp
import data_producer
from multiprocessing import set_start_method
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, default="", help="training file")
parser.add_argument("--vocab", type=str, default="", help="vocab pickle file")
parser.add_argument("--save", type=str, default="csv.pth.tar", help="saved model filename")
parser.add_argument("--size", type=int, default=300, help="word embedding dimension")
parser.add_argument("--window", type=int, default=5, help="context window size")
parser.add_argument("--sample", type=float, default=1e-5, help="subsample threshold")
parser.add_argument("--negative", type=int, default=10, help="number of negative samples")
parser.add_argument("--delta", type=float, default=0.15, help="create new sense for a type if similarity lower than this value.")
parser.add_argument("--min_count", type=int, default=5, help="minimum frequency of a word")
parser.add_argument("--processes", type=int, default=4, help="number of processes")
parser.add_argument("--num_workers", type=int, default=6, help="number of workers for data processsing")
parser.add_argument("--iter", type=int, default=3, help="number of iterations")
parser.add_argument("--lr", type=float, default=-1.0, help="initial learning rate")
parser.add_argument("--batch_size", type=int, default=100, help="(max) batch size")
parser.add_argument("--cuda", action='store_true', default=False, help="enable cuda")
parser.add_argument("--multi_proto", action='store_true', default=False, help="True: multi-prototype, False:single-prototype")
MAX_SENT_LEN = 1000
# Build the vocabulary.
# Initialize model.
def init_net(args):
if args.lr == -1.0:
vars(args)['lr'] = 0.05
return CSV(args)
def save_model(filename, model, args, word2idx):
torch.save({
'word2idx':word2idx,
'args':args,
#'word2sense': model.word2sense,
'n_senses': model.n_senses,
'params': model.state_dict()
}, filename)
def load_model(filename):
checkpoint = torch.load(filename)
word2idx = checkpoint['word2idx']
args = checkpoint['args']
model = CSV(args)
if args.cuda:
model.cuda()
model.global_embs.weight.data = checkpoint['params']['global_embs.weight']
model.sense_embs.weight.data = checkpoint['params']['sense_embs.weight']
model.ctx_weight.data = checkpoint['params']['ctx_weight']
model.word2sense = checkpoint['word2sense']
#model.word2sense.data = checkpoint['params']['word2sense']
#model.word_sense_cnts.data = checkpoint['params']['word_sense_cnts']
model.n_senses = checkpoint['n_senses']
return model, word2idx
# Training
if __name__ == '__main__':
set_start_method('forkserver')
args = parser.parse_args()
print("Starting training using file %s" % args.train)
train_file = open(args.train)
train_file.seek(0, 2)
vars(args)['file_size'] = train_file.tell()
word_count_actual = mp.Value('L', 0)
if args.vocab == '':
word2idx, word_list, freq = build_vocab(args)
else:
with open(args.vocab, 'rb') as f:
word2idx, word_list, freq, pos2idx, dep2id = pickle.load(f)
word_count = sum([freq[k] for k in freq])
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
model = init_net(args)
model.share_memory()
if args.cuda:
model.cuda()
# stage 1, learn robust context representation.
vars(args)['stage'] = 1
print("Stage 1")
vars(args)['lr_anneal'] = True
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
del processes
print("\nStage 1, ", time.monotonic() - args.t_start, " secs ", word_count_actual.value)
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage1.pth.tar'
save_model(filename, model, args, word2idx)
if args.multi_proto:
# stage 2, create new sense in a non-parametric way.
# Freeze model paramters except sense_embs, and use only 1 process to prevent race condition
old_batch_size = vars(args)['batch_size']
model.global_embs.requires_grad = False
model.ctx_weight.requires_grad = False
model.sense_embs = model.sense_embs.cpu()
vars(args)['stage'] = 2
vars(args)['batch_size'] = 5000
print("\nStage 2")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
train_process_stage2(0, word_count_actual, word2idx, word_list, freq, args, model)
if args.cuda:
model.cuda()
print("\nStage 2, ", time.monotonic() - args.t_start, " secs")
print("Current # of senses: %d" % model.n_senses)
pdb.set_trace()
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage2.pth.tar'
save_model(filename, model, args, word2idx)
# stage 3, no more sense creation.
vars(args)['lr'] = args.lr * 0.01
vars(args)['batch_size'] = old_batch_size
model.global_embs.requires_grad = True
model.ctx_weight.requires_grad = True
vars(args)['stage'] = 3
print("\nBegin stage 3")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
print("\nStage 3, ", time.monotonic() - args.t_start, " secs")
# save model
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage3.pth.tar'
save_model(filename, model, args, word2idx)
print("")
| 40.487427 | 250 | 0.591821 |
a3c8721ad82d9b0c4f4bbb5e4ea027824401f22d | 339 | py | Python | Ogrenciler/Varol/buyuksayi.py | ProEgitim/Python-Dersleri-BEM | b25e9fdb1fa3026925a46b2fcbcba348726b775c | [
"MIT"
] | 1 | 2021-04-18T17:35:22.000Z | 2021-04-18T17:35:22.000Z | Ogrenciler/Varol/buyuksayi.py | waroi/Python-Dersleri-BEM | b25e9fdb1fa3026925a46b2fcbcba348726b775c | [
"MIT"
] | null | null | null | Ogrenciler/Varol/buyuksayi.py | waroi/Python-Dersleri-BEM | b25e9fdb1fa3026925a46b2fcbcba348726b775c | [
"MIT"
] | 2 | 2021-04-18T18:22:26.000Z | 2021-04-24T17:16:19.000Z | sayi1 = int(input("1. Say: "))
sayi2 = int(input("2. Say: "))
sayi3 = int(input("3. Say: "))
sayi4 = int(input("4. Say: "))
sayi5 = int(input("5. Say: "))
sayilar=[];
sayilar.append(sayi1)
sayilar.append(sayi2)
sayilar.append(sayi3)
sayilar.append(sayi4)
sayilar.append(sayi5)
sayilar.sort()
print("En byk sayimiz..",sayilar[-1])
| 21.1875 | 39 | 0.663717 |
a3c959da81854ccd184aefdeb715f7df8413b8b8 | 8,899 | py | Python | baselines/deepq/build_graph_mfec.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | [
"MIT"
] | null | null | null | baselines/deepq/build_graph_mfec.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | [
"MIT"
] | null | null | null | baselines/deepq/build_graph_mfec.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | [
"MIT"
] | 1 | 2021-04-26T13:55:47.000Z | 2021-04-26T13:55:47.000Z | """Deep Q learning graph
The functions in this file can are used to create the following functions:
======= act ========
Function to chose an action given an observation
Parameters
----------
observation: object
Observation that can be feed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
update epsilon a new value, if negative not update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
imporance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stablize the learning. For example for Atari
Q' is set to Q once every 10000 updates training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
import numpy as np
def build_train_mf(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, scope="mfec",
                   alpha=1.0, beta=1.0, theta=1.0, latent_dim=32, ib=True, reuse=None):
    """Creates the act function and the VAE/IB train function for the "mfec" model.

    NOTE(review): original indentation was lost in extraction; structure below is
    reconstructed from syntax — confirm against the original repository.

    Parameters
    ----------
    make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions
    optimizer: tf.train.Optimizer
        optimizer to use for the training objective.
    grad_norm_clipping: float or None
        clip gradient norms to this value. If None no clipping is performed.
    gamma: float
        discount rate (unused in this function's graph; kept for API symmetry).
    alpha, beta, theta: float
        weights of the information-bottleneck, encoder (KL) and decoder
        (reconstruction) loss terms respectively.
    latent_dim: int
        dimensionality of the latent noise vectors fed to the encoder.
    ib: bool
        if True, adds the information-bottleneck loss on the value head,
        supervised by the extra ``qec`` placeholder.
    scope: str or VariableScope
        optional scope for variable_scope.
    reuse: bool or None
        whether or not the variables should be reused. To be able to reuse the scope must be given.

    Returns
    -------
    act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    train: (object, np.array, np.array, object, np.array, np.array) -> np.array
        optimize the error in Bellman's equation.
        See the top of the file for details.
    """
    # Noise fed to the act-time encoder (one latent sample per batch element).
    act_noise = tf.placeholder(tf.float32, [None, latent_dim], name="act_noise")
    # build_act_mf is defined elsewhere in this module (not visible here).
    act_f = build_act_mf(make_obs_ph, q_func, act_noise, num_actions, scope=scope, reuse=reuse)

    with tf.variable_scope(scope, reuse=reuse):
        # set up placeholders
        # EMDQN
        obs_vae_input = U.ensure_tf_input(make_obs_ph("obs_vae"))
        z_noise_vae = tf.placeholder(tf.float32, [None, latent_dim], name="z_noise_vae")
        inputs = [obs_vae_input,z_noise_vae]
        if ib:
            # Episodic-control value targets for the IB regression term.
            qec_input = tf.placeholder(tf.float32, [None], name='qec')
            inputs.append(qec_input)
        outputs = []
        # Reuse the act-time network; it returns the Q heads plus the VAE
        # encoder statistics (z_mean/z_logvar), value statistics
        # (v_mean/v_logvar) and the reconstructed observation.
        q_vae, q_deterministic_vae, v_mean_vae, v_logvar_vae, z_mean_vae, z_logvar_vae, recon_obs = q_func(obs_vae_input.get(),
                                                                                                          z_noise_vae, num_actions,
                                                                                                          scope="q_func",
                                                                                                          reuse=True)
        q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))

        # Per-dimension KL-style penalty between N(z_mean, exp(z_logvar)) and
        # N(0, 1): -1 + mu^2 + exp(logvar) - logvar (unreduced here;
        # tf.reduce_mean is applied when it is added to total_loss).
        encoder_loss = -1 + z_mean_vae ** 2 + tf.exp(z_logvar_vae) - z_logvar_vae
        total_loss = tf.reduce_mean(beta * encoder_loss)

        # Pixel-wise reconstruction loss between the decoded and the input
        # observation (both flattened; input cast to float32).
        decoder_loss = tf.keras.losses.binary_crossentropy(tf.reshape(recon_obs, [-1]), tf.reshape(
            tf.dtypes.cast(obs_vae_input._placeholder, tf.float32), [-1]))
        print("here", z_mean_vae.shape, z_logvar_vae.shape, encoder_loss.shape, decoder_loss.shape)
        vae_loss = beta * encoder_loss + theta * decoder_loss

        outputs.append(encoder_loss)
        outputs.append(decoder_loss)
        outputs.append(vae_loss)
        total_loss += tf.reduce_mean(theta * decoder_loss)
        if ib:
            # Heteroscedastic regression of v_mean onto the (stop-gradient)
            # qec targets: (diff^2 / exp(logvar)) + logvar.
            ib_loss = (v_mean_vae - tf.stop_gradient(tf.expand_dims(qec_input, 1))) ** 2 / tf.exp(
                v_logvar_vae) + v_logvar_vae
            print("here2", v_mean_vae.shape, tf.expand_dims(qec_input, 1).shape, v_logvar_vae.shape, ib_loss.shape)
            total_ib_loss = alpha * ib_loss + beta * encoder_loss
            outputs.append(total_ib_loss)
            total_loss += tf.reduce_mean(alpha * ib_loss)

        # Build the optimization op, optionally clipping gradient norms.
        if grad_norm_clipping is not None:
            optimize_expr = U.minimize_and_clip(optimizer,
                                                total_loss,
                                                var_list=q_func_vars,
                                                clip_val=grad_norm_clipping)
        else:
            optimize_expr = optimizer.minimize(total_loss, var_list=q_func_vars)

        # Create callable functions
        # EMDQN
        total_loss_summary = tf.summary.scalar("total loss", total_loss)
        z_var_summary = tf.summary.scalar("z_var", tf.reduce_mean(tf.exp(z_logvar_vae)))
        encoder_loss_summary = tf.summary.scalar("encoder loss", tf.reduce_mean(encoder_loss))
        decoder_loss_summary = tf.summary.scalar("decoder loss", tf.reduce_mean(decoder_loss))
        summaries = [total_loss_summary, z_var_summary, encoder_loss_summary, decoder_loss_summary]
        if ib:
            ib_loss_summary = tf.summary.scalar("ib loss", tf.reduce_mean(ib_loss))
            total_ib_loss_summary = tf.summary.scalar("total ib loss", tf.reduce_mean(total_ib_loss))
            summaries.append(ib_loss_summary)
            summaries.append(total_ib_loss_summary)
        summary = tf.summary.merge(summaries)
        outputs.append(summary)

        # One session call runs the optimizer and returns the loss + summary.
        train = U.function(
            inputs=inputs,
            outputs=[total_loss,summary],
            updates=[optimize_expr]
        )

    return act_f, train
| 42.37619 | 127 | 0.618047 |
a3c978469e28670107c4646aa77b54f6269dda05 | 2,244 | py | Python | tests/test_prior.py | frodre/LMR | 4c00d3f9db96447e69bd3f426d59524f7b5f3ef5 | [
"BSD-3-Clause"
] | 17 | 2018-08-27T18:50:36.000Z | 2021-03-17T22:48:55.000Z | tests/test_prior.py | mingsongli/LMR | 4c00d3f9db96447e69bd3f426d59524f7b5f3ef5 | [
"BSD-3-Clause"
] | 5 | 2018-10-15T22:13:27.000Z | 2019-04-26T11:45:58.000Z | tests/test_prior.py | mingsongli/LMR | 4c00d3f9db96447e69bd3f426d59524f7b5f3ef5 | [
"BSD-3-Clause"
] | 11 | 2018-10-11T19:35:34.000Z | 2021-08-17T12:08:11.000Z | import sys
sys.path.append('../')
import LMR_config as cfg
import LMR_prior
import numpy as np
import pytest
| 24.933333 | 60 | 0.685829 |
a3cadf1c1469dc28d63f965c32ff3b98b7eb9d52 | 8,719 | py | Python | src/salgan_dhf1k/train_bce.py | juanjo3ns/SalGAN2 | ac52af743b94961cdb44c5d89774b72fc8acfd3e | [
"MIT"
] | null | null | null | src/salgan_dhf1k/train_bce.py | juanjo3ns/SalGAN2 | ac52af743b94961cdb44c5d89774b72fc8acfd3e | [
"MIT"
] | null | null | null | src/salgan_dhf1k/train_bce.py | juanjo3ns/SalGAN2 | ac52af743b94961cdb44c5d89774b72fc8acfd3e | [
"MIT"
] | null | null | null | import os
from dataloader.datasetDHF1K import DHF1K
from torch.utils.data import DataLoader
from utils.salgan_utils import save_model, get_lr_optimizer
from utils.sendTelegram import send
from utils.printer import param_print
from utils.salgan_generator import create_model, add_bn
from evaluation.fast_evaluation import compute_metrics
import numpy as np
import torch
from torch.nn import AvgPool2d
from torch.nn.modules.loss import BCELoss
import torch.backends.cudnn as cudnn
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from time import time
from IPython import embed
from tensorboard_logger import configure, log_value, log_histogram
# Dataset-split identifiers; also used as keys of the `dataloader` dict below.
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
if __name__ == '__main__':
    # Training entry point for SalGAN-style saliency prediction on DHF1K.
    # NOTE(review): original indentation was lost in extraction; structure
    # below is reconstructed from syntax — confirm against the repository.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--path_out", default='sal_dhf1k_adamdepthcoordaugm2_frombestsaldepth',
                        type=str,
                        help="""set output path for the trained model""")
    parser.add_argument("--batch_size", default=12,
                        type=int,
                        help="""Set batch size""")
    parser.add_argument("--n_epochs", default=10, type=int,
                        help="""Set total number of epochs""")
    # NOTE(review): type=bool on argparse flags is misleading — any non-empty
    # string (including "False") parses as True. Left unchanged here.
    parser.add_argument("--depth", default=False, type=bool,
                        help="""Enable 4th channel with depth""")
    parser.add_argument("--augment", default=False, type=bool,
                        help="""Enable data augmentation""")
    parser.add_argument("--coord", default=False, type=bool,
                        help="""Enable coordconv""")
    parser.add_argument("--flow", default=False, type=bool,
                        help="""Enable opticalflow""")
    parser.add_argument("--lr", type=float, default=0.00001,
                        help="""Learning rate for training""")
    parser.add_argument("--patience", type=int, default=3,
                        help="""Patience for learning rate scheduler (default 10)""")
    args = parser.parse_args()

    # set output path ==========================================================
    path_out = '../trained_models/batch12_/' + args.path_out
    if not os.path.exists(path_out):
        # create output path
        os.makedirs(path_out)

    # create output for models
    path_models = os.path.join(path_out, 'models')
    if not os.path.exists(path_models):
        os.makedirs(path_models)

    # tensorboard: log scalars under the experiment's output directory
    configure("{}".format(path_out), flush_secs=5)

    # data =====================================================================
    batch_size = args.batch_size
    n_epochs = args.n_epochs
    lr = args.lr
    DEPTH = args.depth
    AUGMENT = args.augment
    COORD = args.coord
    FLOW = args.flow

    # Datasets for DHF1K (augmentation only on the training split)
    ds_train = DHF1K(mode=TRAIN, transformation=True, depth=DEPTH, d_augm=AUGMENT, coord=COORD)
    ds_validate = DHF1K(mode=VAL, transformation=False, depth=DEPTH, d_augm=False, coord=COORD)

    # Dataloaders
    dataloader = {
        TRAIN: DataLoader(ds_train, batch_size=batch_size,
                          shuffle=True, num_workers=2),
        VAL: DataLoader(ds_validate, batch_size=batch_size,
                        shuffle=False, num_workers=2)
    }

    # POSSIBILITY OF CHOOSING GPU (hard-coded to GPU index 1)
    torch.cuda.set_device(1)

    # MODEL INITIALIZATION: load pretrained SalGAN baseline weights into a
    # 3-channel (RGB) generator.
    print("Init model...")
    vgg_weights = torch.load('../trained_models/salgan_baseline.pt')['state_dict']
    model = create_model(3)
    # Commented-out variants for extra input channels (depth / coordconv):
    # if DEPTH and COORD:
    #     model = create_model(6)
    #     for i in range(0,3):
    #         vgg_weights = add_layer_weights(vgg_weights)
    # elif DEPTH:
    #     model = create_model(4)
    #     add_layer_weights(vgg_weights)
    # elif COORD:
    #     model = create_model(5)
    #     for i in range(0,2):
    #         vgg_weights = add_layer_weights(vgg_weights)
    # else: model = create_model(3)
    # Instead of adding manually the layer of new weights, we could use strict=False
    model.load_state_dict(vgg_weights)

    # Add batch normalization to current model if needed
    model = add_bn(model)
    model.train()
    model.cuda()
    cudnn.benchmark = True

    # NOT WORKING, UNMOUNTED DISK:
    # if torch.cuda.device_count() > 1:
    #     print("Using ", torch.cuda.device_count(), "GPUs!")
    #     model = torch.nn.DataParallel(model)

    # LOSS FUNCTION (binary cross-entropy on saliency maps)
    bce_loss = BCELoss()

    # FINE-TUNE WHOLE NETWORK OR JUST DECODER => uncomment / or different lr for each part
    # decoder_parameters = []
    # base_params = []
    # for i, (a, p) in enumerate(model.named_parameters()):
    #     if i > 25:
    #         decoder_parameters.append(p)
    #     else:
    #         base_params.append(p)
    #         # If you wanna train just the decoder put this:
    #         # p.requires_grad = False

    # ADAM OPTIMIZER
    optimizer = Adam(model.parameters(),
                     lr = lr,
                     weight_decay=0.000001)
    # STOCHASTIC GRADIENT DESCENT OPTIMIZER (alternative, unused)
    # optimizer = SGD(model.parameters(),
    #                 lr = 0.00001,
    #                 momentum=0.9,
    #                 weight_decay=0.00001,
    #                 nesterov=True)

    # NUMBER OF TRAINABLE PARAMETERS (reported locally and via Telegram)
    trainable_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Trainable parameters: ", trainable_parameters)
    send("Trainable parameters: " + str(trainable_parameters))
    send("Experiment: " + args.path_out)

    # PRINT TABLE OF PARAMETERS
    param_print([path_out,"",DEPTH,AUGMENT,COORD,FLOW,batch_size,lr,n_epochs, trainable_parameters])

    # Learning-rate scheduler. Alternative (commented): ReduceLROnPlateau,
    #   mode (str): 'min' reduces the lr when the metric stops decreasing,
    #       'max' does the opposite,
    #   factor (float): multiplicative factor by which the lr is reduced,
    #   patience (int): number of epochs without improvement after which the
    #       lr is reduced.
    # scheduler = ReduceLROnPlateau(optimizer,
    #                               'min',
    #                               patience=args.patience,
    #                               verbose=True)
    scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
    best_loss=9999999

    # main loop training =======================================================
    for id_epoch in range(n_epochs):
        for mode in [VAL, TRAIN]:
            # select dataloader
            data_iterator = dataloader[mode]

            # saliency metrics (disabled):
            # if mode == VAL:
            #     print("Evaluating metrics....")
            #     # only do 100 images from validation
            #     metrics = compute_metrics(model, 100, DEPTH, COORD)
            #     for metric in metrics.keys():
            #         log_value("Metrics/{}".format(metric),
            #                   metrics[metric], id_epoch)

            # get epoch loss — train_eval is defined elsewhere in this module.
            epoch_loss = train_eval(mode, model, optimizer, dataloader)
            lr = list(get_lr_optimizer(optimizer))[0]
            print("-----------")
            print("Done! {} epoch {} loss {} lr {}".format(mode, id_epoch, epoch_loss, lr))
            send("{} epoch {}/{} loss {}".format(mode, id_epoch, n_epochs, epoch_loss))
            print("\n")

            # record loss
            log_value("loss/{}".format(mode), epoch_loss, id_epoch)
            log_value("lr/{}".format(mode), lr, id_epoch)
            # for v in model.state_dict():
            #     log_histogram("Layer {}".format(v), model.state_dict()[v], id_epoch)

            # periodic checkpoint every other epoch
            if (id_epoch%2)==0:
                save_model(model, optimizer, id_epoch, path_out, name_model='{:03d}'.format(id_epoch))

            # store model if val loss improves
            if mode==VAL:
                if best_loss > epoch_loss:
                    # update loss
                    best_loss = epoch_loss
                    save_model(model, optimizer, id_epoch, path_out, name_model='best')
                # NOTE(review): scheduler placement reconstructed — it should
                # step once per epoch; confirm against the original file.
                # scheduler.step(epoch_loss)
                scheduler.step()
| 31.139286 | 112 | 0.686661 |
a3cae716974e2bebe27ab17e3253013ab6b42f7b | 782 | py | Python | dragontail/content/models/basicpage.py | tracon/dragontail | aae860acb5fe400015557f659b6d4221b939747a | [
"MIT"
] | null | null | null | dragontail/content/models/basicpage.py | tracon/dragontail | aae860acb5fe400015557f659b6d4221b939747a | [
"MIT"
] | null | null | null | dragontail/content/models/basicpage.py | tracon/dragontail | aae860acb5fe400015557f659b6d4221b939747a | [
"MIT"
] | null | null | null | # encoding: utf-8
from django.db import models
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore import blocks
from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.wagtailimages.blocks import ImageChooserBlock
| 28.962963 | 75 | 0.742967 |