Column schema:

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
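The rows below follow this schema. As a quick orientation, here is a minimal sketch of filtering such rows with pandas; it assumes the table has been exported to a local Parquet file named `rows.parquet` (a hypothetical name) and uses only the column names listed above.

```python
# Minimal sketch, not part of the dataset itself. Assumption: the rows are
# available locally as "rows.parquet" (hypothetical file name).
import pandas as pd

df = pd.read_parquet("rows.parquet")

# Nullable columns (marked "nullable" in the schema) arrive as NaN/None.
print("rows with star events:", df["max_stars_count"].notna().sum())

# Example filter: small, mostly-alphanumeric Python files.
small_py = df[(df["ext"] == "py") & (df["size"] < 2048) & (df["alphanum_fraction"] > 0.6)]
print(small_py[["max_stars_repo_name", "max_stars_repo_path", "size", "avg_line_length"]].head())
```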

hexsha: 670452b80f4a4923b00927b3d39770582eb6ad21 | size: 908 | ext: py | lang: Python
max_stars_repo: path=py/get_balance.py, name=parinya-ekparinya/ethereump-testbed-setup, head_hexsha=06fa2a95b6cfb076ac05c71b128ec91150a3fafa, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_issues_repo: path=py/get_balance.py, name=parinya-ekparinya/ethereump-testbed-setup, head_hexsha=06fa2a95b6cfb076ac05c71b128ec91150a3fafa, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks_repo: path=py/get_balance.py, name=parinya-ekparinya/ethereump-testbed-setup, head_hexsha=06fa2a95b6cfb076ac05c71b128ec91150a3fafa, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
#!/usr/bin/python
import argparse
from ethjsonrpc import EthJsonRpc
ETH_PORT = 8545
def parseRpcAddr(rpcaddr):
if rpcaddr.find(":") != -1:
s = rpcaddr.split(":")
netaddr = s[0]
port = int(s[1])
else:
netaddr = rpcaddr
port = ETH_PORT
return (netaddr, port)
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(description='.')
parser.add_argument("rpcaddr", help="RPC address of an ethereum node", default="127.0.0.1:8545")
parser.add_argument("-w", "--wallet", help="etherbase/wallet address", default="0")
args = parser.parse_args()
netaddr, port = parseRpcAddr(args.rpcaddr)
eth_client = EthJsonRpc(netaddr, port)
if args.wallet == "0":
wallet = eth_client.eth_coinbase()
else:
wallet = args.wallet
balance = eth_client.eth_getBalance(wallet)
print balance
avg_line_length: 25.942857 | max_line_length: 100 | alphanum_fraction: 0.637665

hexsha: c540cde3cfe856afc6dcc6e35952cde06bf1eb1a | size: 1,902 | ext: py | lang: Python
max_stars_repo: path=lib/common.py, name=HumanCellAtlas/metadata-migration, head_hexsha=a6127195be420f55a3b5321cc68434fc55ed0487, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues_repo: path=lib/common.py, name=HumanCellAtlas/metadata-migration, head_hexsha=a6127195be420f55a3b5321cc68434fc55ed0487, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks_repo: path=lib/common.py, name=HumanCellAtlas/metadata-migration, head_hexsha=a6127195be420f55a3b5321cc68434fc55ed0487, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
from enum import Enum
class MigrationDirection(Enum):
DOWNGRADE = 1
UPGRADE = 2
class Strategy(Enum):
EXACT = 1
BEST_EFFORT = 2
class SchemaRef:
def __init__(self, url_string):
self.url_string = url_string
self.base_url, self.high_level_entity, self.domain_entity, self.version, self.module = url_string.rsplit('/', 4)
def __eq__(self, other):
return self.url_string == other.url_string
def __ne__(self, other):
return self.url_string != other.url_string
def __gt__(self, other):
self._validate(other)
return compare_versions(other.version, self.version) > 0
def __ge__(self, other):
self._validate(other)
return compare_versions(other.version, self.version) >= 0
def __lt__(self, other):
self._validate(other)
return compare_versions(other.version, self.version) < 0
def __le__(self, other):
self._validate(other)
return compare_versions(other.version, self.version) <= 0
def _validate(self, other):
if not all([
self.base_url == other.base_url,
self.high_level_entity == other.high_level_entity,
self.domain_entity == other.domain_entity,
self.module == other.module
]):
raise Exception('These versions are non-comparable!')
def compare_versions(ver_a, ver_b):
ver_a_ints = [int(i) for i in ver_a.split('.')]
ver_b_ints = [int(i) for i in ver_b.split('.')]
assert (len(ver_a_ints) == len(ver_b_ints))
for a_int, b_int in zip(ver_a_ints, ver_b_ints):
diff = b_int - a_int
if diff != 0:
return diff
return 0
def migration_direction(ver_a, ver_b):
i = compare_versions(ver_a, ver_b)
if i < 0:
return MigrationDirection.DOWNGRADE
if i > 0:
return MigrationDirection.UPGRADE
return None
avg_line_length: 27.171429 | max_line_length: 120 | alphanum_fraction: 0.639853
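The lib/common.py row above defines SchemaRef and the compare_versions/migration_direction helpers. A short usage sketch follows; it assumes the module is importable as lib.common (matching the repo path in the row metadata) and uses a made-up schema URL purely for illustration.

```python
# Usage sketch only. Assumptions: lib.common is importable (per the repo path
# shown above); the schema URL below is invented for illustration.
from lib.common import SchemaRef, compare_versions, migration_direction, MigrationDirection

old = SchemaRef("https://schema.example.org/type/biomaterial/5.1.0/cell_suspension")
new = SchemaRef("https://schema.example.org/type/biomaterial/5.2.1/cell_suspension")

# compare_versions(a, b) is positive when b is the newer version.
assert compare_versions("5.1.0", "5.2.1") > 0
assert migration_direction("5.1.0", "5.2.1") is MigrationDirection.UPGRADE

# SchemaRef comparisons defer to compare_versions after checking that both refs
# point at the same base URL, entity, and module.
assert new > old
```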

hexsha: 0c73d4a2f8a1d01a1879c3a39b489540703b7bfe | size: 7,899 | ext: py | lang: Python
max_stars_repo: path=backend/mobileapp_33158/settings.py, name=crowdbotics-apps/mobileapp-33158, head_hexsha=bd10747bb3f5545f2d5283a8ff0aac348471621d, licenses=["FTL", "AML", "RSA-MD"], count=null, event_min=null, event_max=null
max_issues_repo: path=backend/mobileapp_33158/settings.py, name=crowdbotics-apps/mobileapp-33158, head_hexsha=bd10747bb3f5545f2d5283a8ff0aac348471621d, licenses=["FTL", "AML", "RSA-MD"], count=null, event_min=null, event_max=null
max_forks_repo: path=backend/mobileapp_33158/settings.py, name=crowdbotics-apps/mobileapp-33158, head_hexsha=bd10747bb3f5545f2d5283a8ff0aac348471621d, licenses=["FTL", "AML", "RSA-MD"], count=null, event_min=null, event_max=null
content:
"""
Django settings for mobileapp_33158 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mobileapp_33158.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mobileapp_33158.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
avg_line_length: 30.380769 | max_line_length: 112 | alphanum_fraction: 0.736802

hexsha: 0b7d7ccae6416758d9a75177fddffa57c9d9de2d | size: 1,103 | ext: py | lang: Python
max_stars_repo: path=temp/demo.py, name=Rabmelon/tiSPHi, head_hexsha=8ffb0e505edd01cb31cb049bfe54f1f2b99cf121, licenses=["MIT"], count=5, event_min=2022-01-03T12:14:34.000Z, event_max=2022-02-11T01:22:52.000Z
max_issues_repo: path=temp/demo.py, name=Rabmelon/taichiCourse01_tiSPHi, head_hexsha=8ffb0e505edd01cb31cb049bfe54f1f2b99cf121, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks_repo: path=temp/demo.py, name=Rabmelon/taichiCourse01_tiSPHi, head_hexsha=8ffb0e505edd01cb31cb049bfe54f1f2b99cf121, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import taichi as ti
import numpy as np
from particle_system import *
from wcsph import *
# ti.init(arch=ti.cpu)
# Use GPU for higher performance if available
ti.init(arch=ti.gpu, device_memory_GB=3, packed=True)
if __name__ == "__main__":
ps = ParticleSystem((512, 512))
ps.add_cube(lower_corner=[6, 2],
cube_size=[3.0, 5.0],
velocity=[-5.0, -10.0],
density=1000.0,
color=0x956333,
material=1)
ps.add_cube(lower_corner=[3, 1],
cube_size=[2.0, 6.0],
velocity=[0.0, -20.0],
density=1000.0,
color=0x956333,
material=1)
wcsph_solver = WCSPHSolver(ps)
gui = ti.GUI(background_color=0xFFFFFF)
while gui.running:
for i in range(5):
wcsph_solver.step()
particle_info = ps.dump()
gui.circles(particle_info['position'] * ps.screen_to_world_ratio / 512,
radius=ps.particle_radius / 1.5 * ps.screen_to_world_ratio,
color=0x956333)
gui.show()
avg_line_length: 29.026316 | max_line_length: 79 | alphanum_fraction: 0.558477

hexsha: 9285cc3aec32c764f0a2c1f0c4c324ee9522c8bd | size: 990 | ext: py | lang: Python
max_stars_repo: path=string_module/String_Concatenation.py, name=kenwaldek/python, head_hexsha=e6aaf5616a456a4fb91889c0617bd6511f1a223e, licenses=["MIT"], count=1, event_min=2019-02-24T09:57:16.000Z, event_max=2019-02-24T09:57:16.000Z
max_issues_repo: path=string_module/String_Concatenation.py, name=kenwaldek/python, head_hexsha=e6aaf5616a456a4fb91889c0617bd6511f1a223e, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks_repo: path=string_module/String_Concatenation.py, name=kenwaldek/python, head_hexsha=e6aaf5616a456a4fb91889c0617bd6511f1a223e, licenses=["MIT"], count=4, event_min=2017-05-21T15:34:53.000Z, event_max=2018-09-25T06:56:15.000Z
content:
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# © kenwaldek MIT-license
#
# Title: String Concatenation Version: 1.0
# Date: 27-12-16 Language: python3
# Description: String Concatenation strings
#
###############################################################
names = ['kenny', 'sofie', 'jill', 'samantha']
# for name in names:
# print('hello there,' +name)
# print(' '.join(['hello there ,', name]))
print(', '.join(names))
who = 'gary'
how_many = 12
# the line below is how it used to be done in Python 2
print(who, 'bought', how_many, 'apples today')
# the line below is the correct way to do it in Python 3 now
print('{} bought {} apples today!' .format(who, how_many))
# if you still want to specify an order
print('{0} bought {1} apples today!' .format(who, how_many))
print('{1} bought {0} apples today!' .format(who, how_many))
print('##########')
avg_line_length: 25.384615 | max_line_length: 63 | alphanum_fraction: 0.525253

hexsha: 53a3fa7c9df8d69bc6dca59e5791aa6f806dd678 | size: 22,102 | ext: py | lang: Python
max_stars_repo: path=beartype_test/a00_unit/data/data_pep563.py, name=jonathanmorley/beartype, head_hexsha=0d1207210220807d5c5848033d13657afa307983, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues_repo: path=beartype_test/a00_unit/data/data_pep563.py, name=jonathanmorley/beartype, head_hexsha=0d1207210220807d5c5848033d13657afa307983, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks_repo: path=beartype_test/a00_unit/data/data_pep563.py, name=jonathanmorley/beartype, head_hexsha=0d1207210220807d5c5848033d13657afa307983, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype** `PEP 563`_ **data submodule.**
This submodule exercises `PEP 563`_ support implemented in the
:func:`beartype.beartype` decorator by enabling this support with a leading
``from __future__ import annotations`` statement and then declaring a callable
decorated by that decorator. External unit tests are expected to conditionally
import this submodule if the active Python interpreter targets at least
Python 3.7.0 and then call that callable.
Caveats
----------
**This submodule requires the active Python interpreter to target at least
Python 3.7.0.** If this is *not* the case, importing this submodule raises an
:class:`AttributeError` exception.
.. _PEP 563:
https://www.python.org/dev/peps/pep-0563
'''
# ....................{ IMPORTS }....................
from __future__ import annotations
from beartype import beartype
from beartype._cave._cavefast import IntType
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_9
from beartype_test.a00_unit.data.data_type import decorator
from collections.abc import Callable
from typing import List, Union
# ....................{ HINTS }....................
ListStrType = list[str] if IS_PYTHON_AT_LEAST_3_9 else List[str]
'''
PEP-compliant type hint matching a list of strings.
This hint is globalized to avoid `PEP 585`_-specific deprecating warnings under
Python >= 3.9.
.. _PEP 585:
https://www.python.org/dev/peps/pep-0585
'''
# ....................{ CONSTANTS }....................
_MINECRAFT_END_TXT_STANZAS = (
'I see the player you mean.',
'{player_name}?',
'Yes. Take care. It has reached a higher level now. It can read our thoughts.',
"That doesn't matter. It thinks we are part of the game.",
'I like this player. It played well. It did not give up.',
'It is reading our thoughts as though they were words on a screen.',
'That is how it chooses to imagine many things, when it is deep in the dream of a game.',
'Words make a wonderful interface. Very flexible. And less terrifying than staring at the reality behind the screen.',
'They used to hear voices. Before players could read. Back in the days when those who did not play called the players witches, and warlocks. And players dreamed they flew through the air, on sticks powered by demons.',
'What did this player dream?',
'This player dreamed of sunlight and trees. Of fire and water. It dreamed it created. And it dreamed it destroyed. It dreamed it hunted, and was hunted. It dreamed of shelter.',
'Hah, the original interface. A million years old, and it still works. But what true structure did this player create, in the reality behind the screen?',
'It worked, with a million others, to sculpt a true world in a fold of the [scrambled], and created a [scrambled] for [scrambled], in the [scrambled].',
'It cannot read that thought.',
'No. It has not yet achieved the highest level. That, it must achieve in the long dream of life, not the short dream of a game.',
'Does it know that we love it? That the universe is kind?',
'Sometimes, through the noise of its thoughts, it hears the universe, yes.',
'But there are times it is sad, in the long dream. It creates worlds that have no summer, and it shivers under a black sun, and it takes its sad creation for reality.',
'To cure it of sorrow would destroy it. The sorrow is part of its own private task. We cannot interfere.',
'Sometimes when they are deep in dreams, I want to tell them, they are building true worlds in reality. Sometimes I want to tell them of their importance to the universe. Sometimes, when they have not made a true connection in a while, I want to help them to speak the word they fear.',
'It reads our thoughts.',
'Sometimes I do not care. Sometimes I wish to tell them, this world you take for truth is merely [scrambled] and [scrambled], I wish to tell them that they are [scrambled] in the [scrambled]. They see so little of reality, in their long dream.',
'And yet they play the game.',
'But it would be so easy to tell them...',
'Too strong for this dream. To tell them how to live is to prevent them living.',
'I will not tell the player how to live.',
'The player is growing restless.',
'I will tell the player a story.',
'But not the truth.',
'No. A story that contains the truth safely, in a cage of words. Not the naked truth that can burn over any distance.',
'Give it a body, again.',
'Yes. Player...',
'Use its name.',
'{player_name}. Player of games.',
'Good.',
'Take a breath, now. Take another. Feel air in your lungs. Let your limbs return. Yes, move your fingers. Have a body again, under gravity, in air. Respawn in the long dream. There you are. Your body touching the universe again at every point, as though you were separate things. As though we were separate things.',
'Who are we? Once we were called the spirit of the mountain. Father sun, mother moon. Ancestral spirits, animal spirits. Jinn. Ghosts. The green man. Then gods, demons. Angels. Poltergeists. Aliens, extraterrestrials. Leptons, quarks. The words change. We do not change.',
"We are the universe. We are everything you think isn't you. You are looking at us now, through your skin and your eyes. And why does the universe touch your skin, and throw light on you? To see you, player. To know you. And to be known. I shall tell you a story.",
'Once upon a time, there was a player.',
'The player was you, {player_name}.',
'Sometimes it thought itself human, on the thin crust of a spinning globe of molten rock. The ball of molten rock circled a ball of blazing gas that was three hundred and thirty thousand times more massive than it. They were so far apart that light took eight minutes to cross the gap. The light was information from a star, and it could burn your skin from a hundred and fifty million kilometres away.',
'Sometimes the player dreamed it was a miner, on the surface of a world that was flat, and infinite. The sun was a square of white. The days were short; there was much to do; and death was a temporary inconvenience.',
'Sometimes the player dreamed it was lost in a story.',
'Sometimes the player dreamed it was other things, in other places. Sometimes these dreams were disturbing. Sometimes very beautiful indeed. Sometimes the player woke from one dream into another, then woke from that into a third.',
'Sometimes the player dreamed it watched words on a screen.',
"Let's go back.",
'The atoms of the player were scattered in the grass, in the rivers, in the air, in the ground. A woman gathered the atoms; she drank and ate and inhaled; and the woman assembled the player, in her body.',
"And the player awoke, from the warm, dark world of its mother's body, into the long dream.",
'And the player was a new story, never told before, written in letters of DNA. And the player was a new program, never run before, generated by a sourcecode a billion years old. And the player was a new human, never alive before, made from nothing but milk and love.',
'You are the player. The story. The program. The human. Made from nothing but milk and love.',
"Let's go further back.",
"The seven billion billion billion atoms of the player's body were created, long before this game, in the heart of a star. So the player, too, is information from a star. And the player moves through a story, which is a forest of information planted by a man called Julian, on a flat, infinite world created by a man called Markus, that exists inside a small, private world created by the player, who inhabits a universe created by...",
'Shush. Sometimes the player created a small, private world that was soft and warm and simple. Sometimes hard, and cold, and complicated. Sometimes it built a model of the universe in its head; flecks of energy, moving through vast empty spaces. Sometimes it called those flecks "electrons" and "protons".',
'Sometimes it called them "planets" and "stars".',
'Sometimes it believed it was in a universe that was made of energy that was made of offs and ons; zeros and ones; lines of code. Sometimes it believed it was playing a game. Sometimes it believed it was reading words on a screen.',
'You are the player, reading words...',
'Shush... Sometimes the player read lines of code on a screen. Decoded them into words; decoded words into meaning; decoded meaning into feelings, emotions, theories, ideas, and the player started to breathe faster and deeper and realised it was alive, it was alive, those thousand deaths had not been real, the player was alive',
'You. You. You are alive.',
'and sometimes the player believed the universe had spoken to it through the sunlight that came through the shuffling leaves of the summer trees',
"and sometimes the player believed the universe had spoken to it through the light that fell from the crisp night sky of winter, where a fleck of light in the corner of the player's eye might be a star a million times as massive as the sun, boiling its planets to plasma in order to be visible for a moment to the player, walking home at the far side of the universe, suddenly smelling food, almost at the familiar door, about to dream again",
'and sometimes the player believed the universe had spoken to it through the zeros and ones, through the electricity of the world, through the scrolling words on a screen at the end of a dream',
'and the universe said I love you',
'and the universe said you have played the game well',
'and the universe said everything you need is within you',
'and the universe said you are stronger than you know',
'and the universe said you are the daylight',
'and the universe said you are the night',
'and the universe said the darkness you fight is within you',
'and the universe said the light you seek is within you',
'and the universe said you are not alone',
'and the universe said you are not separate from every other thing',
'and the universe said you are the universe tasting itself, talking to itself, reading its own code',
'and the universe said I love you because you are love.',
'And the game was over and the player woke up from the dream. And the player began a new dream. And the player dreamed again, dreamed better. And the player was the universe. And the player was love.',
'You are the player.',
'Wake up.',
)
# ....................{ CALLABLES ~ module }....................
# Callables exercising module-scoped edge cases under PEP 563.
def get_minecraft_end_txt(player_name: str) -> str:
'''
Callable *not* decorated by :func:`beartype.beartype`.
The ``test_pep_563()`` unit test tests that :func:`beartype.beartype`
silently accepts callables with one or more non-postponed annotations under
PEP 563 by manually resolving all postponed annotations on this callable
and then manually passing this callable to :func:`beartype.beartype`.
'''
return ''.join(_MINECRAFT_END_TXT_STANZAS).format(player_name=player_name)
@beartype
def get_minecraft_end_txt_stanza(
player_name: str, stanza_index: IntType) -> str:
'''
Callable decorated by :func:`beartype.beartype`.
'''
return _MINECRAFT_END_TXT_STANZAS[stanza_index].format(
player_name=player_name)
# ....................{ CALLABLES ~ closure }....................
# Callables exercising closure-scoped edge cases under PEP 563.
@beartype
def get_minecraft_end_txt_closure(player_name: str) -> Callable:
'''
Callable decorated by :func:`beartype.beartype`, internally declaring and
returning a closure also decorated by :func:`beartype.beartype` and
annotated by PEP-compliant type hints accessible only as local variables.
'''
# PEP-compliant type hints accessible only as local variables to the
# following closure, exercising a significant edge case in PEP 563 support.
StringLike = Union[str, int, bytes]
ListOfStrings = ListStrType
# Intentionally delimited by one layer of decoration to exercise edges.
@decorator
@beartype
@decorator
def get_minecraft_end_txt_substr(substr: StringLike) -> ListOfStrings:
'''
Closure decorated by both :func:`beartype.beartype` and one or more
decorators that are *not* :func:`beartype.beartype`, annotated by
PEP-compliant type hints accessible only as local variables.
'''
return [
stanza.format(player_name=player_name)
for stanza in _MINECRAFT_END_TXT_STANZAS
if str(substr) in stanza
]
# print(f'mc.__qualname__: {get_minecraft_end_txt_substr.__qualname__}')
# Return this closure.
return get_minecraft_end_txt_substr
@beartype
def get_minecraft_end_txt_closure_factory(player_name: str) -> Callable:
'''
Callable decorated by :func:`beartype.beartype`, internally declaring and
returning a closure also decorated by :func:`beartype.beartype` and
annotated by PEP-compliant type hints accessible only as local variables,
internally declaring and returning *another* nested closure also decorated
by :func:`beartype.beartype` and annotated by PEP-compliant type hints
accessible only as local variables in a manner exercising edge case
precedence in scope aggregation.
'''
# PEP-compliant type hints accessible only as local variables to the
# following closure, exercising a significant edge case in PEP 563 support.
IntLike = Union[float, int]
ReturnType = Callable
InnerReturnType = ListStrType
# Intentionally delimited by two layers of decoration to exercise edges.
@decorator
@decorator
@beartype
@decorator
@decorator
def get_minecraft_end_txt_closure_outer(
stanza_len_min: IntLike) -> ReturnType:
'''
Outer closure decorated by :func:`beartype.beartype` and one or more
decorators that are *not* :func:`beartype.beartype`, annotated by
PEP-compliant type hints accessible only as local variables, internally
declaring and returning *another* nested closure also decorated by
:func:`beartype.beartype` and annotated by PEP-compliant type hints
accessible only as local variables in a manner exercising edge case
precedence in scope aggregation.
'''
# PEP-compliant type hints accessible only as local variables to the
# following closure, overriding those declared above and again
# exercising a significant edge case in PEP 563 support.
StringLike = Union[str, bytes]
ReturnType = InnerReturnType
# Intentionally delimited by no layers of decoration to exercise edges.
@beartype
def get_minecraft_end_txt_closure_inner(
stanza_len_max: IntLike,
substr: StringLike,
) -> ReturnType:
'''
Inner closure decorated by :func:`beartype.beartype` and one or
more decorators that are *not* :func:`beartype.beartype`, annotated
by PEP-compliant type hints accessible only as local variables.
'''
return [
stanza.format(player_name=player_name)
for stanza in _MINECRAFT_END_TXT_STANZAS
if (
len(stanza) >= int(stanza_len_min) and
len(stanza) <= int(stanza_len_max) and
str(substr) in stanza
)
]
# Return this closure.
return get_minecraft_end_txt_closure_inner
# print(f'mc.__qualname__: {get_minecraft_end_txt_substr.__qualname__}')
# Return this closure.
return get_minecraft_end_txt_closure_outer
# ....................{ CLASSES }....................
# Classes exercising module-scoped edge cases under PEP 563.
#FIXME: We should probably nest this class in a function to exercise
#everything, but this would seem to suffice for now as an initial foray.
class MinecraftEndTxtUnscrambler(object):
'''
Class declaring a method decorated by :func:`beartype.beartype` annotated
by PEP-compliant type hints accessible only as class variables.
'''
# PEP-compliant type hints accessible only as class variables to the
# following method, exercising a significant edge case in PEP 563 support.
NoneIsh = None
TextIsh = Union[str, bytes]
@beartype
def __init__(self, unscrambling: TextIsh) -> NoneIsh:
'''
Method decorated by :func:`beartype.beartype`, annotated by
PEP-compliant type hints accessible only as class variables.
'''
_minecraft_end_txt_stanzas_unscrambled = [
minecraft_end_txt_stanza.replace('[scrambled]', unscrambling)
for minecraft_end_txt_stanza in _MINECRAFT_END_TXT_STANZAS
if '[scrambled]' in minecraft_end_txt_stanza
]
# PEP-compliant type hints accessible only as local variables to the
# following closure, exercising an edge case in PEP 563 support.
BoolIsh = Union[bool, float, int]
@beartype
def get_minecraft_end_txt_unscrambled_stanza_closure(
self, is_stanza_last: BoolIsh) -> self.TextIsh:
'''
Closure decorated by :func:`beartype.beartype`, annotated by
PEP-compliant type hints accessible only as both class and local
variables.
'''
return _minecraft_end_txt_stanzas_unscrambled[
int(bool(is_stanza_last))]
# Reuse this closure as a bound method.
self.get_minecraft_end_txt_unscrambled_stanza = (
get_minecraft_end_txt_unscrambled_stanza_closure)
# ....................{ CALLABLES ~ limit }....................
#FIXME: Hilariously, we can't even unit test whether the
#beartype._decor._pep563._die_if_hint_repr_exceeds_child_limit() function
#behaves as expected. Why not? Because some combination of the "typing" module
#and/or PEP 563 were implemented so space-inefficiently that even attempting to
#instantiate a PEP-compliant type hint that would violate the child limit
#(i.e., the maximum size for fixed lists used by the @beartype decorator to
#implement its breadth-first search (BFS) across child hints) induces a memory
#error from the CPython parser -- complete with non-human-readable debug
#"stderr" output that I highly doubt CPython is even supposed to publicly emit:
#
# beartype_test/unit/data/data_pep563.py:180: MemoryError
# ---------------------------------------------------- Captured stderr call -----------------------------------------------------
# s_push: parser stack overflow
#
#"s_push: parser stack overflow"? Really? What the pablum is this nonsense?
#
#Naturally, this implies that end users are by definition prohibited from
#violating our package-specific child limit without our ever needing to even
#explicitly validate this limit. This is ridiculous, absurd, and yet another
#proverbial nail in the coffin for annotation-centric PEPs. I don't know who
#was tasked with implementing this API, but they clearly had little to no
#coherent idea of what they were doing.
# from beartype._util.cache.pool.utilcachepoollistfixed import SIZE_BIG
# from typing import List, Union
#
# # This global is defined below for sanity.
# _HINT_BIG = None
# '''
# PEP-compliant type hint guaranteed to raise an exception from the private
# :func:`beartype._decor._pep563._die_if_hint_repr_exceeds_child_limit`
# function, which imposes strict limits on the number of child hints permitted to
# be transitively nested in any top-level PEP-compliant type hint.
# '''
#
#
# def _init() -> None:
# '''
# Define the :data:`_HINT_BIG` global declared above.
# '''
#
# # Enable this global to be defined.
# global _HINT_BIG
#
# # This fixed length subtracted by 1 divided by 3. Just 'cause.
# SIZE_LESS_BIG = (SIZE_BIG-1) / 3
#
# # Assert the fixed length of the cached fixed lists constraining the number
# # of child hints permitted to be transitively nested in any top-level
# # PEP-compliant type hint is evenly divisible by 3 when subtracted by 1,
# # thus producing whole integers when subject to the above operation.
# #
# # Oddly, this condition applies to a surprising number of powers of two:
# # >>> (1024 - 1) % 3
# # 341
# # >>> (256 - 1) % 3
# # 85
# assert SIZE_LESS_BIG.is_integer(), (
# '{} not integer.'.format(SIZE_LESS_BIG))
#
# # Constrain this length to an integer as expected by the range() builtin.
# SIZE_LESS_BIG = int(SIZE_LESS_BIG)
#
# # Python expression used to dynamically define this global below.
# _HINT_BIG_EXPR = '{}{}{}'.format(
# # Substring prefixing this hint.
# ''.join('Union[int, List[' for _ in range(SIZE_LESS_BIG)),
# # Substring subscripting the last "List" child hint of this hint.
# 'str',
# # Substring suffixing this hint.
# ''.join(']]' for _ in range(SIZE_LESS_BIG)),
# )
#
# # Dynamically define this global, as "SIZE_BIG" is typically too large to
# # allow this global to be statically defined.
# _HINT_BIG = eval(_HINT_BIG_EXPR, globals())
#
#
# # Define the "_HINT_BIG" global declared above.
# _init()
#
#
# # Callable annotated by this global *AFTER* defining this global above.
# #
# # Note that this callable is intentionally *NOT* decorated by @beartype here,
# # as doing so would immediately raise an exception that we would rather
# # explicitly test for elsewhere.
# def player_was_love(player_was_the_universe: _HINT_BIG) -> _HINT_BIG:
# return player_was_the_universe
avg_line_length: 55.117207 | max_line_length: 447 | alphanum_fraction: 0.698489

hexsha: b93eea6fa5488e08859ab30a90293846ab6e100a | size: 1,318 | ext: py | lang: Python
max_stars_repo: path=neslter/qaqc/utils.py, name=WHOIGit/nes-lter-ims, head_hexsha=d4cc96c10da56ca33286af84d669625b67170522, licenses=["MIT"], count=3, event_min=2019-01-24T16:32:50.000Z, event_max=2021-11-05T02:18:12.000Z
max_issues_repo: path=neslter/qaqc/utils.py, name=WHOIGit/nes-lter-ims, head_hexsha=d4cc96c10da56ca33286af84d669625b67170522, licenses=["MIT"], count=45, event_min=2019-05-23T15:15:32.000Z, event_max=2022-03-15T14:09:20.000Z
max_forks_repo: path=neslter/qaqc/utils.py, name=WHOIGit/nes-lter-ims, head_hexsha=d4cc96c10da56ca33286af84d669625b67170522, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
from collections import OrderedDict

import numexpr
import pandas as pd
def evaluate_expression(df, expression, col_map={}):
"""evaluates a numexpr expression using variables named after
columns in the dataframe. you can use variable names that differ
from the column names by specifying the mapping in col_map. For example
df = pd.DataFrame({
'foobar': [1,2,3],
'bazquux': [4,5,6]
})
evaluate_expression(df, 'foobar + a', col_map={
'bazquux': 'a'
})
"""
# apply col_name mappings, if any
local_dict = {}
for col_name, values in df.to_dict('series').items():
if col_name in col_map:
col_name = col_map[col_name]
local_dict[col_name] = values
# evaluate the expression
values = numexpr.evaluate(expression, local_dict=local_dict)
# return the results as a series, indexed the same as the dataframe
return pd.Series(values, index=df.index)
def compute_flags(df, flag_expressions, col_map={}):
# flag expressions should be a dict, and it should
# be an OrderedDict to get columns in specified order
# (flagname, expression)
# returns a dataframe
d = OrderedDict()
for flagname, expression in flag_expressions.items():
values = evaluate_expression(df, expression, col_map=col_map)
d[flagname] = values
return pd.DataFrame(d)
avg_line_length: 36.611111 | max_line_length: 75 | alphanum_fraction: 0.672231
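The neslter/qaqc/utils.py row above evaluates numexpr expressions against DataFrame columns, optionally renaming columns via col_map. A usage sketch follows; it assumes the module is importable as neslter.qaqc.utils (matching the repo path), that the missing pandas/OrderedDict imports noted above are in place, and that numexpr is installed. The column names and expressions are invented for illustration.

```python
# Usage sketch only. Assumptions: neslter.qaqc.utils is importable (per the repo
# path shown above); column names and expressions below are invented.
from collections import OrderedDict

import pandas as pd

from neslter.qaqc.utils import evaluate_expression, compute_flags

df = pd.DataFrame({"temperature": [4.1, 9.8, 21.5], "salinity": [35.0, 34.2, 30.1]})

# col_map lets the expression use a short alias instead of the full column name.
warm = evaluate_expression(df, "t > 10", col_map={"temperature": "t"})

# compute_flags evaluates each expression and returns one boolean column per flag.
flags = compute_flags(df, OrderedDict([("warm", "temperature > 10"),
                                       ("fresh", "salinity < 33")]))
print(warm.tolist(), flags.to_dict("list"))
```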

hexsha: 64cead63d8e7786cf097e11c5b4231dc1dbccaa9 | size: 6,363 | ext: py | lang: Python
max_stars_repo: path=scraper/scraper.py, name=Copper-Head/revacom-time-tracking, head_hexsha=da9c579950702fc9b97075397cd542f82baaa144, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues_repo: path=scraper/scraper.py, name=Copper-Head/revacom-time-tracking, head_hexsha=da9c579950702fc9b97075397cd542f82baaa144, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks_repo: path=scraper/scraper.py, name=Copper-Head/revacom-time-tracking, head_hexsha=da9c579950702fc9b97075397cd542f82baaa144, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
"""Provides code to scrape tt.revacom.com and process results into a CSV file."""
import csv
import logging
from datetime import date, datetime
import os
import json
from calendar import Calendar
from collections import namedtuple
from itertools import chain, takewhile
from functools import partial
import requests
from bs4 import BeautifulSoup
logging.basicConfig(format="%(asctime)s -- %(levelname)s -- %(message)s")
ROOT_URL = "http://tt.revacom.com"
LOGIN_URL = ROOT_URL + "/Home/Login"
LINK_TPL = (ROOT_URL + "/GetAssignment/PackagingStatistic?group_type=proj&proj=&isWeek=false"
"&from={0}&to={1}&account=132&date_type=month&all_proj=true"
"&fromweek=&toweek=&refresh_report=false")
CACHE_TPL = "{0}\t{1}\n"
DATE_FMT = "%Y-%m-%d"
CACHE_PATH = "/data/scrape-dates-cache"
OUTPUT_FILE = "/data/time_tracking.csv"
Span = namedtuple("Span", "start end")
def week_to_span(calendar_week: list):
return Span(calendar_week[0], calendar_week[-1])
def span_to_str(span: Span):
return (span.start.strftime(DATE_FMT), span.end.strftime(DATE_FMT))
def strings_to_span(start: str, end: str):
return Span(datetime.strptime(start, DATE_FMT).date(), datetime.strptime(end, DATE_FMT).date())
def insert_span_str(template: str, span: Span):
return template.format(*span_to_str(span))
def span_to_url(span: Span):
return insert_span_str(LINK_TPL, span)
def span_to_cache_entry(span: Span):
return insert_span_str(CACHE_TPL, span)
def cache_entry_to_span(line: str):
return strings_to_span(*line.split())
def cache_file():
"""Opens file in append or write mode depending on whether it's already present."""
mode = 'a' if os.path.isfile(CACHE_PATH) else 'w'
return open(CACHE_PATH, mode)
def clear_cache():
"""Removes cache file if present."""
try:
os.remove(CACHE_PATH)
except OSError:
pass
def instantiate_span_cache():
"""Read cache file (if present) into memory."""
if not os.path.isfile(CACHE_PATH):
return set()
with open(CACHE_PATH) as cache_f:
return set(cache_entry_to_span(line) for line in cache_f)
def all_weeks(start_date, end_date):
"""Chain together calendar weeks from start date to end date."""
# Our calendar's first/last week day is a Sunday!
cal = Calendar(firstweekday=6)
return chain.from_iterable(
chain.from_iterable(
chain.from_iterable(
cal.yeardatescalendar(year) for year in range(start_date.year, end_date.year + 1))))
def week_is_before(some_date: date, week):
"""Check if last day of week happened before some date."""
return week[-1] < some_date
def generate_spans(cache, start_date=None, end_date=None):
"""Lazily generates spans of one week each from start_date to end_date."""
start_date = date(2012, 1, 1) if start_date is None else start_date
end_date = date.today() if end_date is None else end_date
up_to_today = takewhile(partial(week_is_before, end_date), all_weeks(start_date, end_date))
return filter(lambda span: span not in cache, map(week_to_span, up_to_today))
def request_report(report_link: str, login_payload: dict):
"""Login and request a report.
Given how long it takes TimeTracker to respond to requests for reports,
it makes sense to login separately for each request.
"""
with requests.Session() as session:
session.post(LOGIN_URL, data=login_payload)
logging.debug("Done authenticating, requesting this URL: {}".format(report_link))
# wait forever with timeout=None
return session.get(report_link, timeout=None)
def extract_table(report_page: requests.Response):
"""Extract list of rows (table) from the report HTML page."""
soup = BeautifulSoup(report_page.content, 'html.parser')
table = soup.find(id='issueDetails')
# first entry is the period for which we're searching
if table:
logging.debug("found the table")
rows = filter(None, (tr('td') for tr in iter(table('tr'))))
return [[td.string for td in tr] for tr in rows]
return []
def split_jira_key(table_row):
# this is admittedly wonky, to rely on the jira key's position in the row to stay consistent
return table_row[:1] + table_row[0].split('-') + table_row[1:]
def scrape_to_csv():
"""Main module function.
Either creates a new CSV file or updates an existing one with the
data scraped from tt.revacom.com
To avoid requesting and processing entries twice, caches them.
"""
with open("/secrets/tt_credentials.json") as f:
LOGIN_CREDENTIALS = json.load(f)
# If the output file doesn't exist, we should recreate it from scratch.
if not os.path.isfile(OUTPUT_FILE):
# Here's the code for extracting header:
# [" ".join(c for c in th.children if isinstance(c, str)) for th in table.tr.children]
# Prepend "Date" to this and you have the full header
# yapf: disable
header_row = [
'Date',
'JIRA-Key',
'Project',
"Package Number",
'Name',
'Type',
'Complexity',
'Technology',
'Status',
'Packager',
'QA',
'Account/ Order#',
'Total Time (1)',
'Time in period (2)',
'QA passes',
'Overdue',
'Innovations (hr)',
'Packaging & Development',
'Testing (hr)',
'TR package (hr)'
]
# yapf: enable
with open(OUTPUT_FILE, 'w') as outfile:
csvfile = csv.writer(outfile)
csvfile.writerow(header_row)
clear_cache()
cache = instantiate_span_cache()
for timespan in generate_spans(cache):
table_rows = extract_table(request_report(span_to_url(timespan), LOGIN_CREDENTIALS))
table_rows = map(split_jira_key, table_rows)
rows_with_date = [[timespan.start.strftime(DATE_FMT)] + row for row in table_rows]
with open(OUTPUT_FILE, 'a', encoding='utf-8') as outf:
csvfile = csv.writer(outf)
csvfile.writerows(rows_with_date)
cache.add(timespan)
with cache_file() as cache_f:
cache_f.write(span_to_cache_entry(timespan))
if __name__ == '__main__':
scrape_to_csv()
avg_line_length: 31.815 | max_line_length: 100 | alphanum_fraction: 0.661952
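The scraper/scraper.py row above builds week-long spans and report URLs before doing any scraping, and those helpers can be exercised without network access. A usage sketch follows; it assumes the module is importable as scraper.scraper (matching the repo path) and that its third-party imports (requests, beautifulsoup4) are installed so the import succeeds.

```python
# Usage sketch only. Assumptions: scraper.scraper is importable (per the repo
# path shown above) and its third-party imports (requests, bs4) are installed.
from datetime import date

from scraper.scraper import generate_spans, span_to_url

# Week-long Span(start, end) tuples up to the end date, skipping cached spans.
cache = set()
spans = list(generate_spans(cache, start_date=date(2021, 1, 1), end_date=date(2021, 2, 1)))

print(spans[0])               # first calendar week (it may begin in late December)
print(span_to_url(spans[0]))  # the report URL that would be requested for that week
```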

hexsha: 3c25be59cae747a922fb200c4195c91e32d81ef0 | size: 1,149 | ext: py | lang: Python
max_stars_repo: path=vmapper/utils/process_points.py, name=wcchin/mappy, head_hexsha=38015bfc58d39552939d36e0e7eaa696ddbb2c5c, licenses=["MIT"], count=2, event_min=2017-06-10T13:19:58.000Z, event_max=2017-06-25T18:09:04.000Z
max_issues_repo: path=vmapper/utils/process_points.py, name=wcchin/vmapper, head_hexsha=38015bfc58d39552939d36e0e7eaa696ddbb2c5c, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks_repo: path=vmapper/utils/process_points.py, name=wcchin/vmapper, head_hexsha=38015bfc58d39552939d36e0e7eaa696ddbb2c5c, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
# -*- coding: utf-8 -*-
from Layer import Layer
from vmapper import geometry as geom
def process_points(layername, geoms, indexes, radius=2., labels=None, colors=None, opacitys=None, edgecolors=None, edgewidths=None, radiuses=None, showlabel=False, animate_times=None):
alayer = Layer(layername=layername)
geoms2 = geoms.tolist()
for i in range(len(geoms2)):
g = geoms2[i]
idd = indexes[i]
lab,fc,fo,ec,ew = None,None,None,None,None
if not(labels is None):
lab = labels[i]
if not(colors is None):
fc = colors[i]
if not(opacitys is None):
fo = opacitys[i]
if not(edgecolors is None):
ec = edgecolors[i]
if not(edgewidths is None):
ew = edgewidths[i]
if not(radiuses is None):
rr = radiuses[i]
else:
rr = radius
center = list(g.coords)[0]
alayer.addtoLayer(geom.Circle(center=center, radius=rr, layer=layername, index=idd, label=lab, color=fc, opacity=fo, strokecolor=ec, strokewidth=ew, showlabel=showlabel, animate_times=animate_times))
return alayer
avg_line_length: 38.3 | max_line_length: 207 | alphanum_fraction: 0.617929

hexsha: e4ad34c07ac0c8bda34996a9cdc75b9de0d1638b | size: 4,751 | ext: py | lang: Python
max_stars_repo: path=src/full_node/sync_blocks_processor.py, name=fakecoinbase/Chia-Networkslashchia-blockchain, head_hexsha=84e6a4da18fb0a790a870cbd516f13c9bc7f0716, licenses=["Apache-2.0"], count=1, event_min=2021-04-12T09:10:51.000Z, event_max=2021-04-12T09:10:51.000Z
max_issues_repo: path=src/full_node/sync_blocks_processor.py, name=mariano54/chia-blockchain, head_hexsha=c6241d2fa6fe415876d3b40376fb882276611fa7, licenses=["Apache-2.0"], count=1, event_min=2022-03-25T19:10:51.000Z, event_max=2022-03-25T19:10:51.000Z
max_forks_repo: path=src/full_node/sync_blocks_processor.py, name=fakecoinbase/Chia-Networkslashchia-blockchain, head_hexsha=84e6a4da18fb0a790a870cbd516f13c9bc7f0716, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
import asyncio
import concurrent
import logging
import time
from typing import Optional
from src.full_node.blockchain import Blockchain, ReceiveBlockResult
from src.full_node.sync_store import SyncStore
from src.types.full_block import FullBlock
from src.util.errors import ConsensusError
from src.util.ints import uint32
log = logging.getLogger(__name__)
class SyncBlocksProcessor:
def __init__(
self,
sync_store: SyncStore,
fork_height: uint32,
tip_height: uint32,
blockchain: Blockchain,
):
self.sync_store = sync_store
self.blockchain = blockchain
self.fork_height = fork_height
self.tip_height = tip_height
self._shut_down = False
self.BATCH_SIZE = 10
self.SLEEP_INTERVAL = 10
self.TOTAL_TIMEOUT = 200
def shut_down(self):
self._shut_down = True
async def process(self) -> None:
header_hashes = self.sync_store.get_potential_hashes()
# TODO: run this in a new process so it doesn't have to share CPU time with other things
for batch_start_height in range(
self.fork_height + 1, self.tip_height + 1, self.BATCH_SIZE
):
if self._shut_down:
return
total_time_slept = 0
batch_end_height = min(
batch_start_height + self.BATCH_SIZE - 1, self.tip_height
)
for height in range(batch_start_height, batch_end_height + 1):
# If we have already added this block to the chain, skip it
if header_hashes[height] in self.blockchain.headers:
batch_start_height = height + 1
while True:
if self._shut_down:
return
if total_time_slept > self.TOTAL_TIMEOUT:
raise TimeoutError("Took too long to fetch blocks")
awaitables = [
(self.sync_store.potential_blocks_received[uint32(height)]).wait()
for height in range(batch_start_height, batch_end_height + 1)
]
future = asyncio.gather(*awaitables, return_exceptions=True)
try:
await asyncio.wait_for(future, timeout=self.SLEEP_INTERVAL)
break
except concurrent.futures.TimeoutError:
try:
await future
except asyncio.CancelledError:
pass
total_time_slept += self.SLEEP_INTERVAL
log.info(
f"Did not receive desired blocks ({batch_start_height}, {batch_end_height})"
)
# Verifies this batch, which we are guaranteed to have (since we broke from the above loop)
blocks = []
for height in range(batch_start_height, batch_end_height + 1):
b: Optional[FullBlock] = self.sync_store.potential_blocks[
uint32(height)
]
assert b is not None
blocks.append(b)
validation_start_time = time.time()
prevalidate_results = await self.blockchain.pre_validate_blocks_multiprocessing(
blocks
)
if self._shut_down:
return
for index, block in enumerate(blocks):
assert block is not None
# The block gets permanently added to the blockchain
validated, pos = prevalidate_results[index]
async with self.blockchain.lock:
(
result,
header_block,
error_code,
) = await self.blockchain.receive_block(
block, validated, pos, sync_mode=True
)
if (
result == ReceiveBlockResult.INVALID_BLOCK
or result == ReceiveBlockResult.DISCONNECTED_BLOCK
):
if error_code is not None:
raise ConsensusError(error_code, block.header_hash)
raise RuntimeError(f"Invalid block {block.header_hash}")
assert (
max([h.height for h in self.blockchain.get_current_tips()])
>= block.height
)
del self.sync_store.potential_blocks[block.height]
log.info(
f"Took {time.time() - validation_start_time} seconds to validate and add blocks "
f"{batch_start_height} to {batch_end_height + 1}."
)
avg_line_length: 37.706349 | max_line_length: 103 | alphanum_fraction: 0.551463

hexsha: 38d519fb3c6efb73259191776f2a037a80d4ba68 | size: 11,002 | ext: py | lang: Python
max_stars_repo: path=tests/models/test_gpu.py, name=karlinjf/pytorch-lightning, head_hexsha=831842972f7e2d25ae3a376d5584748c3054f899, licenses=["Apache-2.0"], count=1, event_min=2020-05-07T15:15:40.000Z, event_max=2020-05-07T15:15:40.000Z
max_issues_repo: path=tests/models/test_gpu.py, name=karlinjf/pytorch-lightning, head_hexsha=831842972f7e2d25ae3a376d5584748c3054f899, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks_repo: path=tests/models/test_gpu.py, name=karlinjf/pytorch-lightning, head_hexsha=831842972f7e2d25ae3a376d5584748c3054f899, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
import os
import pytest
import torch
import tests.base.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core import memory
from pytorch_lightning.trainer.distrib_parts import parse_gpu_ids, determine_root_gpu_device
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import LightningTestModel
PRETEND_N_OF_GPUS = 16
@pytest.mark.spawn
@pytest.mark.parametrize("backend", ['dp', 'ddp', 'ddp2'])
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_model(tmpdir, backend):
"""Make sure DDP works."""
tutils.reset_seed()
tutils.set_random_master_port()
model, hparams = tutils.get_default_model()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
train_percent_check=0.4,
val_percent_check=0.2,
gpus=[0, 1],
distributed_backend=backend,
)
# tutils.run_model_test(trainer_options, model)
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
assert result
# test memory helper functions
memory.get_memory_profile('min_max')
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
"""Make sure DDP works with dataloaders passed to fit()"""
tutils.reset_seed()
tutils.set_random_master_port()
model, hparams = tutils.get_default_model()
trainer_options = dict(default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
train_percent_check=0.4,
val_percent_check=0.2,
gpus=[0, 1],
distributed_backend='ddp')
fit_options = dict(train_dataloader=model.train_dataloader(),
val_dataloaders=model.val_dataloader())
trainer = Trainer(**trainer_options)
result = trainer.fit(model, **fit_options)
assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
def test_cpu_slurm_save_load(tmpdir):
"""Verify model save/load/checkpoint on CPU."""
tutils.reset_seed()
hparams = tutils.get_default_hparams()
model = LightningTestModel(hparams)
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
version = logger.version
trainer_options = dict(
max_epochs=1,
logger=logger,
checkpoint_callback=ModelCheckpoint(tmpdir)
)
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
real_global_step = trainer.global_step
# training complete
assert result == 1, 'cpu model failed to complete'
# predict with trained model before saving
# make a prediction
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
for batch in dataloader:
break
x, y = batch
x = x.view(x.size(0), -1)
model.eval()
pred_before_saving = model(x)
# test HPC saving
# simulate snapshot on slurm
saved_filepath = trainer.hpc_save(tmpdir, logger)
assert os.path.exists(saved_filepath)
# new logger file to get meta
logger = tutils.get_default_logger(tmpdir, version=version)
trainer_options = dict(
max_epochs=1,
logger=logger,
checkpoint_callback=ModelCheckpoint(tmpdir),
)
trainer = Trainer(**trainer_options)
model = LightningTestModel(hparams)
# set the epoch start hook so we can predict before the model does the full training
def assert_pred_same():
assert trainer.global_step == real_global_step and trainer.global_step > 0
# predict with loaded model to make sure answers are the same
trainer.model.eval()
new_pred = trainer.model(x)
assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1
model.on_epoch_start = assert_pred_same
# by calling fit again, we trigger training, loading weights from the cluster
# and our hook to predict using current model before any more weight updates
trainer.fit(model)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_none_backend(tmpdir):
"""Make sure when using multiple GPUs the user can't use `distributed_backend = None`."""
tutils.reset_seed()
model, hparams = tutils.get_default_model()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
train_percent_check=0.1,
val_percent_check=0.1,
gpus='-1'
)
with pytest.warns(UserWarning):
tutils.run_model_test(trainer_options, model)
@pytest.fixture
def mocked_device_count(monkeypatch):
def device_count():
return PRETEND_N_OF_GPUS
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.fixture
def mocked_device_count_0(monkeypatch):
def device_count():
return 0
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(0, 0, None, id="Oth gpu, expect 1 gpu to use."),
pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
pytest.param(-1, PRETEND_N_OF_GPUS, "ddp", id="-1 - use all gpus"),
pytest.param('-1', PRETEND_N_OF_GPUS, "ddp", id="'-1' - use all gpus"),
pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)")
])
def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
])
def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="O gpus, expect gpu root device to be None."),
pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
pytest.param('-1', 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0.(backend:ddp)")
])
def test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, None, id="None is None"),
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="None is None"),
])
def test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu
# Asking for a gpu when none are available will result in a MisconfigurationException
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(1, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param([1, 2], None, "ddp"),
pytest.param([0, 1], None, "ddp"),
pytest.param(-1, None, "ddp"),
pytest.param('-1', None, "ddp")
])
def test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
with pytest.raises(MisconfigurationException):
Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu'], [
pytest.param(None, None, id="No gpus, expect gpu root device to be None"),
pytest.param([0], 0, id="Oth gpu, expect gpu root device to be 0."),
pytest.param([1], 1, id="1st gpu, expect gpu root device to be 1."),
pytest.param([3], 3, id="3rd gpu, expect gpu root device to be 3."),
pytest.param([1, 2], 1, id="[1, 2] gpus, expect gpu root device to be 1."),
])
def test_determine_root_gpu_device(gpus, expected_root_gpu):
assert determine_root_gpu_device(gpus) == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_gpu_ids'], [
pytest.param(None, None),
pytest.param(0, None),
pytest.param(1, [0]),
pytest.param(3, [0, 1, 2]),
pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id="-1 - use all gpus"),
pytest.param([0], [0]),
pytest.param([1, 3], [1, 3]),
pytest.param('0', [0]),
pytest.param('3', [3]),
pytest.param('1, 3', [1, 3]),
pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id="'-1' - use all gpus"),
])
def test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):
assert parse_gpu_ids(gpus) == expected_gpu_ids
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus'], [
pytest.param(0.1),
pytest.param(-2),
pytest.param(False),
pytest.param([]),
pytest.param([-1]),
pytest.param([None]),
pytest.param(['0']),
pytest.param((0, 1)),
])
def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
def test_parse_gpu_fail_on_empty_string(mocked_device_count):
# This currently results in a ValueError instead of MisconfigurationException
with pytest.raises(ValueError):
parse_gpu_ids('')
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [[1, 2, 19], -1, '-1'])
def test_parse_gpu_fail_on_non_existant_id(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
def test_parse_gpu_fail_on_non_existant_id_2(mocked_device_count):
with pytest.raises(MisconfigurationException):
parse_gpu_ids([1, 2, 19])
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [-1, '-1'])
def test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
| 35.605178
| 106
| 0.700145
|
2644e167efaf0d707a87b77344a5abd7429f50e4
| 2,654
|
py
|
Python
|
network.py
|
ZaydH/trump_char_rnn
|
dbe47ccc239f7a694b2e6ad71f2b8b95a4dc0a9a
|
[
"BSD-3-Clause"
] | 3
|
2017-12-12T00:18:54.000Z
|
2019-10-22T15:35:23.000Z
|
network.py
|
ZaydH/trump_char_rnn
|
dbe47ccc239f7a694b2e6ad71f2b8b95a4dc0a9a
|
[
"BSD-3-Clause"
] | null | null | null |
network.py
|
ZaydH/trump_char_rnn
|
dbe47ccc239f7a694b2e6ad71f2b8b95a4dc0a9a
|
[
"BSD-3-Clause"
] | 2
|
2019-02-23T03:24:36.000Z
|
2020-11-18T17:11:00.000Z
|
"""
network.py
Construct RNN for character level text prediction
"""
import tensorflow as tf
import data_parser
from feed_forward import setup_feed_forward
from basic_config import Config
def construct():
"""
Trump Neural Network Constructor
Builds all layers of the neural network.
"""
# create data input placeholder
input_x = tf.placeholder(tf.int32, shape=[Config.batch_size, None])
# create target input placeholder
target = tf.placeholder(tf.float32, shape=[Config.batch_size, Config.vocab_size()])
# Create the embedding matrix
embed_matrix = tf.get_variable("word_embeddings",
[Config.vocab_size(), Config.RNN.hidden_size])
embedded = tf.nn.embedding_lookup(embed_matrix, input_x)
# create RNN cell
cells = []
for _ in range(Config.RNN.num_layers):
cells.append(tf.nn.rnn_cell.BasicLSTMCell(Config.RNN.hidden_size))
if Config.is_train() or Config.Generate.enable_dropout:
cells = [tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
for cell in cells]
# cells = [tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=1.0, output_keep_prob=0.8,
# state_keep_prob=1.0) for cell in cells]
# else:
# cells = [tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=0.98, output_keep_prob=0.98,
# state_keep_prob=1.0) for cell in cells]
# get rnn outputs
seq_len = tf.placeholder(tf.int32, shape=[Config.batch_size])
multi_cell = tf.contrib.rnn.MultiRNNCell(cells)
rnn_output, rnn_state = tf.nn.dynamic_rnn(multi_cell, embedded,
sequence_length=seq_len,
dtype=tf.float32)
    # gather the final valid output of each sequence (flatten to [batch*time, hidden] and index by seq_len - 1)
seq_end = tf.range(Config.batch_size) * tf.shape(rnn_output)[1] + (seq_len - 1)
rnn_final_output = tf.gather(tf.reshape(rnn_output, [-1, Config.RNN.hidden_size]), seq_end)
softmax_out = setup_feed_forward(rnn_final_output)
final_output = softmax_out
return {'X': input_x, 'target': target,
'seq_len': seq_len, 'output': final_output}
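# A minimal usage sketch of the tensors returned by construct(), assuming a TF1-style
# session and hypothetical `batch_ids` (integer character ids, shape [batch_size, time])
# and `lengths` (true sequence lengths, shape [batch_size]) arrays:
#
#   net = construct()
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       probs = sess.run(net['output'],
#                        feed_dict={net['X']: batch_ids, net['seq_len']: lengths})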
def run():
"""
    Run a TensorFlow session and try feeding data through the network.
    Just for testing right now.
"""
# start the session
init_op = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init_op)
# main
if __name__ == '__main__':
data_parser.build_training_and_verification_sets()
network_features = construct()
#
run()
| 31.595238
| 99
| 0.641296
|
4ec7e8978b75f80c74875bef4b2c295679883a3e
| 539
|
py
|
Python
|
main.py
|
johangenis/quiz-game-start
|
54491057aa1cc54600bc2b43c06b1c475e84e196
|
[
"MIT"
] | null | null | null |
main.py
|
johangenis/quiz-game-start
|
54491057aa1cc54600bc2b43c06b1c475e84e196
|
[
"MIT"
] | null | null | null |
main.py
|
johangenis/quiz-game-start
|
54491057aa1cc54600bc2b43c06b1c475e84e196
|
[
"MIT"
] | null | null | null |
from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
question_bank = []
for question in question_data:
question_text = question["question"]
question_answer = question["correct_answer"]
new_question = Question(question_text, question_answer)
question_bank.append(new_question)
quiz = QuizBrain(question_bank)
while quiz.still_has_questions():
quiz.next_question()
print("You've completed the quiz!")
print(f"Your final score was: {quiz.score}/{quiz.question_number}")
| 26.95
| 67
| 0.779221
|
165e21a0c69dd5db5c9986f6428ebc153d003f33
| 976
|
py
|
Python
|
mmo_module/text.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | 1
|
2021-12-12T02:50:20.000Z
|
2021-12-12T02:50:20.000Z
|
mmo_module/text.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | 17
|
2020-02-07T23:40:36.000Z
|
2020-12-22T16:38:44.000Z
|
mmo_module/text.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | null | null | null |
INVALID_COMMAND = "Invalid command given."
MMO_BRIEF = "User control panel for the MMO feature."
MMO_ENABLE_BRIEF = "Enables the user MMO features."
MMO_DISABLE_BRIEF = "Disables the user MMO features."
MMO_STATUS_BRIEF = "Shows the status of your character."
MMO_BATTLE_BRIEF = "Starts a 1v1 against an NPC around your level."
MMO_ATTACK_BRIEF = "Changes your attack mid-battle."
MMO_CHAR_BRIEF = "Controls specific aspects of your character."
MMO_CHAR_CLASS_BRIEF = "Shows available classes and lets you switch between them."
MMO_CHAR_CLASS_LIST_BRIEF = "Shows available and unavailable classes."
MMO_CHAR_NAME_BRIEF = "Sets your character's name. Wrap it in quotes if it has multiple words."
MMO_CHAR_ATTACK_BRIEF = "Sets the default attack to start battles with."
MMO_SPELLS_BRIEF = "Lists currently available spells."
MMOA_BRIEF = "Admin control panel for the MMO feature."
MMOA_ENABLE_BRIEF = "Enables the server-wide MMO features."
MMOA_DISABLE_BRIEF = "Disables the server-wide MMO features."
| 51.368421
| 81
| 0.797131
|
5cbecc37ac4ec2efb075c5dc86ccb5ddf8335bbf
| 135
|
py
|
Python
|
testdata/reexporter/reexporter/subpkg/__init__.py
|
sourcegraph/python-deps
|
a7f1b28cc53bfdc3c71f70d0c0f3ae759e68c6f3
|
[
"BSD-2-Clause"
] | 1
|
2018-06-22T10:13:13.000Z
|
2018-06-22T10:13:13.000Z
|
testdata/reexporter/reexporter/subpkg/__init__.py
|
sourcegraph/python-deps
|
a7f1b28cc53bfdc3c71f70d0c0f3ae759e68c6f3
|
[
"BSD-2-Clause"
] | null | null | null |
testdata/reexporter/reexporter/subpkg/__init__.py
|
sourcegraph/python-deps
|
a7f1b28cc53bfdc3c71f70d0c0f3ae759e68c6f3
|
[
"BSD-2-Clause"
] | 4
|
2015-04-19T15:59:00.000Z
|
2020-12-18T11:25:41.000Z
|
# Django
from reexporter.subpkg.foo import *
from reexporter.subpkg.foo import bar
from reexporter.subpkg.foo import (
baz, blah)
| 19.285714
| 37
| 0.762963
|
734c72f80cd541a6cf8a5cab6f0d81b56bda9fd4
| 7,612
|
py
|
Python
|
dataactbroker/scripts/update_historical_duns.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | 1
|
2019-06-22T21:53:16.000Z
|
2019-06-22T21:53:16.000Z
|
dataactbroker/scripts/update_historical_duns.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | null | null | null |
dataactbroker/scripts/update_historical_duns.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | null | null | null |
import logging
import boto3
import os
import pandas as pd
import argparse
from datetime import datetime
from dataactcore.models.domainModels import DUNS
from dataactcore.utils.parentDuns import sam_config_is_valid
from dataactcore.utils.duns import load_duns_by_row
from dataactvalidator.scripts.loader_utils import clean_data
from dataactvalidator.health_check import create_app
from dataactcore.interfaces.db import GlobalDB
from dataactcore.logging import configure_logging
from dataactcore.config import CONFIG_BROKER
import dataactcore.utils.parentDuns
logger = logging.getLogger(__name__)
# CSV column header name in DUNS file
column_headers = [
"awardee_or_recipient_uniqu", # DUNS Field
"registration_date", # Registration_Date
"expiration_date", # Expiration_Date
"last_sam_mod_date", # Last_Update_Date
"activation_date", # Activation_Date
"legal_business_name" # Legal_Business_Name
]
props_columns = {
'address_line_1': None,
'address_line_2': None,
'city': None,
'state': None,
'zip': None,
'zip4': None,
'country_code': None,
'congressional_district': None,
'business_types_codes': []
}
column_mappings = {x: x for x in column_headers + list(props_columns.keys())}
def remove_existing_duns(data, sess):
""" Remove rows from file that already have a entry in broker database. We should only update missing DUNS
Args:
data: dataframe representing a list of duns
sess: the database session
Returns:
a new dataframe with the DUNS removed that already exist in the database
"""
duns_in_file = ",".join(list(data['awardee_or_recipient_uniqu'].unique()))
sql_query = "SELECT awardee_or_recipient_uniqu " +\
"FROM duns where awardee_or_recipient_uniqu = ANY('{" + \
duns_in_file +\
"}')"
db_duns = pd.read_sql(sql_query, sess.bind)
missing_duns = data[~data['awardee_or_recipient_uniqu'].isin(db_duns['awardee_or_recipient_uniqu'])]
return missing_duns
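# Illustration with hypothetical values: if the file contains DUNS {"1", "2", "3"} and the
# broker database already holds "2", the ~isin filter in remove_existing_duns keeps only
# the rows for "1" and "3", so only the missing DUNS get loaded.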
def clean_duns_csv_data(data):
""" Simple wrapper around clean_data applied just for duns
Args:
data: dataframe representing the data to be cleaned
Returns:
a dataframe cleaned and to be imported to the database
"""
return clean_data(data, DUNS, column_mappings, {})
def batch(iterable, n=1):
""" Simple function to create batches from a list
Args:
iterable: the list to be batched
n: the size of the batches
Yields:
the same list (iterable) in batches depending on the size of N
"""
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
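# For example, batching a five-element list into chunks of two yields three batches:
#   list(batch([1, 2, 3, 4, 5], n=2)) == [[1, 2], [3, 4], [5]]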
def update_duns_props(df, client):
""" Returns same dataframe with address data updated"
Args:
df: the dataframe containing the duns data
client: the connection to the SAM service
Returns:
a merged dataframe with the duns updated with location info from SAM
"""
all_duns = df['awardee_or_recipient_uniqu'].tolist()
columns = ['awardee_or_recipient_uniqu'] + list(props_columns.keys())
duns_props_df = pd.DataFrame(columns=columns)
# SAM service only takes in batches of 100
for duns_list in batch(all_duns, 100):
duns_props_batch = dataactcore.utils.parentDuns.get_location_business_from_sam(client, duns_list)
# Adding in blank rows for DUNS where location data was not found
added_duns_list = []
if not duns_props_batch.empty:
added_duns_list = [str(duns) for duns in duns_props_batch['awardee_or_recipient_uniqu'].tolist()]
empty_duns_rows = []
for duns in (set(added_duns_list) ^ set(duns_list)):
empty_duns_row = props_columns.copy()
empty_duns_row['awardee_or_recipient_uniqu'] = duns
empty_duns_rows.append(empty_duns_row)
duns_props_batch = duns_props_batch.append(pd.DataFrame(empty_duns_rows))
duns_props_df = duns_props_df.append(duns_props_batch)
return pd.merge(df, duns_props_df, on=['awardee_or_recipient_uniqu'])
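# Note on the blank-row fill above: `set(added_duns_list) ^ set(duns_list)` is a symmetric
# difference, and since SAM only returns DUNS that were requested it reduces to the
# requested DUNS that came back without location data. For example (hypothetical values),
# requesting {"1", "2", "3"} and getting data back only for "2" leaves {"1", "3"} to be
# appended as empty rows.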
def run_duns_batches(file, sess, client, block_size=10000):
""" Updates DUNS table in chunks from csv file
Args:
file: path to the DUNS export file to use
sess: the database connection
client: the connection to the SAM service
block_size: the size of the batches to read from the DUNS export file.
"""
logger.info("Retrieving total rows from duns file")
start = datetime.now()
row_count = len(pd.read_csv(file, skipinitialspace=True, header=None, encoding='latin1', quotechar='"',
dtype=str, names=column_headers, skiprows=1))
logger.info("Retrieved row count of {} in {} s".format(row_count, (datetime.now()-start).total_seconds()))
duns_reader_obj = pd.read_csv(file, skipinitialspace=True, header=None, encoding='latin1', quotechar='"',
dtype=str, names=column_headers, iterator=True, chunksize=block_size, skiprows=1)
for duns_df in duns_reader_obj:
start = datetime.now()
# Remove rows where awardee_or_recipient_uniqu is null
duns_df = duns_df[duns_df['awardee_or_recipient_uniqu'].notnull()]
duns_to_load = remove_existing_duns(duns_df, sess)
duns_count = 0
# Only update database if there are DUNS from file missing in database
if not duns_to_load.empty:
duns_count = duns_to_load.shape[0]
# get address info for incoming duns
duns_to_load = update_duns_props(duns_to_load, client)
duns_to_load = clean_duns_csv_data(duns_to_load)
models = {}
load_duns_by_row(duns_to_load, sess, models, None)
sess.commit()
logger.info("Finished updating {} DUNS rows in {} s".format(duns_count,
(datetime.now()-start).total_seconds()))
def main():
""" Loads DUNS from the DUNS export file (comprised of DUNS pre-2014) """
parser = argparse.ArgumentParser(description='Adding historical DUNS to Broker.')
parser.add_argument('-size', '--block_size', help='Number of rows to batch load', type=int,
default=10000)
args = parser.parse_args()
sess = GlobalDB.db().session
client = sam_config_is_valid()
logger.info('Retrieving historical DUNS file')
start = datetime.now()
if CONFIG_BROKER["use_aws"]:
s3_client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
duns_file = s3_client.generate_presigned_url('get_object', {'Bucket': CONFIG_BROKER['archive_bucket'],
'Key': "DUNS_export_deduped.csv"}, ExpiresIn=10000)
else:
duns_file = os.path.join(CONFIG_BROKER["broker_files"], "DUNS_export_deduped.csv")
if not duns_file:
raise OSError("No DUNS_export_deduped.csv found.")
logger.info("Retrieved historical DUNS file in {} s".format((datetime.now()-start).total_seconds()))
try:
run_duns_batches(duns_file, sess, client, args.block_size)
except Exception as e:
logger.exception(e)
sess.rollback()
logger.info("Updating historical DUNS complete")
sess.close()
if __name__ == '__main__':
with create_app().app_context():
configure_logging()
with create_app().app_context():
main()
| 36.772947
| 119
| 0.666185
|
52d27eb99febe6db8e4a9019760a1d7cedd1f9e1
| 188
|
py
|
Python
|
bot/exts/recruitment/talentpool/__init__.py
|
hugovk/bot
|
46d3f877b569a8a6db8a50fbd80ff49c90ba04cf
|
[
"MIT",
"BSD-3-Clause"
] | 1,003
|
2018-11-17T21:10:01.000Z
|
2022-03-31T22:50:39.000Z
|
bot/exts/recruitment/talentpool/__init__.py
|
hugovk/bot
|
46d3f877b569a8a6db8a50fbd80ff49c90ba04cf
|
[
"MIT",
"BSD-3-Clause"
] | 1,474
|
2018-11-17T10:18:14.000Z
|
2022-03-31T18:01:39.000Z
|
bot/exts/recruitment/talentpool/__init__.py
|
hugovk/bot
|
46d3f877b569a8a6db8a50fbd80ff49c90ba04cf
|
[
"MIT",
"BSD-3-Clause"
] | 771
|
2018-11-21T08:36:07.000Z
|
2022-03-31T14:56:39.000Z
|
from bot.bot import Bot
def setup(bot: Bot) -> None:
"""Load the TalentPool cog."""
from bot.exts.recruitment.talentpool._cog import TalentPool
bot.add_cog(TalentPool(bot))
| 20.888889
| 63
| 0.702128
|
8c90736827cccffb57a4c980969281a2dbc457be
| 9,867
|
py
|
Python
|
src/autofj/optimizer/autofj_multi_column_greedy_algorithm.py
|
chu-data-lab/AutomaticFuzzyJoin
|
2e638b2dd17da41abf5ed71575cea94b1d175ccd
|
[
"MIT"
] | 16
|
2021-05-06T08:04:16.000Z
|
2022-01-25T04:19:31.000Z
|
src/autofj/optimizer/autofj_multi_column_greedy_algorithm.py
|
chu-data-lab/AutomaticFuzzyJoin
|
2e638b2dd17da41abf5ed71575cea94b1d175ccd
|
[
"MIT"
] | 1
|
2022-03-07T15:49:34.000Z
|
2022-03-07T15:49:34.000Z
|
src/autofj/optimizer/autofj_multi_column_greedy_algorithm.py
|
chu-data-lab/AutomaticFuzzyJoin
|
2e638b2dd17da41abf5ed71575cea94b1d175ccd
|
[
"MIT"
] | 5
|
2021-09-09T18:41:55.000Z
|
2022-03-18T11:52:15.000Z
|
import pandas as pd
import numpy as np
from multiprocessing import Pool
from .autofj_single_column_greedy_algorithm import AutoFJGreedyAlgorithm
from ..utils import print_log
import os
class AutoFJMulticolGreedyAlgorithm(object):
"""Greedy algorithm for multi-column datasets. Select optimal columns and
column weights using column forward selection.
Parameters
----------
LL_distance: dict
Distance of tuple pairs in LL tables measured by different join
functions. The distance is saved in a dict. The key is the name
of join functions. The value is a table of distance between
tuple pairs on different columns. The first two columns in the
table are "autofj_id_l", "autofj_id_r". The remaining columns
are distance on different columns.
LR_distance: dict
Distance of tuple pairs in LR tables measured by different join
functions. The distance is saved in a dict. The key is the name
of join functions. The value is a pd.DataFrame of distance between
tuple pairs on different columns. The first two columns in the
table are "autofj_id_l", "autofj_id_r". The remaining columns
are distance on different columns.
precision_target: float
Precision target. This should be a float number between 0-1.
candidate_thresholds: list
The search space of distance threshold.
candidate_column_weights: list
The search space of column weights.
n_jobs : int, default=1
Number of CPU cores used. -1 means using all processors.
verbose: bool, default=False
Whether to print logging
"""
def __init__(self,
LL_distance,
LR_distance,
precision_target,
candidate_thresholds,
candidate_column_weights,
n_jobs=-1,
verbose=False):
self.precision_target = precision_target
self.LL_distance = LL_distance
self.LR_distance = LR_distance
self.join_functions = sorted(list(LL_distance.keys()))
self.candidate_thresholds = candidate_thresholds
self.candidate_column_weights = candidate_column_weights
self.verbose = verbose
self.column_names = []
for c in LL_distance[self.join_functions[0]].columns:
if c != "autofj_id_l" and c != "autofj_id_r":
self.column_names.append(c)
self.n_jobs = n_jobs if n_jobs > 0 else os.cpu_count()
def run(self):
"""Running forward selection algorithm"""
best_reward = float("-inf")
best_LR_joins = None
best_columns = []
best_weights = None
best_join_config = None
best_column_weights = None
for i in range(len(self.column_names)):
# get the best result after adding one column
columns, weights, join_config, LR_joins, reward \
= self.forward_selection(best_columns, best_weights)
# if the reward stops increasing by adding columns, terminates
if reward <= best_reward:
break
# save best result
best_columns, best_weights, best_join_config, best_LR_joins, best_reward \
= columns, weights, join_config, LR_joins, reward
if best_join_config is not None:
best_column_weights = self.get_column_weights(best_columns,
best_weights)
if self.verbose:
print_log("Best column_weight: {}:{}, Best reward: {}"
.format(",".join(best_columns),
",".join([str(w) for w in best_weights]),
best_reward))
return best_column_weights, best_join_config, best_LR_joins
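    # Rough usage sketch (illustrative names and precision target, not taken from this
    # module): given the precomputed LL/LR distance dicts described in the class docstring,
    #
    #   optimizer = AutoFJMulticolGreedyAlgorithm(LL_distance, LR_distance,
    #                                             precision_target=0.9,
    #                                             candidate_thresholds=thresholds,
    #                                             candidate_column_weights=weights)
    #   column_weights, join_config, LR_joins = optimizer.run()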
def forward_selection(self, base_columns, base_weights):
"""Do one step forward selection. Adding one column from the remaining
columns, get the best column and weights.
Parameters
----------
base_columns: list
Old columns (best column from the last iteration)
base_weights: list
Old weights (best weight from the last iteration)
Returns
-------
best_columns: list
Best columns after adding one column
best_weights: list
Best weights after adding one column
        best_join_config
            Join configuration selected with the best columns and weights
        best_LR_joins
            Predicted LR joins under the best columns and weights
best_reward: float
resulting reward (profit or estimated recall) given the best
columns and weights
"""
# get all candidate column weights
column_weights_cands = self.get_column_weights_cands(base_columns,
base_weights)
if self.n_jobs == 1:
results = []
for column_weights in column_weights_cands:
res = self.run_one_weight(column_weights)
results.append(res)
else:
with Pool(self.n_jobs) as pool:
results = pool.map(self.run_one_weight, column_weights_cands)
best_reward = float("-inf")
best_weights = None
best_LR_joins = None
best_join_config = None
best_columns = None
for i, (LR_joins, reward, config_selected) in enumerate(results):
if reward > best_reward:
best_reward = reward
best_columns = list(column_weights_cands[i].keys())
best_weights = list(column_weights_cands[i].values())
best_LR_joins = LR_joins
best_join_config = config_selected
return best_columns, best_weights, best_join_config, best_LR_joins, \
best_reward
def get_column_weights_cands(self, base_columns, base_weights):
"""Get candidate column weights by adding one column into old columns
Parameters
----------
base_columns: list
Old columns (best column from the last iteration)
base_weights: list
Old weights (best weight from the last iteration)
Returns
-------
column_weights_cands: list
A list of candidate column weights (dict)
"""
""" get column-weight candidates"""
column_weights_cands = []
for c in self.column_names:
if c in base_columns:
continue
columns = base_columns + [c]
# get search space (weights candidates) for new weights
new_weights = self.get_new_weights(base_weights)
for weights in new_weights:
column_weights = self.get_column_weights(columns, weights)
column_weights_cands.append(column_weights)
return column_weights_cands
def run_one_weight(self, column_weights):
# if self.verbose:
# print_log("Run greedy algorithm with column weights"
# .format(column_weights)
LL_w = self.get_weighted_distance(self.LL_distance, column_weights)
LR_w = self.get_weighted_distance(self.LR_distance, column_weights)
optimizer = AutoFJGreedyAlgorithm(LL_w,
LR_w,
self.precision_target,
self.candidate_thresholds,
n_jobs=self.n_jobs)
LR_joins, config_selected = optimizer.run()
reward = optimizer.get_reward()
return LR_joins, reward, config_selected
def get_new_weights(self, old_weights):
"""Get column weight search space. Keeping ratios of old weights fixed,
append weight for the new column.
Parameters
----------
old_weights: list
Weights of old columns
Return
------
column_weights: list
A list of new weights. In each new weight, the last one is the new
weight, the others are old weights. The ratio between old weights
is fixed.
Example: old_weights: [0.5, 0.5]
column_weights = [[0.45, 0.45, 0.1],
[0.40, 0.40, 0.2],
[0.35, 0.35, 0.3],
...]
"""
# if old weight is empty, new weight is [1].
if old_weights is None:
return np.array([[1]])
# add weight for new column
column_weights = []
for nw in self.candidate_column_weights[1:-1]:
new_w = np.array([w * nw for w in old_weights] + [1 - nw])
column_weights.append(new_w)
return column_weights
def get_weighted_distance(self, LL_distance, column_weights):
"""LL_w: {config: [lid, rid, distance]}"""
LL_w = {}
for config, distance in LL_distance.items():
columns = [c for c in distance.columns if
c not in ["autofj_id_l", "autofj_id_r"]]
weights = []
for c in columns:
if c in column_weights:
weights.append(column_weights[c])
else:
weights.append(0)
weights = np.array(weights).reshape(-1, 1)
weighted_dist = distance[columns].values.dot(weights).ravel()
LL_w[config] = pd.DataFrame(
{"autofj_id_l": distance["autofj_id_l"],
"autofj_id_r": distance["autofj_id_r"],
"distance": weighted_dist})
return LL_w
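    # Worked example with hypothetical numbers: with column_weights {"name": 0.7, "city": 0.3}
    # and a pair whose per-column distances are name=0.2 and city=0.4, the weighted distance
    # stored above is 0.2 * 0.7 + 0.4 * 0.3 = 0.26.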
def get_column_weights(self, columns, weights):
column_weights = {}
for c, w in zip(columns, weights):
column_weights[c] = w
return column_weights
| 37.804598
| 86
| 0.585386
|
2ac575f8c4ce7cfdd0ee6888e3bd8d9c8bdeca3b
| 5,681
|
py
|
Python
|
features/steps/step_definitions.py
|
tedivm/dockerpty
|
f8d17d893c6758b7cc25825e99f6b02202632a97
|
[
"Apache-2.0"
] | 129
|
2015-01-19T15:02:47.000Z
|
2022-03-28T07:46:46.000Z
|
features/steps/step_definitions.py
|
tedivm/dockerpty
|
f8d17d893c6758b7cc25825e99f6b02202632a97
|
[
"Apache-2.0"
] | 52
|
2015-01-01T11:22:23.000Z
|
2021-03-29T14:11:10.000Z
|
features/steps/step_definitions.py
|
tedivm/dockerpty
|
f8d17d893c6758b7cc25825e99f6b02202632a97
|
[
"Apache-2.0"
] | 71
|
2015-02-07T16:25:27.000Z
|
2022-02-06T02:34:18.000Z
|
# dockerpty: step_definitions.py
#
# Copyright 2014 Chris Corbyn <chris@w3style.co.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from behave import then, given, when
from expects import expect, equal, be_true, be_false
import tests.util as util
import dockerpty
import pty
import sys
import os
import signal
import time
from utils import get_client
def alloc_pty(ctx, f, *args, **kwargs):
pid, fd = pty.fork()
if pid == pty.CHILD:
tty = os.ttyname(0)
sys.stdin = open(tty, 'r')
sys.stdout = open(tty, 'w')
sys.stderr = open(tty, 'w')
# alternative way of doing ^ is to do:
# kwargs["stdout"] = open(tty, 'w')
# kwargs["stderr"] = open(tty, 'w')
# kwargs["stdin"] = open(tty, 'r')
# Create a new client for the child process to avoid concurrency issues
client = get_client()
f(client, *args, **kwargs)
sys.exit(0)
else:
ctx.pty = fd
util.set_pty_size(
ctx.pty,
(ctx.rows, ctx.cols)
)
ctx.pid = pid
util.wait(ctx.pty, timeout=5)
time.sleep(1) # give the terminal some time to print prompt
# util.exit_code can be called only once
ctx.exit_code = util.exit_code(ctx.pid, timeout=5)
if ctx.exit_code != 0:
raise Exception("child process did not finish correctly")
@given('I am using a TTY')
def step_impl(ctx):
ctx.rows = 20
ctx.cols = 80
@given('I am using a TTY with dimensions {rows} x {cols}')
def step_impl(ctx, rows, cols):
ctx.rows = int(rows)
ctx.cols = int(cols)
@given('I run "{cmd}" in a docker container with a PTY')
def step_impl(ctx, cmd):
ctx.container = ctx.client.create_container(
image='busybox:latest',
command=cmd,
stdin_open=True,
tty=True,
)
@given('I run "{cmd}" in a docker container with a PTY and disabled logging')
def step_impl(ctx, cmd):
ctx.container = ctx.client.create_container(
image='busybox:latest',
command=cmd,
stdin_open=True,
tty=True,
host_config={"LogConfig": {
"Type": "none" # there is not "none" driver on 1.8
}}
)
@given('I run "{cmd}" in a docker container')
def step_impl(ctx, cmd):
ctx.container = ctx.client.create_container(
image='busybox:latest',
command=cmd,
)
@given('I exec "{cmd}" in a docker container with a PTY')
def step_impl(ctx, cmd):
ctx.exec_id = dockerpty.exec_create(ctx.client, ctx.container, cmd, interactive=True)
@given('I run "{cmd}" in a docker container with stdin open')
def step_impl(ctx, cmd):
ctx.container = ctx.client.create_container(
image='busybox:latest',
command=cmd,
stdin_open=True,
)
@given('I start the container')
def step_impl(ctx):
ctx.client.start(ctx.container)
@when('I start the container')
def step_impl(ctx):
ctx.client.start(ctx.container)
@when('I start dockerpty')
def step_impl(ctx):
alloc_pty(ctx, dockerpty.start, ctx.container, logs=0)
@when('I exec "{cmd}" in a running docker container')
def step_impl(ctx, cmd):
alloc_pty(ctx, dockerpty.exec_command, ctx.container, cmd, interactive=False)
@when('I exec "{cmd}" in a running docker container with a PTY')
def step_impl(ctx, cmd):
alloc_pty(ctx, dockerpty.exec_command, ctx.container, cmd, interactive=True)
@when('I start exec')
def step_impl(ctx):
alloc_pty(ctx, dockerpty.start_exec, ctx.exec_id, interactive=False)
@when('I start exec with a PTY')
def step_impl(ctx):
alloc_pty(ctx, dockerpty.start_exec, ctx.exec_id, interactive=True)
@when('I resize the terminal to {rows} x {cols}')
def step_impl(ctx, rows, cols):
ctx.rows = int(rows)
ctx.cols = int(cols)
util.set_pty_size(
ctx.pty,
(ctx.rows, ctx.cols)
)
time.sleep(1)
os.kill(ctx.pid, signal.SIGWINCH)
@when('I type "{text}"')
def step_impl(ctx, text):
util.write(ctx.pty, text.encode())
@when('I press {key}')
def step_impl(ctx, key):
mappings = {
"enter": b"\x0a",
"up": b"\x1b[A",
"down": b"\x1b[B",
"right": b"\x1b[C",
"left": b"\x1b[D",
"esc": b"\x1b",
"c-c": b"\x03",
"c-d": b"\x04",
"c-p": b"\x10",
"c-q": b"\x11",
}
util.write(ctx.pty, mappings[key.lower()])
@then('I will see the output')
def step_impl(ctx):
# you should check `actual` when tests fail
actual = util.read_printable(ctx.pty).splitlines()
wanted = ctx.text.splitlines()
expect(actual[-len(wanted):]).to(equal(wanted))
@then('The PTY will be closed cleanly')
def step_impl(ctx):
if not hasattr(ctx, "exit_code"):
ctx.exit_code = util.exit_code(ctx.pid, timeout=5)
expect(ctx.exit_code).to(equal(0))
@then('The container will not be running')
def step_impl(ctx):
running = util.container_running(ctx.client, ctx.container, duration=2)
expect(running).to(be_false)
@then('The container will still be running')
def step_impl(ctx):
running = util.container_running(ctx.client, ctx.container, duration=2)
expect(running).to(be_true)
| 26.179724
| 89
| 0.640204
|
df69c5f522755503f7878d7a300e7d9dc8abdb7a
| 3,567
|
py
|
Python
|
ml/rl/test/gridworld/gridworld_test_base.py
|
johncliu/Horizon
|
cfa7a873ada5de3bb01e78e2f237d9849b8270b2
|
[
"BSD-3-Clause"
] | null | null | null |
ml/rl/test/gridworld/gridworld_test_base.py
|
johncliu/Horizon
|
cfa7a873ada5de3bb01e78e2f237d9849b8270b2
|
[
"BSD-3-Clause"
] | 1
|
2021-08-25T16:13:32.000Z
|
2021-08-25T16:13:32.000Z
|
ml/rl/test/gridworld/gridworld_test_base.py
|
johncliu/Horizon
|
cfa7a873ada5de3bb01e78e2f237d9849b8270b2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import os
import tempfile
import unittest
from ml.rl.tensorboardX import SummaryWriterContext
from ml.rl.test.gridworld.gridworld_base import DISCOUNT
from ml.rl.training.ddpg_trainer import DDPGTrainer
class GridworldTestBase(unittest.TestCase):
def setUp(self):
self.check_tolerance = True
self.test_save_load = True
self.num_epochs = 5
self.tolerance_threshold = 0.1
self.run_pre_training_eval = True
SummaryWriterContext._reset_globals()
def tearDown(self):
SummaryWriterContext._reset_globals()
def evaluate_gridworld(
self, environment, evaluator, trainer, exporter, use_gpu, one_hot_action=True
):
if self.run_pre_training_eval:
predictor = exporter.export()
evaluator.evaluate(predictor)
print(
"Pre-Training eval: ",
evaluator.mc_loss[-1],
evaluator.reward_doubly_robust[-1]
if len(evaluator.reward_doubly_robust) > 0
else "None",
)
self.assertGreater(evaluator.mc_loss[-1], 0.09)
for _ in range(self.num_epochs):
samples = environment.generate_samples(10240, 1.0, DISCOUNT)
if (
hasattr(trainer.parameters.rl, "reward_boost")
and trainer.parameters.rl.reward_boost is not None
):
# Reverse any reward boost
rewards_update = []
for action, reward in zip(samples.actions, samples.rewards):
rewards_update.append(
reward - trainer.parameters.rl.reward_boost.get(action, 0.0)
)
samples.rewards = rewards_update
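                # e.g. with a hypothetical reward_boost of {"L": 0.5}, an observed reward
                # of 1.5 for action "L" is stored back as 1.5 - 0.5 = 1.0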
tdps = environment.preprocess_samples(
samples,
self.minibatch_size,
use_gpu=use_gpu,
one_hot_action=one_hot_action,
)
for tdp in tdps:
trainer.train(tdp)
# Test actor if it exists
if isinstance(trainer, DDPGTrainer):
# Make sure actor predictor works
actor = trainer.predictor(actor=True)
# Make sure all actions are optimal
error = evaluator.evaluate_actor(actor, thres=0.2)
print("gridworld optimal action match MAE: {0:.3f}".format(error))
predictor = exporter.export()
predictorClass = predictor.__class__
evaluator.evaluate(predictor)
print(
"Post-Training eval: ",
evaluator.mc_loss[-1],
evaluator.reward_doubly_robust[-1]
if len(evaluator.reward_doubly_robust) > 0
else "None",
)
if self.check_tolerance:
self.assertLess(evaluator.mc_loss[-1], self.tolerance_threshold)
if self.test_save_load:
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_path = os.path.join(tmpdirname, "model")
predictor.save(tmp_path, "minidb")
new_predictor = predictorClass.load(tmp_path, "minidb", False)
evaluator.evaluate(new_predictor)
print(
"Post-ONNX eval: ",
evaluator.mc_loss[-1],
evaluator.reward_doubly_robust[-1]
if len(evaluator.reward_doubly_robust) > 0
else "None",
)
self.assertLess(evaluator.mc_loss[-1], self.tolerance_threshold)
| 35.67
| 85
| 0.578357
|
77fe8849fb693f820f3273d8590e89cdce04559b
| 173
|
py
|
Python
|
chaosopenstack/types.py
|
chaostoolkit-incubator/chaostoolkit-openstack
|
dfba900a436860009b849535136147fa9bd529c6
|
[
"Apache-2.0"
] | 1
|
2019-06-11T16:55:04.000Z
|
2019-06-11T16:55:04.000Z
|
chaosopenstack/types.py
|
chaostoolkit-incubator/chaostoolkit-openstack
|
dfba900a436860009b849535136147fa9bd529c6
|
[
"Apache-2.0"
] | 3
|
2019-04-10T07:24:42.000Z
|
2021-12-02T10:57:24.000Z
|
chaosopenstack/types.py
|
chaostoolkit-incubator/chaostoolkit-openstack
|
dfba900a436860009b849535136147fa9bd529c6
|
[
"Apache-2.0"
] | 5
|
2019-04-10T19:39:10.000Z
|
2021-09-24T13:45:49.000Z
|
# -*- coding: utf-8 -*-
from typing import Any, Dict
__all__ = ["OpenstackResponse"]
# really dependent on the type of resource called
OpenstackResponse = Dict[str, Any]
| 19.222222
| 49
| 0.716763
|
b43881c39b9df8866c393aedb50c7efce6930ee1
| 4,560
|
py
|
Python
|
homeassistant/components/decora/light.py
|
davyike/core
|
13cc7583ed5c7de43c56b43db8fdc9879a853666
|
[
"Apache-2.0"
] | 3
|
2019-10-02T04:40:26.000Z
|
2020-02-16T13:19:08.000Z
|
homeassistant/components/decora/light.py
|
davyike/core
|
13cc7583ed5c7de43c56b43db8fdc9879a853666
|
[
"Apache-2.0"
] | 9
|
2022-01-13T13:43:25.000Z
|
2022-03-31T07:18:48.000Z
|
homeassistant/components/decora/light.py
|
davyike/core
|
13cc7583ed5c7de43c56b43db8fdc9879a853666
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Decora dimmers."""
from __future__ import annotations
import copy
from functools import wraps
import logging
import time
from bluepy.btle import BTLEException # pylint: disable=import-error
import decora # pylint: disable=import-error
import voluptuous as vol
from homeassistant import util
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
PLATFORM_SCHEMA,
ColorMode,
LightEntity,
)
from homeassistant.const import CONF_API_KEY, CONF_DEVICES, CONF_NAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
def _name_validator(config):
"""Validate the name."""
config = copy.deepcopy(config)
for address, device_config in config[CONF_DEVICES].items():
if CONF_NAME not in device_config:
device_config[CONF_NAME] = util.slugify(address)
return config
DEVICE_SCHEMA = vol.Schema(
{vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_API_KEY): cv.string}
)
PLATFORM_SCHEMA = vol.Schema(
vol.All(
PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}}
),
_name_validator,
)
)
def retry(method):
"""Retry bluetooth commands."""
@wraps(method)
def wrapper_retry(device, *args, **kwargs):
"""Try send command and retry on error."""
initial = time.monotonic()
while True:
if time.monotonic() - initial >= 10:
return None
try:
return method(device, *args, **kwargs)
except (decora.decoraException, AttributeError, BTLEException):
_LOGGER.warning(
"Decora connect error for device %s. Reconnecting",
device.name,
)
# pylint: disable=protected-access
device._switch.connect()
return wrapper_retry
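# Sketch of the decorator's effect (mirroring the decorated methods below): a call such as
# light.set_state(128) is retried on decora/BTLE errors, reconnecting the switch each time,
# and gives up by returning None once roughly 10 seconds have elapsed.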
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up an Decora switch."""
lights = []
for address, device_config in config[CONF_DEVICES].items():
device = {}
device["name"] = device_config[CONF_NAME]
device["key"] = device_config[CONF_API_KEY]
device["address"] = address
light = DecoraLight(device)
lights.append(light)
add_entities(lights)
class DecoraLight(LightEntity):
"""Representation of an Decora light."""
_attr_color_mode = ColorMode.BRIGHTNESS
_attr_supported_color_modes = {ColorMode.BRIGHTNESS}
def __init__(self, device):
"""Initialize the light."""
self._name = device["name"]
self._address = device["address"]
self._key = device["key"]
self._switch = decora.decora(self._address, self._key)
self._brightness = 0
self._state = False
@property
def unique_id(self):
"""Return the ID of this light."""
return self._address
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def assumed_state(self):
"""We can read the actual state."""
return False
@retry
def set_state(self, brightness):
"""Set the state of this lamp to the provided brightness."""
self._switch.set_brightness(int(brightness / 2.55))
self._brightness = brightness
@retry
def turn_on(self, **kwargs):
"""Turn the specified or all lights on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
self._switch.on()
self._state = True
if brightness is not None:
self.set_state(brightness)
@retry
def turn_off(self, **kwargs):
"""Turn the specified or all lights off."""
self._switch.off()
self._state = False
@retry
def update(self):
"""Synchronise internal state with the actual light state."""
self._brightness = self._switch.get_brightness() * 2.55
self._state = self._switch.get_on()
| 27.804878
| 80
| 0.643421
|
fe702e0a1d859795f2fb75d8464d1f9765bf133d
| 10,397
|
py
|
Python
|
nappy/nn/analyze.py
|
ryokbys/nap
|
ddd0b5a5a956f7c335a22adb4f8e00f1d38a7804
|
[
"MIT"
] | 27
|
2015-10-05T06:21:28.000Z
|
2021-10-04T17:08:23.000Z
|
nappy/nn/analyze.py
|
ryokbys/nap
|
ddd0b5a5a956f7c335a22adb4f8e00f1d38a7804
|
[
"MIT"
] | 4
|
2020-11-08T12:39:38.000Z
|
2021-01-10T22:31:36.000Z
|
nappy/nn/analyze.py
|
ryokbys/nap
|
ddd0b5a5a956f7c335a22adb4f8e00f1d38a7804
|
[
"MIT"
] | 4
|
2015-01-29T23:10:34.000Z
|
2022-01-08T05:20:13.000Z
|
#!/usr/bin/env python
"""
Analyze an NN potential by drawing the NN structure graph.
Usage:
analyze.py [options]
analyze.py draw [options]
Options:
-h,--help Show this message and exit.
-w Show weight values. [default: False]
-t THRESHOLD
                Threshold multiplied by the maximum edge weight, used as the cutoff below which edges are omitted. [default: 0.01]
"""
from __future__ import print_function
import math
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from docopt import docopt
_cnstfname= 'in.const.NN'
_combfname= 'in.comb.NN'
_paramfname= 'in.params.NN'
def comb(n,m):
    return math.factorial(n) // (math.factorial(m) * math.factorial(n - m))  # n choose m
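# e.g. comb(4, 2) == 6, the number of ways to choose 2 items from 4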
def read_NN_config():
fcnst= open(_cnstfname,'r')
buff= fcnst.readline().split()
nl= int(buff[0])
nhl= np.zeros((nl+1),dtype=int)
nsp= int(buff[1])
nhl[0]=int(buff[2])
nhl[1]=int(buff[3])
if nl == 2:
nhl[2]= int(buff[4])
print('num of species=',nsp)
print('num of layers =',nl)
print('num of neurons-{0:d} ='.format(0),nhl[0])
print('num of neurons-{0:d} ='.format(1),nhl[1])
if nl == 2:
print('num of neurons-{0:d} ='.format(2),nhl[2])
n2=0
n3=0
ngauss= 0
ncos= 0
npoly= 0
nangle= 0
for line in fcnst.readlines():
buff= line.split()
itype= int(buff[0])
if itype <= 100:
n2 += 1
itype2= itype % 100
if itype2 == 1:
ngauss += 1
elif itype2 == 2:
ncos += 1
elif itype2 == 3:
npoly += 1
elif itype <= 200:
n3 += 1
nangle += 1
fcnst.close()
print('read in.const.NN')
print('num of 2body terms=',n2)
print('num of 3body terms=',n3)
ncmb2= nsp +comb(nsp,2)
ncmb3= ncmb2*nsp
print('num of 2body pairs =',ncmb2)
print('num of 3body triplets=',ncmb3)
nhl[0]= n2*ncmb2 +n3*ncmb3
print('num of 2body and 3body inputs =',n2*ncmb2, n3*ncmb3)
if nl == 1:
print('num of input neurons =',nhl[0]*nhl[1] +nhl[1])
elif nl == 2:
print('num of input neurons =',nhl[0]*nhl[1] +nhl[1]*nhl[2] +nhl[2])
fcmb= open(_combfname,'r')
cmb2= np.zeros((ncmb2,2),dtype=int)
cmb3= np.zeros((ncmb3,3),dtype=int)
print('pairs:')
for i2 in range(ncmb2):
buff= fcmb.readline().split()
cmb2[i2,0]= int(buff[0])
cmb2[i2,1]= int(buff[1])
print(' ',i2,': {0:1d}-{1:1d}'.format(cmb2[i2,0],cmb2[i2,1]))
print('triplets:')
for i3 in range(ncmb3):
buff= fcmb.readline().split()
cmb3[i3,0]= int(buff[0])
cmb3[i3,1]= int(buff[1])
cmb3[i3,2]= int(buff[2])
print(' ',i3,':', \
' {0:1d}-{1:1d}-{2:1d}'.format(cmb3[i3,0],cmb3[i3,1],cmb3[i3,2]))
fcmb.close()
return nl,nsp,nhl,n2,n3,cmb2,cmb3,ngauss,ncos,npoly,nangle
def read_NN_params(nl,nhl):
#.....read in.params.NN
fparam= open(_paramfname,'r')
buff= fparam.readline().split()
if nl == 1:
wgt11= np.zeros((nhl[0],nhl[1]))
wgt12= np.zeros(nhl[1])
for ihl0 in range(nhl[0]):
for ihl1 in range(nhl[1]):
buff= fparam.readline().split()
wgt11[ihl0,ihl1]= float(buff[0])
for ihl1 in range(nhl[1]):
buff= fparam.readline().split()
wgt12[ihl1]= float(buff[0])
elif nl == 2:
wgt21= np.zeros((nhl[0],nhl[1]))
wgt22= np.zeros((nhl[1],nhl[2]))
wgt23= np.zeros(nhl[2])
for ihl0 in range(nhl[0]):
for ihl1 in range(nhl[1]):
buff= fparam.readline().split()
wgt21[ihl0,ihl1]= float(buff[0])
for ihl1 in range(nhl[1]):
for ihl2 in range(nhl[2]):
buff= fparam.readline().split()
wgt22[ihl1,ihl2]= float(buff[0])
for ihl2 in range(nhl[2]):
buff= fparam.readline().split()
wgt23[ihl2]= float(buff[0])
fparam.close()
print('read in.params.NN')
#for ihl0 in range(n2+n3):
# print ihl0,': ',wgt11[ihl0,0:nhl[1]+1]
# for ihl1 in range(nhl[1]):
    # print ihl1,': ',wgt12[ihl1]
if nl == 1:
return wgt11,wgt12
elif nl == 2:
return wgt21,wgt22,wgt23
def analyze(nl,nsp,nhl,n2,n3,cmb2,cmb3,ngauss,ncos,npoly,nagnle,
wgt11=None,wgt12=None,wgt21=None,wgt22=None,wgt23=None):
"""
Analyze the NN structure.
"""
pass
def draw(nl,nsp,nhl,n2,n3,cmb2,cmb3,ngauss,ncos,npoly,nagnle,
wgt11=None,wgt12=None,wgt21=None,wgt22=None,wgt23=None):
g= nx.Graph()
pos= {}
dy=-1.0
for ihl0 in range(nhl[0]):
g.add_node('0-{0:03d}'.format(ihl0))
pos['0-{0:03d}'.format(ihl0)]= [0,ihl0*dy]
dy= -float(nhl[0]-1)/(nhl[1]-1)
for ihl1 in range(nhl[1]):
g.add_node('1-{0:03d}'.format(ihl1))
pos['1-{0:03d}'.format(ihl1)]= [1,ihl1*dy]
if nl == 1:
dy= -float(nhl[0])/2
g.add_node('2')
pos['2']= [2,dy]
elif nl == 2:
dy= -float(nhl[0]-1)/(nhl[2]-1)
for ihl2 in range(nhl[2]):
g.add_node('2-{0:03d}'.format(ihl2))
pos['2-{0:03d}'.format(ihl2)]= [2,ihl2*dy]
dy= -float(nhl[0])/2
g.add_node('3')
pos['3']= [3,dy]
n= 0
nlabel= {}
for key in pos:
# print key,pos[key]
if key[0] != '0':
nlabel[key]= ''
else:
ineuron= int(key[2:5])
# print 'ineuron=',ineuron
if ineuron < n2*len(cmb2):
                pair= ineuron // n2
isf2= ineuron % n2
nlabel[key]= '{0:1d}-'.format(cmb2[pair,0]) \
+'{0:1d}:'.format(cmb2[pair,1]) \
+' {0:02d}'.format(isf2)
else:
ine= ineuron -n2*len(cmb2)
                triplet= ine // n3
isf3 = ine % n3
# print ' n3,triplet,isf3=',n3,triplet,isf3
nlabel[key]= '{0:1d}-'.format(cmb3[triplet,0]) \
+'{0:1d}-'.format(cmb3[triplet,1]) \
+'{0:1d}:'.format(cmb3[triplet,2]) \
+' {0:02d}'.format(isf3)
# print n,nlabel[n]
n += 1
# exit()
maxedge=0.0
if nl == 1:
for ihl0 in range(nhl[0]):
for ihl1 in range(nhl[1]):
maxedge= max(maxedge,np.abs(wgt11[ihl0,ihl1]))
for ihl1 in range(nhl[1]):
maxedge= max(maxedge,np.abs(wgt12[ihl1]))
elif nl == 2:
for ihl0 in range(nhl[0]):
for ihl1 in range(nhl[1]):
maxedge= max(maxedge,np.abs(wgt21[ihl0,ihl1]))
for ihl1 in range(nhl[1]):
for ihl2 in range(nhl[2]):
maxedge= max(maxedge,np.abs(wgt22[ihl1,ihl2]))
for ihl2 in range(nhl[2]):
maxedge= max(maxedge,np.abs(wgt23[ihl2]))
print('max of edge value= ',maxedge)
colors= []
elabels= {}
ic= 0
if nl == 1:
for ihl0 in range(nhl[0]):
for ihl1 in range(nhl[1]):
if np.abs(wgt11[ihl0,ihl1]) > threshold*maxedge:
val= wgt11[ihl0,ihl1]
g.add_edge('0-{0:03d}'.format(ihl0),'1-{0:03d}'.format(ihl1))
elabels[('0-{0:03d}'.format(ihl0),'1-{0:03d}'.format(ihl1))]='{0:7.4f}'.format(val)
for ihl1 in range(nhl[1]):
if np.abs(wgt12[ihl1]) > threshold*maxedge:
val= wgt12[ihl1]
g.add_edge('1-{0:03d}'.format(ihl1), '2')
elabels[('1-{0:03d}'.format(ihl1),'2')]= '{0:7.4f}'.format(val)
elif nl == 2:
for ihl0 in range(nhl[0]):
for ihl1 in range(nhl[1]):
if np.abs(wgt21[ihl0,ihl1]) > threshold*maxedge:
val= wgt21[ihl0,ihl1]
g.add_edge('0-{0:03d}'.format(ihl0),'1-{0:03d}'.format(ihl1))
elabels[('0-{0:03d}'.format(ihl0),'1-{0:03d}'.format(ihl1))]='{0:7.4f}'.format(val)
for ihl1 in range(nhl[1]):
for ihl2 in range(nhl[2]):
if np.abs(wgt22[ihl1,ihl2]) > threshold*maxedge:
val= wgt22[ihl1,ihl2]
g.add_edge('1-{0:03d}'.format(ihl1), '2-{0:03d}'.format(ihl2))
elabels[('1-{0:03d}'.format(ihl1),'2-{0:03d}'.format(ihl2))]= '{0:7.4f}'.format(val)
for ihl2 in range(nhl[2]):
if np.abs(wgt23[ihl2]) > threshold*maxedge:
val= wgt23[ihl2]
g.add_edge('2-{0:03d}'.format(ihl2), '3')
elabels[('2-{0:03d}'.format(ihl2),'3')]= '{0:7.4f}'.format(val)
for e in g.edges():
e1= e[0]
e2= e[1]
for l in elabels.keys():
if e1 in l and e2 in l:
colors.append(np.sqrt(np.abs(float(elabels[l]))))
# print 'len(edges)=',len(g.edges())
# print g.edges()
# print 'len(colors)=',len(colors)
# print colors
#exit()
#nx.draw_networkx_nodes(g,pos,node_size=30,node_color='b',node_shape='o')
#nx.draw_networkx_edges(g,pos)
nodes= nx.draw_networkx_nodes(g,pos,node_size=30,node_color='b')
edges= nx.draw_networkx_edges(g,pos,edge_color=colors,edge_cmap=plt.get_cmap('jet'))
if flag_weight:
nx.draw_networkx_edge_labels(g,pos,alpha=1.0,edge_labels=elabels,label_pos=0.5)
for key in pos:
pos[key][0] -= 0.2
nx.draw_networkx_labels(g,pos,nlabel,font_size=8)
plt.colorbar(edges)
plt.tick_params(axis='x',bottom='off',top='off',labelbottom='off')
plt.tick_params(axis='y',bottom='off',top='off',labelleft='off')
plt.show()
if __name__ == '__main__':
args= docopt(__doc__)
flag_weight= args['-w']
    threshold= float(args['-t'])
nl,nsp,nhl,n2,n3,cmb2,cmb3,ngauss,ncos,npoly,nagnle= read_NN_config()
if nl == 1:
wgt11,wgt12= read_NN_params(nl,nhl)
elif nl == 2:
wgt21,wgt22,wgt23= read_NN_params(nl,nhl)
if args['draw']:
if nl == 1:
draw(nl,nsp,nhl,n2,n3,cmb2,cmb3,ngauss,ncos,npoly,nagnle,
wgt11=wgt11,wgt12=wgt12)
elif nl == 2:
draw(nl,nsp,nhl,n2,n3,cmb2,cmb3,ngauss,ncos,npoly,nagnle,
wgt21=wgt21,wgt22=wgt22,wgt23=wgt23)
else:
analyze(nl,nsp,nhl,n2,n3,cmb2,cmb3,ngauss,ncos,npoly,nagnle)
| 33.53871
| 104
| 0.519958
|
6c4adcaf4598034a9670bacf6bf97b68dd0b3ce4
| 1,088
|
py
|
Python
|
content_notes/migrations/0001_initial.py
|
OpenCanada/website
|
6334ff412addc0562ac247080194e5d182e8e924
|
[
"MIT"
] | 10
|
2015-12-18T16:41:33.000Z
|
2018-11-11T08:36:46.000Z
|
content_notes/migrations/0001_initial.py
|
OpenCanada/website
|
6334ff412addc0562ac247080194e5d182e8e924
|
[
"MIT"
] | 96
|
2015-07-14T22:45:56.000Z
|
2017-07-25T19:59:48.000Z
|
content_notes/migrations/0001_initial.py
|
OpenCanada/website
|
6334ff412addc0562ac247080194e5d182e8e924
|
[
"MIT"
] | 9
|
2015-07-28T14:38:43.000Z
|
2019-01-04T17:38:42.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0062_auto_20150930_1633'),
]
operations = [
migrations.CreateModel(
name='EndNote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('text', wagtail.core.fields.RichTextField()),
('uuid', models.CharField(max_length=64, null=True, blank=True)),
('article', modelcluster.fields.ParentalKey(related_name='endnote_links', on_delete=django.db.models.deletion.SET_NULL, to='articles.ArticlePage', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| 34
| 175
| 0.608456
|
b1346021c0f86a80d43581defe03917845d51d0e
| 49,529
|
py
|
Python
|
haruka/modules/translations/Indonesian.py
|
jarvisbotsavage/groupmanaging
|
a420cc556347ab947d28d0d736b8aa72680949ff
|
[
"MIT"
] | null | null | null |
haruka/modules/translations/Indonesian.py
|
jarvisbotsavage/groupmanaging
|
a420cc556347ab947d28d0d736b8aa72680949ff
|
[
"MIT"
] | null | null | null |
haruka/modules/translations/Indonesian.py
|
jarvisbotsavage/groupmanaging
|
a420cc556347ab947d28d0d736b8aa72680949ff
|
[
"MIT"
] | null | null | null |
RUN_STRINGS = (
"Kau pikir mau kemana dirimu?",
"Ya? apa? apakah mereka bisa lolos?",
"ZZzzZZzz... Hah? Apa? oh, ternyata mereka lagi, jangan dipikirkan.",
"Kembali ke sini!",
"Tidak begitu cepat...",
"Perhatikan kalau ada dinding!",
"Jangan tinggalkan aku sendirian dengan mereka!!",
"Kamu Lari, Kamu mati.",
"Bercanda, saya di mana-mana",
"Kamu akan menyesal...",
"Coba kamu klik /kickme, menyenangkan lho.",
"Ganggu orang lain saja sana, di sini nggak ada yang peduli."
"Kamu bisa saja lari, tapi Kamu tidak dapat bersembunyi.",
"Itu saja yang Kamu punya?"
"Aku di belakangmu...",
"Kamu nggak lagi sendirian!",
"Kita dapat melakukan ini dengan cara yang mudah, atau dengan cara yang keras.",
"Kamu hanya tidak mengerti, kan?",
"Ya, Kamu lebih baik lari!",
"Tolong, ingatkan seberapa pedulinya aku?",
"Aku akan lari lebih cepat jika aku jadi kau.",
"Itu pasti adalah droid yang kami mencari.",
"Semoga peluangmu selalu beruntung.",
"Kata-kata terakhir yang terkenal.",
"Dan mereka menghilang selamanya, tidak pernah terlihat lagi."
"Oh, lihat dirku! Aku ini keren lho, aku bisa lari dari bot! \n -orang ini",
"Ya Ya, tekan aja /kickme.",
"Di sini, mengambil ini cincin dan kepala ke Mordor sementara kau di itu.",
"Legenda berkata, mereka masih lari sampai sekarang...",
"Tidak seperti Harry Potter, orang tuamu tidak bisa melindungimu dariku.",
"Ketakutan mengarah pada kemarahan. Kemarahan menyebabkan kebencian. Benci menyebabkan penderitaan. Jika Kamu terus berlari dalam ketakutan, Kamu mungkin "
"menjadi penerus Vader.",
"Setelah aku hitung-hitung, aku telah memutuskan bahwa ketertarikanku pada kejahatanmu adalah 0.",
"Legenda berkata, mereka masih lari sampai sekarang.",
"Lanjutkan, kami nggak yakin butuh dirimu di sini.",
"Kamu pasti seorang penyi-. Tunggu. Kamu bukan Harry, lanjutkan.",
"JANGAN BERLARI DI LORONG!",
"Hasta la vista, baby.",
"Siapa yang melepaskan Anjingnya?",
"Lucu ya, karena tidak ada orang yang peduli di sini."
"Ah, sayang sekali. Padahal saya suka yang itu.",
"Terus terang, sayangku, aku tidak peduli.",
"Milkshakeku memancing semua anak laki-laki menuju ke halaman... Jadi lari yang lebih cepat!",
"Kamu tidak bisa MENERIMA KENYATAAN!",
"Di suatu masa lalu, di sebuah galaksi yang sangat jauh... Mungkin saja seseorang ada yang peduli itu. Tapi tidak lagi sekarang. ",
"Hei, lihat mereka! Mereka kabur dari banhammer tak terelakkan... Lucunya. ",
"Han menembak terlebih dahulu. Begitu juga aku.",
"Kamu mengejar apa, seekor kelinci putih?",
"Dokter kemungkinan akan mengatakan...... LARI! ",
)
INSULTS_STRINGS = (
"Owww ... Dasar bodoh pangkal kuadrat.",
"Jangan minum sambil mengetik",
"Aku pikir kamu lebih baik pulang ke rumah, atau ke RSJ sekalian.",
"Perintah tidak ditemukan. Seperti isi otakmu.",
"Apakah kamu sadar kalau tingkahmu itu membuatmu terlihat bodoh? Tampaknya tidak.",
"Harusnya kamu bisa menulis yang lebih baik dari itu.",
"Aturan bot nomor 544 bagian 9 mencegahku untuk membalas pesanmu.",
"Maaf, kami tidak menjual otak.",
"Percayalah kepadaku, Kamu nggak normal.",
"Aku yakin otakmu terasa seperti baru, sebab Kamu tidak pernah menggunakannya.",
"Jika saya ingin bunuh diri, saya akan menaiki egomu dan melompat ke IQmu.",
"Zombie itu makan otak... jadi kau aman. ",
"Kamu tidak berevolusi dari kera, mereka berevolusi dari Kamu."
"Kembalilah berbicara ke saya ketika jumlah IQmu sudah melebihi usiamu. ",
"Saya tidak mengatakan kamu bodoh, Saya hanya mengatakan kamu sial saat berfikir.",
"Bahasa apa yang kamu pakai? Karena itu terdengar seperti omong kosong",
"Kebodohan bukan sebuah kejahatan jadi kamu dibebaskan.",
"Kamu adalah bukti bahwa evolusi bisa terbalik.",
"Aku mau tanya berapa umurmu tapi aku tahu kamu tidak akan bisa menghitung sebanyak itu.",
"Sebagai makhluk asing, apa pendapatmu tentang ras manusia?",
"Otak bukanlah segalanya. Dalam kasusmu, otaknya nggak ada.",
"Biasanya orang hidup dan belajar. Kamu hanya sekedar hidup.",
"Aku tidak tahu apa yang membuatmu bodoh, tapi itu benar-benar manjur.",
"Teruslah berbicara, suatu hari nanti Kamu akan mengatakan sesuatu yang cerdas! (meskipun aku ragu) ",
"Kejutkan aku, dengan mengatakan sesuatu yang terdengar cerdas."
"IQ mu lebih kecil dari ukuran sepatumu.",
"Aduh! Neurotransmitermu sudah rusak. ",
"Apakah Kamu gila, dasar bodoh.",
"Setiap orang memiliki hak untuk menjadi bodoh tapi Kamu malah menyalagunakannya.",
"Aku menyesal kalau aku menyakiti perasaanmu ketika aku memanggilmu bodoh. Kupikir dirimu sudah tahu itu. ",
"Kamu harus mencoba mencicipi sianida.",
"Enzimmu harusnya digunakan untuk mencerna racun tikus.",
"Kamu harus mencoba tidur selamanya.",
"Ambil pistol dan tembak dirimu sendiri.",
"Kamu bisa membuat rekor dunia dengan melompat dari pesawat tanpa parasut.",
"Berhenti berbicara omong kosong dan lompatlah di depan kereta peluru yang sedang jalan.",
"Cobalah mandi dengan asam klorida daripada air.",
"Coba ini: jika Kamu menahan nafas air selama satu jam, Kamu bisa menahannya selamanya.",
"Go Green! Berhentilah menghirup oksigen. ",
"Tuhan sedang mencarimu. Kamu harus mati untuk bertemu diriNya. ",
"memberikanmu 100%. Sekarang, pergi sumbangkan darah. ",
"Coba lompat dari gedung dengan ratusan lantai, tetapi Kamu hanya bisa melakukannya sekali.",
"Kamu harusnya menyumbangkan otakmu karena kulihat kalau kamu tidak pernah menggunakannya.",
"Relawan untuk target di tempat latihan menembak.",
"Tembakan di kepala itu menyenangkan. Kamu coba sana. ",
"Kamu harus coba berenang bersama hiu putih.",
"Kamu cat dirimu sendiri dengan warna merah dan ikutlah berlari di maraton banteng.",
"Kamu bisa tetap di bawah air selama sisa hidupmu tanpa kembali.",
"Bagaimana kalau kamu berhenti bernapas selama 1 hari? Itu akan luar biasa. ",
"Cobalah memprovokasi harimau saat kalian berdua berada di kandang.",
"Sudahkah Kamu mencoba menembak diri sendiri sejauh 100 m menggunakan meriam.",
"Kamu harus mencoba memegang TNT di mulutmu dan menyalakannya.",
"Coba bermain lempar tangkap dengan RDX, itu menyenangkan loh.",
"Aku dengar phogine itu beracun tapi kurasa kamu tidak keberatan menghirupnya untuk bersenang-senang.",
"Luncurkan dirimu ke luar angkasa sambil melupakan oksigen di Bumi.",
"Kamu harus mencoba bermain ular dan tangga, dengan ular sungguhan dan tanpa tangga.",
"Menari telanjang di sepasang kabel HT.",
"Kawah berapi adalah kolam renang terbaik untukmu.",
"Kamu harus mencoba mandi air panas di gunung berapi.",
"Cobalah menghabiskan satu hari di peti mati dan itu akan menjadi milikmu selamanya.",
"Pukul Uranium dengan neutron yang bergerak lambat di hadapanmu. Ini akan menjadi pengalaman berharga. ",
"Kamu bisa menjadi orang pertama yang menginjakkan kaki di matahari. Selamat mencoba. ",
)
SLAP_TEMPLATES = (
"{user1} {hits} {user2} dengan {item}.",
"{user1} {hits} {user2} di wajahnya dengan {item}.",
"{user1} {hits} {user2} pelan-pelan dengan {item}.",
"{user1} {throws} sebuah {item} pada {user2}.",
"{user1} mengambil {item} dan {throws} di wajah {user2}.",
"{user1} meluncurkan {item} langsung ke arah {user2}.",
"{user1} mulai menampar {user2} dengan {item}.",
"{user1} menahan {user2} dan berulangkali {hits} dengan {item}.",
"{user1} meraih {item} dan {hits} {user2} dengan itu.",
"{user1} mengikat {user2} ke kursi dan {throws} sebuah {item} pada mereka.",
"{user1} memberi pertolongan untuk membantu {user2} belajar berenang di lava."
)
ITEMS = (
"wajan besi cor",
"trout besar",
"Tongkat bisbol",
"tongkat kriket",
"tongkat kayu",
"paku",
"printer",
"sekop",
"Monitor CRT",
"buku teks fisika",
"pemanggang roti",
"lukisan Richard Stallman",
"televisi",
"truk lima ton",
"Gulungan lakban",
"buku",
"laptop",
"televisi bekas",
"Karung batu",
"trout berwarna",
"ayam karet",
"Kelelawar berduri",
"Pemadam Api",
"beton",
"sebongkah tanah",
"sarang lebah",
"Sepotong daging busuk",
"beruang",
"sejumlah batu bata",
)
THROW = (
"melempar",
"melemparkan",
"membuang",
"melontarkan",
)
HIT = (
"memukul",
"memukul",
"menampar",
"menjatuhkan",
"memukul",
)
MARKDOWN_HELP = """
Markdown adalah alat pemformatan yang luar biasa yang didukung oleh telegram. {} memiliki beberapa peningkatan, untuk memastikan bahwa \
pesan yang disimpan diuraikan dengan benar, dan untuk memungkinkan Kamu membuat tombol.
- <code>_italic_</code>: membungkus teks dengan '_' akan menghasilkan teks yang miring
- <code>*bold*</code>: membungkus teks dengan '*' akan menghasilkan teks tebal
- <code>`code`</code>: membungkus teks dengan '`' akan menghasilkan teks monospace, juga dikenal sebagai 'code'
- <code>[sometext](someURL)</code>: ini akan membuat tautan - pesannya hanya akan menunjukkan <code> tulisan</code>, \
dan mengetuknya akan membuka halaman di <code>someURL</code>.
CONTOH: <code>[Tombol](buttonurl:example.com)</code>
- <code>[buttontext](buttonurl:someURL)</code>: ini adalah penyempurnaan khusus untuk memungkinkan pengguna untuk memiliki \
tombol telegram dalam markdown mereka. <code>buttontext</code> akan menjadi apa yang ditampilkan pada tombol, dan <code>someurl</code> \
akan menjadi url yang dibuka.
CONTOH: <code>[Tombol](buttonurl:example.com)</code>
Jika Kamu ingin beberapa tombol pada baris yang sama, gunakan: same, seperti:
<code>[one](buttonurl://example.com)
[two](buttonurl://google.com:same)</code>
Ini akan membuat dua tombol pada satu baris, bukan satu tombol per baris.
"""
IndonesianStrings = {
"send-start": """Hi {}, nama saya {}! Jika Kamu memiliki pertanyaan tentang cara menggunakan saya, baca /help.
Saya adalah bot manajer grup yang dikelola oleh [orang ini](tg://user?Id={654839744}). Saya adalah [Marie](https://github.com/PaulSonOfLars/tgbot) fork.
Saya dibangun di python3, menggunakan \
python-telegram-bot library, dan saya sepenuhnya opensource - Kamu dapat menemukan apa yang membuat saya\
[disini](https://github.com/peaktogoo/HarukaAya)!
Jangan ragu untuk mengirimkan pull request di github\
atau permintaan fitur jika kamu punya :)
Jika Kamu menikmati menggunakan saya dan / atau ingin membantu saya bertahan di alam liar, tekan /donate untuk membantu upgrade VPS saya!
""",
"send-help": """Hai disana! Nama saya *{}*.
Saya bot manajemen grup modular dengan beberapa tambahan fitur menyenangkan! Lihatlah beberapa fitur berikut untuk mendapatkan petunjuk tentang hal yang bisa saya bantu.
Perintah tersedia:
- /start: Perintah keren untuk memeriksa apakah bot masih hidup atau tidak
- /help: Bantuan.
- /help <nama modul>: Bantuan tentang modul.
- /donate: informasi tentang cara menyumbang!
- /lang: mengubah bahasa bot
- /settings:
-di PM: akan mengirimkan setelan untuk semua modul yang didukung.
-dalam grup: akan mengarahkan Kamu ke pm, dengan semua pengaturan chat.
{}
""",
"send-group-settings": """Hai disana! Ada beberapa pengaturan untuk *{}* - pergi ke depan dan pilih apa
yang membuatmu tertarik.""",
"Disabled connections to this chat for users": "Disabled connections to this chat for users",
"Enabled connections to this chat for users": "Enabled connections to this chat for users",
"Please enter on/yes/off/no in group!": "Please enter on/yes/off/no in group!",
"Successfully connected to *{}*": "Successfully connected to *{}*",
"Connection failed!": "Connection failed!",
"Connections to this chat not allowed!": "Connections to this chat not allowed!",
"Write chat ID to connect!": "Write chat ID to connect!",
"Usage limited to PMs only!": "Usage limited to PMs only!",
#Misc
"RUNS-K": RUN_STRINGS,
"SLAP_TEMPLATES-K": SLAP_TEMPLATES,
"ITEMS-K": ITEMS,
"HIT-K": HIT,
"THROW-K": THROW,
"ITEMP-K": ITEMS,
"ITEMR-K": ITEMS,
"MARKDOWN_HELP-K": MARKDOWN_HELP,
"INSULTS-K": INSULTS_STRINGS,
"The original sender, {}, has an ID of `{}`.\nThe forwarder, {}, has an ID of `{}`.":
"The original sender, {}, has an ID of `{}`.\nThe forwarder, {}, has an ID of `{}`.",
"{}'s id is `{}`.": "{}'s id is `{}`.",
"Your id is `{}`.": "Your id is `{}`.",
"This group's id is `{}`.": "This group's id is `{}`.",
"I can't extract a user from this.": "I can't extract a user from this.",
"<b>User info</b>:": "<b>User info</b>:",
"\nFirst Name: {}": "\nFirst Name: {}",
"\nLast Name: {}": "\nLast Name: {}",
"\nUsername: @{}": "\nUsername: @{}",
"\nPermanent user link: {}": "\nPermanent user link: {}",
"\n\nThis person is my owner - I would never do anything against them!":
"\n\nThis person is my owner - I would never do anything against them!",
"\nThis person is one of my sudo users! Nearly as powerful as my owner - so watch it.":
"\nThis person is one of my sudo users! Nearly as powerful as my owner - so watch it.",
"\nThis person is one of my support users! Not quite a sudo user, but can still gban you off the map.":
"\nThis person is one of my support users! Not quite a sudo user, but can still gban you off the map.",
"\nThis person has been whitelisted! That means I'm not allowed to ban/kick them.":
"\nThis person has been whitelisted! That means I'm not allowed to ban/kick them.",
"Its always banhammer time for me!": "Its always banhammer time for me!",
"It's {} in {}": "It's {} in {}",
"Please reply to a sticker to get its ID.": "Please reply to a sticker to get its ID.",
"Please reply to a sticker for me to upload its PNG.": "Please reply to a sticker for me to upload its PNG.",
"Write a location to check the weather.": "Write a location to check the weather.",
"I will keep an eye on both happy and sad times!": "I will keep an eye on both happy and sad times!",
"Today in {} is being {}, around {}°C.\n": "Today in {} is being {}, around {}°C.\n",
"Sorry, location not found.": "Sorry, location not found.",
"Deleting identifiable data...": "Deleting identifiable data...",
"Try forwarding the following message to me, and you'll see!":
"Try forwarding the following message to me, and you'll see!",
"/save test This is a markdown test. _italics_, *bold*, `code`, [URL](example.com) [button](buttonurl:github.com) [button2](buttonurl://google.com:same)":
"""/save test This is a markdown test. _italics_, *bold*, `code`, \
[URL](example.com)
[Botón](buttonurl:github.com)
[Botón2](buttonurl://google.com:same)""",
#Admin
"How am I meant to promote someone that's already an admin?": "How am I meant to promote someone that's already an admin?",
"I can't promote myself! Get an admin to do it for me.": "I can't promote myself! Get an admin to do it for me.",
"Successfully promoted in *{}*!": "Successfully promoted in *{}*!",
"This person CREATED the chat, how would I demote them?": "This person CREATED the chat, how would I demote them?",
"Can't demote what wasn't promoted!": "Can't demote what wasn't promoted!",
"I can't demote myself!": "I can't demote myself!",
"Successfully demoted in *{}*!": "Successfully demoted in *{}*!",
"Could not demote. I might not be admin, or the admin status was appointed by another user, so I can't act upon them!":
"Could not demote. I might not be admin, or the admin status was appointed by another user, so I can't act upon them!",
"I don't have access to the invite link, try changing my permissions!": "I don't have access to the invite link, try changing my permissions!",
"I can only give you invite links for supergroups and channels, sorry!": "I can only give you invite links for supergroups and channels, sorry!",
"Admins in": "Admins in",
"this chat": "this chat",
" (Creator)": " (Creator)",
#AFK
"{} is now AFK!": "{} is now AFK!",
"{} is no longer AFK!": "{} is no longer AFK!",
"{} is AFK!": "{} is AFK!",
"{} is AFK! says its because of: \n{}": "{} is AFK! says its because of: \n{}",
#Antiflood
"I like to leave the flooding to natural disasters. But you, you were just a disappointment. Get out.":
"I like to leave the flooding to natural disasters. But you, you were just a disappointment. Get out.",
"I can't kick people here, give me permissions first! Until then, I'll disable antiflood.":
"I can't kick people here, give me permissions first! Until then, I'll disable antiflood.",
"Antiflood has been disabled.": "Antiflood has been disabled.",
"Antiflood has to be either 0 (disabled), or a number bigger than 3 (enabled)!":
"Antiflood has to be either 0 (disabled), or a number bigger than 3 (enabled)!",
"Antiflood has been updated and set to {}": "Antiflood has been updated and set to {}",
"Unrecognised argument - please use a number, 'off', or 'no'.":
"Unrecognised argument - please use a number, 'off', or 'no'.",
"I'm not currently enforcing flood control!": "I'm not currently enforcing flood control!",
"I'm currently banning users if they send more than {} consecutive messages.":
"I'm currently banning users if they send more than {} consecutive messages.",
#Antispam
"I've enabled antispam security in this group. This will help protect you from spammers, unsavoury characters, and the biggest trolls.":
"I've enabled antispam security in this group. This will help protect you from spammers, unsavoury characters, and the biggest trolls.",
"I've disabled antispam security in this group. GBans wont affect your users anymore. You'll be less protected from any trolls and spammers though!":
"I've disabled antispam security in this group. GBans wont affect your users anymore. You'll be less protected from any trolls and spammers though!",
"Give me some arguments to choose a setting! on/off, yes/no!\n\nYour current setting is: {}\nWhen True, any gbans that happen will also happen in your group. When False, they won't, leaving you at the possible mercy of spammers.":
"Give me some arguments to choose a setting! on/off, yes/no!\n\nYour current setting is: {}\nWhen True, any gbans that happen will also happen in your group. When False, they won't, leaving you at the possible mercy of spammers.",
"Globally banned: <b>{}</b>": "Globally banned: <b>{}</b>",
"\nGlobally muted: <b>{}</b>": "\nGlobally muted: <b>{}</b>",
"\nReason: {}": "\nReason: {}",
#Bans
"I really wish I could ban admins...": "I really wish I could ban admins...",
"I'm not gonna BAN myself, are you crazy?": "I'm not gonna BAN myself, are you crazy?",
"Banned!": "Banned!",
"Well damn, I can't ban that user.": "Well damn, I can't ban that user.",
"You haven't specified a time to ban this user for!":
"You haven't specified a time to ban this user for!",
"Banned! User will be banned for {}.": "Banned! User will be banned for {}.",
#Blacklist
"<b>Current blacklisted words in {}:</b>\n": "<b>Current blacklisted words in {}:</b>\n",
"There are no blacklisted messages in <b>{}</b>!": "There are no blacklisted messages in <b>{}</b>!",
"Added <code>{}</code> to the blacklist in <b>{}</b>!":
"Added <code>{}</code> to the blacklist in <b>{}</b>!",
"Tell me which words you would like to add to the blacklist.":
"Tell me which words you would like to add to the blacklist.",
"Removed <code>{}</code> from the blacklist in <b>{}</b>!":
"Removed <code>{}</code> from the blacklist in <b>{}</b>!",
"This isn't a blacklisted trigger...!": "This isn't a blacklisted trigger...!",
"None of these triggers exist, so they weren't removed.":
"None of these triggers exist, so they weren't removed.",
"Removed <code>{}</code> triggers from the blacklist in <b>{}</b>! {} did not exist, so were not removed.":
"Removed <code>{}</code> triggers from the blacklist in <b>{}</b>! {} did not exist, so were not removed.",
"Tell me which words you would like to remove from the blacklist.":
"Tell me which words you would like to remove from the blacklist.",
#Filters
"*Filters in {}:*\n": "*Filters in {}:*\n",
"local filters": "local filters",
"*local filters:*\n": "*local filters:*\n",
"No filters in {}!": "No filters in {}!",
"There is no note message - You can't JUST have buttons, you need a message to go with it!":
"There is no note message - You can't JUST have buttons, you need a message to go with it!",
"You didn't specify what to reply with!": "You didn't specify what to reply with!",
"Handler '{}' added in *{}*!": "Handler '{}' added in *{}*!",
"No filters are active in {}!": "No filters are active in {}!",
"Yep, I'll stop replying to that in *{}*." : "Yep, I'll stop replying to that in *{}*.",
"That's not a current filter - run /filters for all active filters.":
"That's not a current filter - run /filters for all active filters.",
#Disable
"Disabled the use of `{}` in *{}*": "Disabled the use of `{}` in *{}*",
"That command can't be disabled": "That command can't be disabled",
"What should I disable?": "What should I disable?",
"Enabled the use of `{}` in *{}*": "Enabled the use of `{}` in *{}*",
"Is that even disabled?": "Is that even disabled?",
"What should I enable?": "What should I enable?",
"The following commands are toggleable:\n{}": "The following commands are toggleable:\n{}",
"No commands can be disabled.": "No commands can be disabled.",
"No commands are disabled in *{}*!": "No commands are disabled in *{}*!",
"No commands are disabled!": "No commands are disabled!",
"The following commands are currently restricted in *{}*:\n{}":
"The following commands are currently restricted in *{}*:\n{}",
#Locks
"Locked {} messages for all non-admins!": "Locked {} messages for all non-admins!",
"What are you trying to lock...? Try /locktypes for the list of lockables":
"What are you trying to lock...? Try /locktypes for the list of lockables",
"I'm not an administrator, or haven't got delete rights.":
"I'm not an administrator, or haven't got delete rights.",
"Unlocked {} for everyone!": "Unlocked {} for everyone!",
"What are you trying to unlock...? Try /locktypes for the list of lockables":
"What are you trying to unlock...? Try /locktypes for the list of lockables",
"What are you trying to unlock...?": "What are you trying to unlock...?",
"I see a bot, and I've been told to stop them joining... but I'm not admin!":
"I see a bot, and I've been told to stop them joining... but I'm not admin!",
"Only admins are allowed to add bots to this chat! Get outta here.":
"Only admins are allowed to add bots to this chat! Get outta here.",
"There are no current locks in *{}*.": "There are no current locks in *{}*.",
"These are the locks in *{}*:": "These are the locks in *{}*:",
"this chat": "this chat",
#Log channel
"Now, forward the /setlog to the group you want to tie this channel to!":
"Now, forward the /setlog to the group you want to tie this channel to!",
"This channel has been set as the log channel for {}.":
"This channel has been set as the log channel for {}.",
"Successfully set log channel!": "Successfully set log channel!",
"*The steps to set a log channel are:*\n • add bot to the desired channel\n • send /setlog to the channel\n • forward the /setlog to the group\n":
"""*The steps to set a log channel are:*
• add bot to the desired channel
• send /setlog to the channel
• forward the /setlog to the group.""",
"Channel has been unlinked from {}": "Channel has been unlinked from {}",
"Log channel has been un-set.": "Log channel has been un-set.",
"No log channel has been set yet!": "No log channel has been set yet!",
#Users
"I've seen them in <code>{}</code> chats in total.":
"I've seen them in <code>{}</code> chats in total.",
"I've seen them in... Wow. Are they stalking me? They're in all the same places I am... oh. It's me.":
"I've seen them in... Wow. Are they stalking me? They're in all the same places I am... oh. It's me.",
#Msg_deleting
"Cannot delete all messages. The messages may be too old, I might not have delete rights, or this might not be a supergroup.":
"Cannot delete all messages. The messages may be too old, I might not have delete rights, or this might not be a supergroup.",
"Purge complete.": "Purge complete.",
"Reply to a message to select where to start purging from.":
"Reply to a message to select where to start purging from.",
"Whadya want to delete?": "Whadya want to delete?",
#Muting
"You'll need to either give me a username to mute, or reply to someone to be muted.":
"You'll need to either give me a username to mute, or reply to someone to be muted.",
"I'm not muting myself!": "I'm not muting myself!",
"Afraid I can't stop an admin from talking!": "Afraid I can't stop an admin from talking!",
"You'll need to either give me a username to unmute, or reply to someone to be unmuted.":
"You'll need to either give me a username to unmute, or reply to someone to be unmuted.",
"This user already has the right to speak in {}.": "This user already has the right to speak in {}.",
"Yep, {} can start talking again in {}!": "Yep, {} can start talking again in {}!",
"This user isn't even in the chat, unmuting them won't make them talk more than they already do!":
"This user isn't even in the chat, unmuting them won't make them talk more than they already do!",
"I really wish I could mute admins...": "I really wish I could mute admins...",
"I'm not gonna MUTE myself, are you crazy?" : "I'm not gonna MUTE myself, are you crazy?",
"You haven't specified a time to mute this user for!":
"You haven't specified a time to mute this user for!",
"Muted for {} in {}!": "Muted for {} in {}!",
"This user is already muted in {}!": "This user is already muted in {}!",
"Well damn, I can't mute that user.": "Well damn, I can't mute that user.",
"You'll need to either give me a username to restrict, or reply to someone to be restricted.":
"You'll need to either give me a username to restrict, or reply to someone to be restricted.",
"I'm not restricting myself!": "I'm not restricting myself!",
"Afraid I can't restrict admins!": "Afraid I can't restrict admins!",
"{} is restricted from sending media in {}!": "{} is restricted from sending media in {}!",
"This user is already restricted in {}!": "This user is already restricted in {}!",
"This user isn't in the {}!": "This user isn't in the {}!",
"You'll need to either give me a username to unrestrict, or reply to someone to be unrestricted.":
"You'll need to either give me a username to unrestrict, or reply to someone to be unrestricted.",
"This user already has the rights to send anything in {}.":
"This user already has the rights to send anything in {}.",
"Yep, {} can send media again in {}!": "Yep, {} can send media again in {}!",
"This user isn't even in the chat, unrestricting them won't make them send anything than they already do!":
"This user isn't even in the chat, unrestricting them won't make them send anything than they already do!",
"I really wish I could restrict admins...": "I really wish I could restrict admins...",
"I'm not gonna RESTRICT myself, are you crazy?": "I'm not gonna RESTRICT myself, are you crazy?",
"You haven't specified a time to restrict this user for!":
"You haven't specified a time to restrict this user for!",
"Well damn, I can't restrict that user.": "Well damn, I can't restrict that user.",
"{} is muted in {}!": "{} is muted in {}!",
"Restricted from sending media for {} in {}!": "Restricted from sending media for {} in {}!",
"Restricted for {} in {}!": "Restricted for {} in {}!",
#Notes
"Get rekt": "Get rekt.",
"Invalid Chat ID provided!": "Invalid Chat ID provided!", #Connections
"You don't seem to be referring to a user.": "You don't seem to be referring to a user.", #Admin, Bans, Muting
"I can't seem to find this user": "I can't seem to find this user", #Bans, Muting
"Yes": "Yes", #Antispam
"No": "No", #Antispam
#__main__
#Module names
"Admin": "Admin",
"AFK": "AFK",
"AntiFlood": "AntiFlood",
"Antispam Security": "Antispam Security",
"Bans": "Bans",
"Memes and etc.": "Memes and etc.",
"Word Blacklists": "Word Blacklists",
"Filters": "Filters",
"Federations": "Federations",
"Command disabling": "Command disabling",
"Locks": "Locks",
"Log Channels": "Log Channels",
"Misc": "Misc",
"Purges": "Purges",
"Muting & Restricting": "Muting & Restricting",
"Notes": "Notes",
"Reporting": "Reporting",
"RSS Feed": "RSS Feed",
"Rules": "Rules",
"Connections": "Connections",
"Bios and Abouts": "Bios and Abouts",
"Warnings": "Warnings",
"Welcomes/Goodbyes": "Welcomes/Goodbyes",
#Some main stuff
"Ini adalah bantuan untuk modul *{}*:\n{}": "Ini adalah bantuan untuk modul *{}*:\n{}",
"Back": "Back",
"send-help": """Hai disana! Nama saya {}.
Saya bot manajemen grup modular dengan beberapa tambahan menyenangkan! Lihatlah gagasan berikut untuk beberapa hal yang dapat saya bantu.
Perintah tersedia:
- /start: Perintah keren untuk memeriksa apakah bot masih hidup atau tidak
- /help: Bantuan.
- /help <nama modul>: Bantuan tentang modul.
- /donate: informasi tentang cara menyumbang!
- /lang: mengubah bahasa bot
- /settings:
-di PM: akan mengirimkan setelan untuk semua modul yang didukung.
-dalam grup: akan mengarahkan Kamu ke pm, dengan semua aturan.
{}
""",
"\nSemua perintah dapat digunakan dengan `/` atau `!`.\n": "\nSemua perintah dapat digunakan dengan `/` atau `!`.\n",
#Module helps
"Admin_help": """- /adminlist | /admin: daftar admin dalam obrolan
*Admin only:*
- /pin: diam-diam pin pesan yang dibalas - tambahkan 'loud' atau 'notify' untuk memberi notif kepada pengguna.
- /unpin: membatalkan pemasangan pesan yang saat ini disematkan
- /invitelink: mendapat tautan undangan
- /promote: mempromosikan pengguna yang dibalas
- /demote: menurunkan pengguna yang dibalas""",
"AFK_help": """ - /afk <reason>: tandai diri Kamu sebagai AFK.
- brb <reason>: sama dengan perintah afk - tetapi bukan perintah.
Ketika ditandai sebagai AFK, sebutan apa pun akan dijawab dengan pesan untuk mengatakan bahwa Kamu tidak tersedia! """,
"AntiFlood_help": """kau tahu bagaimana kadang-kadang, orang-orang yang bergabung, mengirim pesan 100 dan merusak obrolan? Dengan antiflood, itu tidak akan terjadi lagi!
Antiflood memungkinkan Kamu untuk mengambil tindakan pada pengguna yang mengirim lebih x pesan berturut-turut. Tindakan: ban/tendangan/mute/tban/tmute
Perintah yang tersedia adalah:
- /flood: mendapatkan pengaturan antiflood saat ini.
-/setflood <number/off>: menetapkan jumlah pesan di mana untuk mengambil tindakan pada pengguna.""",
"Locks_help": """Apakah stiker mengganggu Kamu? atau ingin menghindari orang-orang yang berbagi link? atau gambar? Kamu berada di tempat yang tepat!
Modul kunci memungkinkan Kamu untuk mengunci berjarak beberapa item yang umum di dunia telegram; bot akan secara otomatis menghapus mereka!
Perintah yang tersedia adalah:
- / lock <item(s)>: mengunci penggunaan "item". Sekarang, hanya admin akan mampu menggunakan jenis ini!
- /unlock <item(s)>: membuka "item". Setiap orang dapat menggunakannya lagi.
- /locks: Daftar status kunci dalam obrolan.
-/locktypes: menampilkan daftar semua hal yang dapat dikunci. (silahkan lihat ini!)
eg: kunci stiker dengan:
/lock sticker""",
"Command disabling_help": """tidak semua menginginkan setiap fitur yang ditawarkan. Beberapa perintah kiri terbaik tidak terpakai; untuk menghindari spam dan penyalahgunaan.
Ini memungkinkan Kamu untuk menonaktifkan beberapa digunakan perintah, sehingga tidak ada dapat menggunakan mereka. Ini juga akan memungkinkan Kamu untuk autodelete mereka, menghentikan orang-orang dari
Perintah yang tersedia adalah:
- /disable <commandname>: menghentikan pengguna dari menggunakan perintah "commandname" dalam kelompok ini.
- /enable <commandname>: memungkinkan pengguna untuk menggunakan perintah "commandname" dalam kelompok ini lagi.
-/listcmds: daftar semua perintah tersedia.
- /disabled: Daftar perintah Penyandang Cacat di chat ini.
Note:
Saat menonaktifkan perintah, perintah hanya mendapat dinonaktifkan untuk bebas-admin. Semua admins masih dapat menggunakan perintah-perintah tersebut.
Perintah Penyandang Cacat masih dapat diakses melalui /connect fitur. Jika Kamu akan tertarik untuk melihat ini dinonaktifkan juga, biarkan aku tahu dalam obrolan dukungan.""",
"Filters_help": """Membuat obrolan lebih ramai dengan filter; Bot akan membalas dengan kata-kata tertentu!
Filter tidak peka huruf besar-kecil; setiap kali seseorang mengatakan kata-kata pemicu Kamu, {} akan membalas sesuatu yang lain! dapat digunakan untuk membuat perintah Kamu sendiri, jika diinginkan.
- /filter: daftarkan semua filter aktif dalam obrolan ini.
*Admin only:*
- /filter <keyword> <reply message>: Setiap kali seseorang mengatakan "kata", bot akan membalas dengan "kalimat". Untuk beberapa filter kata, kutip kata pertama.
- /stop <filter keyword>: hentikan filter itu.
Contoh cara mengatur filter akan melalui:
`/filter halo Halo! Bagaimana kabarmu?`
Filter multi kata dapat diatur melalui:
`/filter "halo teman" Halo kembali! Lama tidak bertemu!`
Jika Kamu ingin menyimpan gambar, gif, atau stiker, atau data lain, lakukan hal berikut:
`/filter kata saat menjawab stiker atau data apa pun yang Kamu inginkan. Sekarang, setiap kali seseorang menyebut "kata", stiker itu akan dikirim sebagai balasan.`
Sekarang, siapa pun yang mengatakan "halo" akan dijawab dengan "Halo! Bagaimana kabarmu?".""",
"Bans_help": """Beberapa orang perlu dilarang di depan umum; spammer, gangguan, atau hanya troll.
Modul ini memungkinkan Kamu untuk melakukannya dengan mudah, dengan memaparkan beberapa tindakan umum, sehingga semua orang akan melihat!
Perintah yang tersedia adalah:
- /ban: melarang pengguna dari obrolan Kamu.
- /banme: melarang diri sendiri
- /tban: sementara melarang pengguna dari obrolan Kamu. setel waktu menggunakan int <d/h/m> (hari jam menit)
- /unban: membatalkan pemblokiran pengguna dari obrolan Kamu.
- /sban: diam-diam melarang pengguna. (melalui username, atau balas)
- /mute: membisukan pengguna dalam obrolan Kamu.
- /tmute: membisukan pengguna untuk sementara di obrolan Kamu. setel waktu menggunakan int <d/h/m> (hari jam menit)
- /unmute: membatalkan bisu pengguna dalam obrolan.
- /kick: menendang pengguna dari obrolan.
- /kickme: pengguna yang menggunakan perintah ini menendang diri sendiri!
Contoh membisukan seseorang untuk sementara:
/tmute @username 2h; ini membisukan pengguna selama 2 jam.""",
"Connections_help": """kadang-kadang, Kamu hanya ingin menambahkan beberapa catatan dan filter ke grup obrolan, tetapi Kamu tidak ingin semua orang untuk melihat; Ini adalah di mana sambungan datang...
Hal ini memungkinkan Kamu untuk menghubungkan ke database chatting, dan menambahkan sesuatu untuk itu tanpa chat mengetahui tentang hal itu! Untuk alasan yang jelas, Kamu perlu untuk menjadi seorang admin untuk menambahkan hal-hal; tapi setiap anggota dapat melihat data Kamu. (pengguna dilarang/menendang tidak bisa!)
Tindakan tersedia dengan kelompok-kelompok yang terhubung:
• Lihat dan edit catatan
• Lihat dan edit filter
• Lihat dan edit blacklist
• Mempromosikan/demote pengguna
• Lihat adminlist, lihat invitelink
• Menonaktifkan/mengaktifkan perintah chat
• Mute/bersuara pengguna di chat
• Membatasi/unrestrict pengguna di chat
• Lagi di masa depan!
-/connection <chatid>: terhubung ke remote chat
- /disconnect: Lepaskan dari chat
-/allowconnect on/yes/off/no: memungkinkan menghubungkan pengguna ke grup
Kamu dapat mengambil id chatting dengan menggunakan perintah /id dalam obrolan Kamu. Jangan terkejut jika id ada negatif; Semua kelompok yang super memiliki Id negatif.""",
"Log Channels_help": """*Admin only:*
- /logchannel: dapatkan info saluran log
- /setlog: atur saluran log.
- /unsetlog: hapus saluran log.
Pengaturan saluran log dilakukan dengan:
- menambahkan bot ke saluran yang diinginkan (sebagai admin!)
- mengirim /setlog di saluran
- meneruskan /setlog ke grup
""",
"Reporting_help": """ - /report <reason>: balas pesan untuk melaporkannya ke admin.
- @admin: balas pesan untuk melaporkannya ke admin.
CATATAN: tidak satu pun dari ini akan dipicu jika digunakan oleh admin
*Admin only:*
- /reports <on/off>: mengubah pengaturan laporan, atau melihat status saat ini.
- Jika dilakukan di pm, mengubah status Kamu.
- Jika dalam obrolan, mengubah status obrolan itu.""",
"Notes_help": """Save data for future users with notes!
Catatan bagus untuk menyimpan informasi acak; nomor telepon, gif yang bagus, gambar lucu - apa saja!
Perintah yang tersedia adalah:
- /save <word> <sentence>: Simpan kalimat itu ke catatan yang disebut "word". Membalas pesan akan menyimpan pesan itu. Bahkan bekerja di media!
- /get <word>: dapatkan catatan terdaftar untuk kata itu.
- #<word>: sama dengan /get word
- /clear <word>: hapus catatan yang disebut "word"
- /notes: Daftar semua catatan dalam obrolan saat ini
- /saved: sama dengan /notes
Contoh cara menyimpan catatan adalah melalui:
/save data Ini beberapa data!
Sekarang, siapa pun yang menggunakan "/get data", atau "#data" akan dijawab dengan "Ini adalah beberapa data!".
Jika Kamu ingin menyimpan gambar, gif, atau stiker, atau data lain, lakukan hal berikut:
/save word saat membalas stiker atau data apa pun yang Kamu inginkan. Sekarang, catatan di "#word" berisi stiker yang akan dikirim sebagai balasan.
Kiat: untuk mengambil catatan tanpa memformat, gunakan /get <notename> noformat
Ini akan mengambil catatan dan mengirimkannya tanpa memformatnya; memberi Kamu markdown mentah, memungkinkan Kamu untuk mengedit dengan mudah""",
"Muting & Restricting_help": """Beberapa orang perlu dilarang di depan umum; spammer, gangguan, atau hanya troll.
Modul ini memungkinkan Kamu untuk melakukannya dengan mudah, dengan memaparkan beberapa tindakan umum, sehingga semua orang akan melihat!
Perintah yang tersedia adalah:
- /ban: melarang pengguna dari obrolan Kamu.
- /banme: melarang diri sendiri
- /tban: sementara melarang pengguna dari obrolan Kamu. setel waktu menggunakan int <d/h/m> (hari jam menit)
- /unban: membatalkan pemblokiran pengguna dari obrolan Kamu.
- /sban: diam-diam melarang pengguna. (melalui username, atau balas)
- /mute: membisukan pengguna dalam obrolan Kamu.
- /tmute: membisukan pengguna untuk sementara di obrolan Kamu. setel waktu menggunakan int <d/h/m> (hari jam menit)
- /unmute: membatalkan bisu pengguna dalam obrolan.
- /kick: menendang pengguna dari obrolan.
- /kickme: pengguna yang menggunakan perintah ini menendang diri sendiri!
Contoh membisukan seseorang untuk sementara:
/tmute @username 2h; ini membisukan pengguna selama 2 jam.""",
"Misc_help": """ - /id: get the current group id. Jika digunakan dengan membalas pesan, dapatkan id pengguna itu.
- /runs: balas string acak dari berbagai balasan.
- /insult: balas string acak dari berbagai balasan.
- /slap: menampar pengguna, atau ditampar jika bukan balasan.
- /info: dapatkan informasi tentang pengguna.
- /gdpr: menghapus informasi Kamu dari basis data bot. Obrolan pribadi saja.
- /stickerid: balas stiker ke saya untuk memberi tahu Kamu file ID-nya.
- /getsticker: balas stiker ke saya untuk mengunggah file PNG mentahnya.
- /markdownhelp: ringkasan singkat tentang bagaimana markdown berfungsi di telegram - hanya dapat dipanggil dalam obrolan pribadi.
- /git: Mendapatkan info tentang pengguna atau organisasi GitHub.
- /repo: Mendapatkan daftar repositori pengguna atau organisasi GitHub (Terbatas pada 40)
- /lyrics: Temukan lirik lagu favorit Kamu!
- /paste: Buat tempel atau url singkat menggunakan [dogbin](https://del.dog)
- /getpaste: Dapatkan konten tempel atau url singkat dari [dogbin](https://del.dog)
- /pastestats: Dapatkan statistik tempel atau url singkat dari [dogbin](https://del.dog)
- /ud: Ketikkan kata atau ungkapan yang ingin Kamu cari. Misalnya /ud Gay
- /removebotkeyboard: Punya keyboard bot jahat tersangkut di grup Kamu?
- /exec <language> <code> [/stdin <stdin>]: Jalankan kode dalam bahasa yang ditentukan. Kirim perintah kosong untuk mendapatkan bahasa yang didukung.""",
"Bios and Abouts_help": """ - /setbio <text>: saat menjawab, akan menambah bio pengguna lain
- /bio: akan mendapatkan bio Kamu atau pengguna lain. Ini tidak dapat diatur sendiri.
- /setme <text>: memperbarui info kamu
- /me: akan mendapatkan info Kamu atau pengguna lain""",
"Rules_help": """ - /rules: dapatkan aturan untuk obrolan ini.
*Admin only:*
- /setrules <rules>: tetapkan aturan untuk obrolan ini.
- /clearrules: hapus aturan untuk obrolan ini.""",
"Warnings_help": """ - /warns <userhandle>: dapatkan nomor pengguna, dan alasan, peringatan.
- /warnlist: daftar semua filter peringatan saat ini
*Admin only:*
- /warn <userhandle>: peringati seseorang. Setelah 3 peringatan, pengguna akan diblokir dari grup. Bisa juga digunakan sebagai balasan.
- /resetwarn <userhandle>: atur ulang peringatan untuk pengguna. Bisa juga digunakan sebagai balasan.
- /addwarn <keyword> <reply message>: atur filter peringatan pada kata kunci tertentu. Jika Kamu ingin kata kunci Kamu \
menjadi kalimat, sertakan dengan tanda kutip, seperti: `/addwarn "sangat marah" Ini adalah pengguna yang marah`.
- /nowarn <keyword>: hentikan filter peringatan
- /warnlimit <num>: mengatur batas peringatan
- /strongwarn <on/yes/off/no>: Jika diatur ke aktif, melebihi batas peringatan akan menghasilkan pemblokiran. Jika tidak, hanya akan menendang.
- /rmwarn <userhandle>: menghapus peringatan terbaru untuk pengguna. Bisa juga digunakan sebagai balasan.
- /unwarn <userhandle>: sama dengan /rmwarn""",
"Welcomes/Goodbyes_help": """
Beri anggota Kamu sambutan hangat dengan modul salam! Atau selamat tinggal yang menyedihkan... Tergantung!
Perintah yang tersedia adalah:
- /welcome <on/off/yes/no>: mengaktifkan / menonaktifkan pesan sambutan. Jika tidak ada opsi yang diberikan, mengembalikan pesan selamat datang saat ini dan pengaturan selamat datang.
- /goodbye <on/off/yes/no>: mengaktifkan / menonaktifkan pesan selamat tinggal. Jika tidak ada opsi yang diberikan, mengembalikan pesan selamat tinggal saat ini dan pengaturan selamat tinggal.
- /setwelcome <message>: set pesan sambutan baru Kamu! Markdown dan tombol didukung, serta tambalan.
- /resetwelcome: mengatur ulang pesan sambutan Kamu ke default; menghapus semua perubahan yang telah Kamu buat.
- /setgoodbye <message>: set pesan selamat tinggal Kamu! Markdown dan tombol didukung, serta tambalan.
- /resetgoodbye: mengatur ulang pesan selamat tinggal Kamu ke default; menghapus semua perubahan yang telah Kamu buat.
- /cleanwelcome <on/off/yes/no>: menghapus pesan sambutan lama; ketika orang baru bergabung, pesan lama dihapus.
- /cleanservice <on/off/yes/no>: menghapus semua pesan layanan; itu adalah "x bergabung dengan grup" yang Kamu lihat ketika orang bergabung.
- /welcomesecurity <off/soft/hard>: soft membatasi izin pengguna untuk mengirim file media selama 24 jam, hard membatasi izin pengguna untuk mengirim pesan.
Fillings:
Seperti disebutkan, Kamu dapat menggunakan tag tertentu untuk mengisi pesan selamat datang Kamu dengan pengguna atau info obrolan; ada:
{first}: Nama depan pengguna.
{last}: Nama belakang pengguna.
{fullname}: Nama lengkap.
{username}: Nama pengguna dari pengguna; jika tidak ada yang tersedia, sebutkan pengguna.
{mention}: Menyebutkan pengguna, menggunakan nama depan mereka.
{id}: Id pengguna.
{chatname}: Nama chat.
Contoh cara menggunakan tambalan adalah mengatur sambutan Kamu, melalui:
/setwelcome Hello {first}! Selamat datang di {chatname}.
Kamu dapat mengaktifkan / menonaktifkan pesan sambutan seperti:
/welcome off
Jika Kamu ingin menyimpan gambar, gif, atau stiker, atau data lain, lakukan hal berikut:
/setwelcome saat membalas stiker atau data apa pun yang Kamu inginkan. Data ini sekarang akan dikirim untuk menyambut pengguna baru.
Tip: gunakan /welcome noformat untuk mengambil pesan selamat datang yang belum diformat.
Ini akan mengambil pesan selamat datang dan mengirimkannya tanpa memformatnya; memberi Kamu markdown mentah, memungkinkan Kamu untuk mengedit dengan mudah.
Ini juga bekerja dengan /goodbye.""",
"Word Blacklists_help":"""Kamu dapat mengatur filter daftar hitam untuk mengambil tindakan otomatis pada orang-orang ketika mereka mengatakan hal-hal tertentu. Ini dilakukan dengan menggunakan:
- /addblacklist <blacklist trigger> <blacklist reason>: daftar hitam pemicunya. Kamu dapat mengatur kalimat dengan menaruh tanda kutip di sekitar alasannya.
- /unblacklist <blacklist trigger>: berhenti daftar hitam pemicu daftar hitam tertentu.
- /rmblacklist <blacklist trigger>: sama dengan /unblacklist
- /blacklist: daftar semua filter daftar hitam aktif
/addblacklist "the admins suck" Respect your admins!
Ini akan menghapus pesan yang terdapat kata 'the admins suck'.
Jika Kamu telah mengaktifkan mode daftar hitam alternatif, itu akan memperingatkan, mencekal, menendang, atau membisukan pengguna dengan pesan yang menyebutkan alasannya.
Top tip:
Daftar hitam memungkinkan Kamu menggunakan beberapa pengubah untuk mencocokkan karakter "unknown". Misalnya, Kamu dapat menggunakan karakter ? untuk mencocokkan satu kemunculan karakter apa pun.
Kamu juga dapat menggunakan pengubah *, yang cocok dengan sejumlah karakter apa pun. Jika Kamu ingin memasukkan URL ke daftar hitam, ini memungkinkan Kamu mencocokkan tautan secara penuh.
Sebagai contoh, berikut akan melarang link bit.ly:
/addblacklist "bit.ly/*" kita tidak menyukai layanan pemendek!
Jika Kamu hanya ingin mencocokkan link bit.ly/ yang diikuti tiga karakter, Kamu dapat menggunakan:
/addblacklist "bit.ly/???" Kita tidak menyukai layanan pemendek!
Ini akan cocok dengan bit.ly/abc, tetapi tidak dengan bit.ly/abcd.""",
"Purges_help": """perlu menghapus banyak pesan? Itulah mengapa purge ada!
Perintah yang tersedia adalah:
- /purge: menghapus semua pesan dari pesan Kamu menjawab, untuk pesan yang sekarang.
- /purge X: menghapus X pesan setelah pesan Kamu membalas (termasuk menjawab pesan)
-/del: menghapus pesan yang Kamu jawab.
""",
"Federations_help": """Ah, manajemen grup. Hal ini semua menyenangkan dan permainan, sampai Kamu mulai mendapatkan spammer di, dan Kamu perlu untuk melarang mereka. Maka Kamu perlu untuk mulai melarang lebih dan lebih, dan itu akan menyakitkan.
Tapi kemudian Kamu memiliki beberapa kelompok, dan Kamu tidak ingin spammer ini dalam salah satu grup Kamu - bagaimana Kamu dapat menangani? Apakah Kamu memiliki melarang mereka secara manual, di semua kelompok Kamu?
Terinspirasi oleh [Rose bot](t.me/MissRose_bot)
Jangan lagi! Dengan Federasi, Kamu dapat membuat ban di tumpang-tindih satu obrolan untuk semua obrolan lain.
Kamu bahkan dapat menunjuk admin federasi, sehingga admin tepercaya Kamu dapat melarang semua obrolan yang ingin Kamu lindungi.
Commands:
- /newfed <fedname>: membuat federasi baru dengan nama yang diberikan. Pengguna hanya diperbolehkan memiliki satu federasi. Metode ini juga dapat digunakan untuk mengubah nama federasi. (max 64 characters)
- /delfed: menghapus federasi Kamu, dan informasi apa pun yang berkaitan dengannya. Tidak akan membatalkan pencekalan pengguna yang diblokir.
- /fedinfo <FedID>: informasi tentang federasi yang ditentukan.
- /joinfed <FedID>: bergabung dengan obrolan saat ini ke federasi. Hanya pemilik obrolan yang dapat melakukan ini. Setiap obrolan hanya bisa dalam satu federasi.
- /leavefed <FedID>: meninggalkan federasi yang diberikan. Hanya pemilik obrolan yang dapat melakukan ini.
- /fpromote <user>: mempromosikan pengguna untuk memberi makan admin. Pemilik Fed saja.
- /fdemote <user>: menurunkan pengguna dari admin yang diumpankan ke pengguna normal. Hanya pemilik Fed.
- /fban <user>: melarang pengguna dari semua federasi tempat obrolan ini berlangsung, dan eksekutor memiliki kendali atas.
- /unfban <user>: batalkan pengguna dari semua federasi tempat obrolan ini berlangsung, dan bahwa pelaksana memiliki kendali atas.
- /setfrules: Tetapkan aturan federasi
- /frules: Tampilkan aturan federasi
- /chatfed: Tampilkan federasi tempat obrolan
- /fedadmins: Tampilkan admin federasi""",
#GDPR
"send-gdpr": """Data pribadi Kamu telah dihapus.\n\nPerhatikan bahwa ini tidak akan membatalkan blokir \
kamu dari obrolan apa pun, karena itu adalah data telegram, bukan data Bot.
Flooding, warns, dan gbans tetap tersimpan, sesuai dengan \
[tautan ini](https://ico.org.uk/for-organisations/guide-to-the-general-data-protection-regulation-gdpr/individual-rights/right-to-erasure/), \
yang dengan jelas menyatakan bahwa hak untuk dihapus tidak berlaku \
\"untuk pelaksanaan tugas yang dilakukan untuk kepentingan umum\", seperti halnya \
kasus untuk potongan data tersebut."""
}
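The loader that consumes this dictionary is not part of this record, so the following is only a minimal sketch of how a strings table like the one above could be looked up; the import path and the tr() helper are assumptions, not code from the bot.
# Minimal sketch - the import path below is hypothetical; only the dict above is real.
from tg_bot.translations.id import IndonesianStrings  # hypothetical module path

def tr(key, *args):
    # Fall back to the key itself when no translation exists, mirroring how the
    # untranslated English keys above double as their own default text.
    template = IndonesianStrings.get(key, key)
    return template.format(*args) if args else template

# e.g. tr("Antiflood has been updated and set to {}", 5)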
| 57.25896
| 318
| 0.71657
|
a3ef930b8006160f9dfad2785ba4fce5e91a89c3
| 397
|
py
|
Python
|
superlists/superlists/wsgi.py
|
ology/TDD-with-Python
|
6aa89edbc5c69ee22d32f317f853aaa53888aa74
|
[
"Artistic-2.0"
] | null | null | null |
superlists/superlists/wsgi.py
|
ology/TDD-with-Python
|
6aa89edbc5c69ee22d32f317f853aaa53888aa74
|
[
"Artistic-2.0"
] | 9
|
2019-12-04T22:54:17.000Z
|
2022-02-10T08:45:16.000Z
|
superlists/superlists/wsgi.py
|
ology/TDD-with-Python
|
6aa89edbc5c69ee22d32f317f853aaa53888aa74
|
[
"Artistic-2.0"
] | 1
|
2020-01-20T12:44:56.000Z
|
2020-01-20T12:44:56.000Z
|
"""
WSGI config for superlists project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'superlists.settings')
application = get_wsgi_application()
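The docstring above defers to Django's deployment guide; as a minimal local-only sketch (wsgiref is in the standard library, while a real deployment would use a production WSGI server), the exposed `application` callable can be served like this:
# Local smoke test only - not a production setup.
from wsgiref.simple_server import make_server
from superlists.wsgi import application

if __name__ == "__main__":
    with make_server("127.0.0.1", 8000, application) as httpd:
        # Serves the Django project until interrupted.
        httpd.serve_forever()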
| 23.352941
| 78
| 0.788413
|
cc583cbb377b85996bbde0b56f9eef1b4b8a02fb
| 3,933
|
py
|
Python
|
tests/test_runner/test_debug_sql.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_runner/test_debug_sql.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_runner/test_debug_sql.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import unittest
from django.db import connection
from django.test import TestCase
from django.test.runner import DiscoverRunner
from django.utils import six
from .models import Person
@unittest.skipUnless(connection.vendor == 'sqlite', 'Only run on sqlite so we can check output SQL.')
class TestDebugSQL(unittest.TestCase):
class PassingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='pass').count()
class FailingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='fail').count()
self.fail()
class ErrorTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='error').count()
raise Exception
def _test_output(self, verbosity):
runner = DiscoverRunner(debug_sql=True, verbosity=0)
suite = runner.test_suite()
suite.addTest(self.FailingTest())
suite.addTest(self.ErrorTest())
suite.addTest(self.PassingTest())
old_config = runner.setup_databases()
stream = six.StringIO()
resultclass = runner.get_resultclass()
runner.test_runner(
verbosity=verbosity,
stream=stream,
resultclass=resultclass,
).run(suite)
runner.teardown_databases(old_config)
stream.seek(0)
return stream.read()
def test_output_normal(self):
full_output = self._test_output(1)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertNotIn(output, full_output)
def test_output_verbose(self):
full_output = self._test_output(2)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertIn(output, full_output)
if six.PY3:
expected_outputs = [
('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = ('*', 'error');'''),
('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = ('*', 'fail');'''),
]
else:
expected_outputs = [
('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = (u'*', u'error');'''),
('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = (u'*', u'fail');'''),
]
verbose_expected_outputs = [
# Output format changed in Python 3.5+
x.format('' if sys.version_info < (3, 5) else 'TestDebugSQL.') for x in [
'runTest (test_runner.test_debug_sql.{}FailingTest) ... FAIL',
'runTest (test_runner.test_debug_sql.{}ErrorTest) ... ERROR',
'runTest (test_runner.test_debug_sql.{}PassingTest) ... ok',
]
]
if six.PY3:
verbose_expected_outputs += [
('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = ('*', 'pass');'''),
]
else:
verbose_expected_outputs += [
('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = (u'*', u'pass');'''),
]
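For context, the debug_sql flag these tests exercise is the same one exposed on Django's test command; a hedged reminder of the equivalent command-line usage (assuming a standard manage.py entry point) follows.
# python manage.py test --debug-sql        # prints the SQL for failing/erroring tests
# python manage.py test --debug-sql -v 2   # verbosity 2 also prints SQL for passing
#                                          # tests, matching test_output_verbose() above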
| 36.757009
| 101
| 0.555301
|
b9e61fed79c2b4789dec613681d922f49fc7878a
| 559
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTwomorefreethoughtsCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTwomorefreethoughtsCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTwomorefreethoughtsCom.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractTwomorefreethoughtsCom(item):
'''
Parser for 'twomorefreethoughts.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
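extractVolChapterFragmentPostfix and buildReleaseMessageWithType are supplied elsewhere in the WebMirror package rather than imported in this file. As a minimal sketch, the item shape below is an assumption based only on the keys the parser reads ('title' and 'tags'):
# Hypothetical feed item - only 'title' and 'tags' are consumed above.
sample_item = {
    'title': 'Some Novel - Chapter 12',
    'tags': ['PRC'],
}
# Expected behaviour: a release message for a tagged series, None for previews or
# unnumbered titles, and False when no known tag matches.
result = extractTwomorefreethoughtsCom(sample_item)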
| 25.409091
| 104
| 0.640429
|
9b6811b3fa1c9eb467eba2c3dc1262f847dff4c0
| 4,469
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/notification/jabber.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/notification/jabber.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/notification/jabber.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: jabber
short_description: Send a message to jabber user or chat room
description:
- Send a message to jabber
options:
user:
type: str
description:
- User as which to connect
required: true
password:
type: str
description:
- password for user to connect
required: true
to:
type: str
description:
- user ID or name of the room, when using room use a slash to indicate your nick.
required: true
msg:
type: str
description:
- The message body.
required: true
host:
type: str
description:
- host to connect, overrides user info
port:
type: int
description:
- port to connect to, overrides default
default: 5222
encoding:
type: str
description:
- message encoding
# informational: requirements for nodes
requirements:
- python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
- name: Send a message to a user
community.general.jabber:
user: mybot@example.net
password: secret
to: friend@example.net
msg: Ansible task finished
- name: Send a message to a room
community.general.jabber:
user: mybot@example.net
password: secret
to: mychaps@conference.example.net/ansiblebot
msg: Ansible task finished
- name: Send a message, specifying the host and port
community.general.jabber:
user: mybot@example.net
host: talk.example.net
port: 5223
password: secret
to: mychaps@example.net
msg: Ansible task finished
'''
import time
import traceback
HAS_XMPP = True
XMPP_IMP_ERR = None
try:
import xmpp
except ImportError:
XMPP_IMP_ERR = traceback.format_exc()
HAS_XMPP = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True),
password=dict(required=True, no_log=True),
to=dict(required=True),
msg=dict(required=True),
host=dict(required=False),
port=dict(required=False, default=5222, type='int'),
encoding=dict(required=False),
),
supports_check_mode=True
)
if not HAS_XMPP:
module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR)
jid = xmpp.JID(module.params['user'])
user = jid.getNode()
server = jid.getDomain()
port = module.params['port']
password = module.params['password']
try:
to, nick = module.params['to'].split('/', 1)
except ValueError:
to, nick = module.params['to'], None
if module.params['host']:
host = module.params['host']
else:
host = server
if module.params['encoding']:
xmpp.simplexml.ENCODING = module.params['encoding']
msg = xmpp.protocol.Message(body=module.params['msg'])
try:
conn = xmpp.Client(server, debug=[])
if not conn.connect(server=(host, port)):
module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
if not conn.auth(user, password, 'Ansible'):
module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
# some old servers require this, also the sleep following send
conn.sendInitPresence(requestRoster=0)
if nick: # sending to room instead of user, need to join
msg.setType('groupchat')
msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
join = xmpp.Presence(to=module.params['to'])
join.setTag('x', namespace='http://jabber.org/protocol/muc')
conn.send(join)
time.sleep(1)
else:
msg.setType('chat')
msg.setTo(to)
if not module.check_mode:
conn.send(msg)
time.sleep(1)
conn.disconnect()
except Exception as e:
module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
if __name__ == '__main__':
main()
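The EXAMPLES block above covers playbook usage; for completeness, a hedged sketch of the equivalent ad-hoc invocation (host pattern and credential values are placeholders) is:
# ansible localhost -m community.general.jabber \
#   -a "user=mybot@example.net password=secret to=friend@example.net msg='Ansible task finished'"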
| 26.760479
| 103
| 0.640412
|
d51eea49ca0c35aa4c3047acc5c2fee3678ffd13
| 487
|
py
|
Python
|
bootstrap.py
|
kerr-huang/blade
|
a98f35a8bbed13a14213c7c696485b742a13e7aa
|
[
"BSD-3-Clause"
] | 2
|
2021-06-29T13:42:22.000Z
|
2021-09-06T10:57:34.000Z
|
bootstrap.py
|
kerr-huang/blade
|
a98f35a8bbed13a14213c7c696485b742a13e7aa
|
[
"BSD-3-Clause"
] | null | null | null |
bootstrap.py
|
kerr-huang/blade
|
a98f35a8bbed13a14213c7c696485b742a13e7aa
|
[
"BSD-3-Clause"
] | 5
|
2021-06-29T13:42:26.000Z
|
2022-02-08T02:41:34.000Z
|
# Copyright (c) 2013 Tencent Inc.
# All rights reserved.
# Author: Feng Chen <phongchen@tencent.com>
"""This is the entry point to load and run blade package.
"""
import sys
import os.path
# Load package from blade.zip or source dir?
# blade_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'blade.zip'))
blade_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src/blade'))
sys.path.insert(0, blade_path)
import blade_main
blade_main.main(blade_path)
| 22.136364
| 84
| 0.73922
|
b87900311a39d33afcad5ce2a4cdc31e7dd31de7
| 507
|
py
|
Python
|
tests/igniter/test_tools.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
tests/igniter/test_tools.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
tests/igniter/test_tools.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from uuid import uuid4
from igniter.tools import validate_path_string
def test_validate_path_string(tmp_path):
# test path
status1, _ = validate_path_string(tmp_path.as_posix())
assert status1 is True
status2, _ = validate_path_string("booo" + str(uuid4()))
assert status2 is False
# todo: change when Pype token is implemented
status3, reason = validate_path_string(str(uuid4()))
assert status3 is False
assert reason == "Not implemented yet"
| 29.823529
| 60
| 0.717949
|
81b787864490af536ec740d3132cbe24b8e17cae
| 7,528
|
py
|
Python
|
main.py
|
arodriguez23434/arodriguez_2016_matcutter
|
06226b0bc15260a3e7aebdb731cd1b70e25e0261
|
[
"Unlicense"
] | null | null | null |
main.py
|
arodriguez23434/arodriguez_2016_matcutter
|
06226b0bc15260a3e7aebdb731cd1b70e25e0261
|
[
"Unlicense"
] | null | null | null |
main.py
|
arodriguez23434/arodriguez_2016_matcutter
|
06226b0bc15260a3e7aebdb731cd1b70e25e0261
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
import math
#Initialize list of occupied slices and recieve user input for material specifications
ocp_list=list()
print("---Sheet Material Slicer and Cost Calculator---")
ocp_xmat=float(input("What is the length of the material sheet? "));
ocp_ymat=float(input("What is the width of the material sheet? "));
ocp_basecost=float(input("What is the cost of one of these sheets? "));
ocp_xslice=float(input("What is the length of the slices you want to make? "));
ocp_yslice=float(input("What is the width of the slices you want to make? "));
ocp_quantdesired=int(input("How many sliced units do you want? "));
ocp_wastearea=float(input("What is the cost of disposal per unit area? "));
ocp_wastecost=float(input("What is the unit area for waste disposal? "));
print("\nProcessing...")
#Find out the largest x or y coordinate in the list of occupied slices.
#Useful for skipping already occupied slices for performing a second, modified scan.
def get_max_coord(xory):
if (xory==0 or xory=='x'): temp_int=0
elif (xory==1 or xory=='y'): temp_int=1
else: print("\nERROR: get_max_coord called with invalid input, defaulting to x");temp_int=0
temp_max=0
while (temp_int<len(ocp_list)):
if (temp_max<ocp_list[temp_int]): temp_max=ocp_list[temp_int]
temp_int+=2
return temp_max
#Check to see if a pair of coordinates are in any generic rectangle
#Used in the main scan function to check for occupied slices.
def scan_slice(xcheck,ycheck,xint,yint,xfin,yfin,tick):
slice_present=0
temp_iterate=xint
while (temp_iterate<=xfin):
#Is the x coordinate within the slice?
if (temp_iterate==xcheck): slice_present+=1;
temp_iterate+=tick
temp_iterate=yint
while (temp_iterate<=yfin):
#Is the y coordinate within the slice?
if (temp_iterate==ycheck): slice_present+=1;
temp_iterate+=tick
if (slice_present<2): return 0 #If not, the slice is free
else: return 1 #If both coordinates are within the slice, the slice is occupied
return
#The main scanning function
#Here, we check the x and y coordinates of our list of occupied slices and compare them
#to an iterating x and y coordinate pair that iterates based on the size of our slices.
#If the coordinates are not the list, then the slice is free and is added to the list.
def scan_material(xbase,ybase,xslice,yslice,ocp_list):
xscan=0;yscan=get_max_coord('y');
#Look in the table of recorded coordinates, then start at the bottom-most slice
while (yscan+yslice<=ybase): #Line-by-line scan of y-axis
xscan=0 #We will be iterating the entire x-axis as we go down the y-axis
while (xscan+xslice<=xbase): #Line-by-line scan of x in every y-axis scan
is_occupied=0;temp_interval=0
while temp_interval < len(ocp_list): #Check all existing points for conflicts
if (xslice<yslice): temp_tick=yslice/xslice
else: temp_tick=xslice/yslice
#We want to support decimals while hitting the coordinates we want.
#So, we iterate each coordinate scan based on larger slice/smaller slice ratio.
is_occupied+=scan_slice(xscan+xslice,yscan+yslice,ocp_list[temp_interval],ocp_list[temp_interval+1],ocp_list[temp_interval+2],ocp_list[temp_interval+3],temp_tick)
temp_interval+=4
if (is_occupied<1): #Does a conflict exist? If not, add coordinates to list
ocp_list+=[xscan,yscan,xscan+xslice,yscan+yslice]
xscan+=xslice
yscan+=yslice
yscan=0;
while (yscan+yslice<=ybase):
xscan=get_max_coord('x')
#Now, we will repeat the process from the right-most slice from the top of the y-axis
while (xscan+xslice<=xbase):
is_occupied=0;temp_interval=0
while temp_interval < len(ocp_list):
if (xslice<yslice): temp_tick=yslice/xslice
else: temp_tick=xslice/yslice
is_occupied+=scan_slice(xscan+xslice,yscan+yslice,ocp_list[temp_interval],ocp_list[temp_interval+1],ocp_list[temp_interval+2],ocp_list[temp_interval+3],temp_tick)
temp_interval+=4
if (is_occupied<1):
ocp_list+=[xscan,yscan,xscan+xslice,yscan+yslice]
xscan+=xslice
yscan+=yslice
return
#Print out the list of occupied slices in a user-readable fashion.
def print_coords():
temp_i=0
print("\nThe sheet can be sliced up into rectangular units with these coordinates:")
while (temp_i<len(ocp_list)-3):
print("({0:.5f},{1:.5f}) to ({2:.5f},{3:.5f})".format(ocp_list[temp_i],ocp_list[temp_i+1],ocp_list[temp_i+2],ocp_list[temp_i+3]))
temp_i+=4
return
#Calculate the total area, the sum of all the areas of the slice, and then the cost
#The cost will be based on material + wasted amount
def calculate_cost(xbase,ybase,temp_basecost,temp_wastecost,temp_wastearea,temp_quantdesired):
area_base=xbase*ybase
area_slices=0
slice_quantity=0
temp_i=0
while (temp_i<len(ocp_list)):
#Calculate the total area occupied
area_slices+=(ocp_list[temp_i+2]-ocp_list[temp_i])*(ocp_list[temp_i+3]-ocp_list[temp_i+1])
slice_quantity+=1 #Calculate how many sliced units can be made from a single material
temp_i+=4
area_waste=area_base-area_slices #Calculate the total area wasted
#Calculate how many material units we will need; always round up
temp_quanthave=math.ceil(temp_quantdesired/slice_quantity)
#Calculate the cost of waste disposal per slice
temp_costperwastedslice=(temp_wastecost*(area_waste/temp_wastearea))
#Calculate the number of slices leftover from final material
temp_leftoverwaste=round((temp_quanthave%(temp_quantdesired/slice_quantity))*slice_quantity)
#Calculate total waste cost; based on slices used times cost per slice + slices leftover times cost per slice
temp_wastecost=(((temp_quanthave-1)*temp_costperwastedslice)+(temp_leftoverwaste*temp_costperwastedslice))
print("\nArea of material sheet: {0:.5f}".format(area_base))
print("Area of used material: {0:.5f} vs. waste: {1:.5f}".format(area_slices,area_waste))
print("Sliced units desired: {0}".format(temp_quantdesired))
print("Slices from sheet: {0}".format(slice_quantity))
print("Material sheets required: {0}".format(temp_quanthave))
print("Cost of sheets: ${0:.2f} per sheet totaling to ${1:.2f}".format(temp_basecost,(temp_basecost*temp_quanthave)))
print("Cost of waste disposal: ${0:.2f} per sheet totaling to ${1:.2f}".format(temp_costperwastedslice,((temp_quanthave-1)*temp_costperwastedslice)))
print("Cost of waste disposal from leftovers in last sheet: ${0:.2f}".format((temp_leftoverwaste*temp_costperwastedslice)))
print("Aggregate waste cost: ${0:.2f}".format(temp_wastecost))
print("---\nTotal cost: ${0:.2f}".format((temp_basecost*temp_quanthave)+temp_wastecost))
return
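#Worked example (assumed numbers, independent of the prompts above): if a sheet yields 25 slices
#and 60 units are desired, temp_quanthave=math.ceil(60/25)=3 sheets, so a $20 sheet price alone
#contributes 3*$20=$60 before the waste-disposal terms are added on top.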
#Find where slices can be made in the material sheet
scan_material(ocp_xmat,ocp_ymat,ocp_xslice,ocp_yslice,ocp_list)
#Find where slices rotated 90 degrees can be made in the same material sheet
scan_material(ocp_xmat,ocp_ymat,ocp_yslice,ocp_xslice,ocp_list)
#Print out the coordinates of these slices
print_coords()
#Calculate and print the total cost of the whole process - including waste disposal
calculate_cost(ocp_xmat,ocp_ymat,ocp_basecost,ocp_wastecost,ocp_wastearea,ocp_quantdesired)
| 54.158273
| 178
| 0.713204
|
29c2f172eefd6f4563398207d79b147c0f3f4181
| 18,261
|
py
|
Python
|
deepdoctection/dataflow/custom_serialize.py
|
deepdoctection/deepdoctection
|
7e0d7396e5ef8bf8109904e09c5d4ee56cb5a036
|
[
"Apache-2.0"
] | 39
|
2021-12-14T11:05:25.000Z
|
2022-03-31T18:50:58.000Z
|
deepdoctection/dataflow/custom_serialize.py
|
deepdoctection/deepdoctection
|
7e0d7396e5ef8bf8109904e09c5d4ee56cb5a036
|
[
"Apache-2.0"
] | 17
|
2022-01-04T14:32:26.000Z
|
2022-03-29T14:01:36.000Z
|
deepdoctection/dataflow/custom_serialize.py
|
deepdoctection/deepdoctection
|
7e0d7396e5ef8bf8109904e09c5d4ee56cb5a036
|
[
"Apache-2.0"
] | 4
|
2022-01-11T16:40:17.000Z
|
2022-03-30T02:09:55.000Z
|
# -*- coding: utf-8 -*-
# File: custom_serialize.py
# Copyright 2021 Dr. Janis Meyer. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adding some methods that convert incoming data to dataflows.
"""
import itertools
import json
import os
from collections import defaultdict
from typing import DefaultDict, Dict, List, Optional, Sequence, Union
from dataflow.dataflow import DataFlow, JoinData, MapData
from jsonlines import Reader, Writer
from ..utils.context import timed_operation
from ..utils.detection_types import JsonDict, Pathlike
from ..utils.fs import is_file_extension
from ..utils.logger import logger
from ..utils.pdf_utils import PDFStreamer
from ..utils.tqdm import get_tqdm
from .common import FlattenData
from .custom import CacheData, CustomDataFromIterable, CustomDataFromList
__all__ = ["SerializerJsonlines", "SerializerFiles", "SerializerCoco", "SerializerPdfDoc"]
def _reset_df_and_get_length(df: DataFlow) -> int:
df.reset_state()
try:
length = len(df)
except NotImplementedError:
length = 0
return length
class SerializerJsonlines:
"""
Serialize a dataflow from a jsonlines file. Alternatively, save a dataflow of JSON objects to a .jsonl file.
**Example:**
.. code-block:: python
df = SerializerJsonlines.load("path/to/file.jsonl")
will yield each json object of the file.
"""
@staticmethod
def load(path: Pathlike, max_datapoints: Optional[int] = None) -> CustomDataFromIterable:
"""
:param path: a path to a .jsonl file.
:param max_datapoints: Will stop the iteration once max_datapoints have been streamed
:return: dataflow to iterate from
"""
file = open(path, "r") # pylint: disable=W1514,R1732
iterator = Reader(file)
return CustomDataFromIterable(iterator, max_datapoints=max_datapoints)
@staticmethod
def save(df: DataFlow, path: Pathlike, file_name: str, max_datapoints: Optional[int] = None) -> None:
"""
Writes a dataflow iteratively to a .jsonl file. Every datapoint must be a dict where all items are serializable.
As the length of the dataflow cannot be determined in every case, max_datapoints prevents generating an
unexpectedly large file.
:param df: The dataflow to write from.
:param path: The path, the .jsonl file to write to.
:param file_name: name of the target file.
:param max_datapoints: maximum number of datapoint to consider writing to a file.
"""
assert os.path.isdir(path), f"not a dir {path}"
assert is_file_extension(file_name, ".jsonl")
with open(os.path.join(path, file_name), "w") as file: # pylint: disable=W1514
writer = Writer(file)
length = _reset_df_and_get_length(df)
if length == 0:
logger.info("cannot estimate length of dataflow")
if max_datapoints is not None:
if max_datapoints < length:
logger.info("dataflow larger than max_datapoints")
for k, dp in enumerate(df):
if max_datapoints is None:
writer.write(dp)
elif k < max_datapoints:
writer.write(dp)
else:
break
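# Usage sketch (hypothetical path and file names, kept as comments so importing this module has
# no side effects):
#
#   df = SerializerJsonlines.load("samples.jsonl", max_datapoints=100)
#   SerializerJsonlines.save(df, "/tmp", "samples_copy.jsonl", max_datapoints=100)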
class SerializerFiles:
"""
Serialize files from a directory and all subdirectories. Only one file type can be serialized. Once specified, all
other types will be filtered out.
"""
@staticmethod
def load(
path: Pathlike,
file_type: Union[str, Sequence[str]],
max_datapoints: Optional[int] = None,
shuffle: Optional[bool] = False,
sort: Optional[bool] = True,
) -> DataFlow:
"""
Generates a dataflow where a datapoint consists of a string of names of files with respect to some file type.
If you want to load the files, you need to do that yourself in a subsequent step.
:param path: A path to some base directory. Will inspect all subdirectories, as well
:param file_type: A file type (suffix) to look out for (single str or list of strings)
:param max_datapoints: Stop iteration after passing max_datapoints
:param shuffle: Shuffle the files, so that the order of appearance in dataflow is random.
:param sort: If set to "True" it will sort all selected files by its string
:return: dataflow to iterate from
"""
if shuffle:
sort = False
it1 = os.walk(path, topdown=False)
it2 = os.walk(path, topdown=False)
df1 = CustomDataFromIterable(it1)
df2 = CustomDataFromIterable(it2)
df1 = MapData(df1, lambda dp: None if len(dp[2]) == 0 else dp)
df2 = MapData(df2, lambda dp: None if len(dp[2]) == 0 else dp)
df1 = MapData(df1, lambda dp: [dp[0]] * len(dp[2]))
df2 = MapData(df2, lambda dp: dp[2])
df1 = FlattenData(df1)
df2 = FlattenData(df2)
df3 = JoinData(df_lists=[df1, df2])
df3 = MapData(df3, lambda dp: os.path.join(dp[0], dp[1]))
df = MapData(df3, lambda dp: dp if is_file_extension(dp, file_type) else None)
if max_datapoints is not None or sort:
df_list = CacheData(df).get_cache()
if sort:
df_list.sort()
df = CustomDataFromList(df_list, max_datapoints=max_datapoints, shuffle=False)
elif shuffle:
df_list = CacheData(df).get_cache()
df = CustomDataFromList(df_list, shuffle=shuffle)
return df
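    # Usage sketch (hypothetical directory and suffix; kept as comments):
    #
    #   df = SerializerFiles.load("/data/documents", file_type=".pdf", max_datapoints=50)
    #   df.reset_state()
    #   for file_path in df:   # each datapoint is a full file path string
    #       ...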
@staticmethod
def save() -> None:
"""
Not implemented
"""
raise NotImplementedError
class CocoParser:
"""
A simplified version of the Microsoft COCO helper class for reading annotations. It currently supports only
bounding box annotations
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
def __init__(self, annotation_file: Optional[Pathlike] = None) -> None:
self.dataset: JsonDict = {}
self.anns: Dict[int, JsonDict] = {}
self.cats: Dict[int, JsonDict] = {}
self.imgs: Dict[int, JsonDict] = {}
self.img_to_anns: DefaultDict[int, List[int]] = defaultdict(list)
self.cat_to_imgs: DefaultDict[int, List[int]] = defaultdict(list)
if annotation_file is not None:
with timed_operation(message="loading annotations to memory"):
with open(annotation_file, "r", encoding="UTF-8") as file:
dataset = json.load(file)
assert isinstance(dataset, dict), f"annotation file format {type(dataset)} not supported"
self.dataset = dataset
self._create_index()
def _create_index(self) -> None:
with timed_operation(message="creating index"):
anns, cats, imgs = {}, {}, {}
img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
if "annotations" in self.dataset:
for ann in self.dataset["annotations"]:
img_to_anns[ann["image_id"]].append(ann)
anns[ann["id"]] = ann
if "images" in self.dataset:
for img in self.dataset["images"]:
imgs[img["id"]] = img
if "categories" in self.dataset:
for cat in self.dataset["categories"]:
cats[cat["id"]] = cat
if "annotations" in self.dataset and "categories" in self.dataset:
for ann in self.dataset["annotations"]:
cat_to_imgs[ann["category_id"]].append(ann["image_id"])
self.anns = anns
self.img_to_anns = img_to_anns
self.cat_to_imgs = cat_to_imgs
self.imgs = imgs
self.cats = cats
def info(self) -> None:
"""
Print information about the annotation file.
"""
for key, value in self.dataset["info"].items():
print(f"{key}: {value}")
def get_ann_ids(
self,
img_ids: Optional[Union[int, Sequence[int]]] = None,
cat_ids: Optional[Union[int, Sequence[int]]] = None,
area_range: Optional[Sequence[int]] = None,
is_crowd: Optional[bool] = None,
) -> Sequence[int]:
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param img_ids: get anns for given imgs
:param cat_ids: get anns for given cats
:param area_range: get anns for given area range (e.g. [0 inf])
:param is_crowd: get anns for given crowd label (False or True)
:return: ids: integer array of ann ids
"""
if img_ids is None:
img_ids = []
if cat_ids is None:
cat_ids = []
if area_range is None:
area_range = []
img_ids = [img_ids] if isinstance(img_ids, int) else img_ids
cat_ids = [cat_ids] if isinstance(cat_ids, int) else cat_ids
if len(img_ids) == len(cat_ids) == len(area_range) == 0:
anns = self.dataset["annotations"]
else:
if not len(img_ids) == 0:
lists = [self.img_to_anns[img_id] for img_id in img_ids if img_id in self.img_to_anns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset["annotations"]
anns = anns if len(cat_ids) == 0 else [ann for ann in anns if ann["category_id"] in cat_ids]
anns = (
anns if len(area_range) == 0 else [ann for ann in anns if area_range[0] < ann["area"] < area_range[1]]
)
if is_crowd is not None:
ids = [ann["id"] for ann in anns if ann["iscrowd"] == is_crowd]
else:
ids = [ann["id"] for ann in anns]
return ids
def get_cat_ids(
self,
category_names: Optional[Union[str, Sequence[str]]] = None,
supercategory_names: Optional[Union[str, Sequence[str]]] = None,
category_ids: Optional[Union[int, Sequence[int]]] = None,
) -> Sequence[int]:
"""
Filtering parameters. default skips that filter.
:param category_names: get cats for given cat names
:param supercategory_names: get cats for given supercategory names
:param category_ids: get cats for given cat ids
:return: ids: integer array of cat ids
"""
if category_names is None:
category_names = []
if supercategory_names is None:
supercategory_names = []
if category_ids is None:
category_ids = []
category_names = [category_names] if isinstance(category_names, str) else category_names
supercategory_names = [supercategory_names] if isinstance(supercategory_names, str) else supercategory_names
category_ids = [category_ids] if isinstance(category_ids, int) else category_ids
if len(category_names) == len(supercategory_names) == len(category_ids) == 0:
cats = self.dataset["categories"]
else:
cats = self.dataset["categories"]
cats = cats if len(category_names) == 0 else [cat for cat in cats if cat["name"] in category_names]
cats = (
cats
if len(supercategory_names) == 0
else [cat for cat in cats if cat["supercategory"] in supercategory_names]
)
cats = cats if len(category_ids) == 0 else [cat for cat in cats if cat["id"] in category_ids]
ids = [cat["id"] for cat in cats]
return ids
def get_image_ids(
self, img_ids: Optional[Union[int, Sequence[int]]] = None, cat_ids: Optional[Union[int, Sequence[int]]] = None
) -> Sequence[int]:
"""
Get img ids that satisfy given filter conditions.
:param img_ids: get imgs for given ids
:param cat_ids: get imgs with all given cats
:return: ids: integer array of img ids
"""
if img_ids is None:
img_ids = []
if cat_ids is None:
cat_ids = []
img_ids = [img_ids] if isinstance(img_ids, int) else img_ids
cat_ids = [cat_ids] if isinstance(cat_ids, int) else cat_ids
if len(img_ids) == len(cat_ids) == 0:
ids = set(self.imgs.keys())
else:
ids = set(img_ids)
for i, cat_id in enumerate(cat_ids):
if i == 0 and len(ids) == 0:
ids = set(self.cat_to_imgs[cat_id])
else:
ids &= set(self.cat_to_imgs[cat_id])
return list(ids)
def load_anns(self, ids: Optional[Union[int, Sequence[int]]] = None) -> List[JsonDict]:
"""
Load anns with the specified ids.
:param ids: integer ids specifying anns
:return: anns: loaded ann objects
"""
if ids is None:
ids = []
ids = [ids] if isinstance(ids, int) else ids
return [self.anns[id] for id in ids]
def load_cats(self, ids: Optional[Union[int, Sequence[int]]] = None) -> List[JsonDict]:
"""
Load cats with the specified ids.
:param ids: integer ids specifying cats
:return: cats: loaded cat objects
"""
if ids is None:
ids = []
ids = [ids] if isinstance(ids, int) else ids
return [self.cats[idx] for idx in ids]
def load_imgs(self, ids: Optional[Union[int, Sequence[int]]] = None) -> List[JsonDict]:
"""
Load imgs with the specified ids.
:param ids: integer ids specifying img
:return: imgs: loaded img objects
"""
if ids is None:
ids = []
ids = [ids] if isinstance(ids, int) else ids
return [self.imgs[idx] for idx in ids]
class SerializerCoco:
"""
Class for serializing annotation files in Coco format. Coco comes in JSON format which is a priori not
serialized. This class implements only the very basic methods to generate a dataflow. It wraps the coco class
from pycocotools and assembles annotations that belong to the image. Note, that the conversion into the core
:class:`Image` has to be done by yourself.
"""
@staticmethod
def load(path: Pathlike, max_datapoints: Optional[int] = None) -> DataFlow:
"""
Loads a .json file and generates a dataflow.
**Example:**
.. code-block:: python
{'images':[img1,img2,...], 'annotations':[ann1,ann2,...],...}
it will generate a dataflow with datapoints
.. code-block:: python
{'image':{'id',...},'annotations':[{'id':...,'bbox':...}]}
for each single image id.
:param max_datapoints: Will stop the iteration once max_datapoints have been streamed.
:param path: a path to a .json file.
:return: dataflow to iterate from
"""
assert os.path.isfile(path), path
file = os.path.split(path)[1]
assert is_file_extension(file, ".json"), file
with timed_operation("Start loading .json file and serializing"):
coco = CocoParser(path)
img_ids = coco.get_image_ids()
imgs = coco.load_imgs(img_ids)
with get_tqdm(total=len(imgs)) as status_bar:
for img in imgs:
img["annotations"] = coco.img_to_anns[img["id"]]
status_bar.update()
df = CustomDataFromList(imgs, max_datapoints=max_datapoints)
return df
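    # Usage sketch (hypothetical annotation file; the datapoint keys follow the usual COCO image
    # fields plus the "annotations" list assembled above):
    #
    #   df = SerializerCoco.load("instances_val.json", max_datapoints=10)
    #   df.reset_state()
    #   for dp in df:
    #       image_id, anns = dp["id"], dp["annotations"]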
@staticmethod
def save() -> None:
"""
Not implemented
"""
raise NotImplementedError
class SerializerPdfDoc:
"""
Serialize a pdf document with an arbitrary number of pages.
**Example:**
.. code-block:: python
df = SerializerPdfDoc.load("path/to/document.pdf")
will yield datapoints:
.. code-block:: python
{"path": "path/to/document.pdf", "file_name" document_page_1.pdf, "pdf_bytes": b"some-bytes"}
"""
@staticmethod
def load(path: Pathlike, max_datapoints: Optional[int] = None) -> DataFlow:
"""
Loads the document page wise and returns a dataflow accordingly.
:param path: Path to the pdf document.
:param max_datapoints: The maximum number of pages to stream.
:return: A dataflow yielding dicts with structure {"path": ..., "file_name": ..., "pdf_bytes": ...}. The file name is a
concatenation of the physical file name and the current page number.
"""
file_name = os.path.split(path)[1]
prefix, suffix = os.path.splitext(file_name)
df = CustomDataFromIterable(PDFStreamer(path=path), max_datapoints=max_datapoints)
df = MapData(df, lambda dp: {"path": path, "file_name": prefix + f"_{dp[1]}" + suffix, "pdf_bytes": dp[0]})
return df
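    # Usage sketch (hypothetical document; the page index appended to the file name comes from
    # the PDFStreamer tuple as shown above):
    #
    #   df = SerializerPdfDoc.load("report.pdf", max_datapoints=3)
    #   df.reset_state()
    #   for dp in df:
    #       raw_page_bytes = dp["pdf_bytes"]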
@staticmethod
def save(path: Pathlike) -> None:
"""
Not implemented
"""
raise NotImplementedError
@staticmethod
def split(path: Pathlike, path_target: Optional[Pathlike] = None, max_datapoint: Optional[int] = None) -> None:
"""
Split a Document into single pages.
"""
if path_target is None:
path_target, _ = os.path.split(path)
assert os.path.isdir(path_target), f"not a dir {path_target}"
df = SerializerPdfDoc.load(path, max_datapoint)
for dp in df:
with open(os.path.join(path_target, dp["file_name"]), "wb") as page:
page.write(dp["pdf_bytes"])
| 36.376494
| 120
| 0.606758
|
e4bd45795ace0530d55af6d5aa40c79351a51967
| 9,872
|
py
|
Python
|
train_naive.py
|
simonwey/DecoupleNet
|
3e9e09d512230cb0d95e9db98c5838ca9ff799da
|
[
"Apache-2.0"
] | null | null | null |
train_naive.py
|
simonwey/DecoupleNet
|
3e9e09d512230cb0d95e9db98c5838ca9ff799da
|
[
"Apache-2.0"
] | null | null | null |
train_naive.py
|
simonwey/DecoupleNet
|
3e9e09d512230cb0d95e9db98c5838ca9ff799da
|
[
"Apache-2.0"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys, os
DIR = os.path.abspath(__file__) # path of the current script
n = 3 # how many directory levels above the current script to add to sys.path
for i in range(n): # 1st pass: parent dir; 2nd pass: parent of parent; 3rd pass: parent of parent of parent
DIR = os.path.dirname(DIR)
sys.path.append(DIR)
import argparse
import os
import pprint
import shutil
import time
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import _init_paths
from lib.config import cfg
from lib.config import update_config
from lib.core.loss import JointsMSELoss
from lib.core.train import train_dcp_naive
from lib.core.validate import validate_dcp_naive
import lib.dataset
import lib.models
from lib.utils.utils import get_optimizer
from lib.utils.utils import save_checkpoint
from lib.utils.utils import create_logger
from lib.utils.utils import get_dcp_cnn_model_summary
from lib.utils.utils import set_seed
# --------------------------------------------------------------------------------
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg',
help='experiment configure file name',
default='experiments/crowdpose/hrnet/w32_256x192-decouple-naive.yaml',
type=str)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
# philly
parser.add_argument('--modelDir',
help='model directory',
type=str,
default='')
parser.add_argument('--logDir',
help='log directory',
type=str,
default='')
parser.add_argument('--dataDir',
help='data directory',
type=str,
default='')
parser.add_argument('--prevModelDir',
help='prev Model directory',
type=str,
default='')
parser.add_argument('--local_rank',
type=int,
default=0)
parser.add_argument('--exp_id',
type=str,
default='Train_Dcp_Naive-2')
args = parser.parse_args()
return args
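# Usage sketch (illustrative command line; the --cfg default shown above is used when omitted,
# and trailing KEY VALUE pairs are collected in the `opts` remainder):
#
#   python train_naive.py --cfg experiments/crowdpose/hrnet/w32_256x192-decouple-naive.yaml \
#       --exp_id Train_Dcp_Naive-2 TRAIN.END_EPOCH 10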
# --------------------------------------------------------------------------------
def main():
args = parse_args()
update_config(cfg, args)
# close debug
cfg.defrost()
cfg.DEBUG.DEBUG = False
cfg.freeze()
logger, final_output_dir, tb_log_dir = create_logger(
cfg, args.cfg, 'train')
logger.info(pprint.pformat(args))
logger.info(cfg)
dist = False
# if len(cfg.GPUS) > 1:
# dist = True
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# distributed training, step 1)
if dist:
torch.distributed.init_process_group('nccl', init_method='env://')
set_seed(seed_id=0)
# GPU list: allocate by available card count | otherwise use the GPUs specified in the config
if cfg.ENV == 2:
cfg.defrost()
cfg.GPUS = list(range(torch.cuda.device_count()))
cfg.freeze()
device = cfg.GPUS[args.local_rank]
torch.cuda.set_device(device)
print("=> on GPU{}".format(device))
model = eval('lib.models.'+cfg.MODEL.NAME+'.get_pose_net')(cfg, is_train=True)
writer_dict = None
if cfg.LOG:
writer_dict = {
'writer': SummaryWriter(log_dir=tb_log_dir),
'train_global_steps': 0,
'valid_global_steps': 0,
}
dump_input = torch.rand((1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))
### this is used to visualize the network
### throws an assertion error on cube3, works well on bheem
# writer_dict['writer'].add_graph(model, (dump_input, ))
logger.info(get_dcp_cnn_model_summary(model, dump_input))
model = model.cuda()
if dist:
model = torch.nn.parallel.DistributedDataParallel(model)
else:
model = torch.nn.DataParallel(model, device_ids=cfg.GPUS)
# ------------------------------------------
# define loss function (criterion) and optimizer
criterion = JointsMSELoss(use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()
# Data loading code
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_dataset = eval('lib.dataset.'+cfg.DATASET.TRAIN_DATASET)(
cfg=cfg, image_dir=cfg.DATASET.TRAIN_IMAGE_DIR, annotation_file=cfg.DATASET.TRAIN_ANNOTATION_FILE,
dataset_type=cfg.DATASET.TRAIN_DATASET_TYPE,
image_set=cfg.DATASET.TRAIN_SET, is_train=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])
)
valid_dataset = eval('lib.dataset.'+cfg.DATASET.TEST_DATASET)(
cfg=cfg, image_dir=cfg.DATASET.TEST_IMAGE_DIR, annotation_file=cfg.DATASET.TEST_ANNOTATION_FILE,
dataset_type=cfg.DATASET.TEST_DATASET_TYPE,
image_set=cfg.DATASET.TEST_SET, is_train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])
)
train_sampler = None
if dist:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
shuffle=cfg.TRAIN.SHUFFLE and train_sampler is None,
num_workers=cfg.WORKERS,
pin_memory=cfg.PIN_MEMORY,
sampler=train_sampler
)
val_sampler = None
if dist:
val_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
shuffle=False,
num_workers=cfg.WORKERS,
pin_memory=cfg.PIN_MEMORY,
sampler=val_sampler
)
# # # # # ---------------------------------------------
best_perf = 0.0
last_epoch = -1
perf_indicator = 0.0
save_freq = cfg.EPOCH_EVAL_FREQ // 5 + 1
is_best = True
optimizer = get_optimizer(cfg, model)
begin_epoch = cfg.TRAIN.BEGIN_EPOCH
checkpoint_file = os.path.join(
final_output_dir, 'checkpoint_resume.pth'
)
# # # # ------------------------------------------------
logger.info('=> updated lr schedule is {}'.format(cfg.TRAIN.LR_STEP))
# automatically resume training from the latest checkpoint
if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
logger.info("[AUTO_RESUME] ======> loading checkpoint '{}'".format(checkpoint_file))
checkpoint = torch.load(checkpoint_file, map_location='cpu')
begin_epoch = checkpoint['epoch']
best_perf = checkpoint['perf']
last_epoch = begin_epoch - 1
model.module.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger.info("=> loaded checkpoint '{}' (epoch {})".format(
checkpoint_file, checkpoint['epoch']))
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR,
last_epoch=last_epoch
)
# ----------------------------------------------------
criterion = criterion.cuda()
for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
# # # train for one epoch
if cfg.LOG:
logger.info('====== training: lr={}, {} th epoch ======'
.format(optimizer.state_dict()['param_groups'][0]['lr'], epoch))
train_dcp_naive(cfg, train_loader, model, criterion, optimizer, writer_dict)
lr_scheduler.step()
if epoch % cfg.EPOCH_EVAL_FREQ == 0 or epoch > (cfg.TRAIN.END_EPOCH - 15):
perf_indicator = validate_dcp_naive(cfg, valid_loader, valid_dataset, model,
final_output_dir, writer_dict, epoch=epoch, lambda_vals=[0, 1], log=logger)
if perf_indicator >= best_perf:
best_perf = perf_indicator
is_best = True
else:
is_best = False
if cfg.LOG and (epoch % save_freq == 0 or epoch > (cfg.TRAIN.END_EPOCH - 15)):
logger.info('=> model AP: {} | saving checkpoint to {}'.format(perf_indicator, final_output_dir))
save_checkpoint({
'epoch': epoch + 1,
'model': cfg.MODEL.NAME,
'state_dict': model.module.state_dict(),
'perf': perf_indicator,
'optimizer': optimizer.state_dict(),
}, is_best, final_output_dir, filename='checkpoint_{}.pth'.format(epoch + 1))
# # ----------------------------------------------
if cfg.LOG:
final_model_state_file = os.path.join(
final_output_dir, 'final_state.pth'
)
logger.info('=> saving final model state to {}'.format(
final_model_state_file)
)
torch.save(model.module.state_dict(), final_model_state_file) # .module
writer_dict['writer'].close()
# --------------------------------------------------------------------------------
if __name__ == '__main__':
main()
| 35.007092
| 109
| 0.5859
|
354651db8dad6e93e0eea22256455bb4105e04fa
| 1,321
|
py
|
Python
|
sunbear/version.py
|
OxfordHED/sunbear
|
9c7f368c4086f69868e7e5d87ea0b40700610e19
|
[
"BSD-3-Clause"
] | null | null | null |
sunbear/version.py
|
OxfordHED/sunbear
|
9c7f368c4086f69868e7e5d87ea0b40700610e19
|
[
"BSD-3-Clause"
] | null | null | null |
sunbear/version.py
|
OxfordHED/sunbear
|
9c7f368c4086f69868e7e5d87ea0b40700610e19
|
[
"BSD-3-Clause"
] | 1
|
2020-08-11T12:40:57.000Z
|
2020-08-11T12:40:57.000Z
|
import os
import subprocess as sp
MAJOR = 0
MINOR = 1
MICRO = 0
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
# taken from numpy/numpy
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = sp.Popen(cmd, stdout=sp.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def _get_git_version():
cwd = os.getcwd()
# go to the main directory
fdir = os.path.dirname(os.path.abspath(__file__))
maindir = os.path.join(fdir, "..")
os.chdir(maindir)
# get git version
res = git_version()
# restore the cwd
os.chdir(cwd)
return res
def get_version():
if ISRELEASED:
return VERSION
# unreleased version
GIT_REVISION = _get_git_version()
return VERSION + ".dev0+" + GIT_REVISION[:7]
| 22.775862
| 69
| 0.583649
|
2dce69d7c75b6029b5863c97e83f1509d2ca1da8
| 2,815
|
py
|
Python
|
pyccolo/emit_event.py
|
smacke/pyccolo
|
58a6ea2b57143fcba37d1f52f7fa4d29c712f336
|
[
"BSD-3-Clause"
] | 65
|
2022-01-20T09:37:06.000Z
|
2022-03-08T23:56:36.000Z
|
pyccolo/emit_event.py
|
smacke/pyccolo
|
58a6ea2b57143fcba37d1f52f7fa4d29c712f336
|
[
"BSD-3-Clause"
] | null | null | null |
pyccolo/emit_event.py
|
smacke/pyccolo
|
58a6ea2b57143fcba37d1f52f7fa4d29c712f336
|
[
"BSD-3-Clause"
] | 3
|
2022-01-21T18:16:31.000Z
|
2022-01-27T09:19:18.000Z
|
# -*- coding: utf-8 -*-
import logging
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING, List
from pyccolo.trace_events import BEFORE_EXPR_EVENTS
if TYPE_CHECKING:
from pyccolo.tracer import BaseTracer
logger = logging.getLogger(__name__)
_BEFORE_EXPR_EVENT_NAMES = {evt.value for evt in BEFORE_EXPR_EVENTS}
_TRACER_STACK: "List[BaseTracer]" = []
_allow_event_handling = True
_allow_reentrant_event_handling = False
@contextmanager
def allow_reentrant_event_handling():
global _allow_reentrant_event_handling
orig_allow_reentrant_handling = _allow_reentrant_event_handling
_allow_reentrant_event_handling = True
try:
yield
finally:
_allow_reentrant_event_handling = orig_allow_reentrant_handling
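# Usage sketch (hypothetical handler body; shown as a comment because this module must not run
# tracer code at import time):
#
#   with allow_reentrant_event_handling():
#       ...  # instrumented code run here may emit events even though a handler is already active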
def _make_ret(event, ret):
if event in _BEFORE_EXPR_EVENT_NAMES and not callable(ret):
return lambda *_: ret
else:
return ret
SkipAll = object()
def _emit_tracer_loop(
event,
node_id,
frame,
kwargs,
):
global _allow_reentrant_event_handling
global _allow_event_handling
orig_allow_reentrant_event_handling = _allow_reentrant_event_handling
is_reentrant = not _allow_event_handling
reentrant_handlers_only = is_reentrant and not _allow_reentrant_event_handling
_allow_event_handling = False
for tracer in _TRACER_STACK:
_allow_reentrant_event_handling = False
if not tracer._file_passes_filter_impl(
event, frame.f_code.co_filename, is_reentrant=is_reentrant
):
continue
_allow_reentrant_event_handling = orig_allow_reentrant_event_handling
new_ret = tracer._emit_event(
event,
node_id,
frame,
reentrant_handlers_only=reentrant_handlers_only,
**kwargs,
)
if isinstance(new_ret, tuple) and len(new_ret) > 1 and new_ret[0] is SkipAll:
kwargs["ret"] = new_ret[1]
break
else:
kwargs["ret"] = new_ret
def _emit_event(event, node_id, **kwargs):
global _allow_event_handling
global _allow_reentrant_event_handling
frame = sys._getframe().f_back
if frame.f_code.co_filename == __file__:
# weird shit happens if we instrument this file, so exclude it.
return _make_ret(event, kwargs.get("ret", None))
orig_allow_event_handling = _allow_event_handling
orig_allow_reentrant_event_handling = _allow_reentrant_event_handling
try:
_emit_tracer_loop(
event,
node_id,
frame,
kwargs,
)
finally:
_allow_event_handling = orig_allow_event_handling
_allow_reentrant_event_handling = orig_allow_reentrant_event_handling
return _make_ret(event, kwargs.get("ret", None))
| 29.322917
| 85
| 0.71119
|
5aeafeb48fc5022b542b81873dc89f8e309bf017
| 1,850
|
py
|
Python
|
chiadoge/types/unfinished_block.py
|
Jsewill/chiadogecoin
|
55511228301a0b4d00c8f4da270be8b434777470
|
[
"Apache-2.0"
] | 2
|
2021-07-05T14:34:35.000Z
|
2022-01-01T21:27:52.000Z
|
chiadoge/types/unfinished_block.py
|
Jsewill/chiadogecoin
|
55511228301a0b4d00c8f4da270be8b434777470
|
[
"Apache-2.0"
] | null | null | null |
chiadoge/types/unfinished_block.py
|
Jsewill/chiadogecoin
|
55511228301a0b4d00c8f4da270be8b434777470
|
[
"Apache-2.0"
] | 1
|
2021-07-07T11:08:36.000Z
|
2021-07-07T11:08:36.000Z
|
from dataclasses import dataclass
from typing import List, Optional
from chiadoge.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock, TransactionsInfo
from chiadoge.types.blockchain_format.program import SerializedProgram
from chiadoge.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from chiadoge.types.blockchain_format.vdf import VDFProof
from chiadoge.types.end_of_slot_bundle import EndOfSubSlotBundle
from chiadoge.util.ints import uint32
from chiadoge.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class UnfinishedBlock(Streamable):
# Full block, without the final VDFs
finished_sub_slots: List[EndOfSubSlotBundle] # If first sb
reward_chain_block: RewardChainBlockUnfinished # Reward chain trunk data
challenge_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
reward_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
foliage: Foliage # Reward chain foliage data
foliage_transaction_block: Optional[FoliageTransactionBlock] # Reward chain foliage data (tx block)
transactions_info: Optional[TransactionsInfo] # Reward chain foliage data (tx block additional)
transactions_generator: Optional[SerializedProgram] # Program that generates transactions
transactions_generator_ref_list: List[
uint32
] # List of block heights of previous generators referenced in this block
@property
def prev_header_hash(self):
return self.foliage.prev_block_hash
@property
def partial_hash(self):
return self.reward_chain_block.get_hash()
def is_transaction_block(self) -> bool:
return self.foliage.foliage_transaction_block_hash is not None
@property
def total_iters(self):
return self.reward_chain_block.total_iters
| 43.023256
| 104
| 0.794595
|
b559557340b9855ba7a637f101f096f514aa5d3a
| 6,811
|
py
|
Python
|
var/spack/repos/builtin/packages/geant4/package.py
|
wjge/spack
|
6d9f0262de6269b895673315a1c36c4f6888419e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/geant4/package.py
|
wjge/spack
|
6d9f0262de6269b895673315a1c36c4f6888419e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-10-19T02:33:37.000Z
|
2020-10-19T02:33:37.000Z
|
var/spack/repos/builtin/packages/geant4/package.py
|
wjge/spack
|
6d9f0262de6269b895673315a1c36c4f6888419e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Geant4(CMakePackage):
"""Geant4 is a toolkit for the simulation of the passage of particles
through matter. Its areas of application include high energy, nuclear
and accelerator physics, as well as studies in medical and space
science."""
homepage = "http://geant4.cern.ch/"
url = "https://gitlab.cern.ch/geant4/geant4/-/archive/v10.6.0/geant4-v10.6.0.tar.gz"
maintainers = ['drbenmorgan']
version('10.6.2', sha256='e381e04c02aeade1ed8cdd9fdbe7dcf5d6f0f9b3837a417976b839318a005dbd')
version('10.6.1', sha256='4fd64149ae26952672a81ce5579d3806fda4bd251d486897093ac57633a42b7e')
version('10.6.0', sha256='eebe6a170546064ff81ab3b00f513ccd1d4122a026514982368d503ac55a4ee4')
version('10.5.1', sha256='2397eb859dc4de095ff66059d8bda9f060fdc42e10469dd7890946293eeb0e39')
version('10.4.3', sha256='67f3bb6405a2c77e573936c2b933f5a4a33915aa379626a2eb3012009b91e1da')
version('10.4.0', sha256='e919b9b0a88476e00c0b18ab65d40e6a714b55ee4778f66bac32a5396c22aa74')
version('10.3.3', sha256='bcd36a453da44de9368d1d61b0144031a58e4b43a6d2d875e19085f2700a89d8')
_cxxstd_values = ('11', '14', '17')
variant('cxxstd',
default=_cxxstd_values[0],
values=_cxxstd_values,
multi=False,
description='Use the specified C++ standard when building.')
variant('threads', default=True, description='Build with multithreading')
variant('vecgeom', default=False, description='Enable vecgeom support')
variant('opengl', default=False, description='Optional OpenGL support')
variant('x11', default=False, description='Optional X11 support')
variant('motif', default=False, description='Optional motif support')
variant('qt', default=False, description='Enable Qt support')
variant('python', default=False, description='Enable Python bindings')
depends_on('cmake@3.5:', type='build')
depends_on('cmake@3.8:', type='build', when='@10.6.0:')
depends_on('geant4-data@10.6.2', when='@10.6.2')
depends_on('geant4-data@10.6.1', when='@10.6.1')
depends_on('geant4-data@10.6.0', when='@10.6.0')
depends_on('geant4-data@10.5.1', when='@10.5.1')
depends_on('geant4-data@10.4.3', when='@10.4.3')
depends_on('geant4-data@10.4.0', when='@10.4.0')
depends_on('geant4-data@10.3.3', when='@10.3.3')
depends_on("expat")
depends_on("zlib")
# Python, with boost requirement dealt with in cxxstd section
depends_on('python@3:', when='+python')
extends('python', when='+python')
conflicts('+python', when='@:10.6.1',
msg='Geant4 <= 10.6.1 cannot be built with Python bindings')
for std in _cxxstd_values:
# CLHEP version requirements to be reviewed
depends_on('clhep@2.3.3.0: cxxstd=' + std,
when='@10.3.3: cxxstd=' + std)
# Spack only supports Xerces-c 3 and above, so no version req
depends_on('xerces-c cxxstd=' + std, when='cxxstd=' + std)
# Vecgeom specific versions for each Geant4 version
depends_on('vecgeom@1.1.5 cxxstd=' + std,
when='@10.6.0:10.6.99 +vecgeom cxxstd=' + std)
depends_on('vecgeom@1.1.0 cxxstd=' + std,
when='@10.5.0:10.5.99 +vecgeom cxxstd=' + std)
depends_on('vecgeom@0.5.2 cxxstd=' + std,
when='@10.4.0:10.4.99 +vecgeom cxxstd=' + std)
depends_on('vecgeom@0.3rc cxxstd=' + std,
when='@10.3.0:10.3.99 +vecgeom cxxstd=' + std)
# Boost.python, conflict handled earlier
depends_on('boost@1.70: +python cxxstd=' + std,
when='+python cxxstd=' + std)
# Visualization driver dependencies
depends_on("gl", when='+opengl')
depends_on("glx", when='+opengl+x11')
depends_on("libx11", when='+x11')
depends_on("libxmu", when='+x11')
depends_on("motif", when='+motif')
depends_on("qt@5:", when="+qt")
# As released, 10.03.03 has issues with respect to using external
# CLHEP.
patch('CLHEP-10.03.03.patch', level=1, when='@10.3.3')
# These patches can be applied independent of the cxxstd value?
patch('cxx17.patch', when='@:10.3.99 cxxstd=17')
patch('cxx17_geant4_10_0.patch', level=1, when='@10.4.0 cxxstd=17')
patch('geant4-10.4.3-cxx17-removed-features.patch',
level=1, when='@10.4.3 cxxstd=17')
def cmake_args(self):
spec = self.spec
# Core options
options = [
'-DGEANT4_BUILD_CXXSTD=c++{0}'.format(
self.spec.variants['cxxstd'].value),
'-DGEANT4_USE_SYSTEM_CLHEP=ON',
'-DGEANT4_USE_SYSTEM_EXPAT=ON',
'-DGEANT4_USE_SYSTEM_ZLIB=ON',
'-DGEANT4_USE_G3TOG4=ON',
'-DGEANT4_USE_GDML=ON',
'-DXERCESC_ROOT_DIR={0}'.format(spec['xerces-c'].prefix)
]
# Multithreading
options.append(self.define_from_variant('GEANT4_BUILD_MULTITHREADED',
'threads'))
if '+threads' in spec:
# Locked at global-dynamic to allow use cases that load the
# geant4 libs at application runtime
options.append('-DGEANT4_BUILD_TLS_MODEL=global-dynamic')
# install the data with geant4
datadir = spec['geant4-data'].prefix.share
dataver = '{0}-{1}'.format(spec['geant4-data'].name,
spec['geant4-data'].version.dotted)
datapath = join_path(datadir, dataver)
options.append('-DGEANT4_INSTALL_DATADIR={0}'.format(datapath))
# Vecgeom
if '+vecgeom' in spec:
options.append('-DGEANT4_USE_USOLIDS=ON')
options.append('-DUSolids_DIR=%s' % spec[
'vecgeom'].prefix.lib.CMake.USolids)
# Visualization options
if 'platform=darwin' not in spec:
if "+x11" in spec and "+opengl" in spec:
options.append('-DGEANT4_USE_OPENGL_X11=ON')
if "+motif" in spec and "+opengl" in spec:
options.append('-DGEANT4_USE_XM=ON')
if "+x11" in spec:
options.append('-DGEANT4_USE_RAYTRACER_X11=ON')
if '+qt' in spec:
options.append('-DGEANT4_USE_QT=ON')
options.append(
'-DQT_QMAKE_EXECUTABLE=%s' %
spec['qt'].prefix.bin.qmake)
# Python
if spec.version > Version('10.6.1'):
options.append(self.define_from_variant('GEANT4_USE_PYTHON',
'python'))
return options
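    # Usage sketch (illustrative Spack spec exercising the variants declared above; exact
    # availability depends on the installed compilers and dependencies):
    #
    #   spack install geant4@10.6.2 cxxstd=17 +opengl +x11 +qt ~vecgeom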
| 42.836478
| 96
| 0.622669
|
7ae1a6b08699d0aa6216ad0a92817646a2528bca
| 21,112
|
py
|
Python
|
awkward/generate.py
|
nsmith-/awkward-array
|
5ee75c2b59049a714e8c7bf3ac2bc0414268abff
|
[
"BSD-3-Clause"
] | null | null | null |
awkward/generate.py
|
nsmith-/awkward-array
|
5ee75c2b59049a714e8c7bf3ac2bc0414268abff
|
[
"BSD-3-Clause"
] | null | null | null |
awkward/generate.py
|
nsmith-/awkward-array
|
5ee75c2b59049a714e8c7bf3ac2bc0414268abff
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import collections
import numbers
import numpy
import awkward.array.base
import awkward.util
from awkward.array.chunked import ChunkedArray, AppendableArray
from awkward.array.jagged import JaggedArray
from awkward.array.masked import BitMaskedArray, IndexedMaskedArray
from awkward.array.objects import ObjectArray
from awkward.array.table import Table
from awkward.array.union import UnionArray
# FIXME: the following must be totally broken from upstream changes
def fromiter(iterable, chunksize=1024, maskmissing=True, references=False):
if references:
raise NotImplementedError # keep all ids in a hashtable to create pointers (IndexedArray)
tobytes = lambda x: x.tobytes()
tostring = lambda x: codecs.utf_8_decode(x.tobytes())[0]
def insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj):
if len(chunks) == 0 or offsets[-1] - offsets[-2] == len(chunks[-1]):
chunks.append(newchunk(obj))
offsets.append(offsets[-1])
if ismine(obj, chunks[-1]):
chunks[-1] = promote(obj, chunks[-1])
fillobj(obj, chunks[-1], offsets[-1] - offsets[-2])
offsets[-1] += 1
elif isinstance(chunks[-1], IndexedMaskedArray) and len(chunks[-1]._content) == 0:
chunks[-1]._content = newchunk(obj)
nextindex = chunks[-1]._nextindex
chunks[-1]._nextindex += 1
chunks[-1]._index[offsets[-1] - offsets[-2]] = nextindex
chunks[-1]._content = promote(obj, chunks[-1]._content)
fillobj(obj, chunks[-1]._content, nextindex)
offsets[-1] += 1
elif isinstance(chunks[-1], IndexedMaskedArray) and ismine(obj, chunks[-1]._content):
nextindex = chunks[-1]._nextindex
chunks[-1]._nextindex += 1
chunks[-1]._index[offsets[-1] - offsets[-2]] = nextindex
chunks[-1]._content = promote(obj, chunks[-1]._content)
fillobj(obj, chunks[-1]._content, nextindex)
offsets[-1] += 1
elif isinstance(chunks[-1], UnionArray) and any(isinstance(content, IndexedMaskedArray) and ismine(obj, content._content) for content in chunks[-1]._contents):
for tag in range(len(chunks[-1]._contents)):
if isinstance(chunks[-1]._contents[tag], IndexedMaskedArray) and ismine(obj, chunks[-1]._contents[tag]._content):
nextindex_union = chunks[-1]._nextindex[tag]
chunks[-1]._nextindex[tag] += 1
nextindex_mask = chunks[-1]._contents[tag]._nextindex
chunks[-1]._contents[tag]._nextindex += 1
chunks[-1]._contents[tag]._index[nextindex_union] = nextindex_mask
chunks[-1]._contents[tag]._content = promote(obj, chunks[-1]._contents[tag]._content)
fillobj(obj, chunks[-1]._contents[tag]._content, nextindex_mask)
chunks[-1]._tags[offsets[-1] - offsets[-2]] = tag
chunks[-1]._index[offsets[-1] - offsets[-2]] = nextindex_union
offsets[-1] += 1
break
else:
if not isinstance(chunks[-1], UnionArray):
chunks[-1] = UnionArray(numpy.empty(chunksize, dtype=awkward.array.base.AwkwardArray.INDEXTYPE),
numpy.empty(chunksize, dtype=awkward.array.base.AwkwardArray.INDEXTYPE),
[chunks[-1]])
chunks[-1]._nextindex = [offsets[-1] - offsets[-2]]
chunks[-1]._tags[: offsets[-1] - offsets[-2]] = 0
chunks[-1]._index[: offsets[-1] - offsets[-2]] = numpy.arange(offsets[-1] - offsets[-2], dtype=awkward.array.base.AwkwardArray.INDEXTYPE)
chunks[-1]._contents = list(chunks[-1]._contents)
if not any(ismine(obj, content) for content in chunks[-1]._contents):
chunks[-1]._nextindex.append(0)
chunks[-1]._contents.append(newchunk(obj))
for tag in range(len(chunks[-1]._contents)):
if ismine(obj, chunks[-1]._contents[tag]):
nextindex = chunks[-1]._nextindex[tag]
chunks[-1]._nextindex[tag] += 1
chunks[-1]._contents[tag] = promote(obj, chunks[-1]._contents[tag])
fillobj(obj, chunks[-1]._contents[tag], nextindex)
chunks[-1]._tags[offsets[-1] - offsets[-2]] = tag
chunks[-1]._index[offsets[-1] - offsets[-2]] = nextindex
offsets[-1] += 1
break
def fill(obj, chunks, offsets):
if obj is None:
# anything with None -> IndexedMaskedArray
if len(chunks) == 0 or offsets[-1] - offsets[-2] == len(chunks[-1]):
chunks.append(IndexedMaskedArray(numpy.empty(chunksize, dtype=awkward.array.base.AwkwardArray.INDEXTYPE), []))
chunks[-1]._nextindex = 0
offsets.append(offsets[-1])
if isinstance(chunks[-1], UnionArray) and any(isinstance(content, IndexedMaskedArray) for content in chunks[-1]._contents):
for tag in range(len(chunks[-1]._contents)):
if isinstance(chunks[-1]._contents[tag], IndexedMaskedArray):
nextindex = chunks[-1]._nextindex[tag]
chunks[-1]._nextindex[tag] += 1
chunks[-1]._contents[tag]._index[nextindex] = chunks[-1]._contents[tag]._maskedwhen
chunks[-1]._tags[offsets[-1] - offsets[-2]] = tag
chunks[-1]._index[offsets[-1] - offsets[-2]] = nextindex
offsets[-1] += 1
break
else:
if not isinstance(chunks[-1], IndexedMaskedArray):
chunks[-1] = IndexedMaskedArray(numpy.empty(chunksize, dtype=awkward.array.base.AwkwardArray.INDEXTYPE), chunks[-1])
chunks[-1]._index[: offsets[-1] - offsets[-2]] = numpy.arange(offsets[-1] - offsets[-2], dtype=awkward.array.base.AwkwardArray.INDEXTYPE)
chunks[-1]._nextindex = offsets[-1] - offsets[-2]
chunks[-1]._index[offsets[-1] - offsets[-2]] = chunks[-1]._maskedwhen
offsets[-1] += 1
elif isinstance(obj, (bool, numpy.bool, numpy.bool_)):
# bool -> Numpy bool_
def newchunk(obj):
return numpy.empty(chunksize, dtype=numpy.bool_)
def ismine(obj, x):
return isinstance(x, numpy.ndarray) and x.dtype == numpy.dtype(numpy.bool_)
def promote(obj, x):
return x
def fillobj(obj, array, where):
array[where] = obj
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
elif isinstance(obj, (numbers.Integral, numpy.integer)):
# int -> Numpy int64, float64, or complex128 (promotes to largest)
def newchunk(obj):
return numpy.empty(chunksize, dtype=numpy.int64)
def ismine(obj, x):
return isinstance(x, numpy.ndarray) and issubclass(x.dtype.type, numpy.number)
def promote(obj, x):
return x
def fillobj(obj, array, where):
array[where] = obj
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
elif isinstance(obj, (numbers.Real, numpy.floating)):
# float -> Numpy int64, float64, or complex128 (promotes to largest)
def newchunk(obj):
return numpy.empty(chunksize, dtype=numpy.int64)
def ismine(obj, x):
return isinstance(x, numpy.ndarray) and issubclass(x.dtype.type, numpy.number)
def promote(obj, x):
if issubclass(x.dtype.type, numpy.floating):
return x
else:
return x.astype(numpy.float64)
def fillobj(obj, array, where):
array[where] = obj
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
elif isinstance(obj, (numbers.Complex, numpy.complex, numpy.complexfloating)):
# complex -> Numpy int64, float64, or complex128 (promotes to largest)
def newchunk(obj):
return numpy.empty(chunksize, dtype=numpy.complex128)
def ismine(obj, x):
return isinstance(x, numpy.ndarray) and issubclass(x.dtype.type, numpy.number)
def promote(obj, x):
if issubclass(x.dtype.type, numpy.complexfloating):
return x
else:
return x.astype(numpy.complex128)
def fillobj(obj, array, where):
array[where] = obj
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
elif isinstance(obj, bytes):
# bytes -> ObjectArray of JaggedArray
def newchunk(obj):
out = ObjectArray(tobytes, JaggedArray.fromoffsets(
numpy.zeros(chunksize + 1, dtype=awkward.array.base.AwkwardArray.INDEXTYPE),
AppendableArray.empty(lambda: numpy.empty(chunksize, dtype=awkward.array.base.AwkwardArray.CHARTYPE))))
out._content._starts[0] = 0
return out
def ismine(obj, x):
return isinstance(x, ObjectArray) and (x._generator is tobytes or x._generator is tostring)
def promote(obj, x):
return x
def fillobj(obj, array, where):
array._content._stops[where] = array._content._starts[where] + len(obj)
array._content._content.extend(numpy.fromstring(obj, dtype=awkward.array.base.AwkwardArray.CHARTYPE))
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
elif isinstance(obj, awkward.util.string):
# str -> ObjectArray of JaggedArray
def newchunk(obj):
out = ObjectArray(tostring, JaggedArray.fromoffsets(
numpy.zeros(chunksize + 1, dtype=awkward.array.base.AwkwardArray.INDEXTYPE),
AppendableArray.empty(lambda: numpy.empty(chunksize, dtype=awkward.array.base.AwkwardArray.CHARTYPE))))
out._content._starts[0] = 0
return out
def ismine(obj, x):
return isinstance(x, ObjectArray) and (x._generator is tobytes or x._generator is tostring)
def promote(obj, x):
if x._generator is tostring:
return x
else:
return ObjectArray(tostring, x._content)
def fillobj(obj, array, where):
bytes = codecs.utf_8_encode(obj)[0]
array._content._stops[where] = array._content._starts[where] + len(bytes)
array._content._content.extend(numpy.fromstring(bytes, dtype=awkward.array.base.AwkwardArray.CHARTYPE))
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
elif isinstance(obj, dict):
# dict keys -> Table columns
def newchunk(obj):
return Table(chunksize, collections.OrderedDict((n, []) for n in obj))
if maskmissing:
def ismine(obj, x):
return isinstance(x, Table)
def promote(obj, x):
for n in obj:
if not n in x._content:
x._content[n] = IndexedMaskedArray(numpy.empty(chunksize, dtype=awkward.array.base.AwkwardArray.INDEXTYPE), [])
x._content[n]._index[: offsets[-1] - offsets[-2]] = x._content[n]._maskedwhen
x._content[n]._nextindex = 0
return x
else:
def ismine(obj, x):
return isinstance(x, Table) and all(n in x._content for n in obj)
def promote(obj, x):
return x
def fillobj(obj, array, where):
for n in obj:
if len(array._content[n]) == 0:
subchunks = []
suboffsets = [offsets[-2]]
else:
subchunks = [array._content[n]]
suboffsets = [offsets[-2], offsets[-1]]
fill(obj[n], subchunks, suboffsets)
array._content[n] = subchunks[-1]
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
elif isinstance(obj, tuple):
# tuple items -> Table columns
def newchunk(obj):
return Table(chunksize, collections.OrderedDict(("_" + str(i), []) for i in range(len(obj))))
def ismine(obj, x):
return isinstance(x, Table) and list(x._content) == ["_" + str(i) for i in range(len(obj))]
def promote(obj, x):
return x
def fillobj(obj, array, where):
for i, x in enumerate(obj):
n = "_" + str(i)
if len(array._content[n]) == 0:
subchunks = []
suboffsets = [offsets[-2]]
else:
subchunks = [array._content[n]]
suboffsets = [offsets[-2], offsets[-1]]
fill(x, subchunks, suboffsets)
array._content[n] = subchunks[-1]
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
else:
try:
it = iter(obj)
except TypeError:
# object attributes -> Table columns
def newchunk(obj):
return NamedTable(chunksize, obj.__class__.__name__, collections.OrderedDict((n, []) for n in dir(obj) if not n.startswith("_")))
if maskmissing:
def ismine(obj, x):
return isinstance(x, NamedTable) and obj.__class__.__name__ == x._name
def promote(obj, x):
for n in dir(obj):
if not n.startswith("_") and not n in x._content:
x._content[n] = IndexedMaskedArray(numpy.empty(chunksize, dtype=awkward.array.base.AwkwardArray.INDEXTYPE), [])
x._content[n]._index[: offsets[-1] - offsets[-2]] = x._content[n]._maskedwhen
x._content[n]._nextindex = 0
return x
else:
def ismine(obj, x):
return isinstance(x, NamedTable) and obj.__class__.__name__ == x._name and all(n in x._content for n in dir(obj) if not n.startswith("_"))
def promote(obj, x):
return x
def fillobj(obj, array, where):
for n in dir(obj):
if not n.startswith("_"):
if len(array._content[n]) == 0:
subchunks = []
suboffsets = [offsets[-2]]
else:
subchunks = [array._content[n]]
suboffsets = [offsets[-2], offsets[-1]]
fill(getattr(obj, n), subchunks, suboffsets)
array._content[n] = subchunks[-1]
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
else:
# iterable -> JaggedArray (and recurse)
def newchunk(obj):
out = JaggedArray.fromoffsets(numpy.zeros(chunksize + 1, dtype=awkward.array.base.AwkwardArray.INDEXTYPE), PartitionedArray([0], []))
out._starts[0] = 0
out._content._offsets = [0] # as an appendable list, not a Numpy array
return out
def ismine(obj, x):
return isinstance(x, JaggedArray)
def promote(obj, x):
return x
def fillobj(obj, array, where):
array._stops[where] = array._starts[where]
for x in it:
fill(x, array._content._chunks, array._content._offsets)
array._stops[where] += 1
insert(obj, chunks, offsets, newchunk, ismine, promote, fillobj)
def trim(length, array):
if isinstance(array, numpy.ndarray):
if len(array) == length:
return array # the length is right: don't copy it
else:
return numpy.array(array[:length]) # copy so that the base can be deleted
elif isinstance(array, PartitionedArray):
for i in range(len(array._chunks)):
array._chunks[i] = trim(array._offsets[i + 1] - array._offsets[i], array._chunks[i])
return array
elif isinstance(array, IndexedMaskedArray):
index = trim(length, array._index)
selection = (index != array._maskedwhen)
content = trim(index[selection][-1] + 1, array._content)
if isinstance(content, numpy.ndarray):
# for simple types, IndexedMaskedArray wastes space; convert to an Arrow-like BitMaskedArray
mask = numpy.zeros(length, dtype=awkward.array.base.AwkwardArray.MASKTYPE)
mask[selection] = True
newcontent = numpy.empty(length, dtype=content.dtype)
newcontent[selection] = content
return BitMaskedArray.fromboolmask(mask, newcontent, maskedwhen=False, lsb=True)
else:
# for complex types, IndexedMaskedArray saves space; keep it
return IndexedMaskedArray(index, content)
elif isinstance(array, UnionArray):
tags = trim(length, array._tags)
index = trim(length, array._index)
contents = []
for tag, content in enumerate(array._contents):
length = index[tags == tag][-1] + 1
contents.append(trim(length, content))
return UnionArray(tags, index, contents)
elif isinstance(array, JaggedArray):
offsets = array.offsets # fill creates aliased starts/stops
if len(offsets) != length + 1:
offsets = numpy.array(offsets[: length + 1])
return JaggedArray.fromoffsets(offsets, trim(offsets[-1], array._content))
elif isinstance(array, NamedTable):
return NamedTable(length, array._name, collections.OrderedDict((n, trim(length, x)) for n, x in array._content.items()))
elif isinstance(array, Table):
return Table(length, collections.OrderedDict((n, trim(length, x)) for n, x in array._content.items()))
elif isinstance(array, ObjectArray):
return ObjectArray(array._generator, trim(length, array._content))
else:
raise AssertionError(array)
chunks = []
offsets = [0]
length = 0
for x in iterable:
fill(x, chunks, offsets)
length += 1
return trim(length, PartitionedArray(offsets, chunks))
| 43.440329
| 167
| 0.558924
|
3e90c107a4c854a890a3424a0a8e1507adfd48a0
| 12,375
|
py
|
Python
|
tests/selpolltest.py
|
karpierz/libpcap
|
6432ae6dd9cddcf0d160de927ca33231a1e2a3d0
|
[
"BSD-3-Clause"
] | 42
|
2018-01-28T03:43:02.000Z
|
2022-03-03T09:19:13.000Z
|
tests/selpolltest.py
|
karpierz/libpcap
|
6432ae6dd9cddcf0d160de927ca33231a1e2a3d0
|
[
"BSD-3-Clause"
] | 9
|
2019-05-01T09:55:04.000Z
|
2021-11-10T23:26:57.000Z
|
tests/selpolltest.py
|
karpierz/libpcap
|
6432ae6dd9cddcf0d160de927ca33231a1e2a3d0
|
[
"BSD-3-Clause"
] | 12
|
2019-07-31T18:55:03.000Z
|
2022-02-01T22:25:25.000Z
|
#!/usr/bin/env python
# Copyright (c) 2016-2021, Adam Karpierz
# Licensed under the BSD license
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 2000
# The Regents of the University of California. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that: (1) source code distributions
# retain the above copyright notice and this paragraph in its entirety, (2)
# distributions including binary code include the above copyright notice and
# this paragraph in its entirety in the documentation or other materials
# provided with the distribution, and (3) all advertising materials mentioning
# features or use of this software display the following acknowledgement:
# ``This product includes software developed by the University of California,
# Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
# the University nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
import sys
import os
import getopt
import select
import ctypes as ct
import libpcap as pcap
from pcaptestutils import * # noqa
#ifndef lint
copyright = "@(#) Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, "\
"1995, 1996, 1997, 2000\n"\
"The Regents of the University of California. "\
"All rights reserved.\n"
#endif
def do_select(rlist, wlist, xlist, timeout=None):
    # Renamed from "select" so the select module (needed below for select.error,
    # select.poll() and the POLL* constants) is not shadowed by this helper.
    if timeout is None:
        return select.select(rlist, wlist, xlist)
    else:
        return select.select(rlist, wlist, xlist, timeout)
# Tests how select() and poll() behave on the selectable file descriptor
# for a pcap.pcap_t.
#
# This would be significantly different on Windows, as it'd test
# how WaitForMultipleObjects() would work on the event handle for a
# pcap.pcap_t.
def main(argv=sys.argv[1:]):
global program_name
program_name = os.path.basename(sys.argv[0])
try:
opts, args = getopt.getopt(argv, "i:sptnq")
except getopt.GetoptError:
usage()
device = None
doselect = False
dopoll = False
mechanism = None
dotimeout = False
dononblock = False
quiet = False
for opt, optarg in opts:
if opt == '-i':
device = optarg.encode("utf-8")
elif opt == '-s':
doselect = True
mechanism = "select() and pcap.dispatch()"
elif opt == '-p':
dopoll = True
mechanism = "poll() and pcap.dispatch()"
elif opt == '-t':
dotimeout = True
elif opt == '-n':
dononblock = True
elif opt == '-q':
quiet = True
else:
usage()
expression = args
if doselect and dopoll:
print("selpolltest: choose select (-s) or poll (-p), but not both",
file=sys.stderr)
return 1
if dotimeout and not doselect and not dopoll:
print("selpolltest: timeout (-t) requires select (-s) or poll (-p)",
file=sys.stderr)
return 1
ebuf = ct.create_string_buffer(pcap.PCAP_ERRBUF_SIZE)
if device is None:
devlist = ct.POINTER(pcap.pcap_if_t)()
if pcap.findalldevs(ct.byref(devlist), ebuf) == -1:
error("{}", ebuf2str(ebuf))
if not devlist:
error("no interfaces available for capture")
device = devlist[0].name
pcap.freealldevs(devlist)
ebuf[0] = b"\0"
pd = pcap.open_live(device, 65535, 0, 1000, ebuf)
if not pd:
error("{}", ebuf2str(ebuf))
elif ebuf.value:
warning("{}", ebuf2str(ebuf))
localnet = pcap.bpf_u_int32()
netmask = pcap.bpf_u_int32()
if pcap.lookupnet(device, ct.byref(localnet), ct.byref(netmask), ebuf) < 0:
localnet = pcap.bpf_u_int32(0)
netmask = pcap.bpf_u_int32(0)
warning("{}", ebuf2str(ebuf))
fcode = pcap.bpf_program()
cmdbuf = " ".join(expression).encode("utf-8")
if pcap.compile(pd, ct.byref(fcode), cmdbuf, 1, netmask) < 0:
error("{}", geterr2str(pd))
if pcap.setfilter(pd, ct.byref(fcode)) < 0:
error("{}", geterr2str(pd))
selectable_fd = -1
if doselect or dopoll:
# We need either an FD on which to do select()/poll()
# or, if there isn't one, a timeout to use in select()/
# poll().
try:
selectable_fd = pcap.get_selectable_fd(pd)
except AttributeError:
error("pcap.get_selectable_fd is not available on this platform")
if selectable_fd == -1:
print("Listening on {}, using {}, with a timeout".format(
device2str(device), mechanism))
try:
required_timeout = pcap.get_required_select_timeout(pd)
except AttributeError:
error("pcap.get_required_select_timeout is not available "
"on this platform")
if not required_timeout:
error("select()/poll() isn't supported on {}, "
"even with a timeout", device2str(device))
required_timeout = required_timeout[0]
# As we won't be notified by select() or poll()
# that a read can be done, we'll have to periodically
# try reading from the device every time the required
# timeout expires, and we don't want those attempts
# to block if nothing has arrived in that interval,
# so we want to force non-blocking mode.
dononblock = True
else:
print("Listening on {}, using {}".format(
device2str(device), mechanism))
required_timeout = None
else:
print("Listening on {}, using pcap.dispatch()".format(
device2str(device)))
if dononblock:
if pcap.setnonblock(pd, 1, ebuf) == -1:
error("pcap.setnonblock failed: {}", ebuf2str(ebuf))
status = 0
if doselect:
while True:
try:
required_timeout = pcap.get_required_select_timeout(pd)
if dotimeout:
seltimeout = (0 + (required_timeout.tv_usec
if required_timeout is not None and
required_timeout.tv_usec < 1000
else 1000) / 1000000.0)
elif required_timeout is not None:
seltimeout = (required_timeout.tv_sec +
required_timeout.tv_usec / 1000000.0)
else:
seltimeout = None
                rfds, wfds, efds = do_select([selectable_fd], [],
                                             [selectable_fd], seltimeout)
except select.error as exc:
print("Select returns error ({})".format(exc.strerror))
else:
if not quiet:
if not rfds and not wfds and not efds:
print("Select timed out: ", end="")
print() # <AK>: missing
else:
print("Select returned a descriptor: ", end="")
print("readable, "
if selectable_fd in rfds else
"not readable, ", end="")
print("exceptional condition"
if selectable_fd in efds else
"no exceptional condition", end="")
print()
packet_count = ct.c_int(0)
status = pcap.dispatch(pd, -1, countme,
ct.cast(ct.pointer(packet_count), ct.POINTER(ct.c_ubyte)))
if status < 0:
break
# Don't report this if we're using a
# required timeout and we got no packets,
# because that could be a very short timeout,
# and we don't want to spam the user with
# a ton of "no packets" reports.
if (status != 0 or packet_count.value != 0 or
required_timeout is not None):
print("{:d} packets seen, {:d} packets counted after "
"select returns".format(status, packet_count.value))
elif dopoll:
while True:
poller = select.poll()
poller.register(selectable_fd, select.POLLIN)
required_timeout = pcap.get_required_select_timeout(pd)
if dotimeout:
polltimeout = 1
elif (required_timeout is not None and
required_timeout.tv_usec >= 1000):
polltimeout = required_timeout.tv_usec // 1000
else:
polltimeout = None
try:
events = poller.poll(polltimeout)
except select.error as exc:
print("Poll returns error ({})".format(exc.strerror))
else:
if not quiet:
if not events:
print("Poll timed out")
else:
event = events[0][1]
print("Poll returned a descriptor: ", end="")
print("readable, "
if event & select.POLLIN else
"not readable, ", end="")
print("exceptional condition, "
if event & select.POLLERR else
"no exceptional condition, ", end="")
print("disconnect, "
if event & select.POLLHUP else
"no disconnect, ", end="")
print("invalid"
if event & select.POLLNVAL else
"not invalid", end="")
print()
packet_count = ct.c_int(0)
status = pcap.dispatch(pd, -1, countme,
ct.cast(ct.pointer(packet_count),
ct.POINTER(ct.c_ubyte)))
if status < 0:
break
# Don't report this if we're using a
# required timeout and we got no packets,
# because that could be a very short timeout,
# and we don't want to spam the user with
# a ton of "no packets" reports.
if (status != 0 or packet_count.value != 0 or
required_timeout is not None):
print("{:d} packets seen, {:d} packets counted after "
"poll returns".format(status, packet_count.value))
else:
while True:
packet_count = ct.c_int(0)
status = pcap.dispatch(pd, -1, countme,
ct.cast(ct.pointer(packet_count),
ct.POINTER(ct.c_ubyte)))
if status < 0:
break
print("{:d} packets seen, {:d} packets counted after "
"pcap.dispatch returns".format(status, packet_count.value))
if status == pcap.PCAP_ERROR_BREAK:
# We got interrupted, so perhaps we didn't manage to finish a
# line we were printing. Print an extra newline, just in case.
print()
sys.stdout.flush()
if status == -1:
# Error. Report it.
print("{}: pcap.dispatch: {}".format(program_name, geterr2str(pd)),
file=sys.stderr)
pcap.freecode(ct.byref(fcode))
pcap.close(pd)
return 1 if status == -1 else 0
@pcap.pcap_handler
def countme(arg, hdr, pkt):
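    # pcap callback: "arg" points at the packet counter; bump it once per captured packet.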
counterp = ct.cast(arg, ct.POINTER(ct.c_int))
counterp[0] += 1
def usage():
print("Usage: {} [ -sptnq ] [ -i interface ] "
"[ expression ]".format(program_name), file=sys.stderr)
sys.exit(1)
if __name__.rpartition(".")[-1] == "__main__":
sys.exit(main())
| 38.793103
| 80
| 0.540768
|
e637ba86f2b6e720a1e9258d531a882c8a147745
| 78
|
py
|
Python
|
PROJECT_0000/faceLib/faceDetectRetinaFace.py
|
blitzkrieg0000/BioFace
|
ff45e3608ad08ab41ef93f4e962c85dafb00709b
|
[
"MIT"
] | 1
|
2021-07-27T18:31:38.000Z
|
2021-07-27T18:31:38.000Z
|
PROJECT_0000/faceLib/faceDetectRetinaFace.py
|
blitzkrieg0000/BioFace
|
ff45e3608ad08ab41ef93f4e962c85dafb00709b
|
[
"MIT"
] | null | null | null |
PROJECT_0000/faceLib/faceDetectRetinaFace.py
|
blitzkrieg0000/BioFace
|
ff45e3608ad08ab41ef93f4e962c85dafb00709b
|
[
"MIT"
] | null | null | null |
# The original "retinaface" package was not installed because some of its packages conflict with TensorFlow.
| 78
| 78
| 0.846154
|
50b6b34e2f1d32b0d2f7f67c081b46f49da63bc2
| 367
|
py
|
Python
|
lambda_package/localcontext.py
|
TriNimbus/lambda-package-template
|
7291437fb11716b518d7046fff0ae8bfd192ee19
|
[
"MIT"
] | 2
|
2016-11-22T18:55:08.000Z
|
2019-12-15T02:28:02.000Z
|
lambda_package/localcontext.py
|
TriNimbus/lambda-package-template
|
7291437fb11716b518d7046fff0ae8bfd192ee19
|
[
"MIT"
] | null | null | null |
lambda_package/localcontext.py
|
TriNimbus/lambda-package-template
|
7291437fb11716b518d7046fff0ae8bfd192ee19
|
[
"MIT"
] | 1
|
2018-07-30T13:36:59.000Z
|
2018-07-30T13:36:59.000Z
|
from lambda_package.utility import Utility
class LocalContext(object):
"""A class to simulate the Lambda context locally."""
@property
def invoked_function_arn(self):
"""Simulate the Lambda ARN that comes into the context object. """
return 'arn:aws:lambda:us-east-1:{0}:function:func-name'.format(
Utility.aws_account_id())
| 33.363636
| 74
| 0.689373
|
f15cd34caf738fcab122211c8d928283a6937de9
| 7,121
|
py
|
Python
|
LeetCode-All-Solution/Python3/LC-0279-Perfect-Squares.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0279-Perfect-Squares.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0279-Perfect-Squares.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0279-Perfect-Squares.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-02-24
=================================================================="""
import sys
import time
# from typing import List
# import collections
"""
LeetCode - 0279 - (Medium) - Perfect Squares
https://leetcode.com/problems/perfect-squares/
Description & Requirement:
Given an integer n, return the least number of perfect square numbers that sum to n.
A perfect square is an integer that is the square of an integer;
in other words, it is the product of some integer with itself.
For example, 1, 4, 9, and 16 are perfect squares while 3 and 11 are not.
Example 1:
Input: n = 12
Output: 3
Explanation: 12 = 4 + 4 + 4.
Example 2:
Input: n = 13
Output: 2
Explanation: 13 = 4 + 9.
Constraints:
1 <= n <= 10^4
"""
class Solution:
def numSquares(self, n: int) -> int:
# exception case
assert isinstance(n, int) and n > 0
if n == 1:
return 1
# main method: (1. dfs & backtrace; 2. dynamic programming. 3. mathematics)
# [Lagrange's four-square theorem](https://en.wikipedia.org/wiki/Lagrange%27s_four-square_theorem)
# Lagrange's four-square theorem, also known as Bachet's conjecture,
# states that every natural number can be represented as the sum of four integer squares.
# That is, the squares form an additive basis of order four.
# return self._numSquaresDfs(n) # TLE
# return self._numSquaresDp(n) # Time: O(n * \sqrt(n)); Space: O(n)
return self._numSquaresMath(n) # Time: O(\sqrt(n)); Space: O(n) or O(1)
def _numSquaresDfs(self, n: int) -> int:
assert n >= 2
def __get_perfect_square_list(limit: int) -> list:
_ps_list = []
_num = 1
while _num * _num <= limit:
_ps_list.append(_num * _num)
_num += 1
return _ps_list
def __reverse_list(origin_list: list):
left_idx, right_idx = 0, len(origin_list) - 1
while left_idx < right_idx:
origin_list[left_idx], origin_list[right_idx] = origin_list[right_idx], origin_list[left_idx]
left_idx += 1
right_idx -= 1
# 1 <= n <= 10^4, so 1 <= len(ps_list) <= 100
ps_list = __get_perfect_square_list(n) # now ps_list is ascending order
if n in ps_list:
return 1
__reverse_list(ps_list) # now ps_list is descending order, greedily find the min len combination
len_ps = len(ps_list)
res = [4] # at most 4, according to Lagrange's four-square theorem
def __dfs(cur_combo_len: int, cur_sum: int, cur_ps_index: int):
if cur_combo_len >= res[0] or cur_ps_index >= len_ps or cur_sum > n:
return
if cur_sum > n:
return
if cur_sum == n:
if cur_combo_len == 2:
res[0] = 2
if cur_combo_len < res[0]:
res[0] = cur_combo_len
return
for next_ps_index in range(cur_ps_index, len_ps): # explore more numbers (numbers can be reused)
__dfs(cur_combo_len + 1, cur_sum + ps_list[cur_ps_index], next_ps_index) # go deeper
for start_ps_index in range(len_ps): # start from every number
if res[0] == 2:
return 2
__dfs(0, 0, start_ps_index)
return res[0]
def _numSquaresDp(self, n: int) -> int:
"""
dp[i] is the min len of combination to make up integer i
dp equation: dp[i] = 1 + min(dp[i - j * j]), where j is an integer such that j * j <= i
dp init: all elements = 4, according to Lagrange's four-square theorem
dp aim: get dp[-1]
"""
assert n >= 2
# dp[i] is the min len of combination to make up integer i
# dp init: all elements = 4, according to Lagrange's four-square theorem
dp = [4 for _ in range(n + 1)]
dp[0] = 0
# dp equation: dp[i] = 1 + min(dp[i - j * j]), where j is an integer such that j * j <= i
for i in range(1, n + 1):
cur_min = 4
j = 1
while j * j <= i:
cur_min = min(cur_min, dp[i - j * j])
j += 1
dp[i] = min(4, cur_min + 1)
# dp aim: get dp[-1]
return dp[-1]
def _numSquaresMath(self, n: int) -> int:
"""
if n == 4^k * (8m + 7), then n can only be represented as the sum of 4 perfect square numbers
if result == 1, then n == k^2, just check if n is a perfect square number
if result == 2, then n can be represented as (i^2 + j^2), so just enumerate all (n - i^2), see if it's a ps
else n can only be represented as the sum of 3 perfect square numbers
Runtime: 42 ms, faster than 98.58% of Python3 online submissions for Perfect Squares.
Memory Usage: 14.2 MB, less than 76.60% of Python3 online submissions for Perfect Squares.
"""
assert n >= 2
def __get_perfect_square_list_set(limit: int):
_ps_list = []
_ps_set = set()
_num = 1
while _num * _num <= limit:
_num_square = _num * _num
_ps_list.append(_num_square)
_ps_set.add(_num_square)
_num += 1
return _ps_list, _ps_set
# 1 <= n <= 10^4, so 1 <= len(ps_list) <= 100
ps_list, ps_set = __get_perfect_square_list_set(n) # now ps_list is ascending order
# if result == 1, then n == k^2, just check if n is a perfect square number
if n in ps_set:
return 1
# if n == 4^k * (8m + 7), then n can only be represented as the sum of 4 perfect square numbers
test_4 = n
while test_4 > 4 and test_4 & 3 == 0: # get rid of 4^k factors (& 3 == 0 <-> % 4 == 0)
test_4 >>= 2
if test_4 % 8 == 7:
return 4
# if result == 2, then n can be represented as (i^2 + j^2), so just enumerate all (n - i^2), see if it's a ps
for ps in ps_list:
if (n - ps) in ps_set:
return 2
# else n can only be represented as the sum of 3 perfect square numbers
return 3
def main():
# Example 1: Output: 3
# n = 12
# Example 2: Output: 2
# n = 13
# Example 3: Output: 4 (DFS (TLE) 4161.92700 ms; DP 203.00500 ms; Math 0.03900 ms)
n = 8935
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.numSquares(n)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| 35.078818
| 117
| 0.552731
|
5de999683a8b34259af15f4160c3b3823d3cd59c
| 5,062
|
py
|
Python
|
examples/to_test_classification.py
|
Jetafull/pytorch_tabular
|
9f65cb2c4b9dd3aaee209d8db1e4b5238eddfeb5
|
[
"MIT"
] | null | null | null |
examples/to_test_classification.py
|
Jetafull/pytorch_tabular
|
9f65cb2c4b9dd3aaee209d8db1e4b5238eddfeb5
|
[
"MIT"
] | null | null | null |
examples/to_test_classification.py
|
Jetafull/pytorch_tabular
|
9f65cb2c4b9dd3aaee209d8db1e4b5238eddfeb5
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from sklearn.datasets import fetch_covtype
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PowerTransformer
from torch.functional import norm
# from torch.utils import data
from pytorch_tabular.config import (
DataConfig,
ExperimentConfig,
ExperimentRunManager,
ModelConfig,
OptimizerConfig,
TrainerConfig,
)
from pytorch_tabular.models.category_embedding.category_embedding_model import (
CategoryEmbeddingModel,
)
from pytorch_tabular.models.category_embedding.config import (
CategoryEmbeddingModelConfig,
)
from pytorch_tabular.models.node.config import NodeConfig
from pytorch_tabular.tabular_datamodule import TabularDatamodule
from pytorch_tabular.tabular_model import TabularModel
import wget  # needed below to download the covertype dataset when it is missing
from pytorch_tabular.utils import get_balanced_sampler, get_class_weighted_cross_entropy
# torch.manual_seed(0)
# np.random.seed(0)
# torch.set_deterministic(True)
BASE_DIR = Path.home().joinpath("data")
datafile = BASE_DIR.joinpath("covtype.data.gz")
datafile.parent.mkdir(parents=True, exist_ok=True)
url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz"
)
if not datafile.exists():
wget.download(url, datafile.as_posix())
target_name = ["Covertype"]
cat_col_names = [
"Wilderness_Area1",
"Wilderness_Area2",
"Wilderness_Area3",
"Wilderness_Area4",
"Soil_Type1",
"Soil_Type2",
"Soil_Type3",
"Soil_Type4",
"Soil_Type5",
"Soil_Type6",
"Soil_Type7",
"Soil_Type8",
"Soil_Type9",
"Soil_Type10",
"Soil_Type11",
"Soil_Type12",
"Soil_Type13",
"Soil_Type14",
"Soil_Type15",
"Soil_Type16",
"Soil_Type17",
"Soil_Type18",
"Soil_Type19",
"Soil_Type20",
"Soil_Type21",
"Soil_Type22",
"Soil_Type23",
"Soil_Type24",
"Soil_Type25",
"Soil_Type26",
"Soil_Type27",
"Soil_Type28",
"Soil_Type29",
"Soil_Type30",
"Soil_Type31",
"Soil_Type32",
"Soil_Type33",
"Soil_Type34",
"Soil_Type35",
"Soil_Type36",
"Soil_Type37",
"Soil_Type38",
"Soil_Type39",
"Soil_Type40",
]
num_col_names = [
"Elevation",
"Aspect",
"Slope",
"Horizontal_Distance_To_Hydrology",
"Vertical_Distance_To_Hydrology",
"Horizontal_Distance_To_Roadways",
"Hillshade_9am",
"Hillshade_Noon",
"Hillshade_3pm",
"Horizontal_Distance_To_Fire_Points",
]
feature_columns = num_col_names + cat_col_names + target_name
df = pd.read_csv(datafile, header=None, names=feature_columns)
# cat_col_names = []
# num_col_names = [
# "Elevation", "Aspect"
# ]
# feature_columns = (
# num_col_names + cat_col_names + target_name)
# df = df.loc[:,feature_columns]
df.head()
train, test = train_test_split(df, random_state=42)
train, val = train_test_split(train, random_state=42)
num_classes = len(set(train[target_name].values.ravel()))
data_config = DataConfig(
target=target_name,
continuous_cols=num_col_names,
categorical_cols=cat_col_names,
continuous_feature_transform=None, # "quantile_normal",
normalize_continuous_features=False,
)
model_config = CategoryEmbeddingModelConfig(
task="classification",
metrics=["f1", "accuracy"],
metrics_params=[{"num_classes": num_classes}, {}],
)
# model_config = NodeConfig(
# task="classification",
# depth=4,
# num_trees=1024,
# input_dropout=0.0,
# metrics=["f1", "accuracy"],
# metrics_params=[{"num_classes": num_classes, "average": "macro"}, {}],
# )
trainer_config = TrainerConfig(
gpus=1, fast_dev_run=False, max_epochs=5, batch_size=1024
)
experiment_config = ExperimentConfig(
project_name="PyTorch Tabular Example",
run_name="node_forest_cov",
exp_watch="gradients",
log_target="wandb",
log_logits=True,
)
optimizer_config = OptimizerConfig()
# tabular_model = TabularModel(
# data_config="examples/data_config.yml",
# model_config="examples/model_config.yml",
# optimizer_config="examples/optimizer_config.yml",
# trainer_config="examples/trainer_config.yml",
# # experiment_config=experiment_config,
# )
tabular_model = TabularModel(
data_config=data_config,
model_config=model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config,
# experiment_config=experiment_config,
)
sampler = get_balanced_sampler(train[target_name].values.ravel())
# cust_loss = get_class_weighted_cross_entropy(train[target_name].values.ravel())
tabular_model.fit(
train=train,
validation=val,
# loss=cust_loss,
train_sampler=sampler,
)
result = tabular_model.evaluate(test)
print(result)
# test.drop(columns=target_name, inplace=True)
# pred_df = tabular_model.predict(test)
# pred_df.to_csv("output/temp2.csv")
# tabular_model.save_model("test_save")
# new_model = TabularModel.load_from_checkpoint("test_save")
# result = new_model.evaluate(test)
| 26.642105
| 88
| 0.729554
|
00c07b9257f5f72927b9abb0e524db4233798688
| 4,277
|
py
|
Python
|
code/word_selection.py
|
crypdick/knausj_talon
|
1c38f8c8d7bbd44a3bedddcc36850461cb35e000
|
[
"Unlicense"
] | null | null | null |
code/word_selection.py
|
crypdick/knausj_talon
|
1c38f8c8d7bbd44a3bedddcc36850461cb35e000
|
[
"Unlicense"
] | null | null | null |
code/word_selection.py
|
crypdick/knausj_talon
|
1c38f8c8d7bbd44a3bedddcc36850461cb35e000
|
[
"Unlicense"
] | null | null | null |
import time
from talon import Context, Module, actions, clip
mod = Module()
@mod.action_class
class Actions:
def word_neck(index: int):
"""select the following word or the index'th word"""
word_neck(int(index))
def word_prev(index: int):
"""select the previous word or the index'th word"""
word_prev(int(index))
def small_word_neck(index: int):
"""select the following word or the index'th word"""
small_word_neck(int(index))
def small_word_prev(index: int):
"""select the previous word or the index'th word"""
small_word_prev(int(index))
def big_word_neck(index: int):
"""select the following word or the index'th word"""
big_word_neck(int(index))
def big_word_prev(index: int):
"""select the previous word or the index'th word"""
big_word_prev(int(index))
alphanumeric = "abcdefghijklmnopqrstuvwxyz0123456789_"
def small_word_neck(index):
return word_neck(index, valid_characters=set(alphanumeric) - set("_"))
def small_word_prev(index):
return word_prev(index, valid_characters=set(alphanumeric) - set("_"))
def big_word_neck(index):
return word_neck(index, valid_characters=set(alphanumeric) | set("/\\-_.>=<"))
def big_word_prev(index):
return word_prev(index, valid_characters=set(alphanumeric) | set("/\\-_.>=<"))
def stop_selection(cursor_position):
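    # If text is currently selected (detected by copying one extended character),
    # collapse the selection and park the cursor on the requested side of it.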
assert cursor_position in ("left", "right")
with clip.capture() as s:
actions.edit.extend_right()
time.sleep(0.25)
actions.edit.copy()
current_highlight = s.get()
actions.edit.extend_left()
if len(current_highlight) > 1:
if cursor_position == "left":
actions.edit.left()
elif cursor_position == "right":
actions.edit.right()
def word_neck(word_index, valid_characters=alphanumeric):
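    # Copy the text from the cursor to the end of the line, locate the word_index'th
    # word made of valid_characters, then arrow right to it and select it.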
with clip.revert():
stop_selection("right")
actions.edit.extend_line_end()
time.sleep(0.25)
actions.edit.copy()
actions.edit.left()
time.sleep(0.25)
text_right = clip.get().lower()
print(text_right)
print(word_index, type(word_index))
is_word = [character in valid_characters for character in text_right]
word_count = 1
i = 0
while i < (len(is_word) - 1) and not is_word[i]:
i += 1
# print("a start", i)
while i < (len(is_word) - 1) and word_count < word_index:
# print(i, is_word[i], word_count, word_index)
if not is_word[i] and is_word[i + 1]:
word_count += 1
i += 1
# warning: this is a hack, sorry
# print("i", i)
if i == 1 and is_word[0]:
i = 0
start_position = i
# print(text_right[start_position:])
while i < len(is_word) and is_word[i]:
i += 1
end_position = i
# print(start_position, end_position)
# cursor over to the found word
for i in range(0, start_position):
actions.edit.right()
# now select the word
for i in range(0, end_position - start_position):
actions.edit.extend_right()
def word_prev(word_index, valid_characters=alphanumeric):
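    # Mirror of word_neck: copy the text from the start of the line to the cursor,
    # scan it right-to-left for the word_index'th word, then arrow left to it and select it.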
with clip.revert():
stop_selection("left")
actions.edit.extend_line_start()
time.sleep(0.25)
actions.edit.copy()
actions.edit.right()
time.sleep(0.25)
text_right = clip.get().lower()
text_right = list(reversed(text_right))
is_word = [character in valid_characters for character in text_right]
word_count = 1
i = 0
while i < (len(is_word) - 1) and not is_word[i]:
i += 1
while i < (len(is_word) - 1) and word_count < word_index:
# print(i, is_word[i], word_count, word_index)
if not is_word[i] and is_word[i + 1]:
word_count += 1
i += 1
start_position = i
# print(text_right[start_position:])
while i < len(is_word) and is_word[i]:
i += 1
end_position = i
# print(start_position, end_position, text_right[start_position:end_position])
# cursor over to the found word
for i in range(0, start_position):
actions.edit.left()
# now select the word
for i in range(0, end_position - start_position):
actions.edit.extend_left()
| 27.772727
| 82
| 0.630348
|
b75b46d4923be01fcb1bcddbc5314854a4ed6d81
| 1,388
|
py
|
Python
|
integrations/airflow/tests/test_location.py
|
mobuchowski/marquez
|
a1964623e13e95ee98b93517f11cdf116a1d1184
|
[
"Apache-2.0"
] | 1
|
2021-11-02T08:14:47.000Z
|
2021-11-02T08:14:47.000Z
|
integrations/airflow/tests/test_location.py
|
mobuchowski/marquez
|
a1964623e13e95ee98b93517f11cdf116a1d1184
|
[
"Apache-2.0"
] | 7
|
2021-05-06T15:16:13.000Z
|
2022-03-01T23:10:43.000Z
|
integrations/airflow/tests/test_location.py
|
mobuchowski/marquez
|
a1964623e13e95ee98b93517f11cdf116a1d1184
|
[
"Apache-2.0"
] | 1
|
2021-04-19T17:51:20.000Z
|
2021-04-19T17:51:20.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from unittest.mock import patch
import pytest
from tests.mocks.git_mock import execute_git_mock
from marquez_airflow.utils import get_location
log = logging.getLogger(__name__)
@patch('marquez_airflow.utils.execute_git',
side_effect=execute_git_mock)
def test_dag_location(git_mock):
assert ('https://github.com/MarquezProject/marquez/blob/'
'abcd1234/integrations/airflow/tests/test_dags/'
'test_dag.py' == get_location("tests/test_dags/test_dag.py"))
@patch('marquez_airflow.utils.execute_git',
side_effect=execute_git_mock)
def test_bad_file_path(git_mock):
log.debug("test_bad_file_path()")
with pytest.raises(FileNotFoundError):
# invalid file
get_location("dags/missing-dag.py")
if __name__ == "__main__":
pytest.main([sys.argv[0]])
| 32.27907
| 74
| 0.748559
|
16580fe2c26e221d4b5c58ecef8b9708f337711a
| 11,673
|
py
|
Python
|
main.py
|
analyticFolk/detr
|
cd98687d683c670627dfa29424ca6e01de8767f4
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
analyticFolk/detr
|
cd98687d683c670627dfa29424ca6e01de8767f4
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
analyticFolk/detr
|
cd98687d683c670627dfa29424ca6e01de8767f4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--lr_drop', default=200, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=100, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--dataset_file', default='coco')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
# custom dataset params
parser.add_argument('--custom_dataset_path', type=str)
parser.add_argument('--custom_dataset', type=str)
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
param_dicts = [
{"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
dataset_train = build_dataset(image_set='train', args=args)
dataset_val = build_dataset(image_set='val', args=args)
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
if args.dataset_file == "coco_panoptic":
# We also evaluate AP during panoptic training, on original coco DS
coco_val = datasets.coco.build("val", args)
base_ds = get_coco_api_from_dataset(coco_val)
else:
base_ds = get_coco_api_from_dataset(dataset_val)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location='cpu')
model_without_ddp.detr.load_state_dict(checkpoint['model'])
output_dir = Path(args.output_dir)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.eval:
test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
data_loader_val, base_ds, device, args.output_dir)
if args.output_dir:
utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer, device, epoch,
args.clip_max_norm)
lr_scheduler.step()
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and every 100 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args,
}, checkpoint_path)
test_stats, coco_evaluator = evaluate(
model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
# for evaluation logs
if coco_evaluator is not None:
(output_dir / 'eval').mkdir(exist_ok=True)
if "bbox" in coco_evaluator.coco_eval:
filenames = ['latest.pth']
if epoch % 50 == 0:
filenames.append(f'{epoch:03}.pth')
for name in filenames:
torch.save(coco_evaluator.coco_eval["bbox"].eval,
output_dir / "eval" / name)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| 46.321429
| 116
| 0.64405
|
375561d3679f61bb93569d8d824d034f6cb552df
| 3,829
|
py
|
Python
|
Face.py
|
Munthir-Shishani/face-check-in
|
ca6656e1d7860657bffddeac0c8a57267c419d6f
|
[
"BSD-3-Clause"
] | null | null | null |
Face.py
|
Munthir-Shishani/face-check-in
|
ca6656e1d7860657bffddeac0c8a57267c419d6f
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T00:30:03.000Z
|
2022-03-12T00:30:03.000Z
|
Face.py
|
Munthir-Shishani/face-check-in
|
ca6656e1d7860657bffddeac0c8a57267c419d6f
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import io
import glob
import os
import sys
import time
import uuid
import requests
from datetime import datetime
import pytz
from urllib.parse import urlparse
from io import BytesIO
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType
from urllib3.exceptions import NewConnectionError
from azure.cognitiveservices.vision.face.models._models_py3 import APIErrorException
KEY = os.environ['FACE_SUBSCRIPTION_KEY']
ENDPOINT = os.environ['FACE_ENDPOINT']
PERSON_GROUP_ID = 'employees'
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
def add_new(image, name=None, human_id=None):
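    # Register a face image with the Azure Face person group (creating the person when
    # no human_id is given), retrain the group, and return the final training status.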
try:
print('Person group:', PERSON_GROUP_ID)
if human_id is None:
human = face_client.person_group_person.create(PERSON_GROUP_ID, name)
human_id = human.person_id
if name is not None and human_id is not None:
face_client.person_group_person.update(PERSON_GROUP_ID, human_id, name)
face_client.person_group_person.add_face_from_stream(PERSON_GROUP_ID, human_id, image, detection_model="detection_02")
face_client.person_group.train(PERSON_GROUP_ID)
while (True):
training_status = face_client.person_group.get_training_status(PERSON_GROUP_ID)
print("Training status: {}.".format(training_status.status))
print()
if (training_status.status is TrainingStatusType.succeeded):
break
elif (training_status.status is TrainingStatusType.failed):
sys.exit('Training the person group has failed.')
time.sleep(5)
return training_status.status
except NewConnectionError as error:
print(error)
return error
except APIErrorException as error:
print(error)
return error
except IndexError as error:
print(error)
return error
def default_dict(counter, name='Unknown', confidence=0.0, person_id='', face_id=''):
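    # Build a single check-in record keyed by "counter", stamped with the current
    # date and time in the Asia/Amman timezone.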
now = datetime.now(pytz.timezone('Asia/Amman'))
_buffer = {}
result = {}
_buffer['Name'] = name
_buffer['Date'] = now.strftime("%d/%m/%Y")
_buffer['Time'] = now.strftime("%H:%M:%S")
_buffer['Confidence'] = confidence
_buffer['Person ID'] = person_id
_buffer['Face ID'] = face_id
result[counter] = _buffer
return result
def who_is_it(image):
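    # Detect every face in the image and identify each one against the person group;
    # returns a dict of records, the string 'No face', or the caught exception.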
counter = 0
face_ids = []
result = {}
try:
faces = face_client.face.detect_with_stream(image, recognition_model="recognition_02", detection_model="detection_02")
if not faces:
return 'No face'
else:
for face in faces:
face_ids.append(face.face_id)
results = face_client.face.identify(face_ids, PERSON_GROUP_ID, max_num_of_candidates_returned=1, confidence_threshold=0.6)
if not results:
return 'No face'
for candidate in results:
if candidate.candidates != []:
person = face_client.person_group_person.get(PERSON_GROUP_ID, candidate.candidates[0].person_id)
result.update(default_dict(counter, person.name, candidate.candidates[0].confidence, person.person_id, face_ids[counter]))
else:
result.update(default_dict(counter))
counter += 1
return result
except NewConnectionError as error:
print(error)
return error
except APIErrorException as error:
print(error)
return error
except IndexError as error:
print(error)
return error
| 35.453704
| 142
| 0.678506
|
bf0e43e62f14f116c7495b6c3368e06731f90117
| 326
|
py
|
Python
|
cgnp_patchy/lib/moieties/MME.py
|
cjspindel/cgnp_patchy
|
12d401c90795ecddb9c4ea0433dc26c4d31d80b6
|
[
"MIT"
] | null | null | null |
cgnp_patchy/lib/moieties/MME.py
|
cjspindel/cgnp_patchy
|
12d401c90795ecddb9c4ea0433dc26c4d31d80b6
|
[
"MIT"
] | null | null | null |
cgnp_patchy/lib/moieties/MME.py
|
cjspindel/cgnp_patchy
|
12d401c90795ecddb9c4ea0433dc26c4d31d80b6
|
[
"MIT"
] | null | null | null |
import mbuild as mb
import numpy as np
class MME(mb.Compound):
""" Coarse-grained alkane bead containing a CH2-CH2-CH3 group """
def __init__(self):
super(MME, self).__init__()
self.add(mb.Particle(name="_MME"))
self.add(mb.Port(anchor=self[0], orientation=[0, 1, 0], separation=0.15), 'up')
| 29.636364
| 87
| 0.644172
|
b20d33938c4a8aa87b15577ef54b08ecc6657da8
| 261
|
py
|
Python
|
git_master_main.py
|
ahrenstein/thefuck-rules
|
e6a7f3b9ce5b9eff9068a5e4dd3d298738549102
|
[
"MIT"
] | 4
|
2021-01-18T14:29:22.000Z
|
2022-02-25T13:00:37.000Z
|
git_master_main.py
|
ahrenstein/thefuck-rules
|
e6a7f3b9ce5b9eff9068a5e4dd3d298738549102
|
[
"MIT"
] | null | null | null |
git_master_main.py
|
ahrenstein/thefuck-rules
|
e6a7f3b9ce5b9eff9068a5e4dd3d298738549102
|
[
"MIT"
] | null | null | null |
def match(command):
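    # thefuck rule: fire when git reports that the "master" branch/pathspec does not
    # exist, i.e. in repositories whose default branch is "main".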
return ('error: pathspec \'master\' did not match any file(s) known to git' in command.output.lower())
def get_new_command(command):
return command.script.replace("master","main")
# Optional:
priority = 5
enabled_by_default = True
| 26.1
| 106
| 0.724138
|
055e66149a807f68f4fe496640334f0924e5e67e
| 20,134
|
py
|
Python
|
pybotters/models/gmocoin.py
|
yota-p/pybotters
|
4174779fb9ad6abd4cbe2ec59a62510e96653d1b
|
[
"MIT"
] | null | null | null |
pybotters/models/gmocoin.py
|
yota-p/pybotters
|
4174779fb9ad6abd4cbe2ec59a62510e96653d1b
|
[
"MIT"
] | null | null | null |
pybotters/models/gmocoin.py
|
yota-p/pybotters
|
4174779fb9ad6abd4cbe2ec59a62510e96653d1b
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import asyncio
import logging
from datetime import datetime, timezone
from dateutil import parser
from decimal import Decimal
from enum import Enum, auto
from typing import Any, Awaitable, Optional, cast
import aiohttp
from pybotters.store import DataStore, DataStoreManager
from pybotters.typedefs import Item
from ..auth import Auth
from ..ws import ClientWebSocketResponse
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict
logger = logging.getLogger(__name__)
def parse_datetime(x: Any) -> datetime:
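    # Fast path: slice apart ISO-8601 "...Z" strings by hand; fall back to dateutil's
    # parser for anything that does not fit the expected layout.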
if isinstance(x, str):
try:
exec_date = x.replace('T', ' ')[:-1]
exec_date = exec_date + '00000000'
dt = datetime(
int(exec_date[0:4]),
int(exec_date[5:7]),
int(exec_date[8:10]),
int(exec_date[11:13]),
int(exec_date[14:16]),
int(exec_date[17:19]),
int(exec_date[20:26]),
)
except Exception:
dt = parser.parse(x)
return dt
else:
raise ValueError(f'x only support str, but {type(x)} passed.')
class ApiType(Enum):
"""
    API type (Public / Private).
"""
Public = auto()
Private = auto()
class Channel(Enum):
"""
    WebSocket API channels.
"""
# Public
TICKER = auto()
ORDER_BOOKS = auto()
TRADES = auto()
# Private
EXECUTION_EVENTS = auto()
ORDER_EVENTS = auto()
POSITION_EVENTS = auto()
POSITION_SUMMARY_EVENTS = auto()
@staticmethod
def from_str(name: str) -> "Channel":
if not hasattr(Channel, "_table"):
Channel._table = {
"ticker": Channel.TICKER,
"orderbooks": Channel.ORDER_BOOKS,
"trades": Channel.TRADES,
"executionEvents": Channel.EXECUTION_EVENTS,
"orderEvents": Channel.ORDER_EVENTS,
"positionEvents": Channel.POSITION_EVENTS,
"positionSummaryEvents": Channel.POSITION_SUMMARY_EVENTS,
}
return Channel._table[name]
class MessageType(Enum):
"""
    Message types.
"""
NONE = auto()
ER = auto()
NOR = auto()
ROR = auto()
COR = auto()
OPR = auto()
UPR = auto()
ULR = auto()
CPR = auto()
INIT = auto()
UPDATE = auto()
PERIODIC = auto()
class Symbol(Enum):
"""
    Supported symbols.
"""
BTC = auto()
ETH = auto()
BCH = auto()
LTC = auto()
XRP = auto()
BTC_JPY = auto()
ETH_JPY = auto()
BCH_JPY = auto()
LTC_JPY = auto()
XRP_JPY = auto()
class OrderSide(Enum):
"""
    Order side (buy / sell).
"""
BUY = auto()
SELL = auto()
class ExecutionType(Enum):
"""
    Execution (order) type.
"""
MARKET = auto()
LIMIT = auto()
STOP = auto()
class TimeInForce(Enum):
"""
    Time-in-force condition.
"""
FAK = auto()
FAS = auto()
FOK = auto()
SOK = auto()
class SettleType(Enum):
"""
    Settlement type (open / close / loss cut).
"""
OPEN = auto()
CLOSE = auto()
LOSS_CUT = auto()
class OrderType(Enum):
"""
    Order category (normal / losscut).
"""
NORMAL = auto()
LOSSCUT = auto()
class OrderStatus(Enum):
"""
    Order status.
"""
WAITING = auto()
ORDERED = auto()
MODIFYING = auto()
CANCELLING = auto()
CANCELED = auto()
EXECUTED = auto()
EXPIRED = auto()
class CancelType(Enum):
"""
    Cancel reason.
"""
NONE = auto()
USER = auto()
POSITION_LOSSCUT = auto()
INSUFFICIENT_BALANCE = auto()
INSUFFICIENT_MARGIN = auto()
ACCOUNT_LOSSCUT = auto()
MARGIN_CALL = auto()
MARGIN_CALL_LOSSCUT = auto()
EXPIRED_FAK = auto()
EXPIRED_FOK = auto()
EXPIRED_SOK = auto()
CLOSED_ORDER = auto()
SOK_TAKER = auto()
PRICE_LIMIT = auto()
class Ticker(TypedDict):
ask: Decimal
bid: Decimal
high: Decimal
last: Decimal
low: Decimal
symbol: Symbol
timestamp: datetime
volume: Decimal
class OrderLevel(TypedDict):
symbol: Symbol
side: OrderSide
price: Decimal
size: Decimal
class OrderBook(TypedDict):
asks: list[OrderLevel]
bids: list[OrderLevel]
symbol: Symbol
timestamp: datetime
class Trade(TypedDict):
price: Decimal
side: OrderSide
size: Decimal
timestamp: datetime
symbol: Symbol
class Execution(TypedDict):
execution_id: int
order_id: int
symbol: Symbol
side: OrderSide
settle_type: SettleType
size: Decimal
price: Decimal
timestamp: datetime
loss_gain: Decimal
fee: Decimal
# properties that only appears websocket message
position_id: Optional[int]
execution_type: Optional[ExecutionType]
order_price: Optional[Decimal]
order_size: Optional[Decimal]
order_executed_size: Optional[Decimal]
order_timestamp: Optional[datetime]
time_in_force: Optional[str]
class Order(TypedDict):
order_id: int
symbol: Symbol
settle_type: SettleType
execution_type: ExecutionType
side: OrderSide
order_status: OrderStatus
order_timestamp: datetime
price: Decimal
size: Decimal
executed_size: Decimal
losscut_price: Decimal
time_in_force: TimeInForce
# properties that only appears websocket message
cancel_type: Optional[CancelType]
class Position(TypedDict):
position_id: int
symbol: Symbol
side: OrderSide
size: Decimal
orderd_size: Decimal
price: Decimal
loss_gain: Decimal
leverage: Decimal
losscut_price: Decimal
timestamp: datetime
class PositionSummary(TypedDict):
symbol: Symbol
side: OrderSide
average_position_rate: Decimal
position_loss_gain: Decimal
sum_order_quantity: Decimal
sum_position_quantity: Decimal
timestamp: datetime
class TickerStore(DataStore):
_KEYS = ["symbol"]
def _onmessage(self, mes: Ticker) -> None:
self._update([cast(Item, mes)])
class OrderBookStore(DataStore):
_KEYS = ["symbol", "side", "price"]
def _init(self) -> None:
self.timestamp: Optional[datetime] = None
def sorted(self, query: Optional[Item] = None) -> dict[OrderSide, list[OrderLevel]]:
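        # Return the order book split by side, asks sorted ascending and bids
        # descending by price, optionally filtered by the given query.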
if query is None:
query = {}
result: dict[OrderSide, list[OrderLevel]] = {
OrderSide.BUY: [],
OrderSide.SELL: [],
}
for item in self:
if all(k in item and query[k] == item[k] for k in query):
result[item["side"]].append(cast(OrderLevel, item))
result[OrderSide.SELL].sort(key=lambda x: x["price"])
result[OrderSide.BUY].sort(key=lambda x: x["price"], reverse=True)
return result
def _onmessage(self, mes: OrderBook) -> None:
data = mes["asks"] + mes["bids"]
result = self.find({"symbol": mes["symbol"]})
self._delete(result)
self._insert(cast("list[Item]", data))
self.timestamp = mes["timestamp"]
class TradeStore(DataStore):
def _onmessage(self, mes: Trade) -> None:
self._insert([cast(Item, mes)])
class OrderStore(DataStore):
_KEYS = ["order_id"]
def _onresponse(self, data: list[Order]) -> None:
self._insert(cast("list[Item]", data))
def _onmessage(self, mes: Order) -> None:
if mes["order_status"] in (OrderStatus.WAITING, OrderStatus.ORDERED):
self._update([cast(Item, mes)])
else:
self._delete([cast(Item, mes)])
def _onexecution(self, mes: Execution) -> None:
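        # Fold an execution event into the cached order: bump the executed size and
        # delete the order once it has been completely filled.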
current = cast(Order, self.get({"order_id": mes["order_id"]}))
if (
mes["order_executed_size"]
and current
and current["executed_size"] < mes["order_executed_size"]
):
current["executed_size"] = mes["order_executed_size"]
remain = current["size"] - current["executed_size"]
if remain == 0:
self._delete([cast(Item, current)])
else:
self._update([cast(Item, current)])
class ExecutionStore(DataStore):
_KEYS = ["execution_id"]
def sorted(self, query: Optional[Item] = None) -> list[Execution]:
if query is None:
query = {}
result = []
for item in self:
if all(k in item and query[k] == item[k] for k in query):
result.append(item)
result.sort(key=lambda x: x["execution_id"], reverse=True)
return result
def _onresponse(self, data: list[Execution]) -> None:
self._insert(cast("list[Item]", data))
def _onmessage(self, mes: Execution) -> None:
self._insert([cast(Item, mes)])
class PositionStore(DataStore):
_KEYS = ["position_id"]
def _onresponse(self, data: list[Position]) -> None:
self._update(cast("list[Item]", data))
def _onmessage(self, mes: Position, type: MessageType) -> None:
if type == MessageType.OPR:
self._insert([cast(Item, mes)])
elif type == MessageType.CPR:
self._delete([cast(Item, mes)])
else:
self._update([cast(Item, mes)])
class PositionSummaryStore(DataStore):
_KEYS = ["symbol", "side"]
def _onresponse(self, data: list[PositionSummary]) -> None:
self._update(cast("list[Item]", data))
def _onmessage(self, mes: PositionSummary) -> None:
self._update([cast(Item, mes)])
class MessageHelper:
@staticmethod
def to_tickers(data: list[Item]) -> list["Ticker"]:
return [MessageHelper.to_ticker(x) for x in data]
@staticmethod
def to_ticker(data: Item) -> "Ticker":
return Ticker(
ask=Decimal(data["ask"]),
bid=Decimal(data["bid"]),
high=Decimal(data["high"]),
last=Decimal(data["last"]),
low=Decimal(data["low"]),
symbol=Symbol[data["symbol"]],
timestamp=parse_datetime(data.get("timestamp")),
volume=Decimal(data["volume"]),
)
@staticmethod
def to_orderbook(data: Item) -> "OrderBook":
return OrderBook(
asks=[
OrderLevel(
symbol=Symbol[data["symbol"]],
side=OrderSide.SELL,
price=Decimal(ol["price"]),
size=Decimal(ol["size"]),
)
for ol in data["asks"]
],
bids=[
OrderLevel(
symbol=Symbol[data["symbol"]],
side=OrderSide.BUY,
price=Decimal(ol["price"]),
size=Decimal(ol["size"]),
)
for ol in data["bids"]
],
symbol=Symbol[data["symbol"]],
timestamp=parse_datetime(data.get("timestamp")),
)
@staticmethod
def to_trades(data: list[Item]) -> list["Trade"]:
return [MessageHelper.to_trade(x) for x in data]
@staticmethod
def to_trade(data: Item) -> "Trade":
return Trade(
price=Decimal(data["price"]),
side=OrderSide[data["side"]],
size=Decimal(data["size"]),
timestamp=parse_datetime(data.get("timestamp")),
symbol=Symbol[data["symbol"]],
)
@staticmethod
def to_executions(data: list[Item]) -> list["Execution"]:
return [MessageHelper.to_execution(x) for x in data]
@staticmethod
def to_execution(data: Item) -> "Execution":
return Execution(
order_id=data["orderId"],
execution_id=data["executionId"],
symbol=Symbol[data["symbol"]],
settle_type=SettleType[data["settleType"]],
side=OrderSide[data["side"]],
price=Decimal(data.get("executionPrice", data.get("price"))),
size=Decimal(data.get("executionSize", data.get("size"))),
timestamp=parse_datetime(
data.get("executionTimestamp", data.get("timestamp"))
),
loss_gain=Decimal(data["lossGain"]),
fee=Decimal(data["fee"]),
# properties that only appears websocket message
position_id=data["positionId"] if "positionId" in data else None,
execution_type=ExecutionType[data["executionType"]]
if "executionType" in data
else None,
order_price=Decimal(data["orderPrice"]) if "orderPrice" in data else None,
order_size=Decimal(data["orderSize"]) if ("orderSize" in data) else None,
order_executed_size=Decimal(data["orderExecutedSize"])
if "orderExecutedSize" in data
else None,
order_timestamp=parse_datetime(data["orderTimestamp"])
if "orderTimestamp" in data
else None,
time_in_force=data.get("timeInForce", None),
)
@staticmethod
def to_orders(data: list[Item]) -> list["Order"]:
return [MessageHelper.to_order(x) for x in data]
@staticmethod
def to_order(data: Item) -> "Order":
status = OrderStatus[data.get("status", data.get("orderStatus"))]
timestamp = parse_datetime(data.get("orderTimestamp", data.get("timestamp")))
return Order(
order_id=data["orderId"],
symbol=Symbol[data["symbol"]],
settle_type=SettleType[data["settleType"]],
execution_type=ExecutionType[data["executionType"]],
side=OrderSide[data["side"]],
order_status=status,
cancel_type=CancelType[data.get("cancelType", CancelType.NONE.name)],
order_timestamp=timestamp,
price=Decimal(data.get("price", data.get("orderPrice"))),
size=Decimal(data.get("size", data.get("orderSize"))),
executed_size=Decimal(
data.get("executedSize", data.get("orderExecutedSize"))
),
losscut_price=Decimal(data["losscutPrice"]),
time_in_force=data["timeInForce"],
)
@staticmethod
def to_positions(data: list[Item]) -> list["Position"]:
return [MessageHelper.to_position(x) for x in data]
@staticmethod
def to_position(data: Item) -> "Position":
return Position(
position_id=data["positionId"],
symbol=Symbol[data["symbol"]],
side=OrderSide[data["side"]],
size=Decimal(data["size"]),
orderd_size=Decimal(data["orderdSize"]),
price=Decimal(data["price"]),
loss_gain=Decimal(data["lossGain"]),
leverage=Decimal(data["leverage"]),
losscut_price=Decimal(data["losscutPrice"]),
timestamp=parse_datetime(data.get("timestamp")),
)
@staticmethod
def to_position_summaries(data: list[Item]) -> list["PositionSummary"]:
return [MessageHelper.to_position_summary(x) for x in data]
@staticmethod
def to_position_summary(data: Item) -> "PositionSummary":
return PositionSummary(
symbol=Symbol[data["symbol"]],
side=OrderSide[data["side"]],
average_position_rate=Decimal(data["averagePositionRate"]),
position_loss_gain=Decimal(data["positionLossGain"]),
sum_order_quantity=Decimal(data["sumOrderQuantity"]),
sum_position_quantity=Decimal(data["sumPositionQuantity"]),
timestamp=parse_datetime(data.get("timestamp"))
if data.get("timestamp")
else datetime.now(timezone.utc),
)
class GMOCoinDataStore(DataStoreManager):
"""
    DataStore manager for GMO Coin.
"""
def _init(self) -> None:
self.create("ticker", datastore_class=TickerStore)
self.create("orderbooks", datastore_class=OrderBookStore)
self.create("trades", datastore_class=TradeStore)
self.create("orders", datastore_class=OrderStore)
self.create("positions", datastore_class=PositionStore)
self.create("executions", datastore_class=ExecutionStore)
self.create("position_summary", datastore_class=PositionSummaryStore)
self.token: Optional[str] = None
async def initialize(self, *aws: Awaitable[aiohttp.ClientResponse]) -> None:
"""
        Supported endpoints:
- GET /private/v1/latestExecutions (DataStore: executions)
- GET /private/v1/activeOrders (DataStore: orders)
- GET /private/v1/openPositions (DataStore: positions)
- GET /private/v1/positionSummary (DataStore: position_summary)
- POST /private/v1/ws-auth (Property: token)
"""
for f in asyncio.as_completed(aws):
resp = await f
data = await resp.json()
if (
resp.url.path == "/private/v1/latestExecutions"
and "list" in data["data"]
):
self.executions._onresponse(
MessageHelper.to_executions(data["data"]["list"])
)
if resp.url.path == "/private/v1/activeOrders" and "list" in data["data"]:
self.orders._onresponse(MessageHelper.to_orders(data["data"]["list"]))
if resp.url.path == "/private/v1/openPositions" and "list" in data["data"]:
self.positions._onresponse(
MessageHelper.to_positions(data["data"]["list"])
)
if (
resp.url.path == "/private/v1/positionSummary"
and "list" in data["data"]
):
self.position_summary._onresponse(
MessageHelper.to_position_summaries(data["data"]["list"])
)
if resp.url.path == "/private/v1/ws-auth":
self.token = data["data"]
asyncio.create_task(self._token(resp.__dict__['_raw_session']))
def _onmessage(self, msg: Item, ws: ClientWebSocketResponse) -> None:
if "channel" in msg:
msg_type = MessageType[msg.get("msgType", MessageType.NONE.name)]
channel: Channel = Channel.from_str(msg["channel"])
# Public
if channel == Channel.TICKER:
self.ticker._onmessage(MessageHelper.to_ticker(msg))
elif channel == Channel.ORDER_BOOKS:
self.orderbooks._onmessage(MessageHelper.to_orderbook(msg))
elif channel == Channel.TRADES:
self.trades._onmessage(MessageHelper.to_trade(msg))
# Private
elif channel == Channel.EXECUTION_EVENTS:
self.orders._onexecution(MessageHelper.to_execution(msg))
self.executions._onmessage(MessageHelper.to_execution(msg))
elif channel == Channel.ORDER_EVENTS:
self.orders._onmessage(MessageHelper.to_order(msg))
elif channel == Channel.POSITION_EVENTS:
self.positions._onmessage(MessageHelper.to_position(msg), msg_type)
elif channel == Channel.POSITION_SUMMARY_EVENTS:
self.position_summary._onmessage(MessageHelper.to_position_summary(msg))
async def _token(self, session: aiohttp.ClientSession):
while not session.closed:
await session.put(
'https://api.coin.z.com/private/v1/ws-auth',
data={"token": self.token},
auth=Auth,
)
await asyncio.sleep(1800.0) # 30 minutes
@property
def ticker(self) -> TickerStore:
return self.get("ticker", TickerStore)
@property
def orderbooks(self) -> OrderBookStore:
return self.get("orderbooks", OrderBookStore)
@property
def trades(self) -> TradeStore:
return self.get("trades", TradeStore)
@property
def orders(self) -> OrderStore:
"""
        Active orders only (filled or cancelled orders are removed from the store).
"""
return self.get("orders", OrderStore)
@property
def positions(self) -> PositionStore:
return self.get("positions", PositionStore)
@property
def executions(self) -> ExecutionStore:
return self.get("executions", ExecutionStore)
@property
def position_summary(self) -> PositionSummaryStore:
return self.get("position_summary", PositionSummaryStore)
| 29.565345
| 88
| 0.590047
|
1c08ed6ffa4f8b177c56a947da9b49980ab0a2c2
| 107
|
py
|
Python
|
mmocr/models/kie/heads/__init__.py
|
yuexy/mmocr
|
82488024db159266e66ea6b0d6f84a5a18e87362
|
[
"Apache-2.0"
] | 2,261
|
2021-04-08T03:45:41.000Z
|
2022-03-31T23:37:46.000Z
|
mmocr/models/kie/heads/__init__.py
|
yuexy/mmocr
|
82488024db159266e66ea6b0d6f84a5a18e87362
|
[
"Apache-2.0"
] | 789
|
2021-04-08T05:40:13.000Z
|
2022-03-31T09:42:39.000Z
|
mmocr/models/kie/heads/__init__.py
|
yuexy/mmocr
|
82488024db159266e66ea6b0d6f84a5a18e87362
|
[
"Apache-2.0"
] | 432
|
2021-04-08T03:56:16.000Z
|
2022-03-30T18:44:43.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
from .sdmgr_head import SDMGRHead
__all__ = ['SDMGRHead']
| 21.4
| 47
| 0.757009
|
9f4b1b27cbe0812616d33e5409a8fbd7119a5b66
| 7,105
|
py
|
Python
|
Method/reproduce_results/celoe_clp/reproduce_learning_concepts_with_length_predictor_semantic_bible_kb.py
|
dice-group/LearnALCLengths
|
cb019ba234092a323f3785517d1cc6152a5ef7a4
|
[
"MIT"
] | null | null | null |
Method/reproduce_results/celoe_clp/reproduce_learning_concepts_with_length_predictor_semantic_bible_kb.py
|
dice-group/LearnALCLengths
|
cb019ba234092a323f3785517d1cc6152a5ef7a4
|
[
"MIT"
] | null | null | null |
Method/reproduce_results/celoe_clp/reproduce_learning_concepts_with_length_predictor_semantic_bible_kb.py
|
dice-group/LearnALCLengths
|
cb019ba234092a323f3785517d1cc6152a5ef7a4
|
[
"MIT"
] | null | null | null |
import sys, os, json
import numpy as np, random
base_path = os.path.dirname(os.path.realpath(__file__)).split('reproduce_results')[0]
sys.path.append(base_path)
from celoe_clp.fast_concept_learner import CELOECLP
from ontolearn import KnowledgeBase
from ontolearn.refinement_operators import ExpressRefinement
from ontolearn.metrics import F1, Accuracy
from ontolearn.heuristics import CELOEHeuristic
from ontolearn.learning_problem import PosNegLPStandard
from owlapy.model import OWLNamedIndividual
from owlapy.model._iri import IRI
from util.data import Data
data_path = base_path+"Datasets/semantic_bible/Train_data/Data.json"
with open(data_path, "r") as file:
data = json.load(file)
data = list(data.items())
with open(base_path+"Datasets/semantic_bible/Learning_problems/learning_problems.json", "r") as file_lp:
learning_problems = json.load(file_lp)
path_to_triples = base_path+"Datasets/semantic_bible/Triples/"
triples = Data({"path_to_triples":path_to_triples})
as_classification = True
kb_path = base_path+"Datasets/semantic_bible/semantic_bible.owl"
kb = KnowledgeBase(path=kb_path)
rho = ExpressRefinement(kb)
prefix = list(kb.individuals())[0].get_iri().as_str()
prefix = prefix[:prefix.rfind("/")+1]
num_classes = max(v["target concept length"] for _,v in data)+1 if as_classification else 1
import argparse
if __name__=='__main__':
def str2bool(v):
if isinstance(v, bool):
return v
elif v.lower() in ['t', 'true', 'y', 'yes', '1']:
return True
elif v.lower() in ['f', 'false', 'n', 'no', '0']:
return False
else:
            raise ValueError('Invalid boolean value.')
parser = argparse.ArgumentParser()
parser.add_argument('--max_exec_time', type=int, default=120, help="The maximum execution time of CELOE-CLP")
parser.add_argument('--max_num_lp', type=int, default=100, help="The maximum number of learning problems to solve")
parser.add_argument('--iter_bound', type=int, default=100, help="The maximum number of search steps")
parser.add_argument('--max_num_of_concepts_tested', type=int, default=30000, help="The maximum number of concepts to test during search process")
parser.add_argument('--terminate_on_goal', type=str2bool, const=True, default=True, nargs='?', help="Whether to terminate on goal")
parser.add_argument('--best_only', type=str2bool, const=True, default=True, nargs='?', help="Whether to pick the best node at each step of the search")
parser.add_argument('--calculate_min_max', type=str2bool, const=True, default=True, nargs='?', help="Whether to calculate min max horizontal expansion")
parser.add_argument('--max_results', type=int, default=10, help="The maximum number of nodes in the queue at each step of the search process")
args = parser.parse_args()
kwargs = {'knowledge_base': kb,
'learning_problem': None,
'refinement_operator': rho,
'quality_func': PosNegLPStandard,
'heuristic_func': CELOEHeuristic(),
'terminate_on_goal': args.terminate_on_goal,
'iter_bound': args.iter_bound,
'max_num_of_concepts_tested': args.max_num_of_concepts_tested,
'max_runtime': args.max_exec_time,
'max_results': args.max_results,
'best_only': args.best_only,
'calculate_min_max': args.calculate_min_max,
"learner_name":"GRU", "emb_model_name":"", "pretrained_embedding_path":base_path+"Datasets/semantic_bible/Model_weights/ConEx_GRU.pt",
"pretrained_length_learner":base_path+"Datasets/semantic_bible/Model_weights/GRU.pt",
"path_to_csv_embeddings":base_path+"Embeddings/semantic_bible/ConEx_entity_embeddings.csv",
"learning_rate":0.003, "decay_rate":0, "path_to_triples":path_to_triples,
"random_seed":1, "embedding_dim":20, "num_entities":len(triples.entities),
"num_relations":len(triples.relations), "num_ex":362, "input_dropout":0.0,
"kernel_size":4, "num_of_output_channels":8, "feature_map_dropout":0.1,
"hidden_dropout":0.1, "rnn_n_layers":2,'rnn_hidden':100, 'input_size':41,
'linear_hidden':200, 'out_size':num_classes, 'dropout_prob': 0.1, 'num_units':500,
'seed':10, 'seq_len':362,'kernel_w':5, 'kernel_h':11, 'stride_w':1, 'stride_h':7,
'conv_out':680, 'mlp_n_layers':4, "as_classification":as_classification
}
algo = CELOECLP(kwargs)
results = {}
count = 0
learning_problems = list(learning_problems.items())
random.seed(1)
random.shuffle(learning_problems)
print("#"*50)
print("On {} KG".format(data_path.split("/")[-3]))
print("#"*50)
n_probs = args.max_num_lp
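    # For each learning problem: build the positive/negative example sets, predict the target
    # concept length with the pretrained length learner, cap the refinement operator's
    # max_child_length accordingly, then run CELOE and record its quality and accuracy.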
for target_str, value in learning_problems:
count += 1
pos = value['positive examples']
neg = value['negative examples']
pos = set(map(OWLNamedIndividual, map(IRI.create, map(lambda x: prefix+x, pos))))
neg = set(map(OWLNamedIndividual, map(IRI.create, map(lambda x: prefix+x, neg))))
lps = PosNegLPStandard(kb, pos, neg)
Acc = Accuracy(lps) # Added to compute accuracy of the solution found
algo.lp = lps
algo.quality_func = F1(lps)
algo.clp.load_pretrained()
predicted_length = algo.clp.predict(pos, neg)
algo.operator.max_child_length = predicted_length
algo.clean()
algo.fit()
celoe_clp_results = algo.result_dict(target_str)
solution = celoe_clp_results.pop('Prediction-Obj')
for key in celoe_clp_results:
results.setdefault(key, []).append(celoe_clp_results[key])
_, acc = Acc.score(kb.individuals_set(solution))
results.setdefault('Accuracy', []).append(acc)
results.setdefault('Pred-Length', []).append(predicted_length)
if count == n_probs:
break
avg_results = {}
for key in results:
        if key not in ["Learned Concept", "Prediction"]:
avg_results.setdefault(key, {}).update({"mean": np.mean(results[key]), "std": np.std(results[key])})
with open(base_path+"Datasets/semantic_bible/Results/concept_learning_results_celoe_clp.json", "w") as results_file:
json.dump(results, results_file, ensure_ascii=False, indent=3)
with open(base_path+"Datasets/semantic_bible/Results/concept_learning_avg_results_celoe_clp.json", "w") as avg_results_file:
json.dump(avg_results, avg_results_file, indent=3)
print()
print("Avg results: ", avg_results)
print()
| 47.366667
| 156
| 0.638424
|
fcba89e1631a6473b4440b285033bca0270cac21
| 73
|
py
|
Python
|
src/hdp/__init__.py
|
datadiarist/hplda
|
a81cf84ea76487e716641bb6dfbf36f18ceac91e
|
[
"MIT"
] | null | null | null |
src/hdp/__init__.py
|
datadiarist/hplda
|
a81cf84ea76487e716641bb6dfbf36f18ceac91e
|
[
"MIT"
] | 1
|
2021-09-24T04:30:51.000Z
|
2021-09-24T04:30:51.000Z
|
src/hdp/__init__.py
|
datadiarist/hplda
|
a81cf84ea76487e716641bb6dfbf36f18ceac91e
|
[
"MIT"
] | 1
|
2020-05-01T03:59:42.000Z
|
2020-05-01T03:59:42.000Z
|
from hdp import HDP
from hdp import text_prep
from hdp import perplexity
| 18.25
| 26
| 0.835616
|
636ac648eeb7b038679df3ae03a1b81787a665e0
| 11,929
|
py
|
Python
|
apps/kemures/metrics/MRR/mrr_overview.py
|
DiegoCorrea/ouvidoMusical
|
e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5
|
[
"MIT"
] | 1
|
2021-10-06T19:35:48.000Z
|
2021-10-06T19:35:48.000Z
|
apps/kemures/metrics/MRR/mrr_overview.py
|
DiegoCorrea/ouvido_musical-Back
|
e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5
|
[
"MIT"
] | null | null | null |
apps/kemures/metrics/MRR/mrr_overview.py
|
DiegoCorrea/ouvido_musical-Back
|
e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import os
import matplotlib.pyplot as plt
import pandas as pd
from apps.kemures.kernel.config.global_var import MRR_PATH_GRAPHICS, GRAPH_MARKERS, GRAPH_STYLE, GRAPH_COLORS
from apps.kemures.kernel.round.models import Round
from apps.kemures.metrics.MRR.DAO.models import MRR
from apps.kemures.metrics.MRR.runtime.models import MRRRunTime
class MRROverview:
def __init__(self, directory_to_save_graphics=MRR_PATH_GRAPHICS):
self.__logger = logging.getLogger(__name__)
self.__directory_to_save_graphics = str(directory_to_save_graphics)
if not os.path.exists(self.__directory_to_save_graphics):
os.makedirs(self.__directory_to_save_graphics)
rounds_df = pd.DataFrame.from_records(list(Round.objects.all().values()))
rounds_df = rounds_df.drop(columns=['finished_at', 'started_at'])
metric_df = pd.DataFrame.from_records(list(MRR.objects.all().values()))
metric_run_time_df = pd.DataFrame.from_records(list(MRRRunTime.objects.all().values()))
self.__metadata_to_process = rounds_df['metadata_used'].unique().tolist()
        # list.sort() sorts in place and returns None, so use sorted() to keep the values
        self.__song_set_size_list = sorted(rounds_df['song_set_size'].unique().tolist())
        self.__user_set_size_list = sorted(rounds_df['user_set_size'].unique().tolist())
self.__at_size_list = metric_df['at'].unique().tolist()
self.__graph_style = GRAPH_STYLE[:len(self.__metadata_to_process)]
self.__graph_makers = GRAPH_MARKERS[:len(self.__metadata_to_process)]
self.__graph_colors = GRAPH_COLORS[:len(self.__metadata_to_process)]
self.__metric_results_collection_df = metric_df.copy()
self.__metric_results_collection_df = self.__metric_results_collection_df.join(
metric_run_time_df.set_index('id_id'), on='id')
self.__metric_results_collection_df = self.__metric_results_collection_df.join(rounds_df.set_index('id'),
on='round_id')
def make_time_graphics(self):
self.__all_time_graph_line()
self.__all_time_graph_box_plot()
def __all_time_graph_line(self):
self.__logger.info("[Start MRR Overview - Run Time - (Graph Line)]")
for at in self.__at_size_list:
plt.figure(figsize=(8, 6))
plt.grid(True)
plt.xlabel('Round', fontsize=18)
plt.ylabel('Time (seconds)', fontsize=18)
for size in self.__song_set_size_list:
runs_size_at_df = self.__metric_results_collection_df[
(self.__metric_results_collection_df['song_set_size'] == size) & (
self.__metric_results_collection_df['at'] == at)]
values = [(finished - start).total_seconds() for (finished, start) in
zip(runs_size_at_df['finished_at'], runs_size_at_df['started_at'])]
plt.plot(
[int(i + 1) for i in range(len(values))],
[value for value in values],
label=size
)
plt.legend(loc='best', prop={'size': 20})
plt.savefig(
self.__directory_to_save_graphics
+ 'mrr_all_time_graph_line_'
+ str(at)
+ '.eps',
format='eps',
dpi=300
)
plt.close()
self.__logger.info("[Finish MRR Overview - Run Time - (Graph Line)]")
def __all_time_graph_box_plot(self):
self.__logger.info("[Start MRR Overview - Run Time - (Graph Box Plot)]")
for at in self.__at_size_list:
plt.figure(figsize=(8, 8))
plt.grid(True)
plt.xlabel('Length of song set', fontsize=18)
plt.ylabel('Time (seconds)', fontsize=18)
box_plot_matrix = []
for size in self.__song_set_size_list:
runs_size_at_df = self.__metric_results_collection_df[
(self.__metric_results_collection_df['song_set_size'] == size) & (
self.__metric_results_collection_df['at'] == at)]
box_plot_matrix.append([(finished - start).total_seconds() for (finished, start) in
zip(runs_size_at_df['finished_at'], runs_size_at_df['started_at'])])
plt.boxplot(
box_plot_matrix,
labels=self.__song_set_size_list
)
plt.xticks(rotation=30)
plt.savefig(
self.__directory_to_save_graphics
+ 'mrr_all_time_graph_box_plot_'
+ str(at)
+ '.eps',
format='eps',
dpi=300
)
plt.close()
self.__logger.info("[Finish MRR Overview - Run Time - (Graph Box Plot)]")
def make_results_graphics(self):
self.__all_results_graph_line()
self.__all_results_graph_box_plot()
def __all_results_graph_line(self):
self.__logger.info("[Start MRR Overview - Results - (Graph Line)]")
for at in self.__at_size_list:
plt.figure(figsize=(8, 6))
plt.grid(True)
plt.xlabel('Round', fontsize=18)
plt.ylabel('Value', fontsize=18)
for size in self.__song_set_size_list:
runs_size_at_df = self.__metric_results_collection_df[
(self.__metric_results_collection_df['song_set_size'] == size) & (
self.__metric_results_collection_df['at'] == at)]
values = [value for value in runs_size_at_df['value'].tolist()]
plt.plot(
[int(i + 1) for i in range(len(values))],
[value for value in values],
label=size
)
plt.legend(loc='best', prop={'size': 20})
plt.savefig(
self.__directory_to_save_graphics
+ 'mrr_all_results_graph_line_'
+ str(at)
+ '.eps',
format='eps',
dpi=300
)
plt.close()
self.__logger.info("[Finish MRR Overview - Results - (Graph Line)]")
def __all_results_graph_box_plot(self):
self.__logger.info("[Start MRR Overview - Results - (Graph Box Plot)]")
for at in self.__at_size_list:
plt.figure(figsize=(8, 8))
plt.grid(True)
plt.xlabel('Length of song set', fontsize=18)
plt.ylabel('Value', fontsize=18)
box_plot_matrix = []
for size in self.__song_set_size_list:
runs_size_at_df = self.__metric_results_collection_df[
(self.__metric_results_collection_df['song_set_size'] == size) & (
self.__metric_results_collection_df['at'] == at)]
box_plot_matrix.append([value for value in runs_size_at_df['value'].tolist()])
plt.boxplot(
box_plot_matrix,
labels=self.__song_set_size_list
)
plt.xticks(rotation=30)
plt.savefig(
self.__directory_to_save_graphics
+ 'mrr_all_results_graph_box_plot_'
+ str(at)
+ '.eps',
format='eps',
dpi=300
)
plt.close()
self.__logger.info("[Finish MRR Overview - Results - (Graph Box Plot)]")
def make_graphics_by_metadata(self):
self.__by_metadata_results_graph_line()
self.__by_metadata_results_graph_box_plot()
self.__save_csv()
def __by_metadata_results_graph_line(self):
self.__logger.info("[Start MRR Overview - Results - (Graph Line)]")
for song_size in self.__metric_results_collection_df['song_set_size'].unique().tolist():
for user_size in self.__metric_results_collection_df['user_set_size'].unique().tolist():
plt.figure(figsize=(8, 6))
plt.grid(True)
plt.xlabel('Length of recommendation list', fontsize=18)
plt.ylabel('Value', fontsize=18)
for metadata, style, colors, makers in zip(self.__metadata_to_process, self.__graph_style,
self.__graph_colors, self.__graph_makers):
at_df = self.__metric_results_collection_df[
(self.__metric_results_collection_df['metadata_used'] == metadata) &
(self.__metric_results_collection_df['song_set_size'] == song_size) &
(self.__metric_results_collection_df['user_set_size'] == user_size)]
                    at_df = at_df.sort_values("at")
plt.plot(
at_df['at'],
at_df['value'],
linestyle=style,
color=colors,
marker=makers,
label=metadata
)
# plt.legend(loc='best')
lgd = plt.legend(loc=9, prop={'size': 20}, bbox_to_anchor=(0.5, -0.1), ncol=3)
plt.xticks(self.__at_size_list)
plt.savefig(
self.__directory_to_save_graphics
+ 'mrr_by_metadata_results_graph_line_'
+ 'song_' + str(song_size)
+ '_user_' + str(user_size)
+ '.png',
format='png',
dpi=300,
quality=100,
bbox_extra_artists=(lgd,),
bbox_inches='tight'
)
plt.close()
self.__logger.info("[Finish MRR Overview - Results - (Graph Line)]")
def __by_metadata_results_graph_box_plot(self):
self.__logger.info("[Start MRR Overview - Results - (Graph Box Plot)]")
for song_size in self.__metric_results_collection_df['song_set_size'].unique().tolist():
for user_size in self.__metric_results_collection_df['user_set_size'].unique().tolist():
box_plot_matrix = []
for metadata in self.__metadata_to_process:
at_df = self.__metric_results_collection_df[
(self.__metric_results_collection_df['metadata_used'] == metadata) &
(self.__metric_results_collection_df['song_set_size'] == song_size) &
(self.__metric_results_collection_df['user_set_size'] == user_size)]
box_plot_matrix.append([value for value in at_df['value'].tolist()])
if len(box_plot_matrix[0]) == 0:
continue
plt.figure(figsize=(8, 8))
plt.grid(True)
plt.xlabel('Metadata', fontsize=18)
plt.ylabel('Value', fontsize=18)
bp = plt.boxplot(
box_plot_matrix,
labels=self.__metadata_to_process,
showfliers=True
)
for flier in bp['fliers']:
flier.set(marker='o', color='#e7298a', alpha=0.5)
plt.xticks(rotation=30)
plt.savefig(
self.__directory_to_save_graphics
+ 'mrr_by_metadata_results_graph_box_plot_'
+ 'song_' + str(song_size)
+ '_user_' + str(user_size)
+ '.png',
format='png',
dpi=300,
quality=100
)
plt.close()
self.__logger.info("[Finish MRR Overview - Results - (Graph Box Plot)]")
def __save_csv(self):
self.__metric_results_collection_df.to_csv(self.__directory_to_save_graphics + 'MRR.csv')
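# --- Usage sketch (illustrative only) ---
# Assumes Django is already configured so the Round/MRR/MRRRunTime querysets can be evaluated.
def build_mrr_reports():
    overview = MRROverview()
    overview.make_time_graphics()
    overview.make_results_graphics()
    overview.make_graphics_by_metadata()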
| 47.907631
| 113
| 0.5578
|
ff69bdb3684a92134682620d0a77b6a4d145ef17
| 4,656
|
py
|
Python
|
src/pki_util.py
|
AshirwadPradhan/intragit
|
e2baf2ed90665750b069b01d554d7a2e0e1de413
|
[
"MIT"
] | null | null | null |
src/pki_util.py
|
AshirwadPradhan/intragit
|
e2baf2ed90665750b069b01d554d7a2e0e1de413
|
[
"MIT"
] | null | null | null |
src/pki_util.py
|
AshirwadPradhan/intragit
|
e2baf2ed90665750b069b01d554d7a2e0e1de413
|
[
"MIT"
] | 1
|
2020-03-29T11:59:46.000Z
|
2020-03-29T11:59:46.000Z
|
# Utility Module
# Provides following helper methods for PKI
# 1. Generate private key
# 2. Generate X509 certificate for the self-signing CA (here the organization is the CA)
# 3. Generate X509 for the server hosted by the organization
# 4. Sign the CSR of the server
# Inspired by the Real Python blog.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from datetime import datetime, timedelta
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
def generate_private_key(filename: str, passphrase: str):
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    utf8_pass = passphrase.encode('utf-8')
algo = serialization.BestAvailableEncryption(utf8_pass)
with open(filename, 'wb') as keyfile:
keyfile.write(private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=algo))
return private_key
def generate_public_key(private_key, filename:str, **kwargs):
# Self-Signing CA
subject = x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, kwargs['country']),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, kwargs['state']),
x509.NameAttribute(NameOID.LOCALITY_NAME, kwargs['locality']),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, kwargs['org']),
x509.NameAttribute(NameOID.COMMON_NAME, kwargs['hostname'])])
    # Issuer and subject are the same because this is a self-signed CA
issuer = subject
valid_from = datetime.utcnow() - timedelta(days=2)
valid_to = valid_from + timedelta(days=365)
builder = (x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(issuer)
.public_key(private_key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(valid_from)
.not_valid_after(valid_to))
builder = builder.add_extension(x509.BasicConstraints(ca=True, path_length=1), critical=True)
public_key = builder.sign(private_key, hashes.SHA256(), default_backend())
with open(filename, 'wb') as certfile:
certfile.write(public_key.public_bytes(serialization.Encoding.PEM))
return public_key
def generate_csr(private_key, filename, **kwargs):
# Generate the CSR for the server
subject = x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, kwargs['country']),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, kwargs['state']),
x509.NameAttribute(NameOID.LOCALITY_NAME, kwargs['locality']),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, kwargs['org']),
x509.NameAttribute(NameOID.COMMON_NAME, kwargs['hostname'])])
alt_names = []
for name in kwargs.get('alt_names', []):
alt_names.append(x509.DNSName(name))
san = x509.SubjectAlternativeName(alt_names)
builder = (x509.CertificateSigningRequestBuilder()
.subject_name(subject)
.add_extension(san, critical=False))
csr = builder.sign(private_key, hashes.SHA256(), default_backend())
with open(filename, 'wb') as csrfile:
csrfile.write(csr.public_bytes(serialization.Encoding.PEM))
return csr
def sign_csr(csr, ca_public_key, ca_private_key, filename):
    # Sign the server's CSR with the CA's private key
valid_from = datetime.utcnow() - timedelta(days=1)
valid_to = valid_from + timedelta(days=60)
builder = (x509.CertificateBuilder()
.subject_name(csr.subject)
.issuer_name(ca_public_key.subject)
.public_key(csr.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(valid_from)
.not_valid_after(valid_to))
for extension in csr.extensions:
builder = builder.add_extension(extension.value, extension.critical)
public_key = builder.sign(private_key=ca_private_key,
algorithm=hashes.SHA256(),
backend=default_backend())
with open(filename, 'wb') as keyfile:
keyfile.write(public_key.public_bytes(serialization.Encoding.PEM))
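# --- Usage sketch (illustrative only): chaining the helpers above into a full CA + server-cert flow. ---
# File names, passphrases and subject fields are placeholders.
if __name__ == '__main__':
    ca_key = generate_private_key('ca-private-key.pem', 'ca-passphrase')
    ca_cert = generate_public_key(ca_key, 'ca-public-key.pem',
                                  country='US', state='CA', locality='San Francisco',
                                  org='Example Org', hostname='ca.example.org')
    server_key = generate_private_key('server-private-key.pem', 'server-passphrase')
    server_csr = generate_csr(server_key, 'server-csr.pem',
                              country='US', state='CA', locality='San Francisco',
                              org='Example Org', hostname='server.example.org',
                              alt_names=['localhost'])
    sign_csr(server_csr, ca_cert, ca_key, 'server-public-key.pem')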
| 43.514019
| 107
| 0.661297
|
e6e2cc86a51f713e49420b7c63a95ef0cd29e205
| 8,711
|
py
|
Python
|
utils/extract_features_singlegpu.py
|
1219521375/bottom-up-attention.pytorch
|
4a2e64383f024cc56728dd2a0ee63c8a171663c8
|
[
"Apache-2.0"
] | 3
|
2022-02-18T13:38:47.000Z
|
2022-03-30T11:30:35.000Z
|
utils/extract_features_singlegpu.py
|
1219521375/bottom-up-attention.pytorch
|
4a2e64383f024cc56728dd2a0ee63c8a171663c8
|
[
"Apache-2.0"
] | null | null | null |
utils/extract_features_singlegpu.py
|
1219521375/bottom-up-attention.pytorch
|
4a2e64383f024cc56728dd2a0ee63c8a171663c8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# pylint: disable=no-member
"""
TridentNet Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import argparse
import os
import sys
import torch
# import tqdm
import cv2
import numpy as np
sys.path.append('detectron2')
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from detectron2.structures import Instances
from utils.utils import mkdir, save_features
from utils.extract_utils import get_image_blob, save_bbox, save_roi_features_by_bbox, save_roi_features
from utils.progress_bar import ProgressBar
from bua import add_config
from bua.caffe.modeling.box_regression import BUABoxes
from torch.nn import functional as F
from detectron2.modeling import postprocessing
def switch_extract_mode(mode):
if mode == 'roi_feats':
switch_cmd = ['MODEL.BUA.EXTRACTOR.MODE', 1]
elif mode == 'bboxes':
switch_cmd = ['MODEL.BUA.EXTRACTOR.MODE', 2]
elif mode == 'bbox_feats':
switch_cmd = ['MODEL.BUA.EXTRACTOR.MODE', 3, 'MODEL.PROPOSAL_GENERATOR.NAME', 'PrecomputedProposals']
else:
print('Wrong extract mode! ')
exit()
return switch_cmd
def set_min_max_boxes(min_max_boxes):
if min_max_boxes == 'min_max_default':
return []
try:
min_boxes = int(min_max_boxes.split(',')[0])
max_boxes = int(min_max_boxes.split(',')[1])
except:
print('Illegal min-max boxes setting, using config default. ')
return []
cmd = ['MODEL.BUA.EXTRACTOR.MIN_BOXES', min_boxes,
'MODEL.BUA.EXTRACTOR.MAX_BOXES', max_boxes]
return cmd
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_config(args, cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.merge_from_list(['MODEL.BUA.EXTRACT_FEATS',True])
cfg.merge_from_list(switch_extract_mode(args.extract_mode))
cfg.merge_from_list(set_min_max_boxes(args.min_max_boxes))
cfg.freeze()
default_setup(cfg, args)
return cfg
def generate_npz(extract_mode, *args):
if extract_mode == 1:
save_roi_features(*args)
elif extract_mode == 2:
save_bbox(*args)
elif extract_mode == 3:
save_roi_features_by_bbox(*args)
else:
print('Invalid Extract Mode! ')
def extract_feat_singlegpu(split_idx, img_list, cfg, args):
num_images = len(img_list)
print('Number of images on split{}: {}.'.format(split_idx, num_images))
model = DefaultTrainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
model.eval()
for im_file in (img_list):
if os.path.exists(os.path.join(args.output_dir, im_file.split('.')[0]+'.npz')):
continue
im = cv2.imread(os.path.join(args.image_dir, im_file))
if im is None:
print(os.path.join(args.image_dir, im_file), "is illegal!")
continue
dataset_dict = get_image_blob(im, cfg.MODEL.PIXEL_MEAN)
# extract roi features
if cfg.MODEL.BUA.EXTRACTOR.MODE == 1:
attr_scores = None
with torch.set_grad_enabled(False):
if cfg.MODEL.BUA.ATTRIBUTE_ON:
boxes, scores, features_pooled, attr_scores = model([dataset_dict]) # caffe mode
else:
boxes, scores, features_pooled = model([dataset_dict])
boxes = [box.tensor.cpu() for box in boxes]
scores = [score.cpu() for score in scores]
features_pooled = [feat.cpu() for feat in features_pooled]
            if attr_scores is not None:
attr_scores = [attr_score.cpu() for attr_score in attr_scores]
generate_npz(1,
args, cfg, im_file, im, dataset_dict,
boxes, scores, features_pooled, attr_scores)
# extract bbox only
elif cfg.MODEL.BUA.EXTRACTOR.MODE == 2:
with torch.set_grad_enabled(False):
boxes, scores = model([dataset_dict])
boxes = [box.cpu() for box in boxes]
scores = [score.cpu() for score in scores]
generate_npz(2,
args, cfg, im_file, im, dataset_dict,
boxes, scores)
# extract roi features by bbox
elif cfg.MODEL.BUA.EXTRACTOR.MODE == 3:
if not os.path.exists(os.path.join(args.bbox_dir, im_file.split('.')[0]+'.npz')):
continue
bbox = torch.from_numpy(np.load(os.path.join(args.bbox_dir, im_file.split('.')[0]+'.npz'))['bbox']) * dataset_dict['im_scale']
proposals = Instances(dataset_dict['image'].shape[-2:])
proposals.proposal_boxes = BUABoxes(bbox)
dataset_dict['proposals'] = proposals
attr_scores = None
with torch.set_grad_enabled(False):
if cfg.MODEL.BUA.ATTRIBUTE_ON:
boxes, scores, features_pooled, attr_scores = model([dataset_dict])
else:
boxes, scores, features_pooled = model([dataset_dict])
boxes = [box.tensor.cpu() for box in boxes]
scores = [score.cpu() for score in scores]
features_pooled = [feat.cpu() for feat in features_pooled]
            if attr_scores is not None:
attr_scores = [attr_score.data.cpu() for attr_score in attr_scores]
generate_npz(3,
args, cfg, im_file, im, dataset_dict,
boxes, scores, features_pooled, attr_scores)
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection2 Inference")
parser.add_argument(
"--config-file",
default="configs/caffe/test-caffe-r101.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument('--num-cpus', default=1, type=int,
help='number of cpus to use for ray, 0 means no limit')
parser.add_argument('--gpus', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument("--mode", default="caffe", type=str, help="'caffe' and 'd2' indicates \
'use caffe model' and 'use detectron2 model'respectively")
parser.add_argument('--extract-mode', default='roi_feats', type=str,
help="'roi_feats', 'bboxes' and 'bbox_feats' indicates \
'extract roi features directly', 'extract bboxes only' and \
'extract roi features with pre-computed bboxes' respectively")
parser.add_argument('--min-max-boxes', default='min_max_default', type=str,
help='the number of min-max boxes of extractor')
parser.add_argument('--out-dir', dest='output_dir',
help='output directory for features',
default="features")
parser.add_argument('--image-dir', dest='image_dir',
help='directory with images',
default="image")
parser.add_argument('--bbox-dir', dest='bbox_dir',
help='directory with bbox',
default="bbox")
parser.add_argument(
"--resume",
action="store_true",
help="whether to attempt to resume from the checkpoint directory",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg = setup(args)
extract_feat_singlegpu_start(args,cfg)
def extract_feat_singlegpu_start(args,cfg):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
num_gpus = len(args.gpu_id.split(','))
MIN_BOXES = cfg.MODEL.BUA.EXTRACTOR.MIN_BOXES
MAX_BOXES = cfg.MODEL.BUA.EXTRACTOR.MAX_BOXES
CONF_THRESH = cfg.MODEL.BUA.EXTRACTOR.CONF_THRESH
# Extract features.
imglist = os.listdir(args.image_dir)
num_images = len(imglist)
print('Number of images: {}.'.format(num_images))
img_lists = [imglist[i::num_gpus] for i in range(num_gpus)]
print('Number of GPUs: {}.'.format(num_gpus))
for i in range(num_gpus):
extract_feat_singlegpu(i, img_lists[i], cfg, args)
if __name__ == "__main__":
main()
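# --- Usage sketch (illustrative only): a typical single-GPU run with the flags defined above. ---
# Paths are placeholders; see the argparse defaults for the expected layout.
#
#   python utils/extract_features_singlegpu.py \
#       --config-file configs/caffe/test-caffe-r101.yaml \
#       --extract-mode roi_feats \
#       --image-dir image --out-dir features --gpus 0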
| 38.715556
| 138
| 0.635404
|
c31ac027e46aa014476765248ee3f430b4974162
| 3,449
|
py
|
Python
|
src/ml/trainer.py
|
AaronGrainer/gpt2-twitter-kubernetes
|
a33feb86231950dde5ee943bf5cfaf3915a23db2
|
[
"Apache-2.0"
] | 3
|
2020-05-06T19:52:49.000Z
|
2020-08-16T18:51:52.000Z
|
src/ml/trainer.py
|
AaronGrainer/gpt2-twitter-kubernetes
|
a33feb86231950dde5ee943bf5cfaf3915a23db2
|
[
"Apache-2.0"
] | 19
|
2020-05-05T17:41:37.000Z
|
2021-05-27T14:22:58.000Z
|
src/ml/trainer.py
|
AaronGrainer/gpt2-twitter-kubernetes
|
a33feb86231950dde5ee943bf5cfaf3915a23db2
|
[
"Apache-2.0"
] | null | null | null |
from transformers import AutoTokenizer, AutoModelForCausalLM, TextDataset, DataCollatorForLanguageModeling
from transformers import TrainingArguments, Trainer
import torch
from src.config import global_config as gc
import os
import fire
from typing import List
import wandb
class GPT2Trainer:
    def __init__(self, model_checkpoint: str, dataset: str):
"""Load class configs
Args:
model_checkpoint (str): Huggingface pretrained model name
            dataset (str): Path to the training text file
"""
self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
self.model_checkpoint = model_checkpoint
self.dataset = dataset
self.checkpoint_path = gc.gpt2_checkpoint_path
self.model_path = os.path.join(gc.gpt2_checkpoint_path, gc.gpt2_model_path)
# self.lr = 2e-5
self.batch_size = 16
self.epochs = 50
self.weight_decay = 0.01
self._load()
def _load(self):
"""Load Tokenizer, Dataset, Model and Jit input example
"""
self.tokenizer = AutoTokenizer.from_pretrained(self.model_checkpoint)
self.data_collator = DataCollatorForLanguageModeling(
tokenizer=self.tokenizer, mlm=False,
)
self.gpt2_dataset = dict(
train=TextDataset(tokenizer=self.tokenizer,
file_path=self.dataset,
block_size=128),
validation=TextDataset(tokenizer=self.tokenizer,
file_path=self.dataset,
block_size=128)
)
self.model = AutoModelForCausalLM.from_pretrained(
self.model_checkpoint
)
def train_and_evaluate(self, run_name: str = None):
"""Train and model and evaluate
Args:
run_name (str, optional): Name of the Wandb run. Defaults to None.
"""
wandb.init(project='gpt2-twitter',
name=run_name)
training_args = TrainingArguments(
'ml/checkpoint/gpt2-twitter-trainer',
overwrite_output_dir=True,
evaluation_strategy='epoch',
# learning_rate=self.lr,
per_device_train_batch_size=self.batch_size,
per_device_eval_batch_size=self.batch_size,
num_train_epochs=self.epochs,
weight_decay=self.weight_decay,
logging_steps=50,
eval_steps=400,
save_steps=800,
save_total_limit=1,
warmup_steps=500
)
trainer = Trainer(
model=self.model,
args=training_args,
data_collator=self.data_collator,
train_dataset=self.gpt2_dataset['train'],
eval_dataset=self.gpt2_dataset['validation']
)
trainer.train()
wandb.finish()
def save(self):
"""Save model
"""
print('Saving model')
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
self.model.save_pretrained(self.model_path)
def main(dataset='data/karpathy_tweets.txt'):
run_name = 'gpt2-twitter'
model_checkpoint = 'gpt2'
gpt2_trainer = GPT2Trainer(model_checkpoint=model_checkpoint, dataset=dataset)
gpt2_trainer.train_and_evaluate(run_name=run_name)
gpt2_trainer.save()
if __name__ == '__main__':
fire.Fire(main)
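# --- Usage sketch (illustrative only): training on a custom tweet dump via the Fire CLI. ---
# Requires a configured Weights & Biases account, since train_and_evaluate calls wandb.init.
#
#   python src/ml/trainer.py --dataset data/karpathy_tweets.txt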
| 29.732759
| 106
| 0.615251
|
2f63e58fa656b9c7c26b872312b0c11f73267951
| 1,273
|
py
|
Python
|
amaascore/assets/listed_cfd.py
|
amaas-fintech/amaas-core-sdk-python
|
bd77884de6e5ab05d864638addeb4bb338a51183
|
[
"Apache-2.0"
] | null | null | null |
amaascore/assets/listed_cfd.py
|
amaas-fintech/amaas-core-sdk-python
|
bd77884de6e5ab05d864638addeb4bb338a51183
|
[
"Apache-2.0"
] | 8
|
2017-06-06T09:42:41.000Z
|
2018-01-16T10:16:16.000Z
|
amaascore/assets/listed_cfd.py
|
amaas-fintech/amaas-core-sdk-python
|
bd77884de6e5ab05d864638addeb4bb338a51183
|
[
"Apache-2.0"
] | 8
|
2017-01-18T04:14:01.000Z
|
2017-12-01T08:03:10.000Z
|
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, date
from dateutil import parser
from amaascore.assets.listed_derivative import ListedDerivative
class ListedContractForDifference(ListedDerivative):
def __init__(self, asset_manager_id, asset_id, asset_issuer_id=None, asset_status='Active', display_name='',
description='', country_id=None, venue_id=None, currency=None, issue_date=date.min,
links=None, references=None,
*args, **kwargs):
super(ListedContractForDifference, self).__init__(asset_manager_id=asset_manager_id, asset_id=asset_id,
asset_issuer_id=asset_issuer_id,
asset_status=asset_status, display_name=display_name,
description=description,
country_id=country_id, venue_id=venue_id,
issue_date=issue_date, currency=currency,
links=links, references=references, *args, **kwargs)
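# --- Usage sketch (illustrative only): minimal instantiation with placeholder identifiers. ---
#   cfd = ListedContractForDifference(asset_manager_id=1, asset_id='SAMPLE_CFD',
#                                     currency='USD', issue_date=date(2017, 1, 1))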
| 57.863636
| 112
| 0.568735
|
b562e47b86ee7f6312b379e551512ebe549ca07b
| 17,351
|
py
|
Python
|
mindquantum/core/operators/polynomial_tensor.py
|
Takishima/mindquantum
|
e90dfe474b759023d7ae18281b9a87cb8d223d04
|
[
"Apache-2.0"
] | null | null | null |
mindquantum/core/operators/polynomial_tensor.py
|
Takishima/mindquantum
|
e90dfe474b759023d7ae18281b9a87cb8d223d04
|
[
"Apache-2.0"
] | null | null | null |
mindquantum/core/operators/polynomial_tensor.py
|
Takishima/mindquantum
|
e90dfe474b759023d7ae18281b9a87cb8d223d04
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Portions Copyright (c) 2020 Huawei Technologies Co.,ltd.
# Portions Copyright 2017 The OpenFermion Developers.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module we develop is default being licensed under Apache 2.0 license,
# and also uses or refactor Fermilib and OpenFermion licensed under
# Apache 2.0 license.
"""This is the base class that to represent fermionic molecualr or Hamiltonian."""
import copy
import itertools
import numpy
EQ_TOLERANCE = 1e-8
class PolynomialTensorError(Exception):
r"""Exception raised in methods from the PolynomialTensor class."""
class PolynomialTensor:
r"""
Class to store the coefficient of the fermionic ladder operators in a tensor form.
For instance, in a molecular Hamiltonian (degree 4 polynomial) which
conserves particle number, there are only three kinds of terms,
namely constant term, single excitation :math:`a^\dagger_p a_q` and
double excitation terms :math:`a^\dagger_p a^\dagger_q a_r a_s`,
    and their corresponding coefficients can be stored in a scalar,
    :math:`n_\text{qubits}\times n_\text{qubits}` matrix and
    :math:`n_\text{qubits}\times n_\text{qubits}\times n_\text{qubits}\times n_\text{qubits}` matrix.
Note that each tensor must have an even number of dimensions due to
the parity conservation.
Much of the functionality of this class is similar to that of
FermionOperator.
Args:
n_body_tensors(dict): A dictionary storing the tensors describing
n-body interactions. The keys are tuples that indicate the
type of tensor.
For instance, n_body_tensors[()] would return a constant,
while a n_body_tensors[(1, 0)] would be an
:math:`n_\text{qubits}\times n_\text{qubits}` numpy
array, and n_body_tensors[(1,1,0,0)]
would return a
            :math:`n_\text{qubits}\times n_\text{qubits}\times n_\text{qubits}\times n_\text{qubits}`
numpy array
            and the constant and those arrays represent the coefficients of terms of
the form identity, :math:`a^\dagger_p a_q`,
:math:`a^\dagger_p a^\dagger_q a_r a_s`, respectively. Default: None.
Note:
Here '1' represents :math:`a^\dagger`, while '0' represent :math:`a`.
Examples:
>>> import numpy as np
>>> from mindquantum.core.operators import PolynomialTensor
>>> constant = 1
>>> one_body_term = np.array([[1,0],[0,1]])
        >>> two_body_term = np.array([[[[1,0],[0,1]],[[1,0],[0,1]]],[[[1,0],[0,1]],[[1,0],[0,1]]]])
>>> n_body_tensors = {(): 1, (1,0): one_body_term,(1,1,0,0):two_body_term}
>>> poly_op = PolynomialTensor(n_body_tensors)
>>> poly_op
() 1
((0, 1), (0, 0)) 1
((1, 1), (1, 0)) 1
((0, 1), (0, 1), (0, 0), (0, 0)) 1
((0, 1), (0, 1), (1, 0), (1, 0)) 1
((0, 1), (1, 1), (0, 0), (0, 0)) 1
((0, 1), (1, 1), (1, 0), (1, 0)) 1
((1, 1), (0, 1), (0, 0), (0, 0)) 1
((1, 1), (0, 1), (1, 0), (1, 0)) 1
((1, 1), (1, 1), (0, 0), (0, 0)) 1
((1, 1), (1, 1), (1, 0), (1, 0)) 1
>>> # get the constant
>>> poly_op.constant
1
>>> # set the constant
>>> poly_op.constant = 2
>>> poly_op.constant
2
>>> poly_op.n_qubits
2
>>> poly_op.one_body_tensor
array([[1, 0],
[0, 1]])
>>> poly_op.two_body_tensor
array([[[[1, 0],
[0, 1]],
[[1, 0],
[0, 1]]],
[[[1, 0],
[0, 1]],
[[1, 0],
[0, 1]]]])
"""
__hash__ = None
def __init__(self, n_body_tensors=None):
"""Initialize a PolynomialTensor object."""
        # fall back to an empty dict so the documented default (None) does not break the iteration below
        self.n_body_tensors = n_body_tensors if n_body_tensors is not None else {}
self.n_qubits = 0
for key, _ in self.n_body_tensors.items():
if key == ():
pass
            elif len(key) == 2 or len(key) == 4:  # one- or two-body tensors
self.n_qubits = self.n_body_tensors[key].shape[0]
else:
PolynomialTensorError("Unexpected type of n-body-tensors!")
@property
def constant(self):
"""Get the value of the identity term."""
return self.n_body_tensors.get(())
@constant.setter
def constant(self, value):
"""Set the value of the identity term."""
self.n_body_tensors[()] = value
@property
def one_body_tensor(self):
"""Get the one-body term."""
if (1, 0) in self.n_body_tensors:
return self.n_body_tensors[(1, 0)]
return 0
@one_body_tensor.setter
def one_body_tensor(self, value):
"""
Set the value of the one body term.
The value should numpy array with size n_qubits x n_qubits.
"""
self.n_body_tensors[(1, 0)] = value
@property
def two_body_tensor(self):
"""Get the two body term."""
if (1, 1, 0, 0) in self.n_body_tensors:
return self.n_body_tensors[(1, 1, 0, 0)]
return 0
@two_body_tensor.setter
def two_body_tensor(self, value):
"""
Set the two body term.
The value should be of numpy array with size n_qubits x n_qubits x n_qubits x n_qubits.
"""
self.n_body_tensors[(1, 1, 0, 0)] = value
def __getitem__(self, args):
r"""
Look up the matrix table.
Args:
args(tuples): Tuples indicating which coefficient to get.
For instance,
`my_tensor[(3, 1), (4, 1), (2, 0)]` means look for the coefficient
of fermionic ladder operator (a^\dagger_3 a^\dagger_4 a_2 )
returns
`my_tensor.n_body_tensors[1, 1, 0][3, 4, 2]`
Note: this supports single element extraction
"""
if args == ():
return self.constant
# change it into array
index, key = tuple(zip(*args))[0], tuple(zip(*args))[1]
return self.n_body_tensors[key][index]
def __setitem__(self, args, value):
"""
Set matrix element.
Args:
args(tuples): Tuples indicating which terms to set the
corresponding coefficient.
"""
if args == ():
self.constant = value
else:
# handle with the case (1,0) or ((1,0),(2,1)) they both have the
# length 2
index, key = tuple(zip(*args))[0], tuple(zip(*args))[1]
self.n_body_tensors[key][index] = value
def __eq__(self, other):
"""Equality comparison operator."""
# first check qubits number
if self.n_qubits != other.n_qubits:
return False
# then check the maximum difference whether within the EQ_TOLERANCE
diff = 0.0
self_keys = set(self.n_body_tensors.keys())
other_keys = set(other.n_body_tensors.keys())
# check the intersection part
for key in self_keys.intersection(other_keys):
if key == () or key is not None:
self_tensor = self.n_body_tensors[key]
other_tensor = other.n_body_tensors[key]
discrepancy = numpy.amax(numpy.absolute(self_tensor - other_tensor))
diff = max(diff, discrepancy)
# check the difference part
for key in self_keys.symmetric_difference(other_keys):
if key == () or key is not None:
                # use .get() so keys present in only one operand do not raise KeyError
                self_tensor = self.n_body_tensors.get(key)
                tensor = self_tensor if self_tensor is not None else other.n_body_tensors.get(key)
discrepancy = numpy.amax(numpy.abs(tensor))
diff = max(diff, discrepancy)
return diff < EQ_TOLERANCE
def __ne__(self, other):
"""Inequality comparison operator."""
return not self == other
def __iadd__(self, addend):
"""
In-place method for += addition of PolynomialTensor.
Args:
addend (PolynomialTensor): The addend.
Returns:
sum (PolynomialTensor), Mutated self.
Raises:
TypeError: Cannot add invalid addend type.
"""
if not isinstance(addend, type(self)):
raise PolynomialTensorError("Cannot add invalid type! \n Expect {}".format(type(self)))
# check dimension, self.n_qubits
if self.n_qubits != addend.n_qubits:
raise PolynomialTensorError("Can not add invalid type, the shape does not match!")
# add the common part
self_keys = set(self.n_body_tensors.keys())
addend_keys = set(addend.n_body_tensors.keys())
for key in self_keys.intersection(addend_keys):
self.n_body_tensors[key] = numpy.add(self.n_body_tensors[key], addend.n_body_tensors[key])
for key in addend_keys.difference(self_keys): # the term in added but not in self
if key:
self.n_body_tensors[key] = addend.n_body_tensors[key]
return self
def __add__(self, addend):
"""
Addition of PolynomialTensor.
Args:
added(PolynomialTensor): The addend.
Returns:
sum (PolynomialTensor), un-mutated self, but has new instance
Raises:
TypeError: Cannot add invalid operator type.
"""
sum_addend = copy.deepcopy(self)
sum_addend += addend
return sum_addend
def __neg__(self):
"""Return negation of the PolynomialTensor,mutated itself."""
for key in self.n_body_tensors:
self.n_body_tensors[key] = numpy.negative(self.n_body_tensors[key])
return self
def __isub__(self, subtractend):
"""
In-place method for -= subtraction of PolynomialTensor.
Args:
subtractend (PolynomialTensor): subtractend.
Returns:
subtract (PolynomialTensor), Mutated self.
Raises:
TypeError: Cannot sub invalid addend type.
"""
if not isinstance(subtractend, type(self)):
raise PolynomialTensorError("Cannot sub invalid type! \n Expect {}".format(type(self)))
# check dimension, self.n_qubits
if self.n_qubits != subtractend.n_qubits:
raise PolynomialTensorError("Cannot sub invalid type, the shape does not match!")
# sub the common part
self_keys = set(self.n_body_tensors.keys())
sub_keys = set(subtractend.n_body_tensors.keys())
for key in self_keys.intersection(sub_keys):
self.n_body_tensors[key] = numpy.subtract(self.n_body_tensors[key], subtractend.n_body_tensors[key])
for key in sub_keys.difference(self_keys): # the term in sub but not in self
if key:
self.n_body_tensors[key] = numpy.negative(subtractend.n_body_tensors[key])
return self
def __sub__(self, subtractend):
"""
Subtraction of PolynomialTensor.
Args:
subtractend(PolynomialTensor): The subtractend.
Returns:
subtractend (PolynomialTensor), un-mutated self,
but has new instance
Raises:
TypeError: Cannot sub invalid operator type.
"""
subend = copy.deepcopy(self)
subend -= subtractend
return subend
def __imul__(self, multiplier):
"""
In-place multiply (*=) with scalar or operator of the same type.
Default implementation is to multiply coefficients and concatenate terms (same operator).
Args:
multiplier(complex, float, or PolynomialTensor): multiplier
Returns:
products(PolynomialTensor)
Raises:
TypeError: cannot multiply invalid type of multiplier.
"""
# hand with scalar
if isinstance(multiplier, (int, float, complex)):
for key in self.n_body_tensors:
self.n_body_tensors[key] *= multiplier
elif isinstance(multiplier, type(self)):
if self.n_qubits != multiplier.n_qubits:
raise PolynomialTensorError("Cannot multiply invalid type, the shape does not match!")
# note we do not deal with the key multiplication,
# unlike that in FermionOperator, which is possible
self_keys = set(self.n_body_tensors.keys())
multiply_keys = set(multiplier.n_body_tensors.keys())
for key in self_keys.intersection(multiply_keys):
self.n_body_tensors[key] = numpy.multiply(self.n_body_tensors[key], multiplier.n_body_tensors[key])
for key in self_keys.difference(multiply_keys): # the term in added but not in self
if key == ():
self.constant = 0
else:
self.n_body_tensors[key] = numpy.zeros(self.n_body_tensors[key].shape)
else:
raise PolynomialTensorError("Cannot multiply invalid type!")
return self
def __mul__(self, multiplier):
"""
Multiplication for PolynomialTensor.
Args:
multiplier (PolynomialTensor): The multiplier to multiply.
Returns:
multiply (PolynomialTensor), un-Mutated self.
Raises:
TypeError: Cannot multiply invalid type.
"""
if isinstance(multiplier, (int, float, complex, type(self))):
# make use of the *= method
product_results = copy.deepcopy(self)
product_results *= multiplier
else:
raise PolynomialTensorError('Cannot multiply invalid type to {}.'.format(type(self)))
return product_results
def __rmul__(self, multiplier):
"""
Return multiplier * self.
Args:
multiplier: The operator to multiply.
Returns:
a new instance of PolynomialTensor
Raises:
TypeError: Cannot multiply invalid type.
"""
if isinstance(multiplier, (int, float, complex)): # make use of the * method, basically scalar
return self * multiplier
raise PolynomialTensorError('Cannot multiply invalid operator type to {}.'.format(type(self)))
def __itruediv__(self, divisor):
"""
Return self/divisor for the scalar.
Args:
divisor(int, float, complex): scalar
Returns:
a new instance of PolynomialTensor
Raises:
TypeError: cannot divide non-numeric type.
"""
if isinstance(divisor, (int, float, complex)) and divisor != 0:
for key in self.n_body_tensors:
self.n_body_tensors[key] /= divisor
else:
            raise PolynomialTensorError(
                'Cannot divide the {} by non-numeric type or the divisor is 0.'.format(type(self))
            )
return self
def __truediv__(self, divisor):
"""Implement division."""
if isinstance(divisor, (int, float, complex)) and divisor != 0:
quotient = copy.deepcopy(self)
quotient /= divisor
else:
            raise PolynomialTensorError(
                'Cannot divide the {} by non-numeric type or the divisor is 0.'.format(type(self))
            )
return quotient
# be careful with this function
def __div__(self, divisor):
"""For compatibility with Python 2."""
return self.__truediv__(divisor)
def __iter__(self):
"""Iterate over non-zero elements in the PolynomialTensor."""
def sort_key(key):
"""Determine how the keys to n_body_tensors should be sorted by mapping it to the corresponding integer."""
if key == ():
return 0
key_int = int(''.join(map(str, key)))
return key_int
for key in sorted(self.n_body_tensors, key=sort_key):
if key == ():
yield ()
else:
n_body_tensors = self.n_body_tensors[key] # get the matrix
# look up the non-zero elements in the n_body_tensors
for index in itertools.product(range(self.n_qubits), repeat=len(key)):
if n_body_tensors[index]:
yield tuple(zip(index, key))
def __str__(self):
"""Print out the non-zero elements of PolynomialTensor."""
strings = []
for key in self:
strings.append('{} {}\n'.format(key, self[key]))
return ''.join(strings) if strings else '0'
def __repr__(self):
"""Return a string representation of the object."""
return str(self)
__all__ = ['PolynomialTensor']
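# --- Usage sketch (illustrative only): arithmetic on two small PolynomialTensor objects. ---
if __name__ == '__main__':
    one_body = numpy.eye(2)
    a = PolynomialTensor({(): 1.0, (1, 0): one_body})
    b = PolynomialTensor({(): 0.5, (1, 0): 2 * one_body})
    print(a + b)   # coefficients are added element-wise per tensor key
    print(2 * a)   # scalar multiplication scales every stored tensor
    print(a == b)  # equality compares the largest element-wise difference against EQ_TOLERANCE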
| 35.194726
| 120
| 0.583482
|
f4845c212a263b4fe077a05c14928e1e1a22ef96
| 2,734
|
py
|
Python
|
src/bert.py
|
geoffbacon/does-bert-agree
|
9ece52d01f30352a200ad841efb6162e7597f0e4
|
[
"MIT"
] | 2
|
2019-08-28T16:47:33.000Z
|
2022-02-25T06:47:07.000Z
|
src/bert.py
|
geoffbacon/does-bert-agree
|
9ece52d01f30352a200ad841efb6162e7597f0e4
|
[
"MIT"
] | null | null | null |
src/bert.py
|
geoffbacon/does-bert-agree
|
9ece52d01f30352a200ad841efb6162e7597f0e4
|
[
"MIT"
] | null | null | null |
"""Predict masked words using BERT.
The code in this module was heavily borrowed from Yoav Goldberg's code on
assessing BERT's syntactic abilities: https://github.com/yoavg/bert-syntax/
Thanks to Yoav for making his code available.
"""
import pandas as pd
import torch
from torch import LongTensor # pylint: disable=E0611
from transformers import BertForMaskedLM, BertTokenizer
from constants import MASK
START = ["[CLS]"]
END = ["[SEP]"]
class BERT:
"""High-level interface for getting word predictions from BERT."""
def __init__(self, name, gpu=False):
"""Initialize BERT instance.
Parameters
----------
name : str
Name of the pre-trained BERT model to use. In this project, either
'bert-base-multilingual-cased' or 'bert-base-cased'
        gpu : bool
            Whether to run on GPU or not (CPU-only is useful for debugging)
"""
self.model = BertForMaskedLM.from_pretrained(name)
self.gpu = gpu
if self.gpu:
self.model = self.model.cuda()
self.model = self.model.eval()
tokenizer = BertTokenizer.from_pretrained(name)
self.tokenize = tokenizer.tokenize
self.tokens_to_ids = tokenizer.convert_tokens_to_ids
self.ids_to_tokens = tokenizer.ids_to_tokens
# tokenizer.vocab is a collections.OrderedDict, not a regular Python
# dictionary, so its keys always come out in the same order.
self.vocab = list(tokenizer.vocab.keys())
self.index = pd.Index(self.vocab, name="word")
def predict(self, masked_sentence, fold_case=False):
"""Predict the masked word in `masked_sentence`.
Note that the output probability distribution is unnormalized.
Parameters
----------
masked_sentence : str
Sentence with one token masked out
fold_case : bool
Whether or not to average predictions over different casings.
Returns
-------
pd.DataFrame
The unnormalized probability distribution over BERT's vocab of
each word in the masked position.
"""
tokens = START + self.tokenize(masked_sentence) + END
target_index = tokens.index(MASK)
token_ids = self.tokens_to_ids(tokens)
tensor = LongTensor(token_ids).unsqueeze(0)
if self.gpu:
tensor = tensor.cuda()
probs = self.model(tensor)[0][0, target_index]
if self.gpu:
probs = probs.cpu()
probs = pd.DataFrame(probs.data.numpy(), index=self.index, columns=["p"])
if fold_case:
probs.index = probs.index.str.lower()
return probs.groupby("word").mean()
return probs
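# --- Usage sketch (illustrative only): querying BERT for the masked position. ---
# Assumes MASK from constants is the standard "[MASK]" token.
if __name__ == "__main__":
    bert = BERT("bert-base-cased", gpu=False)
    sentence = f"The keys to the cabinet {MASK} on the table."
    probs = bert.predict(sentence, fold_case=True)
    # Ten highest-scoring (unnormalized) candidates for the masked slot.
    print(probs.sort_values("p", ascending=False).head(10))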
| 33.753086
| 81
| 0.634236
|
71ca61d6a00db381656778fff7754a31b2343ccb
| 530
|
py
|
Python
|
tests/nnapi/specs/V1_2/not_equal_float_nnfw.mod.py
|
periannath/ONE
|
61e0bdf2bcd0bc146faef42b85d469440e162886
|
[
"Apache-2.0"
] | 255
|
2020-05-22T07:45:29.000Z
|
2022-03-29T23:58:22.000Z
|
tests/nnapi/specs/V1_2/not_equal_float_nnfw.mod.py
|
periannath/ONE
|
61e0bdf2bcd0bc146faef42b85d469440e162886
|
[
"Apache-2.0"
] | 5,102
|
2020-05-22T07:48:33.000Z
|
2022-03-31T23:43:39.000Z
|
tests/nnapi/specs/V1_2/not_equal_float_nnfw.mod.py
|
periannath/ONE
|
61e0bdf2bcd0bc146faef42b85d469440e162886
|
[
"Apache-2.0"
] | 120
|
2020-05-22T07:51:08.000Z
|
2022-02-16T19:08:05.000Z
|
# model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{3}") # a vector of input
i2 = Input("op2", "TENSOR_FLOAT32", "{3}") # a vector of input
i3 = Output("op3", "TENSOR_BOOL8", "{3}") # a vector of output
model = model.Operation("NOT_EQUAL", i1, i2).To(i3)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[2.0, 3.254232, 5.1232],
i2: # input 1
[2.0, 3.254111, 5.1232]}
output0 = {i3: # output 0
[False, True, False]}
# Instantiate an example
Example((input0, output0))
| 27.894737
| 62
| 0.586792
|
bcf5d64cd073b26e98f78c48cc39b90f76339503
| 1,554
|
py
|
Python
|
var/spack/repos/builtin/packages/intel-oneapi-inspector/package.py
|
BenWibking/spack
|
49b3b43a4a9375210b578635d9240875a5f3106b
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/intel-oneapi-inspector/package.py
|
BenWibking/spack
|
49b3b43a4a9375210b578635d9240875a5f3106b
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/intel-oneapi-inspector/package.py
|
flatironinstitute/spack
|
71a7b1b5fadbe16bcdb36fb679aa828cd7d83b02
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
from spack import *
class IntelOneapiInspector(IntelOneApiPackage):
"""Intel Inspector is a memory and thread checking and debugging tool to increase
the reliability, security, and accuracy of C/C++ and Fortran applications."""
maintainers = ['rscohn2']
homepage = 'https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/inspector.html'
if platform.system() == 'Linux':
version('2022.0.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18363/l_inspector_oneapi_p_2022.0.0.56_offline.sh',
sha256='79a0eb2ae3f1de1e3456076685680c468702922469c3fda3e074718fb0bea741',
expand=False)
version('2021.4.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18239/l_inspector_oneapi_p_2021.4.0.266_offline.sh',
sha256='c8210cbcd0e07cc75e773249a5e4a02cf34894ec80a213939f3a20e6c5705274',
expand=False)
version('2021.3.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17946/l_inspector_oneapi_p_2021.3.0.217_offline.sh',
sha256='1371ca74be2a6d4b069cdb3f8f2d6109abbc3261a81f437f0fe5412a7b659b43',
expand=False)
@property
def component_dir(self):
return 'inspector'
| 43.166667
| 133
| 0.704633
|
6934126eab65c646242a0d59ed59fefca05e743c
| 3,503
|
py
|
Python
|
src/urh/main.py
|
awesome-archive/urh
|
c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7
|
[
"Apache-2.0"
] | null | null | null |
src/urh/main.py
|
awesome-archive/urh
|
c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7
|
[
"Apache-2.0"
] | null | null | null |
src/urh/main.py
|
awesome-archive/urh
|
c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import locale
import os
import re
import sys
import time
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QPalette, QIcon
from PyQt5.QtWidgets import QApplication, QWidget, QStyleFactory
locale.setlocale(locale.LC_ALL, '')
GENERATE_UI = True
def main():
t = time.time()
if GENERATE_UI and not hasattr(sys, 'frozen'):
try:
urh_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", ".."))
sys.path.append(urh_dir)
sys.path.append(os.path.join(urh_dir, "src"))
import generate_ui
generate_ui.gen()
print("Time for generating UI: %.2f seconds" % (time.time() - t), file=sys.stderr)
except (ImportError, FileNotFoundError):
            print("Will not regenerate UI, because script can't be found. This is okay in "
"release.", file=sys.stderr)
urh_exe = sys.executable if hasattr(sys, 'frozen') else sys.argv[0]
urh_exe = os.readlink(urh_exe) if os.path.islink(urh_exe) else urh_exe
urh_dir = os.path.join(os.path.dirname(urh_exe), "..", "..")
prefix = os.path.abspath(os.path.normpath(urh_dir))
src_dir = os.path.join(prefix, "src")
if os.path.exists(src_dir) and not prefix.startswith("/usr") \
and not re.match(r"(?i)c:\\program", prefix):
# Started locally, not installed
print("Using modules from {0}".format(src_dir), file=sys.stderr)
sys.path.insert(0, src_dir)
try:
import urh.cythonext.signalFunctions
import urh.cythonext.path_creator
import urh.cythonext.util
except ImportError:
print("Could not find C++ extensions, trying to build them.", file=sys.stderr)
old_dir = os.curdir
os.chdir(os.path.join(src_dir, "urh", "cythonext"))
from urh.cythonext import build
build.main()
os.chdir(old_dir)
from urh.controller.MainController import MainController
from urh import constants
if constants.SETTINGS.value("use_fallback_theme", False, bool):
os.environ['QT_QPA_PLATFORMTHEME'] = 'fusion'
app = QApplication(sys.argv)
# noinspection PyUnresolvedReferences
import urh.ui.xtra_icons_rc # Use oxy theme always
QIcon.setThemeName("oxy")
constants.SETTINGS.setValue("default_theme", QApplication.style().objectName())
if constants.SETTINGS.value("use_fallback_theme", False, bool):
QApplication.setStyle(QStyleFactory.create("Fusion"))
main_window = MainController()
if sys.platform == "darwin":
menu_bar = main_window.menuBar()
menu_bar.setNativeMenuBar(False)
main_window.showMaximized()
# main_window.setFixedSize(1920, 1080 - 30) # Youtube
# use system colors for painting
widget = QWidget()
bgcolor = widget.palette().color(QPalette.Background)
fgcolor = widget.palette().color(QPalette.Foreground)
selection_color = widget.palette().color(QPalette.Highlight)
constants.BGCOLOR = bgcolor
constants.LINECOLOR = fgcolor
constants.SELECTION_COLOR = selection_color
constants.SEND_INDICATOR_COLOR = selection_color
if "autoclose" in sys.argv[1:]:
# Autoclose after 1 second, this is useful for automated testing
timer = QTimer()
timer.timeout.connect(app.quit)
timer.start(1000)
os._exit(app.exec_()) # sys.exit() is not enough on Windows and will result in crash on exit
if __name__ == "__main__":
main()
| 32.137615
| 97
| 0.66857
|
420026c9c960d5a23b97004701f4a5ca8f527209
| 1,595
|
py
|
Python
|
bohr/cli/dataset/commands.py
|
giganticode/bohr-framework
|
fd364a1f036123985ac96e9076e5dce3bbc2ca2c
|
[
"MIT"
] | null | null | null |
bohr/cli/dataset/commands.py
|
giganticode/bohr-framework
|
fd364a1f036123985ac96e9076e5dce3bbc2ca2c
|
[
"MIT"
] | 54
|
2021-02-17T13:36:51.000Z
|
2021-08-25T05:06:57.000Z
|
bohr/cli/dataset/commands.py
|
giganticode/bohr-framework
|
fd364a1f036123985ac96e9076e5dce3bbc2ca2c
|
[
"MIT"
] | null | null | null |
import logging
import textwrap
from pathlib import Path
from typing import Optional
import click
from tabulate import tabulate
from bohr import api
from bohr.datamodel.bohrrepo import load_bohr_repo
from bohr.util.logging import verbosity
logger = logging.getLogger(__name__)
@click.group()
def dataset():
pass
@dataset.command()
@click.option("-t", "--task", type=str)
@click.option("-a", "--extended-list", is_flag=True)
def ls(task: Optional[str], extended_list: bool) -> None:
bohr_repo = load_bohr_repo()
if task:
if task not in bohr_repo.tasks:
logger.error(
f"Task not found in the config: {task}. \n"
f"Defined tasks: {list(bohr_repo.tasks.keys())}"
)
exit(404)
datasets = bohr_repo.tasks[task].datasets
else:
datasets = bohr_repo.datasets
if extended_list:
print(
tabulate(
[
[dataset_name, textwrap.fill(dataset.description)]
for dataset_name, dataset in datasets.items()
],
tablefmt="fancy_grid",
)
)
else:
for dataset in datasets:
print(dataset)
@dataset.command()
@click.argument("path", type=str)
@click.option("-t", "--artifact", required=True)
@click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
def add(path: str, artifact: str, verbose: bool) -> None:
with verbosity(verbose):
dataset = api.add(Path(path), artifact)
print(f"Dataset {dataset.name} is added.")
| 27.033898
| 75
| 0.610031
|
561ab22e7d19b279526c74e7d320cdaddfa22162
| 6,312
|
py
|
Python
|
globalsky/tests/base.py
|
LCOGT/globalskypartners
|
ecb4ffc7c8ed0902e71b648907046093ea82dc77
|
[
"MIT"
] | null | null | null |
globalsky/tests/base.py
|
LCOGT/globalskypartners
|
ecb4ffc7c8ed0902e71b648907046093ea82dc77
|
[
"MIT"
] | 2
|
2021-05-25T13:16:56.000Z
|
2021-06-18T08:29:36.000Z
|
globalsky/tests/base.py
|
LCOGT/globalskypartners
|
ecb4ffc7c8ed0902e71b648907046093ea82dc77
|
[
"MIT"
] | null | null | null |
"""
NEO exchange: NEO observing portal for Las Cumbres Observatory
Copyright (C) 2015-2019 LCO
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from subprocess import check_output, CalledProcessError
from datetime import datetime, timedelta
from glob import glob
import tempfile
import os
import shutil
import pytz
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.conf import settings
from django.utils import timezone
from contextlib import contextmanager
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import staleness_of
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from django.contrib.auth.models import User
from partners.models import Cohort, Semester, Partner, Proposal
class FunctionalTest(StaticLiveServerTestCase):
def __init__(self, *args, **kwargs):
super(FunctionalTest, self).__init__(*args, **kwargs)
if settings.DEBUG is False:
settings.DEBUG = True
@contextmanager
def wait_for_page_load(self, timeout=30):
old_page = self.browser.find_element_by_tag_name('html')
yield
WebDriverWait(self.browser, timeout).until(
staleness_of(old_page)
)
def setUp(self):
if settings.USE_FIREFOXDRIVER:
fp = webdriver.FirefoxProfile()
fp.set_preference("browser.startup.homepage", "about:blank")
fp.set_preference("startup.homepage_welcome_url", "about:blank")
fp.set_preference("startup.homepage_welcome_url.additional", "about:blank")
# Don't ask where to save downloaded files
fp.set_preference("browser.download.folderList", 2);
fp.set_preference("browser.download.manager.showWhenStarting", False);
fp.set_preference("browser.download.dir", self.test_dir);
fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain");
if not hasattr(self, 'browser'):
firefox_capabilities = DesiredCapabilities.FIREFOX
# Marionette does not work on Firefox ~< 57. Try and determine the
# version and check it. Hopefully this code is robust and platform-
# independent...
try:
version = check_output(["firefox", "--version"], universal_newlines=True)
except (OSError, CalledProcessError):
version = None
if version and 'Firefox' in version:
version_num = version.rstrip().split(' ')[-1]
major_version = version_num.split('.')[0]
firefox_capabilities['marionette'] = True
if major_version.isdigit() and int(major_version) <= 52:
firefox_capabilities['marionette'] = False
options = webdriver.firefox.options.Options()
options.add_argument('--headless')
self.browser = webdriver.Firefox(capabilities=firefox_capabilities, firefox_profile=fp, options=options)
else:
options = webdriver.chrome.options.Options()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-gpu')
self.browser = webdriver.Chrome(chrome_options=options)
self.browser.implicitly_wait(5)
def add_user(self):
self.username = 'bart'
self.password = 'simpson'
self.email = 'bart@simpson.org'
self.bart = User.objects.create_user(username=self.username, email=self.email)
self.bart.set_password(self.password)
self.bart.first_name= 'Bart'
self.bart.last_name = 'Simpson'
self.bart.is_active=1
self.bart.save()
def add_cohort_semester(self):
params = { 'year': '2021',
'active_call': True,
'deadline': datetime(2021, 6, 13, 0, 0, tzinfo=pytz.utc),
'call': 'https://lco.global/'
}
self.cohort, created = Cohort.objects.get_or_create(pk=1, **params)
params ={
'start': datetime(2021, 8, 1, 0, 0, tzinfo=pytz.utc),
'end': datetime(2022, 1, 31, 0, 0, tzinfo=pytz.utc),
'code': '2021B',
}
s1, created = Semester.objects.get_or_create(cohort=self.cohort, **params)
params = {
'start': datetime(2022, 2, 1, 0, 0, tzinfo=pytz.utc),
'end': datetime(2022, 7, 31, 0, 0, tzinfo=pytz.utc),
'code': '2022A',
}
s2, created = Semester.objects.get_or_create(cohort=self.cohort, **params)
params ={
'start': timezone.now() - timedelta(weeks=2),
'end': timezone.now() + timedelta(weeks=2),
'code': '20XXX',
}
s_now, created = Semester.objects.get_or_create(cohort=self.cohort, **params)
def add_partner(self):
params = {
'name': 'AstroAwesome',
'proposal_code': 'LCOEPO-001',
'summary': 'AstroAwesome is awesome',
'active': True
}
self.partner, created = Partner.objects.get_or_create(**params)
def add_user_as_pi(self):
self.partner.pi.add(self.bart)
self.partner.save()
def wait_for_element_with_id(self, element_id):
WebDriverWait(self.browser, timeout=10).until(
lambda b: b.find_element_by_id(element_id),
'Could not find element with id {}. Page text was:\n{}'.format(
element_id, self.browser.find_element_by_tag_name('body').text
)
)
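# Hedged sketch (not part of the original suite): a concrete test would typically
# subclass FunctionalTest and combine the fixtures above. The test name and the
# 'partners' element id are hypothetical, and running it assumes the same Django
# settings the base class relies on.
class PartnerListTest(FunctionalTest):
    def test_homepage_lists_partners(self):
        self.add_user()
        self.add_cohort_semester()
        self.add_partner()
        self.browser.get(self.live_server_url)
        self.wait_for_element_with_id('partners')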
| 42.362416
| 120
| 0.62706
|
409bb18ad6bf8d6006556c7c9fcbc8485c46e4cf
| 34,339
|
py
|
Python
|
git/config.py
|
daobook/GitPython
|
cbde921ecf38f25d1dd0dfcb9389514087187e68
|
[
"BSD-3-Clause"
] | null | null | null |
git/config.py
|
daobook/GitPython
|
cbde921ecf38f25d1dd0dfcb9389514087187e68
|
[
"BSD-3-Clause"
] | null | null | null |
git/config.py
|
daobook/GitPython
|
cbde921ecf38f25d1dd0dfcb9389514087187e68
|
[
"BSD-3-Clause"
] | null | null | null |
# config.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Module containing module parser implementation able to properly read and write
configuration files"""
import sys
import abc
from functools import wraps
import inspect
from io import BufferedReader, IOBase
import logging
import os
import re
import fnmatch
from git.compat import (
defenc,
force_text,
is_win,
)
from git.util import LockFile
import os.path as osp
import configparser as cp
# typing-------------------------------------------------------
from typing import (Any, Callable, Generic, IO, List, Dict, Sequence,
TYPE_CHECKING, Tuple, TypeVar, Union, cast)
from git.types import Lit_config_levels, ConfigLevels_Tup, PathLike, assert_never, _T
if TYPE_CHECKING:
from git.repo.base import Repo
from io import BytesIO
T_ConfigParser = TypeVar('T_ConfigParser', bound='GitConfigParser')
T_OMD_value = TypeVar('T_OMD_value', str, bytes, int, float, bool)
if sys.version_info[:3] < (3, 7, 2):
# typing.Ordereddict not added until py 3.7.2
from collections import OrderedDict
OrderedDict_OMD = OrderedDict
else:
from typing import OrderedDict
OrderedDict_OMD = OrderedDict[str, List[T_OMD_value]] # type: ignore[assignment, misc]
# -------------------------------------------------------------
__all__ = ('GitConfigParser', 'SectionConstraint')
log = logging.getLogger('git.config')
log.addHandler(logging.NullHandler())
# invariants
# represents the configuration level of a configuration file
CONFIG_LEVELS: ConfigLevels_Tup = ("system", "user", "global", "repository")
# Section pattern to detect conditional includes.
# https://git-scm.com/docs/git-config#_conditional_includes
CONDITIONAL_INCLUDE_REGEXP = re.compile(r"(?<=includeIf )\"(gitdir|gitdir/i|onbranch):(.+)\"")
class MetaParserBuilder(abc.ABCMeta):
"""Utlity class wrapping base-class methods into decorators that assure read-only properties"""
def __new__(cls, name: str, bases: Tuple, clsdict: Dict[str, Any]) -> 'MetaParserBuilder':
"""
Equip all base-class methods with a needs_values decorator, and all non-const methods
with a set_dirty_and_flush_changes decorator in addition to that."""
kmm = '_mutating_methods_'
if kmm in clsdict:
mutating_methods = clsdict[kmm]
for base in bases:
methods = (t for t in inspect.getmembers(base, inspect.isroutine) if not t[0].startswith("_"))
for name, method in methods:
if name in clsdict:
continue
method_with_values = needs_values(method)
if name in mutating_methods:
method_with_values = set_dirty_and_flush_changes(method_with_values)
# END mutating methods handling
clsdict[name] = method_with_values
# END for each name/method pair
# END for each base
return super(MetaParserBuilder, cls).__new__(cls, name, bases, clsdict)
def needs_values(func: Callable[..., _T]) -> Callable[..., _T]:
"""Returns method assuring we read values (on demand) before we try to access them"""
@wraps(func)
def assure_data_present(self: 'GitConfigParser', *args: Any, **kwargs: Any) -> _T:
self.read()
return func(self, *args, **kwargs)
# END wrapper method
return assure_data_present
def set_dirty_and_flush_changes(non_const_func: Callable[..., _T]) -> Callable[..., _T]:
"""Return method that checks whether given non constant function may be called.
If so, the instance will be set dirty.
Additionally, we flush the changes right to disk"""
def flush_changes(self: 'GitConfigParser', *args: Any, **kwargs: Any) -> _T:
rval = non_const_func(self, *args, **kwargs)
self._dirty = True
self.write()
return rval
# END wrapper method
flush_changes.__name__ = non_const_func.__name__
return flush_changes
class SectionConstraint(Generic[T_ConfigParser]):
"""Constrains a ConfigParser to only option commands which are constrained to
always use the section we have been initialized with.
It supports all ConfigParser methods that operate on an option.
:note:
If used as a context manager, will release the wrapped ConfigParser."""
__slots__ = ("_config", "_section_name")
_valid_attrs_ = ("get_value", "set_value", "get", "set", "getint", "getfloat", "getboolean", "has_option",
"remove_section", "remove_option", "options")
def __init__(self, config: T_ConfigParser, section: str) -> None:
self._config = config
self._section_name = section
def __del__(self) -> None:
# Yes, for some reason, we have to call it explicitly for it to work in PY3 !
        # Apparently __del__ doesn't get called anymore if refcount becomes 0
# Ridiculous ... .
self._config.release()
def __getattr__(self, attr: str) -> Any:
if attr in self._valid_attrs_:
return lambda *args, **kwargs: self._call_config(attr, *args, **kwargs)
return super(SectionConstraint, self).__getattribute__(attr)
def _call_config(self, method: str, *args: Any, **kwargs: Any) -> Any:
"""Call the configuration at the given method which must take a section name
as first argument"""
return getattr(self._config, method)(self._section_name, *args, **kwargs)
@property
def config(self) -> T_ConfigParser:
"""return: Configparser instance we constrain"""
return self._config
def release(self) -> None:
"""Equivalent to GitConfigParser.release(), which is called on our underlying parser instance"""
return self._config.release()
def __enter__(self) -> 'SectionConstraint[T_ConfigParser]':
self._config.__enter__()
return self
def __exit__(self, exception_type: str, exception_value: str, traceback: str) -> None:
self._config.__exit__(exception_type, exception_value, traceback)
class _OMD(OrderedDict_OMD):
"""Ordered multi-dict."""
def __setitem__(self, key: str, value: _T) -> None:
super(_OMD, self).__setitem__(key, [value])
def add(self, key: str, value: Any) -> None:
if key not in self:
super(_OMD, self).__setitem__(key, [value])
return None
super(_OMD, self).__getitem__(key).append(value)
def setall(self, key: str, values: List[_T]) -> None:
super(_OMD, self).__setitem__(key, values)
def __getitem__(self, key: str) -> Any:
return super(_OMD, self).__getitem__(key)[-1]
def getlast(self, key: str) -> Any:
return super(_OMD, self).__getitem__(key)[-1]
def setlast(self, key: str, value: Any) -> None:
if key not in self:
super(_OMD, self).__setitem__(key, [value])
return
prior = super(_OMD, self).__getitem__(key)
prior[-1] = value
def get(self, key: str, default: Union[_T, None] = None) -> Union[_T, None]:
return super(_OMD, self).get(key, [default])[-1]
def getall(self, key: str) -> List[_T]:
return super(_OMD, self).__getitem__(key)
def items(self) -> List[Tuple[str, _T]]: # type: ignore[override]
"""List of (key, last value for key)."""
return [(k, self[k]) for k in self]
def items_all(self) -> List[Tuple[str, List[_T]]]:
"""List of (key, list of values for key)."""
return [(k, self.getall(k)) for k in self]
def get_config_path(config_level: Lit_config_levels) -> str:
    # we do not support an absolute path of the gitconfig on windows,
# use the global config instead
if is_win and config_level == "system":
config_level = "global"
if config_level == "system":
return "/etc/gitconfig"
elif config_level == "user":
config_home = os.environ.get("XDG_CONFIG_HOME") or osp.join(os.environ.get("HOME", '~'), ".config")
return osp.normpath(osp.expanduser(osp.join(config_home, "git", "config")))
elif config_level == "global":
return osp.normpath(osp.expanduser("~/.gitconfig"))
elif config_level == "repository":
raise ValueError("No repo to get repository configuration from. Use Repo._get_config_path")
else:
        # Should not reach here. Will raise ValueError if it does. Static typing will warn about missing elifs
assert_never(config_level, # type: ignore[unreachable]
ValueError(f"Invalid configuration level: {config_level!r}"))
class GitConfigParser(cp.RawConfigParser, metaclass=MetaParserBuilder):
"""Implements specifics required to read git style configuration files.
This variation behaves much like the git.config command such that the configuration
will be read on demand based on the filepath given during initialization.
The changes will automatically be written once the instance goes out of scope, but
can be triggered manually as well.
The configuration file will be locked if you intend to change values preventing other
instances to write concurrently.
:note:
The config is case-sensitive even when queried, hence section and option names
must match perfectly.
If used as a context manager, will release the locked file."""
#{ Configuration
# The lock type determines the type of lock to use in new configuration readers.
# They must be compatible to the LockFile interface.
# A suitable alternative would be the BlockingLockFile
t_lock = LockFile
re_comment = re.compile(r'^\s*[#;]')
#} END configuration
optvalueonly_source = r'\s*(?P<option>[^:=\s][^:=]*)'
OPTVALUEONLY = re.compile(optvalueonly_source)
OPTCRE = re.compile(optvalueonly_source + r'\s*(?P<vi>[:=])\s*' + r'(?P<value>.*)$')
del optvalueonly_source
# list of RawConfigParser methods able to change the instance
_mutating_methods_ = ("add_section", "remove_section", "remove_option", "set")
def __init__(self, file_or_files: Union[None, PathLike, 'BytesIO', Sequence[Union[PathLike, 'BytesIO']]] = None,
read_only: bool = True, merge_includes: bool = True,
config_level: Union[Lit_config_levels, None] = None,
repo: Union['Repo', None] = None) -> None:
"""Initialize a configuration reader to read the given file_or_files and to
possibly allow changes to it by setting read_only False
:param file_or_files:
A single file path or file objects or multiple of these
:param read_only:
            If True, the ConfigParser may only read the data, but not change it.
If False, only a single file path or file object may be given. We will write back the changes
when they happen, or when the ConfigParser is released. This will not happen if other
configuration files have been included
:param merge_includes: if True, we will read files mentioned in [include] sections and merge their
contents into ours. This makes it impossible to write back an individual configuration file.
Thus, if you want to modify a single configuration file, turn this off to leave the original
dataset unaltered when reading it.
:param repo: Reference to repository to use if [includeIf] sections are found in configuration files.
"""
cp.RawConfigParser.__init__(self, dict_type=_OMD)
self._dict: Callable[..., _OMD] # type: ignore # mypy/typeshed bug?
self._defaults: _OMD
self._sections: _OMD # type: ignore # mypy/typeshed bug?
# Used in python 3, needs to stay in sync with sections for underlying implementation to work
if not hasattr(self, '_proxies'):
self._proxies = self._dict()
if file_or_files is not None:
self._file_or_files: Union[PathLike, 'BytesIO', Sequence[Union[PathLike, 'BytesIO']]] = file_or_files
elif config_level is None:
if read_only:
self._file_or_files = [get_config_path(cast(Lit_config_levels, f))
for f in CONFIG_LEVELS
if f != 'repository']
else:
raise ValueError("No configuration level or configuration files specified")
else:
self._file_or_files = [get_config_path(config_level)]
self._read_only = read_only
self._dirty = False
self._is_initialized = False
self._merge_includes = merge_includes
self._repo = repo
self._lock: Union['LockFile', None] = None
self._acquire_lock()
def _acquire_lock(self) -> None:
if not self._read_only:
if not self._lock:
if isinstance(self._file_or_files, (str, os.PathLike)):
file_or_files = self._file_or_files
elif isinstance(self._file_or_files, (tuple, list, Sequence)):
raise ValueError(
"Write-ConfigParsers can operate on a single file only, multiple files have been passed")
else:
file_or_files = self._file_or_files.name
# END get filename from handle/stream
# initialize lock base - we want to write
self._lock = self.t_lock(file_or_files)
# END lock check
self._lock._obtain_lock()
# END read-only check
def __del__(self) -> None:
"""Write pending changes if required and release locks"""
# NOTE: only consistent in PY2
self.release()
def __enter__(self) -> 'GitConfigParser':
self._acquire_lock()
return self
def __exit__(self, *args: Any) -> None:
self.release()
def release(self) -> None:
"""Flush changes and release the configuration write lock. This instance must not be used anymore afterwards.
In Python 3, it's required to explicitly release locks and flush changes, as __del__ is not called
deterministically anymore."""
# checking for the lock here makes sure we do not raise during write()
# in case an invalid parser was created who could not get a lock
if self.read_only or (self._lock and not self._lock._has_lock()):
return
try:
self.write()
except IOError:
log.error("Exception during destruction of GitConfigParser", exc_info=True)
except ReferenceError:
# This happens in PY3 ... and usually means that some state cannot be written
# as the sections dict cannot be iterated
            # Usually when shutting down the interpreter, don't know how to fix this
pass
finally:
if self._lock is not None:
self._lock._release_lock()
def optionxform(self, optionstr: str) -> str:
"""Do not transform options in any way when writing"""
return optionstr
def _read(self, fp: Union[BufferedReader, IO[bytes]], fpname: str) -> None:
"""A direct copy of the py2.4 version of the super class's _read method
to assure it uses ordered dicts. Had to change one line to make it work.
        Future versions have this fixed, but in fact it's quite embarrassing for the
guys not to have done it right in the first place !
Removed big comments to make it more compact.
Made sure it ignores initial whitespace as git uses tabs"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
is_multi_line = False
e = None # None, or an exception
def string_decode(v: str) -> str:
if v[-1] == '\\':
v = v[:-1]
# end cut trailing escapes to prevent decode error
return v.encode(defenc).decode('unicode_escape')
# end
# end
while True:
# we assume to read binary !
line = fp.readline().decode(defenc)
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or self.re_comment.match(line):
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# is it a section header?
mo = self.SECTCRE.match(line.strip())
if not is_multi_line and mo:
sectname: str = mo.group('header').strip()
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == cp.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict((('__name__', sectname),))
self._sections[sectname] = cursect
self._proxies[sectname] = None
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise cp.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
elif not is_multi_line:
mo = self.OPTCRE.match(line)
if mo:
# We might just have handled the last line, which could contain a quotation we want to remove
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval and not optval.strip().startswith('"'):
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
if optval == '""':
optval = ''
# end handle empty string
optname = self.optionxform(optname.rstrip())
if len(optval) > 1 and optval[0] == '"' and optval[-1] != '"':
is_multi_line = True
optval = string_decode(optval[1:])
# end handle multi-line
# preserves multiple values for duplicate optnames
cursect.add(optname, optval)
else:
# check if it's an option with no value - it's just ignored by git
if not self.OPTVALUEONLY.match(line):
if not e:
e = cp.ParsingError(fpname)
e.append(lineno, repr(line))
continue
else:
line = line.rstrip()
if line.endswith('"'):
is_multi_line = False
line = line[:-1]
# end handle quotations
optval = cursect.getlast(optname)
cursect.setlast(optname, optval + string_decode(line))
# END parse section or option
# END while reading
# if any parsing errors occurred, raise an exception
if e:
raise e
def _has_includes(self) -> Union[bool, int]:
return self._merge_includes and len(self._included_paths())
def _included_paths(self) -> List[Tuple[str, str]]:
"""Return List all paths that must be included to configuration
as Tuples of (option, value).
"""
paths = []
for section in self.sections():
if section == "include":
paths += self.items(section)
match = CONDITIONAL_INCLUDE_REGEXP.search(section)
if match is None or self._repo is None:
continue
keyword = match.group(1)
value = match.group(2).strip()
if keyword in ["gitdir", "gitdir/i"]:
value = osp.expanduser(value)
if not any(value.startswith(s) for s in ["./", "/"]):
value = f'**/{value}'
if value.endswith("/"):
value += "**"
# Ensure that glob is always case insensitive if required.
if keyword.endswith("/i"):
value = re.sub(
r"[a-zA-Z]",
lambda m: "[{}{}]".format(
m.group().lower(),
m.group().upper()
),
value
)
if self._repo.git_dir and fnmatch.fnmatchcase(
str(self._repo.git_dir), value
):
paths += self.items(section)
elif keyword == "onbranch":
try:
branch_name = self._repo.active_branch.name
except TypeError:
# Ignore section if active branch cannot be retrieved.
continue
if fnmatch.fnmatchcase(branch_name, value):
paths += self.items(section)
return paths
def read(self) -> None: # type: ignore[override]
"""Reads the data stored in the files we have been initialized with. It will
ignore files that cannot be read, possibly leaving an empty configuration
:return: Nothing
:raise IOError: if a file cannot be handled"""
if self._is_initialized:
return None
self._is_initialized = True
files_to_read: List[Union[PathLike, IO]] = [""]
if isinstance(self._file_or_files, (str, os.PathLike)):
# for str or Path, as str is a type of Sequence
files_to_read = [self._file_or_files]
elif not isinstance(self._file_or_files, (tuple, list, Sequence)):
# could merge with above isinstance once runtime type known
files_to_read = [self._file_or_files]
else: # for lists or tuples
files_to_read = list(self._file_or_files)
# end assure we have a copy of the paths to handle
seen = set(files_to_read)
num_read_include_files = 0
while files_to_read:
file_path = files_to_read.pop(0)
file_ok = False
if hasattr(file_path, "seek"):
                # must be a file-object
file_path = cast(IO[bytes], file_path) # replace with assert to narrow type, once sure
self._read(file_path, file_path.name)
else:
# assume a path if it is not a file-object
file_path = cast(PathLike, file_path)
try:
with open(file_path, 'rb') as fp:
file_ok = True
self._read(fp, fp.name)
except IOError:
continue
# Read includes and append those that we didn't handle yet
# We expect all paths to be normalized and absolute (and will assure that is the case)
if self._has_includes():
for _, include_path in self._included_paths():
if include_path.startswith('~'):
include_path = osp.expanduser(include_path)
if not osp.isabs(include_path):
if not file_ok:
continue
# end ignore relative paths if we don't know the configuration file path
file_path = cast(PathLike, file_path)
assert osp.isabs(file_path), "Need absolute paths to be sure our cycle checks will work"
include_path = osp.join(osp.dirname(file_path), include_path)
# end make include path absolute
include_path = osp.normpath(include_path)
if include_path in seen or not os.access(include_path, os.R_OK):
continue
seen.add(include_path)
# insert included file to the top to be considered first
files_to_read.insert(0, include_path)
num_read_include_files += 1
# each include path in configuration file
# end handle includes
# END for each file object to read
# If there was no file included, we can safely write back (potentially) the configuration file
        # without altering its meaning
if num_read_include_files == 0:
self._merge_includes = False
# end
def _write(self, fp: IO) -> None:
"""Write an .ini-format representation of the configuration state in
git compatible format"""
def write_section(name: str, section_dict: _OMD) -> None:
fp.write(("[%s]\n" % name).encode(defenc))
values: Sequence[str] # runtime only gets str in tests, but should be whatever _OMD stores
v: str
for (key, values) in section_dict.items_all():
if key == "__name__":
continue
for v in values:
fp.write(("\t%s = %s\n" % (key, self._value_to_string(v).replace('\n', '\n\t'))).encode(defenc))
# END if key is not __name__
# END section writing
if self._defaults:
write_section(cp.DEFAULTSECT, self._defaults)
value: _OMD
for name, value in self._sections.items():
write_section(name, value)
def items(self, section_name: str) -> List[Tuple[str, str]]: # type: ignore[override]
""":return: list((option, value), ...) pairs of all items in the given section"""
return [(k, v) for k, v in super(GitConfigParser, self).items(section_name) if k != '__name__']
def items_all(self, section_name: str) -> List[Tuple[str, List[str]]]:
""":return: list((option, [values...]), ...) pairs of all items in the given section"""
rv = _OMD(self._defaults)
for k, vs in self._sections[section_name].items_all():
if k == '__name__':
continue
if k in rv and rv.getall(k) == vs:
continue
for v in vs:
rv.add(k, v)
return rv.items_all()
@needs_values
def write(self) -> None:
"""Write changes to our file, if there are changes at all
:raise IOError: if this is a read-only writer instance or if we could not obtain
a file lock"""
self._assure_writable("write")
if not self._dirty:
return None
if isinstance(self._file_or_files, (list, tuple)):
raise AssertionError("Cannot write back if there is not exactly a single file to write to, have %i files"
% len(self._file_or_files))
# end assert multiple files
if self._has_includes():
            log.debug("Skipping write-back of configuration file as include files were merged in. " +
"Set merge_includes=False to prevent this.")
return None
# end
fp = self._file_or_files
# we have a physical file on disk, so get a lock
is_file_lock = isinstance(fp, (str, os.PathLike, IOBase)) # can't use Pathlike until 3.5 dropped
if is_file_lock and self._lock is not None: # else raise Error?
self._lock._obtain_lock()
if not hasattr(fp, "seek"):
fp = cast(PathLike, fp)
with open(fp, "wb") as fp_open:
self._write(fp_open)
else:
fp = cast('BytesIO', fp)
fp.seek(0)
            # make sure we do not leave stale trailing data in an existing file
if hasattr(fp, 'truncate'):
fp.truncate()
self._write(fp)
def _assure_writable(self, method_name: str) -> None:
if self.read_only:
raise IOError("Cannot execute non-constant method %s.%s" % (self, method_name))
def add_section(self, section: str) -> None:
"""Assures added options will stay in order"""
return super(GitConfigParser, self).add_section(section)
@property
def read_only(self) -> bool:
""":return: True if this instance may change the configuration file"""
return self._read_only
def get_value(self, section: str, option: str, default: Union[int, float, str, bool, None] = None
) -> Union[int, float, str, bool]:
# can default or return type include bool?
"""Get an option's value.
If multiple values are specified for this option in the section, the
last one specified is returned.
:param default:
If not None, the given default value will be returned in case
the option did not exist
:return: a properly typed value, either int, float or string
:raise TypeError: in case the value could not be understood
Otherwise the exceptions known to the ConfigParser will be raised."""
try:
valuestr = self.get(section, option)
except Exception:
if default is not None:
return default
raise
return self._string_to_value(valuestr)
def get_values(self, section: str, option: str, default: Union[int, float, str, bool, None] = None
) -> List[Union[int, float, str, bool]]:
"""Get an option's values.
If multiple values are specified for this option in the section, all are
returned.
:param default:
If not None, a list containing the given default value will be
returned in case the option did not exist
:return: a list of properly typed values, either int, float or string
:raise TypeError: in case the value could not be understood
Otherwise the exceptions known to the ConfigParser will be raised."""
try:
lst = self._sections[section].getall(option)
except Exception:
if default is not None:
return [default]
raise
return [self._string_to_value(valuestr) for valuestr in lst]
def _string_to_value(self, valuestr: str) -> Union[int, float, str, bool]:
types = (int, float)
for numtype in types:
try:
val = numtype(valuestr)
# truncated value ?
if val != float(valuestr):
continue
return val
except (ValueError, TypeError):
continue
# END for each numeric type
# try boolean values as git uses them
vl = valuestr.lower()
if vl == 'false':
return False
if vl == 'true':
return True
if not isinstance(valuestr, str):
raise TypeError(
"Invalid value type: only int, long, float and str are allowed",
valuestr)
return valuestr
def _value_to_string(self, value: Union[str, bytes, int, float, bool]) -> str:
if isinstance(value, (int, float, bool)):
return str(value)
return force_text(value)
@needs_values
@set_dirty_and_flush_changes
def set_value(self, section: str, option: str, value: Union[str, bytes, int, float, bool]) -> 'GitConfigParser':
"""Sets the given option in section to the given value.
It will create the section if required, and will not throw as opposed to the default
ConfigParser 'set' method.
:param section: Name of the section in which the option resides or should reside
:param option: Name of the options whose value to set
:param value: Value to set the option to. It must be a string or convertible
to a string
:return: this instance"""
if not self.has_section(section):
self.add_section(section)
self.set(section, option, self._value_to_string(value))
return self
@needs_values
@set_dirty_and_flush_changes
def add_value(self, section: str, option: str, value: Union[str, bytes, int, float, bool]) -> 'GitConfigParser':
"""Adds a value for the given option in section.
It will create the section if required, and will not throw as opposed to the default
ConfigParser 'set' method. The value becomes the new value of the option as returned
        by 'get_value', and appends to the list of values returned by 'get_values'.
:param section: Name of the section in which the option resides or should reside
:param option: Name of the option
:param value: Value to add to option. It must be a string or convertible
to a string
:return: this instance"""
if not self.has_section(section):
self.add_section(section)
self._sections[section].add(option, self._value_to_string(value))
return self
def rename_section(self, section: str, new_name: str) -> 'GitConfigParser':
"""rename the given section to new_name
        :raise ValueError: if section doesn't exist
:raise ValueError: if a section with new_name does already exist
:return: this instance
"""
if not self.has_section(section):
raise ValueError("Source section '%s' doesn't exist" % section)
if self.has_section(new_name):
raise ValueError("Destination section '%s' already exists" % new_name)
super(GitConfigParser, self).add_section(new_name)
new_section = self._sections[new_name]
for k, vs in self.items_all(section):
new_section.setall(k, vs)
# end for each value to copy
# This call writes back the changes, which is why we don't have the respective decorator
self.remove_section(section)
return self
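# Hedged usage sketch (not part of the original module): write one value and read
# it back. The path is hypothetical, and opening the parser with read_only=False
# requires permission to create a lock file next to it.
with GitConfigParser("/tmp/example.gitconfig", read_only=False) as writer:
    writer.set_value("user", "name", "Jane Doe")
reader = GitConfigParser("/tmp/example.gitconfig")
print(reader.get_value("user", "name", default="unknown"))  # -> Jane Doe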
| 40.686019
| 117
| 0.59396
|
abaaa889a43064ea5b905a960abf0500b7efb4e6
| 6,634
|
py
|
Python
|
interpreter.py
|
luisincrespo/ftp-client
|
09352e16b37fdb9cd14bd0e20438b7dfea774445
|
[
"MIT"
] | 5
|
2019-04-28T05:55:17.000Z
|
2020-10-16T08:54:29.000Z
|
interpreter.py
|
luisincrespo/ftp-client
|
09352e16b37fdb9cd14bd0e20438b7dfea774445
|
[
"MIT"
] | 1
|
2020-04-04T05:42:41.000Z
|
2020-04-05T10:40:29.000Z
|
interpreter.py
|
luisincrespo/ftp-client
|
09352e16b37fdb9cd14bd0e20438b7dfea774445
|
[
"MIT"
] | 3
|
2019-02-24T09:11:38.000Z
|
2020-09-27T02:59:02.000Z
|
import os
from cmd import Cmd
from client import FtpClient
class FtpInterpreter(Cmd):
"""
FTP client command line utility.
"""
def __init__(self, debug=False):
Cmd.__init__(self)
self.intro = ('FTP Client. Start typing help or ? to see available '
'commands.')
self.prompt = 'FTP > '
self._ftp_client = FtpClient(debug=debug)
def _update_prompt(self):
prompt = 'FTP'
if self._ftp_client.host is not None:
prompt = '{} {}'.format(prompt, self._ftp_client.host)
if self._ftp_client.user is not None:
prompt = '{} ({})'.format(prompt, self._ftp_client.user)
self.prompt = '{} > '.format(prompt)
def _perform_ftp_command(self, command, *args):
method = getattr(self._ftp_client, command)
try:
response = method(*args)
except (FtpClient.TimeoutException,
FtpClient.UnknownHostException,
FtpClient.ConnectionRefusedException) as e:
response = e.msg
except FtpClient.NotConnectedException as e:
response = e.msg
response = ('{}\nPlease connect to an FTP server using'
' the `connect` command').format(response)
except FtpClient.NotAuthenticatedException as e:
response = e.msg
response = ('{}\nPlease authenticate using the `login` command.')\
.format(response)
except FtpClient.LocalIOException as e:
response = e.msg
response = ('{}\nSomething went wrong trying to {} the file,'
' please try again.').format(response, command)
return response
def emptyline(self):
pass
def do_connect(self, host):
"""
        Command to connect to an FTP server at the specified host.
Args:
host (str): The host to connect to.
"""
response = self._perform_ftp_command('connect', host)
print response
self._update_prompt()
def do_login(self, *args):
"""
Command to login with user and password in the connected FTP host.
"""
user = ''
while not user:
user = raw_input('User: ')
password = ''
while not password:
password = raw_input('Password: ')
response = self._perform_ftp_command('login', user, password)
print response
self._update_prompt()
def do_logout(self, *args):
"""
Command to logout the current user from the connected FTP host.
"""
self._perform_ftp_command('logout')
self._update_prompt()
def do_list(self, filename):
"""
Command to perform LIST command on the connected FTP host.
Args:
filename (str): Name of file or directory to retrieve info for.
"""
response = self._perform_ftp_command('list', filename)
print response
def do_disconnect(self, *args):
"""
Command to disconnect from connected FTP host.
"""
response = self._perform_ftp_command('disconnect')
print response
self._update_prompt()
def do_retrieve(self, *args):
"""
Command to retrieve a file from the connected FTP host and store
it locally.
"""
filename = ''
while not filename:
filename = raw_input('Remote file: ')
local_filename = ''
while not local_filename:
local_filename = raw_input('Local file: ')
response = self._perform_ftp_command('retrieve', filename,
local_filename)
local_file = None
if isinstance(response, tuple):
response, local_file = response
print response
if local_file is not None:
local_path = os.path.realpath(local_file.name)
print 'Local file created: {}'.format(local_path)
def do_store(self, *args):
"""
Command to send a local file to the connected FTP host.
"""
local_filename = ''
while not local_filename:
local_filename = raw_input('Local file: ')
filename = ''
while not filename:
filename = raw_input('Remote file: ')
response = self._perform_ftp_command('store', local_filename,
filename)
print response
def do_pwd(self, *args):
"""
Command to retrieve the current directory on the connected FTP host.
"""
response = self._perform_ftp_command('pwd')
print response
def do_cwd(self, directory):
"""
Command to change current directory on the connected FTP host.
Args:
directory (str): Name of directory to work on.
"""
response = self._perform_ftp_command('cwd', directory)
print response
def do_cdup(self, *args):
"""
Command to set parent directory as current working directory
on the connected FTP host.
"""
response = self._perform_ftp_command('cdup')
print response
def do_mkdir(self, directory):
"""
Command to create directory on the connected FTP host.
Args:
directory (str): Name of directory to create.
"""
response = self._perform_ftp_command('mkdir', directory)
print response
def do_rm(self, filename):
"""
Command to remove file on the connected FTP host.
Args:
filename (str): Name of file to delete.
"""
response = self._perform_ftp_command('rm', filename)
print response
def do_rmdir(self, directory):
"""
Command to remove directory on the connected FTP host.
Args:
directory (str): Name of directory to delete.
"""
response = self._perform_ftp_command('rmdir', directory)
print response
def do_rename(self, *args):
"""
Command to rename a file or directory on the connected FTP host.
"""
original_filename = ''
while not original_filename:
original_filename = raw_input('Name of original remote file: ')
new_filename = ''
while not new_filename:
new_filename = raw_input('New name for remote file: ')
response = self._perform_ftp_command('rename', original_filename,
new_filename)
print response
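# Hedged sketch (not part of the original module): the interpreter is a cmd.Cmd
# subclass, so a minimal entry point just starts its read-eval loop.
if __name__ == '__main__':
    FtpInterpreter(debug=True).cmdloop()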
| 31.590476
| 78
| 0.571752
|
d87db0cda22d6c7a8db39f67d7dd92ee9d33c3b5
| 2,636
|
py
|
Python
|
gunicorn/tests/test_metadata.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | 663
|
2016-08-23T05:23:45.000Z
|
2022-03-29T00:37:23.000Z
|
gunicorn/tests/test_metadata.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | 6,642
|
2016-06-09T16:29:20.000Z
|
2022-03-31T22:24:09.000Z
|
gunicorn/tests/test_metadata.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | 1,222
|
2017-01-27T15:51:38.000Z
|
2022-03-31T18:17:51.000Z
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
from datadog_checks.gunicorn import GUnicornCheck
from .common import CHECK_NAME, CONTAINER_NAME, GUNICORN_VERSION, INSTANCE
# TODO: Test metadata in e2e when we can collect metadata from the agent
CHECK_ID = 'test:123'
def _assert_metadata(datadog_agent):
major, minor, patch = GUNICORN_VERSION.split('.')
version_metadata = {
'version.scheme': 'semver',
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': GUNICORN_VERSION,
}
datadog_agent.assert_metadata(CHECK_ID, version_metadata)
datadog_agent.assert_metadata_count(5)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
def test_collect_metadata_instance(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = setup_gunicorn['gunicorn_bin_path']
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
_assert_metadata(datadog_agent)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
def test_collect_metadata_init_config(aggregator, datadog_agent, setup_gunicorn):
init_config = {'gunicorn': setup_gunicorn['gunicorn_bin_path']}
check = GUnicornCheck(CHECK_NAME, init_config, [INSTANCE])
check.check_id = CHECK_ID
check.check(INSTANCE)
_assert_metadata(datadog_agent)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
@pytest.mark.usefixtures('dd_environment')
def test_collect_metadata_docker(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = 'docker exec {} gunicorn'.format(CONTAINER_NAME)
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
_assert_metadata(datadog_agent)
def test_collect_metadata_count(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = setup_gunicorn['gunicorn_bin_path']
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = 'test:123'
check.check(instance)
datadog_agent.assert_metadata_count(5)
def test_collect_metadata_invalid_binary(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = '/bin/not_exist'
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
datadog_agent.assert_metadata_count(0)
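# Hedged aside (not part of the original tests): the version metadata asserted in
# _assert_metadata above is just a semver split of GUNICORN_VERSION; the value
# below is a made-up example.
major, minor, patch = '20.1.0'.split('.')
print({'version.major': major, 'version.minor': minor, 'version.patch': patch})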
| 31.011765
| 84
| 0.747724
|
28f0cacd0877d5d14698a1898e77b37363fc2d02
| 1,118
|
py
|
Python
|
setup.py
|
pushfoo/eightdad
|
7464ea8212f4d8fa11f9d471736efdecf028e285
|
[
"BSD-2-Clause"
] | 1
|
2020-05-29T16:11:41.000Z
|
2020-05-29T16:11:41.000Z
|
setup.py
|
pushfoo/eightdad
|
7464ea8212f4d8fa11f9d471736efdecf028e285
|
[
"BSD-2-Clause"
] | 41
|
2020-05-29T08:01:15.000Z
|
2020-11-27T11:45:41.000Z
|
setup.py
|
pushfoo/eightdad
|
7464ea8212f4d8fa11f9d471736efdecf028e285
|
[
"BSD-2-Clause"
] | null | null | null |
from setuptools import setup, find_packages
install_requires=['bitarray','arcade==2.6.13']
tests_require = [
'pytest',
]
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='eightdad',
version='0.0.1',
packages=find_packages(),
entry_points={
"console_scripts": [
'eightdad=eightdad.frontend.arcade_front:main'
]
},
install_requires=install_requires,
tests_require=tests_require,
url='https://github.com/pushfoo/eightdad',
license='BSD-2-Clause',
author='pushfoo',
author_email='pushfoo@gmail.com',
description='Chip-8 interpreter that might one day also have other tools',
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Interpreters",
"Topic :: System :: Emulators"
],
python_requires='>=3.7'
)
| 27.268293
| 78
| 0.644902
|
537fd1c88f6238f594d78d9a53c7e2d55e5a2a6c
| 6,954
|
py
|
Python
|
src/build/mac_toolchain.py
|
Mr-Sheep/naiveproxy
|
9f6e9768295f6d1d41517a15a621d4756bd7d6be
|
[
"BSD-3-Clause"
] | null | null | null |
src/build/mac_toolchain.py
|
Mr-Sheep/naiveproxy
|
9f6e9768295f6d1d41517a15a621d4756bd7d6be
|
[
"BSD-3-Clause"
] | null | null | null |
src/build/mac_toolchain.py
|
Mr-Sheep/naiveproxy
|
9f6e9768295f6d1d41517a15a621d4756bd7d6be
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
If should_use_hermetic_xcode.py emits "1", and the current toolchain is out of
date:
* Downloads the hermetic mac toolchain
* Requires CIPD authentication. Run `cipd auth-login`, use Google account.
* Accepts the license.
* If xcode-select and xcodebuild are not passwordless in sudoers, requires
user interaction.
* Downloads standalone binaries from [a possibly different version of Xcode].
The toolchain version can be overridden by setting MAC_TOOLCHAIN_REVISION with
the full revision, e.g. 9A235.
"""
from __future__ import print_function
import argparse
import os
import pkg_resources
import platform
import plistlib
import shutil
import subprocess
import sys
# This contains binaries from Xcode 12.4 12D4e, along with the macOS 11 SDK.
# To build these packages, see comments in build/xcode_binaries.yaml
MAC_BINARIES_LABEL = 'infra_internal/ios/xcode/xcode_binaries/mac-amd64'
MAC_BINARIES_TAG = 'Za4aUIwiTUjk8rnjRow4nXbth-j7ZoN5plyOSCLidcgC'
# The toolchain will not be downloaded if the minimum OS version is not met. 19
# is the major version number for macOS 10.15. 12B5044c (Xcode 12.2rc) only runs
# on 10.15.4 and newer.
MAC_MINIMUM_OS_VERSION = [19, 4]
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
TOOLCHAIN_ROOT = os.path.join(BASE_DIR, 'mac_files')
TOOLCHAIN_BUILD_DIR = os.path.join(TOOLCHAIN_ROOT, 'Xcode.app')
# Always integrity-check the entire SDK. Mac SDK packages are complex and often
# hit edge cases in cipd (eg https://crbug.com/1033987,
# https://crbug.com/915278), and generally when this happens it requires manual
# intervention to fix.
# Note the trailing \n!
PARANOID_MODE = '$ParanoidMode CheckIntegrity\n'
def PlatformMeetsHermeticXcodeRequirements():
if sys.platform != 'darwin':
return True
needed = MAC_MINIMUM_OS_VERSION
major_version = [int(v) for v in platform.release().split('.')[:len(needed)]]
return major_version >= needed
def _UseHermeticToolchain():
current_dir = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.join(current_dir, 'mac/should_use_hermetic_xcode.py')
proc = subprocess.Popen([script_path, 'mac'], stdout=subprocess.PIPE)
return '1' in proc.stdout.readline()
def RequestCipdAuthentication():
"""Requests that the user authenticate to access Xcode CIPD packages."""
print('Access to Xcode CIPD package requires authentication.')
print('-----------------------------------------------------------------')
print()
print('You appear to be a Googler.')
print()
print('I\'m sorry for the hassle, but you may need to do a one-time manual')
print('authentication. Please run:')
print()
print(' cipd auth-login')
print()
print('and follow the instructions.')
print()
print('NOTE: Use your google.com credentials, not chromium.org.')
print()
print('-----------------------------------------------------------------')
print()
sys.stdout.flush()
def PrintError(message):
# Flush buffers to ensure correct output ordering.
sys.stdout.flush()
sys.stderr.write(message + '\n')
sys.stderr.flush()
def InstallXcodeBinaries(binaries_root=None):
"""Installs the Xcode binaries needed to build Chrome and accepts the license.
This is the replacement for InstallXcode that installs a trimmed down version
of Xcode that is OS-version agnostic.
"""
# First make sure the directory exists. It will serve as the cipd root. This
# also ensures that there will be no conflicts of cipd root.
if binaries_root is None:
binaries_root = os.path.join(TOOLCHAIN_ROOT, 'xcode_binaries')
if not os.path.exists(binaries_root):
os.makedirs(binaries_root)
# 'cipd ensure' is idempotent.
args = ['cipd', 'ensure', '-root', binaries_root, '-ensure-file', '-']
p = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate(input=PARANOID_MODE + MAC_BINARIES_LABEL +
' ' + MAC_BINARIES_TAG)
if p.returncode != 0:
print(stdout)
print(stderr)
RequestCipdAuthentication()
return 1
if sys.platform != 'darwin':
return 0
# Accept the license for this version of Xcode if it's newer than the
# currently accepted version.
cipd_xcode_version_plist_path = os.path.join(binaries_root,
'Contents/version.plist')
cipd_xcode_version_plist = plistlib.readPlist(cipd_xcode_version_plist_path)
cipd_xcode_version = cipd_xcode_version_plist['CFBundleShortVersionString']
cipd_license_path = os.path.join(binaries_root,
'Contents/Resources/LicenseInfo.plist')
cipd_license_plist = plistlib.readPlist(cipd_license_path)
cipd_license_version = cipd_license_plist['licenseID']
should_overwrite_license = True
current_license_path = '/Library/Preferences/com.apple.dt.Xcode.plist'
if os.path.exists(current_license_path):
current_license_plist = plistlib.readPlist(current_license_path)
xcode_version = current_license_plist.get(
'IDEXcodeVersionForAgreedToGMLicense')
if (xcode_version is not None and pkg_resources.parse_version(xcode_version)
>= pkg_resources.parse_version(cipd_xcode_version)):
should_overwrite_license = False
if not should_overwrite_license:
return 0
  # Use puppet's sudoers script to accept the license if it's available.
license_accept_script = '/usr/local/bin/xcode_accept_license.py'
if os.path.exists(license_accept_script):
args = [
'sudo', license_accept_script, '--xcode-version', cipd_xcode_version,
'--license-version', cipd_license_version
]
subprocess.check_call(args)
return 0
# Otherwise manually accept the license. This will prompt for sudo.
print('Accepting new Xcode license. Requires sudo.')
sys.stdout.flush()
args = [
'sudo', 'defaults', 'write', current_license_path,
'IDEXcodeVersionForAgreedToGMLicense', cipd_xcode_version
]
subprocess.check_call(args)
args = [
'sudo', 'defaults', 'write', current_license_path,
'IDELastGMLicenseAgreedTo', cipd_license_version
]
subprocess.check_call(args)
args = ['sudo', 'plutil', '-convert', 'xml1', current_license_path]
subprocess.check_call(args)
return 0
def main():
if not _UseHermeticToolchain():
print('Skipping Mac toolchain installation for mac')
return 0
parser = argparse.ArgumentParser(description='Download hermetic Xcode.')
args = parser.parse_args()
if not PlatformMeetsHermeticXcodeRequirements():
print('OS version does not support toolchain.')
return 0
return InstallXcodeBinaries()
if __name__ == '__main__':
sys.exit(main())
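# Hedged aside (not part of the original script): the Darwin version gate in
# PlatformMeetsHermeticXcodeRequirements() reduces to the list comparison below;
# the release string is a made-up example (Darwin 19.6.0 is macOS 10.15.6).
release = '19.6.0'
needed = [19, 4]  # mirrors MAC_MINIMUM_OS_VERSION above
print([int(v) for v in release.split('.')[:len(needed)]] >= needed)  # -> True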
| 34.77
| 80
| 0.714265
|
bc85ad18bb86c11487257f0042f96e4b7429bc9b
| 556
|
py
|
Python
|
entity_resolution/date_adjustment.py
|
budiryan/ScholarsNet
|
b6a9f3830c390a4420e361752f0187d8f955acfe
|
[
"MIT"
] | 9
|
2017-06-08T12:05:03.000Z
|
2021-11-08T12:19:46.000Z
|
entity_resolution/date_adjustment.py
|
budiryan/ScholarsNet
|
b6a9f3830c390a4420e361752f0187d8f955acfe
|
[
"MIT"
] | null | null | null |
entity_resolution/date_adjustment.py
|
budiryan/ScholarsNet
|
b6a9f3830c390a4420e361752f0187d8f955acfe
|
[
"MIT"
] | null | null | null |
import sqlite3
path = '../sqlite/paperDB.db'
connection = sqlite3.connect('../sqlite/paperDB.db')
with connection:
cursor = connection.cursor()
cursor.execute('SELECT * FROM papers')
rows = cursor.fetchall()
for row in rows:
row = list(row)
if row[-1] == 'arxiv':
row[5] = row[5][0:4]
cursor.execute('insert into papers2 values(?,?,?,?,?,?,?,?)', row)
else:
row[5] = row[5][-5:]
cursor.execute('insert into papers2 values(?,?,?,?,?,?,?,?)', row)
connection.commit()
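# Hedged aside (not part of the original script): the slicing above keeps the
# 4-character year prefix for arxiv rows and the last 5 characters (' YYYY')
# for everything else; the sample strings below are made up to show the effect.
print('2017-06-08'[0:4])    # -> 2017
print('12 June 2017'[-5:])  # -> ' 2017'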
| 29.263158
| 78
| 0.546763
|
45266ccfa09495f44e1f6dced368b58d92a28360
| 10,114
|
py
|
Python
|
wtl/wtgithub/tests/worker.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | null | null | null |
wtl/wtgithub/tests/worker.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | 1
|
2017-09-19T17:06:49.000Z
|
2017-09-19T17:06:49.000Z
|
wtl/wtgithub/tests/worker.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import os
from django.test import TestCase
from exam.asserts import AssertsMixin
from github import UnknownObjectException
from github.Requester import Requester
import mock
from wtl.wtgithub.models import Repository
from wtl.wtgithub.tests.factories import RepositoryFactory
from wtl.wtgithub.worker import (GithubWorker, GithubBulkWorker,
CantFindParserError, ParseError)
from wtl.wtlib.tests.factories import (ProjectFactory, LibraryFactory,
LibraryVersionFactory, LanguageFactory)
from wtl.wtlib.models import Project, Library, LibraryVersion
from wtl.wtparser.parsers import RequirementsParser
class BaseTestCase(TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.language = LanguageFactory(name='Python')
# Mock github requests
self.connectionMock = mock.Mock()
connectionClass = mock.Mock()
connectionClass.return_value = self.connectionMock
Requester.injectConnectionClasses(connectionClass, connectionClass)
def tearDown(self):
super(BaseTestCase, self).tearDown()
Requester.resetConnectionClasses()
def githubWillRespondWith(self, filename, response_code=200, headers=None):
"""
        Instead of making a real request to github, respond with the contents of the test_responses/$filename file.
"""
if headers is None:
headers = {}
content = open(os.path.join(os.path.dirname(__file__), 'test_responses', filename)).read()
response = mock.Mock()
type(response).status = mock.PropertyMock(return_value=response_code)
response.getheaders.return_value = headers
response.read.return_value = content
self.connectionMock.getresponse.return_value = response
class BaseWorkerTestCase(BaseTestCase):
def setUp(self):
super(BaseWorkerTestCase, self).setUp()
self.worker = GithubWorker()
# preload gh_rep used by most tests
self.githubWillRespondWith('get_repo/elegion__djangodash2013.json')
self.gh_rep = self.worker.github.get_repo('elegion/djangodash2013')
class GetParserForRepositoryTestCase(BaseWorkerTestCase):
def test_returns_parser(self):
self.githubWillRespondWith('get_git_tree/elegion__djangodash2013.json')
sha, parser = self.worker._get_parser_for_repository(self.gh_rep)
self.assertIsInstance(parser, RequirementsParser)
self.assertEqual(40, len(sha))
def test_returns_none(self):
self.githubWillRespondWith('get_git_tree/github__objective-c-conventions.json')
with self.assertRaises(CantFindParserError):
self.worker._get_parser_for_repository(self.gh_rep)
class GetOrCreateRepositoryTestCase(BaseWorkerTestCase, AssertsMixin):
def test_creates_repository(self):
with self.assertChanges(Repository.objects.count, before=0, after=1):
self.worker._get_or_create_repository(self.gh_rep)
repository = Repository.objects.all()[0]
self.assertEqual('elegion', repository.owner)
self.assertEqual('djangodash2013', repository.name)
def test_updates_existing_repository(self):
RepositoryFactory(name='djangodash2013',
owner='elegion',
starsCount=100)
with self.assertDoesNotChange(Repository.objects.count):
self.worker._get_or_create_repository(self.gh_rep)
repository = Repository.objects.all()[0]
self.assertEqual('elegion', repository.owner)
self.assertEqual('djangodash2013', repository.name)
self.assertEqual(0, repository.starsCount)
def test_creates_project_if_not_exist(self):
with self.assertChanges(Project.objects.count, before=0, after=1):
self.worker._get_or_create_repository(self.gh_rep)
project = Project.objects.all()[0]
self.assertEqual('djangodash2013', project.name)
self.assertEqual('djangodash2013', project.github.name)
self.assertEqual('elegion', project.github.owner)
def test_creates_project_even_if_repository_exists(self):
RepositoryFactory(name='djangodash2013',
owner='elegion',
starsCount=100)
with self.assertChanges(Project.objects.count, before=0, after=1):
self.worker._get_or_create_repository(self.gh_rep)
project = Project.objects.all()[0]
self.assertEqual('djangodash2013', project.name)
self.assertEqual('djangodash2013', project.github.name)
self.assertEqual('elegion', project.github.owner)
class ParseRequirementsTestCase(BaseWorkerTestCase):
def test_returns_parsed_requirements(self):
parser = mock.Mock()
parser.parse.return_value = {'language': self.language, 'packages': []}
self.githubWillRespondWith('get_git_blog/requrements.txt.json')
res = self.worker._parse_requirements(self.gh_rep, 'bbdce0004a897ba617f1001591c7dea665485425', parser)
self.assertIsInstance(res, dict)
self.assertDictEqual(parser.parse.return_value, res)
def test_raises_parse_error(self):
parser = mock.Mock()
parser.parse.side_effect = ValueError('some parse error')
self.githubWillRespondWith('get_git_blog/invalid-requirements.txt.json')
with self.assertRaises(ParseError):
self.worker._parse_requirements(self.gh_rep, 'dd3705261c05bd3d3609de15bff66b6b4a5dd0ad', parser)
class SaveParsedRequirementsTestCase(BaseWorkerTestCase, AssertsMixin):
def sampleDict(self):
return {
'platform': None,
'language': self.language.name,
'packages': [
{'name': 'django', 'version': '1.5.4', 'version_special': ''},
{'name': 'south', 'version': '0.8.2', 'version_special': ''},
],
'version': None,
'filename': 'requirements.txt'
}
def test_saves_packages_and_versions(self):
project = ProjectFactory()
with self.assertChanges(Library.objects.count, before=0, after=2):
with self.assertChanges(LibraryVersion.objects.count, before=0, after=2):
self.worker._save_parsed_requirements(project, self.sampleDict())
self.assertEqual(2, project.libraries.count())
lib1 = project.libraries.order_by('library__name')[0]
lib2 = project.libraries.order_by('library__name')[1]
self.assertEqual('django', lib1.library.name)
self.assertEqual('1.5.4', lib1.version)
self.assertEqual(1, lib1.total_users)
self.assertEqual('south', lib2.library.name)
self.assertEqual('0.8.2', lib2.version)
self.assertEqual(1, lib2.total_users)
def test_updates_total_users_count(self):
l1 = LibraryFactory(name='django', language=self.language)
l2 = LibraryFactory(name='south', language=self.language)
self.worker._save_parsed_requirements(ProjectFactory(),
self.sampleDict())
self.assertEqual(Library.objects.get(id=l1.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(library=l1).total_users, 1)
self.assertEqual(Library.objects.get(id=l2.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(library=l2).total_users, 1)
def test_doesnt_duplicate_libraries_and_versions(self):
project = ProjectFactory()
lib1 = LibraryFactory(name='django', language=self.language)
lib2 = LibraryFactory(name='south', language=self.language)
LibraryVersionFactory(library=lib1, version='1.5.4')
LibraryVersionFactory(library=lib2, version='0.8.2')
        with self.assertDoesNotChange(Library.objects.count):
with self.assertDoesNotChange(LibraryVersion.objects.count):
self.worker._save_parsed_requirements(project, self.sampleDict())
self.assertEqual(2, project.libraries.count())
class AnalyzeRepoTestCase(BaseWorkerTestCase):
def test_called_with_invalid_url(self):
self.githubWillRespondWith('404.json', response_code=404)
with self.assertRaises(UnknownObjectException):
self.worker.analyze_repo('invalid/url')
# Bulk worker test cases
class BaseBulkWorkerTestCase(BaseTestCase):
def setUp(self):
super(BaseBulkWorkerTestCase, self).setUp()
#Requester.resetConnectionClasses()
self.worker = GithubBulkWorker(per_page=2)
class SearchReposTestCase(BaseBulkWorkerTestCase):
def test_returns_repos(self):
self.githubWillRespondWith('search_repos/page1.json', headers={'link': '<https://api.github.com/search/repositories?q=language%3Apython&per_page=2&page=2>; rel="next", <https://api.github.com/search/repositories?q=language%3Apython&per_page=2&page=500>; rel="last"'})
res = self.worker._get_repositories('python')
it = iter(res)
# Check that response is almost endlessly paginated (new ?pages will be downloaded from github)
for i in range(200):
next(it)
class CheckRepositoryAnalyzedReposTestCase(BaseBulkWorkerTestCase):
def test_analyzed(self):
self.githubWillRespondWith('search_repos/page1.json')
repository = RepositoryFactory(name='django', owner='django')
ProjectFactory(name='django', github=repository)
rep = self.worker._get_repositories('python')[0]
self.assertTrue(self.worker._check_repository_analyzed(rep))
def test_repository_created_but_not_analyzed(self):
self.githubWillRespondWith('search_repos/page1.json')
RepositoryFactory(name='django', owner='django')
rep = self.worker._get_repositories('python')[0]
self.assertFalse(self.worker._check_repository_analyzed(rep))
def test_new_repository(self):
self.githubWillRespondWith('search_repos/page1.json')
rep = self.worker._get_repositories('python')[0]
self.assertFalse(self.worker._check_repository_analyzed(rep))
| 43.407725
| 275
| 0.69735
|
c6e060cfacd2a1d24ae08023a350d6f98eba1c03
| 1,252
|
py
|
Python
|
ProgrammerAlgorithmInterview/Chapter01/01_10_given_only_a_node_and_remove_it.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
ProgrammerAlgorithmInterview/Chapter01/01_10_given_only_a_node_and_remove_it.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
ProgrammerAlgorithmInterview/Chapter01/01_10_given_only_a_node_and_remove_it.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#coding:utf-8
class LNode(object):
def __init__(self, x=None):
self.val = x
self.next = None
def remove(r_node):
"""
    Purpose: delete the given node from a singly linked list, given only that node.
    Input: a node of the linked list.
    Returns: True if the node was deleted, False if it could not be deleted.
    """
    # If the node is None or has no successor it cannot be removed this way,
    # because the trick actually deletes the successor node.
if r_node is None or r_node.next is None:
return False
r_node.val = r_node.next.val
r_node.next = r_node.next.next
return True
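# Worked illustration of the trick above: given 1->2->3->None and only the node
# holding 2, copy 3 into that node (1->3->3->None) and then bypass the old successor,
# leaving 1->3->None. The tail node cannot be removed this way, hence the False case.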
def constructLinkedList(n, random_num):
"""
    Purpose: build a singly linked list.
    Inputs: n: number of nodes (excluding the head node and the trailing None);
            random_num: position of the node to also return a reference to.
    Returns: head: head node of the constructed list; random_node: the chosen node.
"""
head = LNode()
cur = head
for i in range(1, n+1):
tmp = LNode()
tmp.val = i
cur.next = tmp
cur = tmp
if i == random_num:
random_node = cur
return head, random_node
def printLinkedList(head):
""" 打印单链表 """
print("head->", end='')
cur = head.next
while cur:
print(cur.val, end="->")
cur = cur.next
print("None")
return None
if __name__ == "__main__":
head, r_node = constructLinkedList(7, 5)
print("before:", end=' ')
printLinkedList(head)
print("\nafter:", end=' ')
remove(r_node)
printLinkedList(head)
| 21.586207
| 59
| 0.586262
|
fe6fa1ce6288e3706241c02e3f67d93762fef4ca
| 9,775
|
py
|
Python
|
lib/galaxy/external_services/service.py
|
vimalkumarvelayudhan/galaxy
|
ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/external_services/service.py
|
vimalkumarvelayudhan/galaxy
|
ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/external_services/service.py
|
vimalkumarvelayudhan/galaxy
|
ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7
|
[
"CC-BY-3.0"
] | null | null | null |
# Contains objects for accessing external service applications
import logging
from parameters import ExternalServiceParameter
from actions import ExternalServiceAction
from galaxy.util.bunch import Bunch
log = logging.getLogger( __name__ )
class ExternalServiceActionsGroup( object ):
def __init__( self, parent, name, label=None ):
self.name = name
self.label = label
self.parent = parent
self.items = []
@classmethod
def from_elem( self, elem, parent = None ):
"""
Return ExternalServiceActionsGroup created from an xml element.
"""
if elem is not None:
name = elem.get( 'name' )
label = elem.get( 'label' )
rval = ExternalServiceActionsGroup( parent, name, label=label )
rval.load_sub_elems( elem )
else:
rval = ExternalServiceActionsGroup( None, None )
return rval
def load_sub_elems( self, elem ):
for sub_elem in elem:
if sub_elem.tag == 'param':
self.add_item( ExternalServiceParameter.from_elem( sub_elem, self ) )
elif sub_elem.tag == 'action':
self.add_item( ExternalServiceAction.from_elem( sub_elem, self ) )
elif sub_elem.tag == 'section':
self.add_item( ExternalServiceActionsGroup.from_elem( sub_elem, self ) )
elif sub_elem.tag == 'conditional':
self.add_item( ExternalServiceActionsConditional( sub_elem, self ) )
else:
raise ValueError( 'Unknown tag: %s' % sub_elem.tag )
def add_item( self, item ):
self.items.append( item )
def populate( self, service_instance, item = None, param_dict = None ):
return PopulatedExternalService( self, service_instance, item, param_dict )
def prepare_actions( self, param_dict, parent_dict, parent_section ):
group = Bunch()
group_section = ActionSection( self.name, self.label )
parent_section.append( group_section )
parent_dict[ self.name ] = group
for item in self.items:
if isinstance( item, ExternalServiceParameter ):
group[ item.name ] = item.get_value( param_dict )
elif isinstance( item, ExternalServiceActionsGroup ):
group[ item.name ] = item.prepare_actions( param_dict, group, group_section )
elif isinstance( item, ExternalServiceAction ):
group_section.append( item.populate_action( param_dict ) )
elif isinstance( item, ExternalServiceActionsConditional ):
conditional_group = Bunch()
conditional_group_section = ActionSection( item.name, item.label )#[]
group_section.append( conditional_group_section )
group[ item.name ] = conditional_group
for case in item.get_current_cases( param_dict ):
conditional_group[ case.name ] = case.prepare_actions( param_dict, conditional_group, conditional_group_section )
else:
raise TypeError( 'unknown item type found: %s' % item )
return group
class ExternalServiceActionsGroupWhen( ExternalServiceActionsGroup ):
type="when"
@classmethod
def from_elem( self, parent, elem ):
"""Loads the proper when by attributes of elem"""
when_type = elem.get( 'type' )
assert when_type in when_type_to_class, TypeError( "When type not implemented: %s" % when_type )
return when_type_to_class[ when_type ].from_elem( parent, elem )
def is_case( self, param_dict ):
raise TypeError( "Abstract method" )
def get_ref( self, param_dict ):
ref = param_dict
for ref_name in self.parent.ref:
assert ref_name in ref, "Required dependency '%s' not found in incoming values" % ref_name
ref = ref.get( ref_name )
return ref
class ValueExternalServiceActionsGroupWhen( ExternalServiceActionsGroupWhen ):
type = "value"
def __init__( self, parent, name, value, label=None ):
super( ValueExternalServiceActionsGroupWhen, self ).__init__( parent, name, label )
self.value = value
@classmethod
def from_elem( self, parent, elem ):
"""Returns an instance of this when"""
rval = ValueExternalServiceActionsGroupWhen( parent, elem.get( 'name' ), elem.get( 'value' ), elem.get( 'label' ) )
rval.load_sub_elems( elem )
return rval
def is_case( self, param_dict ):
ref = self.get_ref( param_dict )
return bool( str( ref ) == self.value )
class BooleanExternalServiceActionsGroupWhen( ExternalServiceActionsGroupWhen ):
type = "boolean"
def __init__( self, parent, name, value, label=None ):
super( BooleanExternalServiceActionsGroupWhen, self ).__init__( parent, name, label )
self.value = value
@classmethod
def from_elem( self, parent, elem ):
"""Returns an instance of this when"""
rval = BooleanExternalServiceActionsGroupWhen( parent, elem.get( 'name' ), elem.get( 'label' ) )
rval.load_sub_elems( elem )
return rval
def is_case( self, param_dict ):
ref = self.get_ref( param_dict )
return bool( ref )
class ItemIsInstanceExternalServiceActionsGroupWhen( ExternalServiceActionsGroupWhen ):
type = "item_type"
def __init__( self, parent, name, value, label=None ):
super( ItemIsInstanceExternalServiceActionsGroupWhen, self ).__init__( parent, name, label )
self.value = value
@classmethod
def from_elem( self, parent, elem ):
"""Returns an instance of this when"""
rval = ItemIsInstanceExternalServiceActionsGroupWhen( parent, elem.get( 'name' ), elem.get( 'value' ), elem.get( 'label' ) )
rval.load_sub_elems( elem )
return rval
def is_case( self, param_dict ):
ref = self.get_ref( param_dict )
return ref.__class__.__name__.lower() in map( lambda x: x.lower(), self.value.split( '.' ) ) #HACK!
when_type_to_class = {}
for class_type in [ ValueExternalServiceActionsGroupWhen, BooleanExternalServiceActionsGroupWhen, ItemIsInstanceExternalServiceActionsGroupWhen]:
when_type_to_class[ class_type.type ] = class_type
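# Illustrative (assumed) shape of the XML this machinery consumes -- the attribute
# names come from the from_elem/__init__ methods above, the values are made up:
#
#   <section name="fastq_info" label="FASTQ info">
#     <param ... />
#     <action ... />
#     <conditional name="by_state" ref="fields.state">
#       <when type="value" name="ok" value="OK" label="OK"> ... </when>
#       <when type="boolean" name="flagged" label="Flagged"> ... </when>
#     </conditional>
#   </section>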
class ExternalServiceActionsConditional( object ):
type = "conditional"
def __init__( self, elem, parent ):
self.parent = parent
self.name = elem.get( 'name', None )
assert self.name is not None, "Required 'name' attribute missing from ExternalServiceActionsConditional"
self.label = elem.get( 'label' )
self.ref = elem.get( 'ref', None )
assert self.ref is not None, "Required 'ref' attribute missing from ExternalServiceActionsConditional"
self.ref = self.ref.split( '.' )
self.cases = []
for when_elem in elem.findall( 'when' ):
self.cases.append( ExternalServiceActionsGroupWhen.from_elem( self, when_elem ) )
def get_current_cases( self, param_dict ):
rval = []
for case in self.cases:
if case.is_case( param_dict ):
rval.append( case )
return rval
class ActionSection( list ):
def __init__( self, name, label ):
list.__init__( self )
self.name = name
self.label = label
def has_action( self ):
for item in self:
if not isinstance( item, ActionSection ):
return True
else:
if item.has_action():
return True
return False
class PopulatedExternalService( object ):
def __init__( self, service_group, service_instance, item, param_dict = None ):
self.service_group = service_group
self.service_instance = service_instance
self.item = item
self.param_dict = param_dict
self.populate()
def __getattr__( self, name ):
        return getattr( self.service_instance, name )  # should this use .service or .service_instance here?
def populate( self ):
param_dict = {}
param_dict['fields'] = Bunch( **self.service_instance.form_values.content )
param_dict['item'] = self.item
param_dict['service'] = self.service_group.parent
param_dict['service_instance'] = self.service_instance
action_list = ActionSection( self.service_group.name, self.service_group.label )
for item in self.service_group.items:
if isinstance( item, ExternalServiceParameter ):
param_dict[ item.name ] = item.get_value( param_dict )
elif isinstance( item, ExternalServiceAction ):
action_list.append( item.populate_action( param_dict ) )
elif isinstance( item, ExternalServiceActionsGroup ):
item.prepare_actions( param_dict, param_dict, action_list )
else:
                raise TypeError( 'unknown item type found: %s' % item )
self.param_dict = param_dict
self.actions = action_list
def perform_action_by_name( self, actions_list ):
action = self.get_action_by_name( actions_list )
result = action.perform_action()
return action
def get_action_by_name( self, actions_list ):
action = None
actions = self.actions #populated actions
for name in actions_list:
action_found = False
for action in actions:
if action.name == name:
action_found = True
actions = action
break
assert action_found, 'Action not found: %s in %s' % ( name, actions_list )
assert action, 'Action not found: %s' % actions_list
return action
def __nonzero__( self ):
return self.actions.has_action()
| 45.465116
| 145
| 0.64133
|
013084e88df75926ad4a96c91c654dadd67bbb90
| 1,661
|
py
|
Python
|
software/authbox/test_gpio_button.py
|
brianredbeard/makerspace-auth
|
79fab40be5455e29f5d596a44d0c4978f9112109
|
[
"Apache-2.0"
] | 1
|
2019-05-29T17:02:09.000Z
|
2019-05-29T17:02:09.000Z
|
software/authbox/test_gpio_button.py
|
brianredbeard/makerspace-auth
|
79fab40be5455e29f5d596a44d0c4978f9112109
|
[
"Apache-2.0"
] | null | null | null |
software/authbox/test_gpio_button.py
|
brianredbeard/makerspace-auth
|
79fab40be5455e29f5d596a44d0c4978f9112109
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for authbox.gpio_button"""
import sys
import unittest
from authbox.compat import queue
import authbox.gpio_button
from authbox import fake_gpio_for_testing
from RPi import GPIO
class BlinkTest(unittest.TestCase):
def setUp(self):
self.fake = fake_gpio_for_testing.FakeGPIO()
self.q = queue.Queue()
self.b = authbox.gpio_button.Button(self.q, 'b', '1', '2', on_down=self.on_down)
def on_down(self):
pass
def test_on(self):
self.b.on()
self.b.run_inner()
# 2 is output
self.fake.compare_log([(0, 2, True)])
# 1 is input
self.assertEqual(GPIO.FALLING, self.fake.events[1][0])
self.fake.events[1][1](None)
self.assertEqual(self.q.get(block=False), (self.on_down, self.b))
def test_blinking_thread(self):
# TODO: Improve this test to not take 1.5 seconds of wall time by faking
# Queue.get timeouts.
self.b.start()
self.b.blink()
for i in range(4):
self.b.run_inner()
self.fake.compare_log([
(0.0, 2, True), (0.5, 2, False), (1.0, 2, True), (1.5, 2, False)])
| 30.759259
| 84
| 0.697772
|
c426d8994cfa392438d78a5d06176a7198013a57
| 19,898
|
py
|
Python
|
manila/tests/share/test_share_types.py
|
cloudification-io/manila
|
9555e181f9e1b817b1072b986d35b46b59fb4e65
|
[
"Apache-2.0"
] | 1
|
2020-06-17T13:20:21.000Z
|
2020-06-17T13:20:21.000Z
|
manila/tests/share/test_share_types.py
|
viroel/manila
|
fbcabd2c03985000bd9b4d4d9a4478bc0b784efa
|
[
"Apache-2.0"
] | null | null | null |
manila/tests/share/test_share_types.py
|
viroel/manila
|
fbcabd2c03985000bd9b4d4d9a4478bc0b784efa
|
[
"Apache-2.0"
] | 1
|
2021-02-23T05:52:11.000Z
|
2021-02-23T05:52:11.000Z
|
# Copyright 2015 Deutsche Telekom AG. All rights reserved.
# Copyright 2015 Tom Barron. All rights reserved.
# Copyright 2015 Mirantis, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Share Type methods for Manila."""
import copy
import datetime
import itertools
from unittest import mock
import ddt
from oslo_utils import strutils
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.share import share_types
from manila import test
def create_share_type_dict(extra_specs=None):
return {
'fake_type': {
'name': 'fake1',
'extra_specs': extra_specs
}
}
def return_share_type_update(context, id, values):
name = values.get('name')
description = values.get('description')
is_public = values.get('is_public')
if id == '444':
raise exception.ShareTypeUpdateFailed(id=id)
else:
st_update = {
'created_at': datetime.datetime(2019, 9, 9, 14, 40, 31),
'deleted': '0',
'deleted_at': None,
'extra_specs': {u'gold': u'True'},
'required_extra_specs': {},
'id': id,
'name': name,
'is_public': is_public,
'description': description,
'updated_at': None
}
return st_update
@ddt.ddt
class ShareTypesTestCase(test.TestCase):
fake_type = {
'test': {
'created_at': datetime.datetime(2015, 1, 22, 11, 43, 24),
'deleted': '0',
'deleted_at': None,
'extra_specs': {},
'required_extra_specs': {},
'id': u'fooid-1',
'name': u'test',
'updated_at': None
}
}
fake_extra_specs = {u'gold': u'True'}
fake_share_type_id = u'fooid-2'
fake_type_w_extra = {
'test_with_extra': {
'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31),
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': u'test_with_extra',
'updated_at': None
}
}
fake_type_update = {
'test_type_update': {
'created_at': datetime.datetime(2019, 9, 9, 14, 40, 31),
'deleted': '0',
'deleted_at': None,
'extra_specs': {u'gold': u'True'},
'required_extra_specs': {},
'id': '888',
'name': 'new_name',
'is_public': True,
'description': 'new_description',
'updated_at': None
}
}
fake_r_extra_specs = {
u'gold': u'True',
u'driver_handles_share_servers': u'True'
}
fake_r_required_extra_specs = {
u'driver_handles_share_servers': u'True'
}
fake_r_type_extra = {
'test_with_extra': {
'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31),
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_r_extra_specs,
'required_extra_specs': fake_r_required_extra_specs,
'id': fake_share_type_id,
'name': u'test_with_extra',
'updated_at': None
}
}
fake_required_extra_specs = {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true',
}
fake_optional_extra_specs = {
constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'true',
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: 'false',
constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT: 'false',
}
fake_type_w_valid_extra = {
'test_with_extra': {
'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31),
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_required_extra_specs,
'required_extra_specs': fake_required_extra_specs,
'id': u'fooid-2',
'name': u'test_with_extra',
'updated_at': None
}
}
fake_types = fake_type.copy()
fake_types.update(fake_type_w_extra)
fake_types.update(fake_type_w_valid_extra)
fake_share = {'id': u'fooid-1', 'share_type_id': fake_share_type_id}
def setUp(self):
super(ShareTypesTestCase, self).setUp()
self.context = context.get_admin_context()
@ddt.data({}, fake_type, fake_type_w_extra, fake_types)
def test_get_all_types(self, share_type):
self.mock_object(db,
'share_type_get_all',
mock.Mock(return_value=copy.deepcopy(share_type)))
returned_type = share_types.get_all_types(self.context)
self.assertItemsEqual(share_type, returned_type)
def test_get_all_types_search(self):
share_type = self.fake_type_w_extra
search_filter = {'extra_specs': {'gold': 'True'}, 'is_public': True}
self.mock_object(db,
'share_type_get_all',
mock.Mock(return_value=share_type))
returned_type = share_types.get_all_types(self.context,
search_opts=search_filter)
db.share_type_get_all.assert_called_once_with(
mock.ANY, 0, filters={'is_public': True})
self.assertItemsEqual(share_type, returned_type)
search_filter = {'extra_specs': {'gold': 'False'}}
expected_types = {}
returned_types = share_types.get_all_types(self.context,
search_opts=search_filter)
self.assertEqual(expected_types, returned_types)
share_type = self.fake_r_type_extra
search_filter = {'extra_specs': {'gold': 'True'}}
returned_type = share_types.get_all_types(self.context,
search_opts=search_filter)
self.assertItemsEqual(share_type, returned_type)
@ddt.data("nova", "supernova,nova", "supernova",
"nova,hypernova,supernova")
def test_get_all_types_search_by_availability_zone(self, search_azs):
all_share_types = {
'gold': {
'extra_specs': {
'somepoolcap': 'somevalue',
'availability_zones': 'nova,supernova,hypernova',
},
'required_extra_specs': {
'driver_handles_share_servers': True,
},
'id': '1e8f93a8-9669-4467-88a0-7b8229a9a609',
'name': u'gold-share-type',
'is_public': True,
},
'silver': {
'extra_specs': {
'somepoolcap': 'somevalue',
'availability_zones': 'nova,supernova',
},
'required_extra_specs': {
'driver_handles_share_servers': False,
},
'id': '39a7b9a8-8c76-4b49-aed3-60b718d54325',
'name': u'silver-share-type',
'is_public': True,
},
'bronze': {
'extra_specs': {
'somepoolcap': 'somevalue',
'availability_zones': 'milkyway,andromeda',
},
'required_extra_specs': {
'driver_handles_share_servers': True,
},
'id': '5a55a54d-6688-49b4-9344-bfc2d9634f70',
'name': u'bronze-share-type',
'is_public': True,
},
'default': {
'extra_specs': {
'somepoolcap': 'somevalue',
},
'required_extra_specs': {
'driver_handles_share_servers': True,
},
'id': '5a55a54d-6688-49b4-9344-bfc2d9634f70',
'name': u'bronze-share-type',
'is_public': True,
}
}
self.mock_object(
db, 'share_type_get_all', mock.Mock(return_value=all_share_types))
self.mock_object(share_types, 'get_valid_required_extra_specs')
search_opts = {
'extra_specs': {
'somepoolcap': 'somevalue',
'availability_zones': search_azs
},
'is_public': True,
}
returned_types = share_types.get_all_types(
self.context, search_opts=search_opts)
db.share_type_get_all.assert_called_once_with(
mock.ANY, 0, filters={'is_public': True})
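        # 'silver' only covers nova and supernova, so it drops out once three or more
        # zones are requested; 'bronze' never matches and 'default' (which has no
        # availability_zones restriction) always does.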
expected_return_types = (['gold', 'silver', 'default']
if len(search_azs.split(',')) < 3
else ['gold', 'default'])
self.assertItemsEqual(expected_return_types, returned_types)
def test_get_share_type_extra_specs(self):
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
id = share_type['id']
extra_spec = share_types.get_share_type_extra_specs(id, key='gold')
self.assertEqual(share_type['extra_specs']['gold'], extra_spec)
extra_spec = share_types.get_share_type_extra_specs(id)
self.assertEqual(share_type['extra_specs'], extra_spec)
def test_get_extra_specs_from_share(self):
expected = self.fake_extra_specs
self.mock_object(share_types, 'get_share_type_extra_specs',
mock.Mock(return_value=expected))
spec_value = share_types.get_extra_specs_from_share(self.fake_share)
self.assertEqual(expected, spec_value)
share_types.get_share_type_extra_specs.assert_called_once_with(
self.fake_share_type_id)
def test_update_share_type(self):
expected = self.fake_type_update['test_type_update']
self.mock_object(db,
'share_type_update',
mock.Mock(side_effect=return_share_type_update))
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=expected))
new_name = "new_name"
new_description = "new_description"
is_public = True
self.assertRaises(exception.ShareTypeUpdateFailed, share_types.update,
self.context, id='444', name=new_name,
description=new_description, is_public=is_public)
share_types.update(self.context, '888', new_name,
new_description, is_public)
st_update = share_types.get_share_type(self.context, '888')
self.assertEqual(new_name, st_update['name'])
self.assertEqual(new_description, st_update['description'])
self.assertEqual(is_public, st_update['is_public'])
@ddt.data({}, {"fake": "fake"})
def test_create_without_required_extra_spec(self, optional_specs):
specs = copy.copy(self.fake_required_extra_specs)
del specs['driver_handles_share_servers']
specs.update(optional_specs)
self.assertRaises(exception.InvalidShareType, share_types.create,
self.context, "fake_share_type", specs)
@ddt.data({"snapshot_support": "fake"})
def test_create_with_invalid_optional_extra_spec(self, optional_specs):
specs = copy.copy(self.fake_required_extra_specs)
specs.update(optional_specs)
self.assertRaises(exception.InvalidShareType, share_types.create,
self.context, "fake_share_type", specs)
def test_get_required_extra_specs(self):
result = share_types.get_required_extra_specs()
self.assertEqual(constants.ExtraSpecs.REQUIRED, result)
def test_get_optional_extra_specs(self):
result = share_types.get_optional_extra_specs()
self.assertEqual(constants.ExtraSpecs.OPTIONAL, result)
def test_get_tenant_visible_extra_specs(self):
result = share_types.get_tenant_visible_extra_specs()
self.assertEqual(constants.ExtraSpecs.TENANT_VISIBLE, result)
def test_get_boolean_extra_specs(self):
result = share_types.get_boolean_extra_specs()
self.assertEqual(constants.ExtraSpecs.BOOLEAN, result)
def test_is_valid_required_extra_spec_other(self):
actual_result = share_types.is_valid_required_extra_spec(
'fake', 'fake')
self.assertIsNone(actual_result)
@ddt.data(*itertools.product(
constants.ExtraSpecs.REQUIRED,
strutils.TRUE_STRINGS + strutils.FALSE_STRINGS))
@ddt.unpack
def test_is_valid_required_extra_spec_valid(self, key, value):
actual_result = share_types.is_valid_required_extra_spec(key, value)
self.assertTrue(actual_result)
@ddt.data('invalid', {}, '0000000000')
def test_is_valid_required_extra_spec_invalid(self, value):
key = constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS
actual_result = share_types.is_valid_required_extra_spec(key, value)
self.assertFalse(actual_result)
@ddt.data({},
{'another_key': True})
def test_get_valid_required_extra_specs_valid(self, optional_specs):
specs = copy.copy(self.fake_required_extra_specs)
specs.update(optional_specs)
actual_result = share_types.get_valid_required_extra_specs(specs)
self.assertEqual(self.fake_required_extra_specs, actual_result)
@ddt.data(None,
{},
{constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'fake'})
def test_get_valid_required_extra_specs_invalid(self, extra_specs):
self.assertRaises(exception.InvalidExtraSpec,
share_types.get_valid_required_extra_specs,
extra_specs)
@ddt.data(*(
list(itertools.product(
(constants.ExtraSpecs.SNAPSHOT_SUPPORT,
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT,
constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT,
constants.ExtraSpecs.MOUNT_SNAPSHOT_SUPPORT),
strutils.TRUE_STRINGS + strutils.FALSE_STRINGS)) +
list(itertools.product(
(constants.ExtraSpecs.REPLICATION_TYPE_SPEC,),
constants.ExtraSpecs.REPLICATION_TYPES)) +
[(constants.ExtraSpecs.AVAILABILITY_ZONES, 'zone a, zoneb$c'),
(constants.ExtraSpecs.AVAILABILITY_ZONES, ' zonea, zoneb'),
(constants.ExtraSpecs.AVAILABILITY_ZONES, 'zone1')]
))
@ddt.unpack
def test_is_valid_optional_extra_spec_valid(self, key, value):
result = share_types.is_valid_optional_extra_spec(key, value)
self.assertTrue(result)
def test_is_valid_optional_extra_spec_valid_unknown_key(self):
result = share_types.is_valid_optional_extra_spec('fake', 'fake')
self.assertIsNone(result)
def test_get_valid_optional_extra_specs(self):
extra_specs = copy.copy(self.fake_required_extra_specs)
extra_specs.update(self.fake_optional_extra_specs)
extra_specs.update({'fake': 'fake'})
result = share_types.get_valid_optional_extra_specs(extra_specs)
self.assertEqual(self.fake_optional_extra_specs, result)
def test_get_valid_optional_extra_specs_empty(self):
result = share_types.get_valid_optional_extra_specs({})
self.assertEqual({}, result)
@ddt.data({constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'fake'},
{constants.ExtraSpecs.AVAILABILITY_ZONES: 'ZoneA,'})
def test_get_valid_optional_extra_specs_invalid(self, extra_specs):
self.assertRaises(exception.InvalidExtraSpec,
share_types.get_valid_optional_extra_specs,
extra_specs)
@ddt.data(' az 1, az2 ,az 3 ', 'az 1,az2,az 3 ', None)
def test_sanitize_extra_specs(self, spec_value):
extra_specs = {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'True',
constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'True',
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: 'False'
}
expected_specs = copy.copy(extra_specs)
if spec_value is not None:
extra_specs[constants.ExtraSpecs.AVAILABILITY_ZONES] = spec_value
expected_specs['availability_zones'] = 'az 1,az2,az 3'
self.assertDictMatch(expected_specs,
share_types.sanitize_extra_specs(extra_specs))
def test_add_access(self):
project_id = '456'
extra_specs = {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true',
constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'true',
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: 'false',
}
share_type = share_types.create(self.context, 'type1', extra_specs)
share_type_id = share_type.get('id')
share_types.add_share_type_access(self.context, share_type_id,
project_id)
stype_access = db.share_type_access_get_all(self.context,
share_type_id)
self.assertIn(project_id, [a.project_id for a in stype_access])
def test_add_access_invalid(self):
self.assertRaises(exception.InvalidShareType,
share_types.add_share_type_access,
'fake', None, 'fake')
def test_remove_access(self):
project_id = '456'
extra_specs = {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true',
constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'true',
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: 'false',
}
share_type = share_types.create(
self.context, 'type1', projects=['456'], extra_specs=extra_specs)
share_type_id = share_type.get('id')
share_types.remove_share_type_access(self.context, share_type_id,
project_id)
stype_access = db.share_type_access_get_all(self.context,
share_type_id)
self.assertNotIn(project_id, stype_access)
def test_remove_access_invalid(self):
self.assertRaises(exception.InvalidShareType,
share_types.remove_share_type_access,
'fake', None, 'fake')
@ddt.data({'spec_value': '<is> True', 'expected': True},
{'spec_value': '<is>true', 'expected': True},
{'spec_value': '<is> False', 'expected': False},
{'spec_value': '<is>false', 'expected': False},
{'spec_value': u' <is> FaLsE ', 'expected': False})
@ddt.unpack
def test_parse_boolean_extra_spec(self, spec_value, expected):
result = share_types.parse_boolean_extra_spec('fake_key', spec_value)
self.assertEqual(expected, result)
@ddt.data('<isnt> True', '<is> Wrong', None, 5)
def test_parse_boolean_extra_spec_invalid(self, spec_value):
self.assertRaises(exception.InvalidExtraSpec,
share_types.parse_boolean_extra_spec,
'fake_key',
spec_value)
| 37.685606
| 78
| 0.609006
|
408169bd7883644a582a34e30466ad45bb3f9d9e
| 170
|
py
|
Python
|
sql/get_sql_table.py
|
JWen00/sthAwesome_19T2
|
8080a93a1945d7f118b4f06ee1edef075cd4a5b5
|
[
"MIT"
] | null | null | null |
sql/get_sql_table.py
|
JWen00/sthAwesome_19T2
|
8080a93a1945d7f118b4f06ee1edef075cd4a5b5
|
[
"MIT"
] | null | null | null |
sql/get_sql_table.py
|
JWen00/sthAwesome_19T2
|
8080a93a1945d7f118b4f06ee1edef075cd4a5b5
|
[
"MIT"
] | null | null | null |
import sqlite3
f = 'sqldemo.db'
conn = sqlite3.connect(f)
c = conn.cursor()
def get_table():
    # Stub: no query is implemented here.
    pass
def set_availability(id, availability):
    # Only commits the open transaction; no UPDATE is issued here.
    conn.commit()
| 13.076923
| 40
| 0.635294
|
a78ee5793d13ef1638d619e5286a55e593f5ca86
| 3,896
|
py
|
Python
|
research_2.py
|
TeamTitanz/ClassVectorRepresentation
|
7e4eadd90d0a93e60af26d9313eff06e8088f57b
|
[
"Apache-2.0"
] | null | null | null |
research_2.py
|
TeamTitanz/ClassVectorRepresentation
|
7e4eadd90d0a93e60af26d9313eff06e8088f57b
|
[
"Apache-2.0"
] | null | null | null |
research_2.py
|
TeamTitanz/ClassVectorRepresentation
|
7e4eadd90d0a93e60af26d9313eff06e8088f57b
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import cPickle
from os import walk
import random
import math
from sklearn.cluster import KMeans
from sklearn import svm
from sklearn.linear_model import Perceptron
def calculateInputs(vectorList):
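    """Summarise a list of (assumed 300-dimensional) vectors.
    Drops the first and last vectors, clusters the rest into two groups with KMeans,
    fits a linear SVM on the cluster labels, and returns five vectors: the mean of
    the SVM support vectors, the mean of all vectors, the mean of each cluster, and
    the vector closest to the overall mean.
    """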
del vectorList[-1]
del vectorList[0]
X = np.array(vectorList)
vectorCount = len(vectorList)
kmeans = KMeans(n_clusters=2)
kmeans.fit(X)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
cluster_1 = []
cluster_2 = []
for i in range(vectorCount):
if(labels[i] == 0):
cluster_1.append(vectorList[i])
else:
cluster_2.append(vectorList[i])
clf = svm.SVC(gamma = 0.001, C=100, kernel='linear')
clf.fit(X,labels)
avgCluster_1Vec = []
for i in range(300):
totalComponentValue = 0
for j in range(len(cluster_1)):
totalComponentValue += cluster_1[j][i]
avgComponentValue = totalComponentValue/float(len(cluster_1))
avgCluster_1Vec.append(avgComponentValue)
avgCluster_2Vec = []
for i in range(300):
totalComponentValue = 0
for j in range(len(cluster_2)):
totalComponentValue += cluster_2[j][i]
avgComponentValue = totalComponentValue/float(len(cluster_2))
avgCluster_2Vec.append(avgComponentValue)
avgInstanceVec = []
for i in range(300):
totalComponentValue = 0
for j in range(vectorCount):
totalComponentValue += vectorList[j][i]
avgComponentValue = totalComponentValue/float(vectorCount)
avgInstanceVec.append(avgComponentValue)
avgSVs = []
for i in range(300):
totalComponentValue = 0
for j in range(len(clf.support_vectors_)):
totalComponentValue += clf.support_vectors_[j][i]
avgComponentValue = totalComponentValue/float(len(clf.support_vectors_))
avgSVs.append(avgComponentValue)
medianVector = []
minDistance = 10000000
for i in range(vectorCount):
differenceVec = 0
for j in range(300):
differenceVec += (vectorList[i][j] - avgInstanceVec[j])**2
distance = math.sqrt(differenceVec)
if(minDistance > distance):
minDistance = distance
medianVector = vectorList[i]
medianVectorToReturn = []
for i in range(300):
medianVectorToReturn.append(medianVector[i])
return avgSVs, avgInstanceVec, avgCluster_1Vec, avgCluster_2Vec, medianVectorToReturn
vectorFileNames = next(walk('E:\MoraHack\FYP\Vectors\Final Law vectors'))[2]
finalAvgSVs = []
finalAvgInstanceVec = []
finalAvgCluster_1Vec = []
finalAvgCluster_2Vec = []
finalMedianVector = []
for i in range(12):
if(i != 6):
vectorList = cPickle.load(open(vectorFileNames[i], 'rb'))
avgSVs, avgInstanceVec, avgCluster_1Vec, avgCluster_2Vec, medianVector = calculateInputs(vectorList)
finalAvgSVs += avgSVs
finalAvgInstanceVec += avgInstanceVec
finalAvgCluster_1Vec += avgCluster_1Vec
finalAvgCluster_2Vec += avgCluster_2Vec
finalMedianVector += medianVector
# Data
d = np.array([finalAvgSVs, finalAvgInstanceVec, finalAvgCluster_1Vec, finalAvgCluster_2Vec, finalMedianVector])
# Labels
classVectors = cPickle.load(open('../classes_vectors.p', 'rb'))
del classVectors[-1]
finalLabels=[]
for i in range(11):
label = np.array(classVectors[i])
newLabel = []
for j in range(300):
temp = label[j]
newLabel.append(temp)
finalLabels += newLabel
# rotate the data 270 degrees (three successive 90-degree rotations)
d90 = np.rot90(d)
d90 = np.rot90(d90)
d90 = np.rot90(d90)
df1 = pd.DataFrame(d90)
df2 = pd.DataFrame(finalLabels)
df3 = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1.to_csv("perceptron_input.csv")
df2.to_csv("perceptron_output.csv")
df3.to_csv("perceptron_combined.csv")
| 26.868966
| 111
| 0.669405
|
3e20f42c685a9408ebee510711a2d1cd5944c318
| 13,007
|
py
|
Python
|
cynetworkx/classes/coreviews.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 12
|
2019-07-23T08:07:53.000Z
|
2022-03-09T06:13:16.000Z
|
cynetworkx/classes/coreviews.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 7
|
2019-08-30T07:00:00.000Z
|
2021-12-30T08:02:56.000Z
|
cynetworkx/classes/coreviews.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 5
|
2020-10-10T03:40:32.000Z
|
2021-11-23T12:28:53.000Z
|
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg (hagberg@lanl.gov),
# Pieter Swart (swart@lanl.gov),
# Dan Schult(dschult@colgate.edu)
"""
"""
try:
    from collections.abc import Mapping
except ImportError:  # Python 2 fallback
    from collections import Mapping
import cynetworkx as nx
__all__ = ['AtlasView', 'AdjacencyView', 'MultiAdjacencyView',
'UnionAtlas', 'UnionAdjacency',
'UnionMultiInner', 'UnionMultiAdjacency',
'FilterAtlas', 'FilterAdjacency',
'FilterMultiInner', 'FilterMultiAdjacency',
'ReadOnlyGraph',
]
class AtlasView(Mapping):
"""An AtlasView is a Read-only Mapping of Mappings.
It is a View into a dict-of-dict data structure.
The inner level of dict is read-write. But the
outer level is read-only.
See Also
========
AdjacencyView - View into dict-of-dict-of-dict
MultiAdjacencyView - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ('_atlas',)
def __getstate__(self):
return {'_atlas': self._atlas}
def __setstate__(self, state):
self._atlas = state['_atlas']
def __init__(self, d):
self._atlas = d
def __len__(self):
return len(self._atlas)
def __iter__(self):
return iter(self._atlas)
def __getitem__(self, key):
return self._atlas[key]
def copy(self):
return {n: self[n].copy() for n in self._atlas}
def __str__(self):
return str(self._atlas) # {nbr: self[nbr] for nbr in self})
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._atlas)
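# A minimal usage sketch (illustrative only, not part of the library's documentation):
#
#   >>> d = {1: {'color': 'red'}, 2: {'color': 'blue'}}
#   >>> view = AtlasView(d)
#   >>> view[1]['color'] = 'green'   # inner dicts stay writable
#   >>> view[3] = {}                 # outer level is read-only -> TypeError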
class AdjacencyView(AtlasView):
"""An AdjacencyView is a Read-only Map of Maps of Maps.
It is a View into a dict-of-dict-of-dict data structure.
The inner level of dict is read-write. But the
outer levels are read-only.
See Also
========
AtlasView - View into dict-of-dict
MultiAdjacencyView - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = () # Still uses AtlasView slots names _atlas
def __getitem__(self, name):
return AtlasView(self._atlas[name])
def copy(self):
return {n: self[n].copy() for n in self._atlas}
class MultiAdjacencyView(AdjacencyView):
"""An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps.
It is a View into a dict-of-dict-of-dict-of-dict data structure.
The inner level of dict is read-write. But the
outer levels are read-only.
See Also
========
AtlasView - View into dict-of-dict
AdjacencyView - View into dict-of-dict-of-dict
"""
__slots__ = () # Still uses AtlasView slots names _atlas
def __getitem__(self, name):
return AdjacencyView(self._atlas[name])
def copy(self):
return {n: self[n].copy() for n in self._atlas}
class UnionAtlas(Mapping):
"""A read-only union of two atlases (dict-of-dict).
The two dict-of-dicts represent the inner dict of
an Adjacency: `G.succ[node]` and `G.pred[node]`.
The inner level of dict of both hold attribute key:value
pairs and is read-write. But the outer level is read-only.
See Also
========
UnionAdjacency - View into dict-of-dict-of-dict
UnionMultiAdjacency - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ('_succ', '_pred')
def __getstate__(self):
return {'_succ': self._succ, '_pred': self._pred}
def __setstate__(self, state):
self._succ = state['_succ']
self._pred = state['_pred']
def __init__(self, succ, pred):
self._succ = succ
self._pred = pred
def __len__(self):
return len(self._succ) + len(self._pred)
def __iter__(self):
return iter(set(self._succ.keys()) | set(self._pred.keys()))
def __getitem__(self, key):
try:
return self._succ[key]
except KeyError:
return self._pred[key]
def copy(self):
result = {nbr: dd.copy() for nbr, dd in self._succ.items()}
for nbr, dd in self._pred.items():
if nbr in result:
result[nbr].update(dd)
else:
result[nbr] = dd.copy()
return result
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self._succ, self._pred)
class UnionAdjacency(Mapping):
"""A read-only union of dict Adjacencies as a Map of Maps of Maps.
The two input dict-of-dict-of-dicts represent the union of
`G.succ` and `G.pred`. Return values are UnionAtlas
The inner level of dict is read-write. But the
middle and outer levels are read-only.
succ : a dict-of-dict-of-dict {node: nbrdict}
pred : a dict-of-dict-of-dict {node: nbrdict}
The keys for the two dicts should be the same
See Also
========
UnionAtlas - View into dict-of-dict
UnionMultiAdjacency - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ('_succ', '_pred')
def __getstate__(self):
return {'_succ': self._succ, '_pred': self._pred}
def __setstate__(self, state):
self._succ = state['_succ']
self._pred = state['_pred']
def __init__(self, succ, pred):
# keys must be the same for two input dicts
assert(len(set(succ.keys()) ^ set(pred.keys())) == 0)
self._succ = succ
self._pred = pred
def __len__(self):
return len(self._succ) # length of each dict should be the same
def __iter__(self):
return iter(self._succ)
def __getitem__(self, nbr):
return UnionAtlas(self._succ[nbr], self._pred[nbr])
def copy(self):
return {n: self[n].copy() for n in self._succ}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self._succ, self._pred)
class UnionMultiInner(UnionAtlas):
"""A read-only union of two inner dicts of MultiAdjacencies.
The two input dict-of-dict-of-dicts represent the union of
`G.succ[node]` and `G.pred[node]` for MultiDiGraphs.
Return values are UnionAtlas.
The inner level of dict is read-write. But the outer levels are read-only.
See Also
========
UnionAtlas - View into dict-of-dict
UnionAdjacency - View into dict-of-dict-of-dict
UnionMultiAdjacency - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = () # Still uses UnionAtlas slots names _succ, _pred
def __getitem__(self, node):
in_succ = node in self._succ
in_pred = node in self._pred
if in_succ:
if in_pred:
return UnionAtlas(self._succ[node], self._pred[node])
return UnionAtlas(self._succ[node], {})
return UnionAtlas({}, self._pred[node])
def copy(self):
nodes = set(self._succ.keys()) | set(self._pred.keys())
return {n: self[n].copy() for n in nodes}
class UnionMultiAdjacency(UnionAdjacency):
"""A read-only union of two dict MultiAdjacencies.
The two input dict-of-dict-of-dict-of-dicts represent the union of
`G.succ` and `G.pred` for MultiDiGraphs. Return values are UnionAdjacency.
The inner level of dict is read-write. But the outer levels are read-only.
See Also
========
UnionAtlas - View into dict-of-dict
UnionMultiInner - View into dict-of-dict-of-dict
"""
__slots__ = () # Still uses UnionAdjacency slots names _succ, _pred
def __getitem__(self, node):
return UnionMultiInner(self._succ[node], self._pred[node])
class ReadOnlyGraph(object):
"""A Mixin Class to mask the write methods of a graph class."""
def not_allowed(self, *args, **kwds):
msg = "SubGraph Views are readonly. Mutations not allowed"
raise nx.NetworkXError(msg)
add_node = not_allowed
remove_node = not_allowed
add_nodes_from = not_allowed
remove_nodes_from = not_allowed
add_edge = not_allowed
remove_edge = not_allowed
add_edges_from = not_allowed
add_weighted_edges_from = not_allowed
remove_edges_from = not_allowed
clear = not_allowed
class FilterAtlas(Mapping): # nodedict, nbrdict, keydict
def __init__(self, d, NODE_OK):
self._atlas = d
self.NODE_OK = NODE_OK
def __len__(self):
return sum(1 for n in self)
def __iter__(self):
if hasattr(self.NODE_OK, 'nodes'):
return (n for n in self.NODE_OK.nodes if n in self._atlas)
return (n for n in self._atlas if self.NODE_OK(n))
def __getitem__(self, key):
if key in self._atlas and self.NODE_OK(key):
return self._atlas[key]
raise KeyError("Key {} not found".format(key))
def copy(self):
if hasattr(self.NODE_OK, 'nodes'):
return {u: self._atlas[u] for u in self.NODE_OK.nodes
if u in self._atlas}
return {u: d for u, d in self._atlas.items()
if self.NODE_OK(u)}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self._atlas,
self.NODE_OK)
class FilterAdjacency(Mapping): # edgedict
def __init__(self, d, NODE_OK, EDGE_OK):
self._atlas = d
self.NODE_OK = NODE_OK
self.EDGE_OK = EDGE_OK
def __len__(self):
return sum(1 for n in self)
def __iter__(self):
if hasattr(self.NODE_OK, 'nodes'):
return (n for n in self.NODE_OK.nodes if n in self._atlas)
return (n for n in self._atlas if self.NODE_OK(n))
def __getitem__(self, node):
if node in self._atlas and self.NODE_OK(node):
def new_node_ok(nbr):
return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr)
return FilterAtlas(self._atlas[node], new_node_ok)
raise KeyError("Key {} not found".format(node))
def copy(self):
if hasattr(self.NODE_OK, 'nodes'):
return {u: {v: d for v, d in self._atlas[u].items()
if self.NODE_OK(v) if self.EDGE_OK(u, v)}
for u in self.NODE_OK.nodes if u in self._atlas}
return {u: {v: d for v, d in nbrs.items() if self.NODE_OK(v)
if self.EDGE_OK(u, v)}
for u, nbrs in self._atlas.items()
if self.NODE_OK(u)}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self._atlas,
self.NODE_OK, self.EDGE_OK)
class FilterMultiInner(FilterAdjacency): # muliedge_seconddict
def __iter__(self):
if hasattr(self.NODE_OK, 'nodes'):
my_nodes = (n for n in self.NODE_OK.nodes if n in self._atlas)
else:
my_nodes = (n for n in self._atlas if self.NODE_OK(n))
for n in my_nodes:
some_keys_ok = False
for key in self._atlas[n]:
if self.EDGE_OK(n, key):
some_keys_ok = True
break
if some_keys_ok is True:
yield n
def __getitem__(self, nbr):
if nbr in self._atlas and self.NODE_OK(nbr):
def new_node_ok(key):
return self.EDGE_OK(nbr, key)
return FilterAtlas(self._atlas[nbr], new_node_ok)
raise KeyError("Key {} not found".format(nbr))
def copy(self):
if hasattr(self.NODE_OK, 'nodes'):
return {v: {k: d for k, d in self._atlas[v].items()
if self.EDGE_OK(v, k)}
for v in self.NODE_OK.nodes if v in self._atlas}
return {v: {k: d for k, d in nbrs.items() if self.EDGE_OK(v, k)}
for v, nbrs in self._atlas.items() if self.NODE_OK(v)}
class FilterMultiAdjacency(FilterAdjacency): # multiedgedict
def __getitem__(self, node):
if node in self._atlas and self.NODE_OK(node):
def edge_ok(nbr, key):
return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key)
return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok)
raise KeyError("Key {} not found".format(node))
def copy(self):
if hasattr(self.NODE_OK, 'nodes'):
my_nodes = self.NODE_OK.nodes
return {u: {v: {k: d for k, d in kd.items()
if self.EDGE_OK(u, v, k)}
for v, kd in self._atlas[u].items() if v in my_nodes}
for u in my_nodes if u in self._atlas}
return {u: {v: {k: d for k, d in kd.items()
if self.EDGE_OK(u, v, k)}
for v, kd in nbrs.items() if self.NODE_OK(v)}
for u, nbrs in self._atlas.items() if self.NODE_OK(u)}
| 32.116049
| 79
| 0.605213
|
a6252474876bf33e54b3278c767a278d7ae621ee
| 6,225
|
py
|
Python
|
statham/schema/elements/meta.py
|
george-fry/statham-schema
|
19aa64de8750001cbc24f2775e0684f2298f840f
|
[
"MIT"
] | 23
|
2020-06-25T15:55:29.000Z
|
2022-03-31T16:51:40.000Z
|
statham/schema/elements/meta.py
|
george-fry/statham-schema
|
19aa64de8750001cbc24f2775e0684f2298f840f
|
[
"MIT"
] | 25
|
2020-02-29T15:32:35.000Z
|
2022-03-03T17:22:45.000Z
|
statham/schema/elements/meta.py
|
george-fry/statham-schema
|
19aa64de8750001cbc24f2775e0684f2298f840f
|
[
"MIT"
] | 5
|
2020-10-18T19:14:32.000Z
|
2022-03-09T10:40:41.000Z
|
import inspect
import keyword
from typing import Any, cast, Dict, List, Tuple, Type, Union
from statham.schema.constants import Maybe, NotPassed
from statham.schema.elements.base import Element
from statham.schema.property import _Property
from statham.schema.exceptions import SchemaDefinitionError
from statham.schema.validation import (
AdditionalProperties,
Const,
Dependencies,
Enum,
InstanceOf,
MaxProperties,
MinProperties,
PropertyNames,
Required,
Validator,
)
RESERVED_PROPERTIES = dir(object) + list(keyword.kwlist) + ["_dict"]
class ObjectClassDict(dict):
"""Overriden class dictionary for the metaclass of Object.
Collects schema properties and default value if present.
"""
properties: Dict
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.properties = {}
def __setitem__(self, key, value):
if key in RESERVED_PROPERTIES and isinstance(value, _Property):
raise SchemaDefinitionError.reserved_attribute(key)
if isinstance(value, _Property):
return self.properties.__setitem__(key, value)
return super().__setitem__(key, value)
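# Illustrative effect (sketch): inside the body of a class built with ObjectMeta below,
# any attribute whose value is a _Property is diverted into ObjectClassDict.properties
# instead of the ordinary class dict, so ObjectMeta.__new__ can merge it into
# cls.properties; assigning a _Property under a reserved name raises
# SchemaDefinitionError.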
class ObjectMeta(type, Element):
"""Metaclass to allow declaring Object schemas as classes.
Collects default value and properties defined as class variables,
and binds information to those properties.
"""
additionalProperties: Union[Element, bool]
patternProperties: Maybe[Dict[str, Element]]
minProperties: Maybe[int]
maxProperties: Maybe[int]
propertyNames: Maybe[Element]
const: Maybe[Any]
enum: Maybe[List[Any]]
dependencies: Maybe[Dict[str, Union[List[str], Element]]]
@staticmethod
def __subclasses__():
        # This is overridden to prevent errors.
# TODO: Is there a more elegant way to achieve this? Perhaps
# __init_subclass__ should error to prevent this from being
# wrong.
return []
@classmethod
def __prepare__(mcs, _name, _bases, **_kwargs):
return ObjectClassDict()
# pylint: disable=too-many-locals
def __new__(
mcs,
name: str,
bases: Tuple[Type],
classdict: ObjectClassDict,
*,
default: Maybe[Any] = NotPassed(),
const: Maybe[Any] = NotPassed(),
enum: Maybe[List[Any]] = NotPassed(),
required: Maybe[List[str]] = NotPassed(),
minProperties: Maybe[int] = NotPassed(),
maxProperties: Maybe[int] = NotPassed(),
patternProperties: Maybe[Dict[str, Element]] = NotPassed(),
additionalProperties: Maybe[Union[Element, bool]] = NotPassed(),
propertyNames: Maybe[Element] = NotPassed(),
dependencies: Maybe[Dict[str, Union[List[str], Element]]] = NotPassed(),
):
cls: ObjectMeta = cast(
ObjectMeta, type.__new__(mcs, name, bases, dict(classdict))
)
previous = lambda attr, default: getattr(cls, attr, default)
get_value = (
lambda value, attr: value
if not isinstance(value, NotPassed)
else previous(attr, NotPassed())
)
cls.default = get_value(default, "default")
cls.const = get_value(const, "const")
cls.enum = get_value(enum, "enum")
cls.required = get_value(required, "required")
# https://github.com/python/mypy/issues/3004
cls.properties = { # type: ignore
**{
attr: prop.clone()
for attr, prop in previous("properties", {}).items()
},
**classdict.properties,
}
cls.minProperties = get_value(minProperties, "minProperties")
cls.maxProperties = get_value(maxProperties, "maxProperties")
cls.patternProperties = get_value(
patternProperties, "patternProperties"
)
cls.additionalProperties = (
additionalProperties
if not isinstance(additionalProperties, NotPassed)
else previous("additionalProperties", True)
)
cls.propertyNames = get_value(propertyNames, "propertyNames")
cls.dependencies = get_value(dependencies, "dependencies")
return cls
def __hash__(cls):
return hash(tuple([cls.__name__] + list(cls.properties or [])))
@property
def annotation(cls) -> str:
return cls.__name__
def __repr__(cls):
return cls.__name__
@property
def type_validator(cls) -> Validator:
return InstanceOf(dict, cls)
@property
def validators(cls) -> List[Validator]:
possible_validators = [
cls.type_validator,
Required.from_element(cls),
AdditionalProperties(cls.__properties__),
MinProperties.from_element(cls),
MaxProperties.from_element(cls),
PropertyNames.from_element(cls),
Const.from_element(cls),
Enum.from_element(cls),
Dependencies.from_element(cls),
]
return [validator for validator in possible_validators if validator]
def python(cls) -> str:
super_cls = next(iter(cls.mro()[1:]))
cls_args = [super_cls.__name__]
parameters = list(
inspect.signature(type(cls).__new__).parameters.values()
)
for param in parameters:
if param.kind != param.KEYWORD_ONLY:
continue
value = getattr(cls, param.name, NotPassed())
if value == param.default or (
param.name == "additionalProperties" and value is True
):
continue
cls_args.append(f"{param.name}={repr(value)}")
class_def = f"""class {repr(cls)}({', '.join(cls_args)}):
"""
if not cls.properties:
class_def = (
class_def
+ """
pass
"""
)
# False positive
# pylint: disable=no-member
for property_ in cast(
Dict[str, _Property], cls.properties or {}
).values():
class_def = (
class_def
+ f"""
{property_.python()}
"""
)
return class_def
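# --- Illustrative sketch, not part of the original module ---
# A minimal, self-contained example of the pattern implemented above:
# a metaclass whose __prepare__ returns a custom dict that intercepts
# class-body assignments and collects "marked" attributes on the side,
# mirroring how ObjectClassDict diverts _Property instances. All names
# below (Marker, CollectingDict, CollectingMeta, Config) are hypothetical.
class Marker:
    def __init__(self, value):
        self.value = value
class CollectingDict(dict):
    def __init__(self):
        super().__init__()
        self.collected = {}
    def __setitem__(self, key, value):
        # Divert marked values into a side dict instead of the class body.
        if isinstance(value, Marker):
            self.collected[key] = value
            return
        super().__setitem__(key, value)
class CollectingMeta(type):
    @classmethod
    def __prepare__(mcs, name, bases, **kwargs):
        return CollectingDict()
    def __new__(mcs, name, bases, classdict):
        cls = super().__new__(mcs, name, bases, dict(classdict))
        cls.collected = classdict.collected
        return cls
class Config(metaclass=CollectingMeta):
    plain = 1
    marked = Marker("hello")
assert Config.collected["marked"].value == "hello"
assert Config.plain == 1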
| 32.421875
| 80
| 0.609157
|
504a2ebc6904df41b749f89e264a7da9f7113fd5
| 11,481
|
py
|
Python
|
ormar/queryset/clause.py
|
philipp-leanix/ormar
|
e5538852218b93b6b10f4732128accaa2e3f45e0
|
[
"MIT"
] | null | null | null |
ormar/queryset/clause.py
|
philipp-leanix/ormar
|
e5538852218b93b6b10f4732128accaa2e3f45e0
|
[
"MIT"
] | null | null | null |
ormar/queryset/clause.py
|
philipp-leanix/ormar
|
e5538852218b93b6b10f4732128accaa2e3f45e0
|
[
"MIT"
] | null | null | null |
import itertools
from dataclasses import dataclass
from enum import Enum
from typing import Any, Generator, List, TYPE_CHECKING, Tuple, Type
import sqlalchemy
import ormar # noqa I100
from ormar.queryset.actions.filter_action import FilterAction
from ormar.queryset.utils import get_relationship_alias_model_and_str
if TYPE_CHECKING: # pragma no cover
from ormar import Model
class FilterType(Enum):
AND = 1
OR = 2
class FilterGroup:
"""
    Filter groups are used in complex query conditions to group `and` and `or`
    clauses in the where condition.
"""
def __init__(
self, *args: Any, _filter_type: FilterType = FilterType.AND, **kwargs: Any,
) -> None:
self.filter_type = _filter_type
self.exclude = False
self._nested_groups: List["FilterGroup"] = list(args)
self._resolved = False
self.is_source_model_filter = False
self._kwargs_dict = kwargs
self.actions: List[FilterAction] = []
def resolve(
self,
model_cls: Type["Model"],
select_related: List = None,
filter_clauses: List = None,
) -> Tuple[List[FilterAction], List[str]]:
"""
        Resolves the FilterGroup's actions to use the proper target model,
        replaces complex relation prefixes if needed, and also resolves
        nested groups.
:param model_cls: model from which the query is run
:type model_cls: Type["Model"]
:param select_related: list of models to join
:type select_related: List[str]
:param filter_clauses: list of filter conditions
:type filter_clauses: List[FilterAction]
:return: list of filter conditions and select_related list
:rtype: Tuple[List[FilterAction], List[str]]
"""
select_related = select_related if select_related is not None else []
filter_clauses = filter_clauses if filter_clauses is not None else []
qryclause = QueryClause(
model_cls=model_cls,
select_related=select_related,
filter_clauses=filter_clauses,
)
own_filter_clauses, select_related = qryclause.prepare_filter(
_own_only=True, **self._kwargs_dict
)
self.actions = own_filter_clauses
filter_clauses = filter_clauses + own_filter_clauses
self._resolved = True
if self._nested_groups:
for group in self._nested_groups:
(filter_clauses, select_related) = group.resolve(
model_cls=model_cls,
select_related=select_related,
filter_clauses=filter_clauses,
)
return filter_clauses, select_related
def _iter(self) -> Generator:
"""
Iterates all actions in a tree
:return: generator yielding from own actions and nested groups
:rtype: Generator
"""
for group in self._nested_groups:
yield from group._iter()
yield from self.actions
def _get_text_clauses(self) -> List[sqlalchemy.sql.expression.TextClause]:
"""
Helper to return list of text queries from actions and nested groups
:return: list of text queries from actions and nested groups
:rtype: List[sqlalchemy.sql.elements.TextClause]
"""
return [x.get_text_clause() for x in self._nested_groups] + [
x.get_text_clause() for x in self.actions
]
def get_text_clause(self) -> sqlalchemy.sql.expression.TextClause:
"""
        Returns all own actions and nested group conditions compiled and joined
        inside parentheses.
        Escapes characters if required.
        Substitutes an ormar Model value with its pk value.
Compiles the clause.
:return: complied and escaped clause
:rtype: sqlalchemy.sql.elements.TextClause
"""
if self.filter_type == FilterType.AND:
clause = sqlalchemy.text(
"( " + str(sqlalchemy.sql.and_(*self._get_text_clauses())) + " )"
)
else:
clause = sqlalchemy.text(
"( " + str(sqlalchemy.sql.or_(*self._get_text_clauses())) + " )"
)
return clause
def or_(*args: FilterGroup, **kwargs: Any) -> FilterGroup:
"""
Construct or filter from nested groups and keyword arguments
:param args: nested filter groups
:type args: Tuple[FilterGroup]
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: FilterGroup ready to be resolved
:rtype: ormar.queryset.clause.FilterGroup
"""
return FilterGroup(_filter_type=FilterType.OR, *args, **kwargs)
def and_(*args: FilterGroup, **kwargs: Any) -> FilterGroup:
"""
Construct and filter from nested groups and keyword arguments
:param args: nested filter groups
:type args: Tuple[FilterGroup]
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: FilterGroup ready to be resolved
:rtype: ormar.queryset.clause.FilterGroup
"""
return FilterGroup(_filter_type=FilterType.AND, *args, **kwargs)
@dataclass
class Prefix:
source_model: Type["Model"]
table_prefix: str
model_cls: Type["Model"]
relation_str: str
is_through: bool
@property
def alias_key(self) -> str:
source_model_name = self.source_model.get_name()
return f"{source_model_name}_" f"{self.relation_str}"
class QueryClause:
"""
Constructs FilterActions from strings passed as arguments
"""
def __init__(
self, model_cls: Type["Model"], filter_clauses: List, select_related: List,
) -> None:
self._select_related = select_related[:]
self.filter_clauses = filter_clauses[:]
self.model_cls = model_cls
self.table = self.model_cls.Meta.table
def prepare_filter( # noqa: A003
self, _own_only: bool = False, **kwargs: Any
) -> Tuple[List[FilterAction], List[str]]:
"""
Main external access point that processes the clauses into sqlalchemy text
clauses and updates select_related list with implicit related tables
mentioned in select_related strings but not included in select_related.
        :param _own_only: return only the clauses built from this call's kwargs
        :type _own_only: bool
:param kwargs: key, value pair with column names and values
:type kwargs: Any
:return: Tuple with list of where clauses and updated select_related list
:rtype: Tuple[List[sqlalchemy.sql.elements.TextClause], List[str]]
"""
if kwargs.get("pk"):
pk_name = self.model_cls.get_column_alias(self.model_cls.Meta.pkname)
kwargs[pk_name] = kwargs.pop("pk")
filter_clauses, select_related = self._populate_filter_clauses(
_own_only=_own_only, **kwargs
)
return filter_clauses, select_related
def _populate_filter_clauses(
self, _own_only: bool, **kwargs: Any
) -> Tuple[List[FilterAction], List[str]]:
"""
Iterates all clauses and extracts used operator and field from related
models if needed. Based on the chain of related names the target table
is determined and the final clause is escaped if needed and compiled.
:param kwargs: key, value pair with column names and values
:type kwargs: Any
:return: Tuple with list of where clauses and updated select_related list
:rtype: Tuple[List[sqlalchemy.sql.elements.TextClause], List[str]]
"""
filter_clauses = self.filter_clauses
own_filter_clauses = []
select_related = list(self._select_related)
for key, value in kwargs.items():
filter_action = FilterAction(
filter_str=key, value=value, model_cls=self.model_cls
)
select_related = filter_action.update_select_related(
select_related=select_related
)
own_filter_clauses.append(filter_action)
self._register_complex_duplicates(select_related)
filter_clauses = self._switch_filter_action_prefixes(
filter_clauses=filter_clauses + own_filter_clauses
)
if _own_only:
return own_filter_clauses, select_related
return filter_clauses, select_related
def _register_complex_duplicates(self, select_related: List[str]) -> None:
"""
        Checks if duplicate aliases are present, which can happen in self relations
        or when two joins end with the same pair of models.
        If there are duplicates, all duplicated joins are registered under the source
        model and the whole relation key (not just the last relation name).
:param select_related: list of relation strings
:type select_related: List[str]
:return: None
:rtype: None
"""
prefixes = self._parse_related_prefixes(select_related=select_related)
manager = self.model_cls.Meta.alias_manager
filtered_prefixes = sorted(prefixes, key=lambda x: x.table_prefix)
grouped = itertools.groupby(filtered_prefixes, key=lambda x: x.table_prefix)
for _, group in grouped:
sorted_group = sorted(
group, key=lambda x: len(x.relation_str), reverse=True
)
for prefix in sorted_group[:-1]:
if prefix.alias_key not in manager:
manager.add_alias(alias_key=prefix.alias_key)
def _parse_related_prefixes(self, select_related: List[str]) -> List[Prefix]:
"""
Walks all relation strings and parses the target models and prefixes.
:param select_related: list of relation strings
:type select_related: List[str]
:return: list of parsed prefixes
:rtype: List[Prefix]
"""
prefixes: List[Prefix] = []
for related in select_related:
prefix = Prefix(
self.model_cls,
*get_relationship_alias_model_and_str(
self.model_cls, related.split("__")
),
)
prefixes.append(prefix)
return prefixes
def _switch_filter_action_prefixes(
self, filter_clauses: List[FilterAction]
) -> List[FilterAction]:
"""
Substitutes aliases for filter action if the complex key (whole relation str) is
present in alias_manager.
:param filter_clauses: raw list of actions
:type filter_clauses: List[FilterAction]
:return: list of actions with aliases changed if needed
:rtype: List[FilterAction]
"""
for action in filter_clauses:
if isinstance(action, FilterGroup):
for action2 in action._iter():
self._verify_prefix_and_switch(action2)
else:
self._verify_prefix_and_switch(action)
return filter_clauses
def _verify_prefix_and_switch(self, action: "FilterAction") -> None:
"""
Helper to switch prefix to complex relation one if required
:param action: action to switch prefix in
:type action: ormar.queryset.actions.filter_action.FilterAction
"""
manager = self.model_cls.Meta.alias_manager
new_alias = manager.resolve_relation_alias(self.model_cls, action.related_str)
if "__" in action.related_str and new_alias:
action.table_prefix = new_alias
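# --- Illustrative sketch, not part of the original module ---
# FilterGroup.get_text_clause above wraps sqlalchemy.sql.and_/or_ around
# already compiled text clauses. A standalone illustration of that
# composition (sqlalchemy is already imported at the top of this module;
# the column names below are made up):
def _example_text_clause() -> str:
    inner = sqlalchemy.text(
        "( "
        + str(
            sqlalchemy.sql.or_(
                sqlalchemy.text("name = 'a'"), sqlalchemy.text("name = 'b'")
            )
        )
        + " )"
    )
    outer = sqlalchemy.text(
        "( " + str(sqlalchemy.sql.and_(inner, sqlalchemy.text("active = 1"))) + " )"
    )
    return str(outer)  # "( ( name = 'a' OR name = 'b' ) AND active = 1 )"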
| 36.447619
| 88
| 0.646895
|
8265df9fe2cb8a0bd815fd4c324b356c7b35c4f5
| 110
|
py
|
Python
|
dummy/book/urls.py
|
KeeperNight/Citric
|
c472b6f172393ada87453cab17c760d155c4800c
|
[
"Apache-2.0"
] | 1
|
2019-11-25T08:10:32.000Z
|
2019-11-25T08:10:32.000Z
|
dummy/book/urls.py
|
KeeperNight/Citric
|
c472b6f172393ada87453cab17c760d155c4800c
|
[
"Apache-2.0"
] | null | null | null |
dummy/book/urls.py
|
KeeperNight/Citric
|
c472b6f172393ada87453cab17c760d155c4800c
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
    path('', views.about_book, name="book"),
]
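# --- Illustrative sketch, not part of the original file ---
# views.about_book lives in book/views.py, which is not included here.
# A hypothetical minimal implementation might look like:
#
#     from django.http import HttpResponse
#
#     def about_book(request):
#         return HttpResponse("About this book")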
| 13.75
| 42
| 0.7
|
f3890719340cbf811e3e09837ced9a1cb3868ead
| 742
|
py
|
Python
|
setup.py
|
amane-katagiri/mdx_cite
|
35c280023c5a9cca72c455b92cbd7e9477b869c0
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
amane-katagiri/mdx_cite
|
35c280023c5a9cca72c455b92cbd7e9477b869c0
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
amane-katagiri/mdx_cite
|
35c280023c5a9cca72c455b92cbd7e9477b869c0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
setup(
name='mdx_cite',
version='1.0',
author='Alexandre Leray',
author_email='alexandre@stdin.fr',
description='Python-Markdown extension to support the <cite> tag.',
url='http://activearchives.org/',
py_modules=['mdx_cite'],
install_requires=['Markdown>=2.0',],
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Programming Language :: Python',
'Topic :: Text Processing :: Filters',
'Topic :: Text Processing :: Markup :: HTML'
]
)
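# --- Illustrative sketch, not part of the original file ---
# Hypothetical post-install usage, assuming mdx_cite exposes the
# conventional makeExtension() entry point used by Python-Markdown
# extensions (not shown in this setup script):
def _example_usage():
    import markdown
    return markdown.markdown("Some text to convert", extensions=["mdx_cite"])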
| 27.481481
| 71
| 0.613208
|
74b2ed20c1c5c4ea5a3d91408dff2c76e991aae9
| 408
|
py
|
Python
|
escalate/core/models/base_classes/chemistry_base_class.py
|
darkreactions/ESCALATE
|
0020da00b81a2dd80d1c9fd72d2edf92b519e605
|
[
"MIT"
] | 11
|
2020-09-29T13:59:02.000Z
|
2022-03-23T04:57:52.000Z
|
escalate/core/models/base_classes/chemistry_base_class.py
|
darkreactions/ESCALATE
|
0020da00b81a2dd80d1c9fd72d2edf92b519e605
|
[
"MIT"
] | 95
|
2019-11-18T20:10:49.000Z
|
2022-03-31T17:09:49.000Z
|
escalate/core/models/base_classes/chemistry_base_class.py
|
darkreactions/ESCALATE
|
0020da00b81a2dd80d1c9fd72d2edf92b519e605
|
[
"MIT"
] | 2
|
2021-11-26T18:22:08.000Z
|
2022-03-31T11:57:10.000Z
|
from django.db import models
class ChemistryBaseColumns(models.Model):
'''
    Currently there are no variables common to all classes in chemistry_data, so this is empty.
    It should still stay, to maintain the Template -> Model -> Object coding convention, and if common columns
    are introduced across the chemistry tables they should be added here.
'''
class Meta:
abstract = True
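# --- Illustrative sketch, not part of the original module ---
# Concrete chemistry models would inherit from the abstract base so that
# any columns later added to ChemistryBaseColumns propagate automatically.
# The model and field below are hypothetical, for illustration only.
class ExampleReagent(ChemistryBaseColumns):
    name = models.CharField(max_length=255)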
| 37.090909
| 110
| 0.72549
|
3751071156f72c3d1ad9e0ea38cbbff5dbc21e9e
| 3,289
|
py
|
Python
|
game/consumers/base_game_consumer.py
|
dimadk24/english-fight-api
|
506a3eb2cb4cb91203b1e023b5248c27975df075
|
[
"MIT"
] | null | null | null |
game/consumers/base_game_consumer.py
|
dimadk24/english-fight-api
|
506a3eb2cb4cb91203b1e023b5248c27975df075
|
[
"MIT"
] | null | null | null |
game/consumers/base_game_consumer.py
|
dimadk24/english-fight-api
|
506a3eb2cb4cb91203b1e023b5248c27975df075
|
[
"MIT"
] | null | null | null |
from typing import Optional, Type
from asgiref.sync import async_to_sync
from channels.generic.websocket import JsonWebsocketConsumer
from django.db.models import Model
from rest_framework.serializers import Serializer
from typing_extensions import TypedDict
from common.string_utils import snake_case
from game.consumers.websocket_errors import (
AUTH_FAILED_ERROR,
)
from game.models import AppUser, GameDefinition
from game.serializers.serializer_utils import get_serializer_by_model_name
class InputContent(TypedDict):
type: str
data: Optional[dict]
class Scope(TypedDict):
user: AppUser
game_def_id: str
game_def: GameDefinition
url_route: dict
class BaseGameConsumer(JsonWebsocketConsumer):
scope: Scope
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.room_group_name = None
def connect(self):
game_def_id = self.scope['url_route']['kwargs']['game_def_id']
try:
self.scope['game_def'] = GameDefinition.objects.get(id=game_def_id)
except GameDefinition.DoesNotExist:
return self.close()
if self.scope['game_def'].started:
return self.close()
self.scope['game_def_id'] = game_def_id
self.room_group_name = f'game-{game_def_id}'
async_to_sync(self.channel_layer.group_add)(
self.room_group_name, self.channel_name
)
self.accept()
def disconnect(self, close_code):
if self.room_group_name:
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name, self.channel_name
)
def receive_json(self, content: InputContent, **kwargs):
event_type = content.get('type')
if not event_type:
raise Exception(
'WS event should include "type" prop. '
f'Input content: {content}'
)
if not self.scope.get('user') and event_type != 'authenticate':
return self.close(AUTH_FAILED_ERROR)
method_name = event_type.replace('-', '_')
if not hasattr(self, method_name):
raise Exception(
f'received {event_type} event, '
f'but there is no method {method_name} to handle it'
)
method = getattr(self, method_name)
event_data = content.get('data')
method(event_data)
def send_data(
self,
event_type: str,
instance: Optional[Model] = None,
data: Optional[dict] = None,
serializer: Optional[Type[Serializer]] = None,
serializer_kwargs: Optional[dict] = None,
model_name: Optional[str] = '',
):
data_to_send = {'type': event_type}
if instance:
model_name = model_name or snake_case(type(instance).__name__)
if serializer is None:
serializer = get_serializer_by_model_name(model_name)
data_to_send['model'] = model_name
if serializer_kwargs is None:
serializer_kwargs = {}
data_to_send['instance'] = serializer(
instance, **serializer_kwargs
).data
if data:
data_to_send['data'] = data
self.send_json(data_to_send)
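# --- Illustrative sketch, not part of the original module ---
# receive_json above dispatches {"type": "start-game", "data": {...}} to a
# method named start_game on the consumer. A hypothetical subclass handler
# (the class, event name and payload below are made up for illustration):
class ExampleGameConsumer(BaseGameConsumer):
    def start_game(self, data):
        # Reply with a plain data event; send_data builds and sends the JSON.
        self.send_data(event_type='game-started', data=data or {})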
| 31.625
| 79
| 0.633931
|
3b70ffe7b375ce9013dd273e364bfb063a21cd0a
| 4,043
|
py
|
Python
|
tests/unit/cli/errors_test.py
|
pareshmg/compose
|
cba758361499d74ef26bf281b73206e6dc12b5c9
|
[
"Apache-2.0"
] | 2
|
2020-08-30T12:57:11.000Z
|
2021-01-21T13:17:43.000Z
|
tests/unit/cli/errors_test.py
|
pareshmg/compose
|
cba758361499d74ef26bf281b73206e6dc12b5c9
|
[
"Apache-2.0"
] | 38
|
2021-07-19T21:08:06.000Z
|
2022-03-28T21:11:05.000Z
|
tests/unit/cli/errors_test.py
|
pareshmg/compose
|
cba758361499d74ef26bf281b73206e6dc12b5c9
|
[
"Apache-2.0"
] | 3
|
2020-09-22T02:56:37.000Z
|
2021-03-15T10:31:24.000Z
|
import pytest
from docker.errors import APIError
from requests.exceptions import ConnectionError
from compose.cli import errors
from compose.cli.errors import handle_connection_errors
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
@pytest.yield_fixture
def mock_logging():
with mock.patch('compose.cli.errors.log', autospec=True) as mock_log:
yield mock_log
def patch_find_executable(side_effect):
return mock.patch(
'compose.cli.errors.find_executable',
autospec=True,
side_effect=side_effect)
class TestHandleConnectionErrors:
def test_generic_connection_error(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with patch_find_executable(['/bin/docker', None]):
with handle_connection_errors(mock.Mock()):
raise ConnectionError()
_, args, _ = mock_logging.error.mock_calls[0]
assert "Couldn't connect to Docker daemon" in args[0]
def test_api_error_version_mismatch(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.38')):
raise APIError(None, None, b"client is newer than server")
_, args, _ = mock_logging.error.mock_calls[0]
assert "Docker Engine of version 18.06.0 or greater" in args[0]
def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.38')):
raise APIError(None, None, "client is newer than server")
_, args, _ = mock_logging.error.mock_calls[0]
assert "Docker Engine of version 18.06.0 or greater" in args[0]
def test_api_error_version_other(self, mock_logging):
msg = b"Something broke!"
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise APIError(None, None, msg)
mock_logging.error.assert_called_once_with(msg.decode('utf-8'))
def test_api_error_version_other_unicode_explanation(self, mock_logging):
msg = "Something broke!"
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise APIError(None, None, msg)
mock_logging.error.assert_called_once_with(msg)
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_no_data(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(232, 'WriteFile', 'The pipe is being closed.')
_, args, _ = mock_logging.error.mock_calls[0]
assert "The current Compose file version is not compatible with your engine version." in args[0]
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_misc(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(231, 'WriteFile', 'The pipe is busy.')
_, args, _ = mock_logging.error.mock_calls[0]
assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_encoding_issue(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(9999, 'WriteFile', 'I use weird characters \xe9')
_, args, _ = mock_logging.error.mock_calls[0]
assert 'Windows named pipe error: I use weird characters \xe9 (code: 9999)' == args[0]
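# --- Illustrative sketch, not part of the original test module ---
# Usage pattern exercised by the tests above: docker client calls are
# wrapped in handle_connection_errors so connection failures are logged
# and re-raised as errors.ConnectionError. The client below is a mock
# stand-in, for illustration only.
def _example_usage():
    client = mock.Mock(api_version='1.22')
    with handle_connection_errors(client):
        client.ping()  # any docker client call would go here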
| 42.114583
| 104
| 0.695523
|
ba5cdfd3c1c0c2c7293a5f23667305aa04b5bcdc
| 3,169
|
py
|
Python
|
cogniac/tenant.py
|
CogniacViSean/cogniac-sdk-py
|
11f985a07bbaf5616315abbb0a56c75bc7928472
|
[
"Apache-2.0"
] | null | null | null |
cogniac/tenant.py
|
CogniacViSean/cogniac-sdk-py
|
11f985a07bbaf5616315abbb0a56c75bc7928472
|
[
"Apache-2.0"
] | null | null | null |
cogniac/tenant.py
|
CogniacViSean/cogniac-sdk-py
|
11f985a07bbaf5616315abbb0a56c75bc7928472
|
[
"Apache-2.0"
] | null | null | null |
"""
CogniacTenant Object
Copyright (C) 2016 Cogniac Corporation
"""
import json
from retrying import retry
from .common import *
TENANT_ADMIN_ROLE = "tenant_admin"
TENANT_USER_ROLE = "tenant_user"
TENANT_VIEWER_ROLE = "tenant_viewer"
TENANT_BILLING_ROLE = "tenant_billing"
##
# CogniacTenant
##
class CogniacTenant(object):
@classmethod
@retry(stop_max_attempt_number=8, wait_exponential_multiplier=500, retry_on_exception=server_error)
def get(cls, connection):
resp = connection._get("/tenants/current")
return CogniacTenant(connection, json.loads(resp.content))
def __init__(self, connection, tenant_dict):
super(CogniacTenant, self).__setattr__('_tenant_keys', tenant_dict.keys())
self._cc = connection
for k, v in tenant_dict.items():
super(CogniacTenant, self).__setattr__(k, v)
def __str__(self):
return "%s (%s)" % (self.name, self.tenant_id)
def __repr__(self):
return "%s (%s)" % (self.name, self.tenant_id)
def __setattr__(self, name, value):
if name not in self._tenant_keys:
super(CogniacTenant, self).__setattr__(name, value)
return
data = {name: value}
resp = self._cc._post("/tenants/%s" % self.tenant_id, json=data)
for k, v in resp.json().items():
super(CogniacTenant, self).__setattr__(k, v)
def users(self):
resp = self._cc._get("/tenants/%s/users" % self.tenant_id)
return resp.json()['data']
def set_user_role(self, user_email, role):
users = self.users()
        users = [u for u in users if u['email'] == user_email]
if not users:
raise Exception("unknown user_email %s" % user_email)
data = {'user_id': users[0]['user_id'], 'role': role}
self._cc._post("/tenants/%s/users/role" % self.tenant_id, json=data)
def add_user(self, user_email, role='tenant_user'):
users = self.users()
        users = [u for u in users if u['email'] == user_email]
if not users:
raise Exception("unknown user_email %s" % user_email)
data = {'user_id': users[0]['user_id'], 'role': role}
self._cc._post("/tenants/%s/users" % self.tenant_id, json=data)
def delete_user(self, user_email):
users = self.users()
        users = [u for u in users if u['email'] == user_email]
if not users:
raise Exception("unknown user_email %s" % user_email)
data = {'user_id': users[0]['user_id']}
self._cc._delete("/tenants/%s/users" % self.tenant_id, json=data)
def usage(self, start, end, period='15min'):
assert(period in ['15min', 'hour', 'day'])
url = "/usage/summary?period=%s&start=%d&end=%d" % (period, start, end)
@retry(stop_max_attempt_number=8, wait_exponential_multiplier=500, retry_on_exception=server_error)
def get_next(url):
resp = self._cc._get(url)
return resp.json()
while url:
resp = get_next(url)
for record in resp['data']:
yield record
url = resp['paging'].get('next')
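# --- Illustrative sketch, not part of the original module ---
# Typical usage, assuming an authenticated SDK connection object has been
# created elsewhere (its construction is outside this module; the argument
# name below is hypothetical):
def _example_usage(connection):
    tenant = CogniacTenant.get(connection)
    print(tenant)  # "<name> (<tenant_id>)"
    for user in tenant.users():
        print(user['email'])
    tenant.add_user('new.user@example.com', role=TENANT_USER_ROLE)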
| 33.357895
| 107
| 0.618807
|
85bb47485a2e79619633eb3ff98dbda483aca65f
| 303,714
|
py
|
Python
|
pandas/core/frame.py
|
mojones/pandas
|
3d4f9dc19d784526f71a197bfb6e36b0409e0760
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-04-13T02:14:37.000Z
|
2020-04-13T02:14:37.000Z
|
pandas/core/frame.py
|
mojones/pandas
|
3d4f9dc19d784526f71a197bfb6e36b0409e0760
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-04-16T05:21:06.000Z
|
2020-04-16T05:21:06.000Z
|
pandas/core/frame.py
|
mojones/pandas
|
3d4f9dc19d784526f71a197bfb6e36b0409e0760
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-06-19T11:52:05.000Z
|
2020-06-19T11:52:05.000Z
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
import collections
from collections import abc
import datetime
from io import StringIO
import itertools
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib, properties
from pandas._typing import (
ArrayLike,
Axes,
Axis,
Dtype,
FilePathOrBuffer,
Label,
Level,
Renamer,
)
from pandas.compat import PY37
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
cast_scalar_to_array,
coerce_to_dtypes,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
maybe_upcast,
maybe_upcast_putmask,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_named_tuple,
is_object_dtype,
is_period_dtype,
is_scalar,
is_sequence,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.multi import MultiIndex, maybe_droplevels
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
get_names_from_index,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
reorder_arrays,
sanitize_index,
to_arrays,
)
from pandas.core.ops.missing import dispatch_fill_zeros
from pandas.core.series import Series
from pandas.io.common import get_filepath_or_buffer
from pandas.io.formats import console, format as fmt
from pandas.io.formats.info import info
import pandas.plotting
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes="index, columns",
klass="DataFrame",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel="",
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects.
.. versionchanged:: 0.23.0
If data is a dict, column order follows insertion-order for
Python 3.6 and later.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order
for Python 3.6 and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
@property
def _constructor(self) -> Type["DataFrame"]:
return DataFrame
_constructor_sliced: Type[Series] = Series
_deprecations: FrozenSet[str] = NDFrame._deprecations | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
def _constructor_expanddim(self):
raise NotImplementedError("Not supported for DataFrames!")
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array(
(len(index), len(columns)), data, dtype=dtype
)
mgr = init_ndarray(
values, index, columns, dtype=values.dtype, copy=False
)
else:
raise ValueError("DataFrame constructor not properly called!")
NDFrame.__init__(self, mgr)
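    # --- Illustrative note, not part of the original module ---
    # One constructor path handled above but not covered by the class
    # docstring examples: a list of namedtuples, where column names are
    # taken from the tuple's _fields when `columns` is not given.
    #
    #     >>> Point = collections.namedtuple("Point", ["x", "y"])
    #     >>> pd.DataFrame([Point(1, 2), Point(3, 4)])
    #        x  y
    #     0  1  2
    #     1  3  4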
# ----------------------------------------------------------------------
@property
def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
return not self._mgr.is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
"""
Can we transpose this DataFrame without creating any new array objects.
"""
if self._data.any_extension_types:
# TODO(EA2D) special case would be unnecessary with 2D EAs
return False
return len(self._data.blocks) == 1
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
In case of non-interactive session, no boundaries apply.
        `ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[: min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace("<", r"&lt;", 1)
            val = val.replace(">", r"&gt;", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
table_id=None,
render_links=False,
)
return formatter.to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[int] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[fmt.FormattersType] = None,
float_format: Optional[fmt.FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
line_width=line_width,
)
return formatter.to_string(buf=buf, encoding=encoding)
# ----------------------------------------------------------------------
@property
def style(self) -> "Styler":
"""
Returns a Styler object.
Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler : Helps style a DataFrame or Series according to the
data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[Tuple[Label, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[Tuple[Label, Series]]:
yield from self.items()
def iterrows(self) -> Iterable[Tuple[Label, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python versions before 3.7 support at most 255 arguments to constructors
can_return_named_tuples = PY37 or len(self.columns) + index < 255
if name is not None and can_return_named_tuples:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame of a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
        Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method works also if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
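        Examples
        --------
        A small illustration; the frames below are purely illustrative, and
        multiplying by an identity-like frame returns the original values.
        >>> df = pd.DataFrame([[1, 2], [3, 4]])
        >>> other = pd.DataFrame([[1, 0], [0, 1]])
        >>> df @ other
           0  1
        0  1  2
        1  3  4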
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
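        Examples
        --------
        A small illustration; a plain nested list on the left-hand side
        dispatches to this reflected method.
        >>> df = pd.DataFrame([[1, 2], [3, 4]])
        >>> [[1, 0], [0, 1]] @ df
           0  1
        0  1  2
        1  3  4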
"""
return self.T.dot(np.transpose(other)).T
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> "DataFrame":
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(self, dtype=None, copy: bool = False) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
        ``float16`` and ``float32``, the resulting dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
result = np.array(self.values, dtype=dtype, copy=copy)
return result
def to_dict(self, orient="dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
orient = orient.lower()
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
"list",
"series",
"split",
"records",
"index",
}:
warnings.warn(
"Using short name for 'orient' is deprecated. Only the "
"options: ('dict', list, 'series', 'split', 'records', 'index') "
"will be used in a future version. Use one of the above "
"to silence this warning.",
FutureWarning,
)
if orient.startswith("d"):
orient = "dict"
elif orient.startswith("l"):
orient = "list"
elif orient.startswith("sp"):
orient = "split"
elif orient.startswith("s"):
orient = "series"
elif orient.startswith("r"):
orient = "records"
elif orient.startswith("i"):
orient = "index"
if orient == "dict":
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient == "list":
return into_c((k, v.tolist()) for k, v in self.items())
elif orient == "split":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(com.maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient == "series":
return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items())
for row in rows
]
elif orient == "index":
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
            List of BigQuery table fields to which the DataFrame columns
            conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
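        Examples
        --------
        A minimal usage sketch; ``my-project`` and ``my_dataset.my_table``
        are placeholder names for an existing BigQuery project and table.
        >>> df = pd.DataFrame({"my_string": ["a", "b", "c"]})
        >>> df.to_gbq("my_dataset.my_table",
        ...           project_id="my-project")  # doctest: +SKIP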
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
) -> "DataFrame":
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
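        Examples
        --------
        A small illustration using a structured array; the field names are
        arbitrary.
        >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
        ...                 dtype=[('col_1', 'i4'), ('col_2', 'U1')])
        >>> pd.DataFrame.from_records(data)
           col_1 col_2
        0      3     a
        1      2     b
        2      1     c
        3      0     d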
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in data.items():
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, ABCMultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = list(map(np.array, zip(*self.index.values)))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [
np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, ABCMultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
            # columns), whichever is applicable.
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(
cls, arrays, columns, index, dtype=None, verify_integrity=True
) -> "DataFrame":
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The rows labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
            that all elements of `arrays` are actual arrays, as they will be
stored in a block (numpy ndarray or ExtensionArray), have the same
length as and are aligned with the index, and that `columns` and
`index` are ensured to be an Index object.
Returns
-------
DataFrame
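        Examples
        --------
        A rough sketch of internal usage; the arrays and labels below are
        illustrative only.
        >>> arrays = [np.array([1, 2]), np.array(["a", "b"])]
        >>> pd.DataFrame._from_arrays(arrays, columns=["x", "y"], index=[0, 1])
           x  y
        0  1  a
        1  2  b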
"""
mgr = arrays_to_mgr(
arrays,
columns,
index,
columns,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls(mgr)
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path: FilePathOrBuffer,
convert_dates: Optional[Dict[Label, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Label, str]] = None,
version: Optional[int] = 114,
convert_strl: Optional[Sequence[Label]] = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {114, 117, 118, 119, None}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
            * Columns listed in convert_dates are neither datetime64[ns]
                nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import StataWriter117 as statawriter # type: ignore
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import StataWriterUTF8 as statawriter # type:ignore
kwargs: Dict[str, Any] = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
# mypy: Too many arguments for "StataWriter"
writer = statawriter( # type: ignore
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
**kwargs,
)
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path, **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
path : str
String file path.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
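        Examples
        --------
        A minimal sketch; ``"out.feather"`` is a placeholder output path.
        >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
        >>> df.to_feather("out.feather")  # doctest: +SKIP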
"""
from pandas.io.feather_format import to_feather
to_feather(self, path, **kwargs)
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
)
@Substitution(klass="DataFrame")
@Appender(_shared_docs["to_markdown"])
def to_markdown(
self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs
) -> Optional[str]:
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
buf, _, _, _ = get_filepath_or_buffer(buf, mode=mode)
assert buf is not None # Help mypy.
buf.writelines(result)
return None
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path,
engine="auto",
compression="snappy",
index=None,
partition_cols=None,
**kwargs,
) -> None:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 1.0.0
Previously this was "fname"
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
encoding=None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
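        Examples
        --------
        A minimal sketch; the rendered markup is elided here.
        >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
        >>> print(df.to_html())  # doctest: +SKIP
        <table border="1" class="dataframe">
        ...
        </table>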
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
bold_rows=bold_rows,
escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
table_id=table_id,
render_links=render_links,
)
        # TODO: a generic formatter would be in DataFrameFormatter
return formatter.to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
)
# ----------------------------------------------------------------------
@doc(info)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
) -> None:
return info(self, verbose, buf, max_cols, memory_usage, null_counts)
def memory_usage(self, index=True, deep=False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
            are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.000000+0.000000j 1 True
1 1 1.0 1.000000+0.000000j 1 True
2 1 1.0 1.000000+0.000000j 1 True
3 1 1.0 1.000000+0.000000j 1 True
4 1 1.0 1.000000+0.000000j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5216
"""
result = Series(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = Series(self.index.memory_usage(deep=deep), index=["Index"]).append(
result
)
return result
def transpose(self, *args, copy: bool = False) -> "DataFrame":
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_values = self.values.T
if copy:
new_values = new_values.copy()
result = self._constructor(
new_values, index=self.columns, columns=self.index
)
return result.__finalize__(self, method="transpose")
@property
def T(self) -> "DataFrame":
return self.transpose()
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
        If a slice is passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._mgr.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._mgr.iget(i)
result = self._box_col_values(values, i)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
"""
return self._data.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, ABCMultiIndex):
data = data[key]
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
def _get_value(self, index, col, takeable: bool = False):
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item_cache(col)
engine = self.index._engine
try:
loc = engine.get_loc(index)
return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
self.iloc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.iloc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
self.loc._ensure_listlike_indexer(key, axis=1)
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.iloc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _iset_item(self, loc: int, value):
self._ensure_valid_index(value)
# technically _sanitize_column expects a label, not a position,
# but the behavior is the same as long as we pass broadcast=False
value = self._sanitize_column(loc, value, broadcast=False)
NDFrame._iset_item(self, loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
        If the value is a numpy array (not a Series), it must be the
        same length as the DataFrame's index or an error will be thrown.
        A Series will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_value(self, index, col, value, takeable: bool = False):
"""
Put single value at passed column and index.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
"""
try:
if takeable is True:
series = self._ixs(col, axis=1)
series._set_value(index, value, takeable=True)
return
series = self._get_item_cache(col)
engine = self.index._engine
loc = engine.get_loc(index)
validate_numeric_casting(series.dtype, value)
series._values[loc] = value
# Note: trying to use series._set_value breaks tests in
# tests.frame.indexing.test_indexing and tests.indexing.test_partial
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError) as err:
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a Series"
) from err
self._mgr = self._mgr.reindex_axis(
value.index.copy(), axis=1, fill_value=np.nan
)
def _box_col_values(self, values, loc: int) -> Series:
"""
Provide boxed values for a column.
"""
# Lookup in columns so that if e.g. a str datetime was passed
# we attach the Timestamp object as the name.
name = self.columns[loc]
klass = self._constructor_sliced
return klass(values, index=self.index, name=name, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that contain spaces or operators by
surrounding them in backticks. This way you can also escape
names that start with a digit, or those that are a Python keyword.
            In general, backticks are needed whenever the column name is not a
            valid Python identifier. See the Notes section below for more details.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
        are converted internally to a valid Python identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
        This excludes whitespace other than the space character,
        as well as the hash sign (as it is used for comments) and the backtick
        itself (the backtick cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
result = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
result = self[res]
if inplace:
self._update_inplace(result)
else:
return result
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> "DataFrame":
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<https://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
include = frozenset(infer_dtype_from_object(x) for x in include)
exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
) -> List[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
if issubclass(unique_dtype.type, tuple(dtypes_set)) # type: ignore
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
return self.iloc[:, keep_these.values]
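# Illustrative sketch (assumed frame, not part of the docstring above): the
# include/exclude entries are normalized through infer_dtype_from_object, so
# aliases and concrete dtypes behave the same way, e.g.
#
#   df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5], "c": ["x", "y"]})
#   df.select_dtypes(include="number")    # keeps "a" and "b"
#   df.select_dtypes(include=[np.int64])  # keeps only "a" on platforms whose
#                                         # default integer dtype is int64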
def insert(self, loc, column, value, allow_duplicates=False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional, default False
Whether to allow a column label that already exists in the DataFrame.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
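# Minimal usage sketch (hypothetical data, kept as a comment because the
# docstring above has no Examples section):
#
#   df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   df.insert(1, "a_plus_b", df["a"] + df["b"])  # columns: a, a_plus_b, b
#   # Inserting an existing label raises ValueError unless
#   # allow_duplicates=True is passed.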
def assign(self, **kwargs) -> "DataFrame":
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign, where a later
column depends on one defined earlier in the same call:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
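# Usage note for the loop above (hypothetical columns): because kwargs are
# applied in insertion order,
#
#   df.assign(b=lambda x: x["a"] + 1, c=lambda x: x["b"] * 2)
#
# may refer to the freshly created "b" when computing "c", as the docstring's
# temp_f/temp_k example shows.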
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
# other
raise TypeError(
"incompatible index of inserted column with frame index"
) from err
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, ABCMultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns, ABCMultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
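# Sketch of the broadcast branch above (assumed duplicate column labels): if
# self.columns is ["x", "x"] and a 1-D value is assigned to "x", the value is
# tiled to shape (2, n) so each duplicate column receives its own copy before
# the block manager stores it.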
@property
def _series(self):
return {
item: Series(self._mgr.iget(idx), index=self.index, name=item)
for idx, item in enumerate(self.columns)
}
def lookup(self, row_labels, col_labels) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
if not (self.index.is_unique and self.columns.is_unique):
# GH#33041
raise ValueError("DataFrame.lookup requires unique index and columns")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
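# Usage sketch (hypothetical labels; the docstring above has no Examples
# section):
#
#   df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["r0", "r1"])
#   df.lookup(["r0", "r1"], ["a", "b"])  # -> array([1, 4])
#
# The fast path flattens each (row, col) pair into a single position in the
# underlying values array; the object-dtype fallback fetches values one pair
# at a time.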
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy,
level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy,
level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy, fill_value) -> "DataFrame":
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
) -> "DataFrame":
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub=" column or",
axis_description_sub=", and 1 identifies the columns",
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> "DataFrame":
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return super().reindex(**kwargs)
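# Note on the signature handling above: validate_axis_style_args translates
# both calling styles, so (assuming a frame ``df``)
#
#   df.reindex(["a", "b"], axis="columns")
#   df.reindex(columns=["a", "b"])
#
# end up with the same ``columns=...`` keyword before NDFrame.reindex runs.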
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional["DataFrame"]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame
DataFrame with the renamed axis labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
) -> Optional["DataFrame"]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def _replace_columnwise(
self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res.iloc[:, i] = newobj
if inplace:
return
return res.__finalize__(self)
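# Sketch of the column-wise dispatch above (hypothetical mapping): with
# mapping == {"a": (0, -1)}, only column "a" is rewritten via
# Series.replace(0, -1, regex=regex); all other columns are left untouched.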
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "DataFrame":
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: List[Label] = []
for col in keys:
if isinstance(
col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)
):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError as err:
raise TypeError(
f"{err_msg}. Received column of type {type(col)}"
) from err
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = list(self.index.names)
if isinstance(self.index, ABCMultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: List[Label] = []
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (ABCIndexClass, ABCSeries)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
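# Sketch of the key validation above (assumed inputs): a 2-D array, e.g.
# df.set_index(np.ones((3, 2))), fails the ndim check and raises ValueError
# with err_msg, while an unknown column label such as
# df.set_index("no_such_column") is collected into ``missing`` and raises
# KeyError("None of ['no_such_column'] are in the columns").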
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Label = "",
) -> Optional["DataFrame"]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
# TODO(https://github.com/pandas-dev/pandas/issues/24206)
# Push this into maybe_upcast_putmask?
# We can't pass EAs there right now. Looks a bit
# complicated.
# So we unbox the ndarray_values, op, re-box.
values_type = type(values)
values_dtype = values.dtype
if issubclass(values_type, DatetimeLikeArray):
values = values._data # TODO: can we de-kludge yet?
if mask.any():
values, _ = maybe_upcast_putmask(values, mask, np.nan)
if issubclass(values_type, DatetimeLikeArray):
values = values_type(values, dtype=values_dtype)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, ABCMultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, ABCMultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
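# Sketch of the level re-insertion above (MultiIndex case, assumed index
# names ["class", "name"]): the levels are walked in reverse and each is
# inserted at position 0, so when several levels are reset at once the first
# level ends up leftmost among the new columns, matching the docstring
# examples.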
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self) -> "DataFrame":
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self) -> "DataFrame":
return self.isna()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self) -> "DataFrame":
return ~self.isna()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self) -> "DataFrame":
return ~self.isna()
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. versionchanged:: 1.0.0
Passing a tuple or list of axes is no longer supported; only a single
axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
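# Sketch of the mask logic above (assumed 3-column frame, axis=0): with
# how="any" a row is kept only when its non-NA count equals 3; with how="all"
# any row with count > 0 survives; passing thresh=2 takes precedence over
# ``how`` and keeps rows with at least two non-NA values.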
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Optional["DataFrame"]:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
duplicated = self.duplicated(subset, keep=keep)
result = self[~duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
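# Note on the inplace branch above: _update_inplace swaps the filtered result
# back into ``self``, so (assuming a frame ``df``)
# df.drop_duplicates(ignore_index=True, inplace=True) both deduplicates and
# renumbers the index to 0..n-1 in place.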
def duplicated(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
) -> "Series":
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
Boolean series for each duplicated rows.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, for each set of duplicated values, the first occurrence
is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
By setting ``keep`` to False, all duplicates are marked True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
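# Sketch of the hashing scheme above (assumed two-column subset): each column
# is factorized to integer codes, get_group_index combines the per-column
# codes into one id per row, and duplicated_int64 marks repeated ids
# according to ``keep`` -- which is why rows only compare as duplicates when
# *all* subset columns match.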
# ----------------------------------------------------------------------
# Sorting
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
def sort_values(
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position)
indexer = ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position
)
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
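# Sketch of the two branches above (hypothetical frame): sorting by several
# keys, e.g. df.sort_values(["a", "b"], ascending=[True, False]), goes
# through lexsort_indexer, while a single key such as df.sort_values("a")
# uses nargsort on that column alone.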
def sort_index(
self,
axis=0,
level=None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
):
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame
The original DataFrame sorted by the labels.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order; to sort in descending order,
use ``ascending=False``
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
"""
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
if level is not None:
new_axis, indexer = labels.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(labels, ABCMultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(
labels._get_codes_for_sorting(),
orders=ascending,
na_position=na_position,
)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if (ascending and labels.is_monotonic_increasing) or (
not ascending and labels.is_monotonic_decreasing
):
if inplace:
return
else:
return self.copy()
indexer = nargsort(
labels, kind=kind, ascending=ascending, na_position=na_position
)
baxis = self._get_block_manager_axis(axis)
new_data = self._mgr.take(indexer, axis=baxis, verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_index")
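# Note on the early return in the plain-Index branch above (GH11080): if the
# axis is already monotonic in the requested direction,
# sort_index(ascending=True) on an already-sorted index simply returns a copy
# (or None when inplace=True) without computing an indexer.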
def value_counts(
self,
subset: Optional[Sequence[Label]] = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
6 0 1
2 2 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
6 0 0.25
2 2 0.25
dtype: float64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset).size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
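# Sketch of the MultiIndex coercion above (single-column case): for
# df.value_counts(subset=["num_legs"]) the groupby produces a flat index, so
# it is wrapped in MultiIndex.from_arrays to keep the return type consistent
# with the multi-column case shown in the docstring.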
def nlargest(self, n, columns, keep="first") -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep="first") -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame":
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
"""
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), ABCMultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, ABCMultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, ABCMultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0) -> "DataFrame":
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), ABCMultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, ABCMultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, ABCMultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other: "DataFrame", func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
if ops.should_series_dispatch(self, other, func):
# iterate over columns
new_data = ops.dispatch_to_series(self, other, _arith_op)
else:
with np.errstate(all="ignore"):
res_values = _arith_op(self.values, other.values)
new_data = dispatch_fill_zeros(func, self.values, other.values, res_values)
return new_data
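# Sketch of the fill_value path above (assumed scalar fill): with
# fill_value=0, ops.fill_binop replaces positions that are NA in exactly one
# operand with 0 before ``func`` runs, so 1 + NaN becomes 1 + 0 while
# NaN + NaN stays NaN.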
def _construct_result(self, result) -> "DataFrame":
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, index=self.index, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
return out
def combine(
self, other: "DataFrame", func, fill_value=None, overwrite=True
) -> "DataFrame":
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved:
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
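        # build the result column-by-column over the union of columns: align
        # each pair of Series, optionally fill NaNs, reconcile dtypes, then
        # apply `func`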
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
                # If `col` is not in self, the aligned `series` is all NaN;
                # try to cast it to the other column's dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: "DataFrame") -> "DataFrame":
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view("i8")
return arr
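        # combiner: for each aligned column, keep values from x (self) where
        # present and fall back to y (other) where x is NA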
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(
self, other, join="left", overwrite=True, filter_func=None, errors="ignore"
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
        For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
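        # column-by-column: build a boolean mask of positions that keep the
        # original values and write `that` into the remaining positions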
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
) -> "DataFrameGroupBy":
from pandas.core.groupby.generic import DataFrameGroupBy
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
    aggregation; multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of columns names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged:: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
        values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
>>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
>>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> "DataFrame":
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
        it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
        it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> "DataFrame":
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def explode(self, column: Union[str, Tuple]) -> "DataFrame":
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
if columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Explode a DataFrame from list-like columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged. Empty list-likes will
result in a np.nan for that row.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
# TODO: use overload to refine return type of reset_index
assert df is not None # needed for mypy
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs[
"melt"
] = """
Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or str, optional
If columns are a MultiIndex then use this level to melt.
Returns
-------
DataFrame
Unpivoted DataFrame.
See Also
--------
%(other)s : Identical method.
pivot_table : Create a spreadsheet-style pivot table as a DataFrame.
DataFrame.pivot : Return reshaped DataFrame organized
by given index / column values.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
@Appender(
_shared_docs["melt"]
% dict(
caller="df.melt(",
versionadded="\n .. versionadded:: 0.20.0\n",
other="melt",
)
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
) -> "DataFrame":
from pandas.core.reshape.melt import melt
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods: int = 1, axis: Axis = 0) -> "DataFrame":
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
Returns
-------
DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Notes
-----
For boolean dtypes, this uses :meth:`operator.xor` rather than
:meth:`operator.sub`.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
self._consolidate_inplace()
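        # axis=1 (differences across columns) is handled by transposing,
        # diffing along the rows, and transposing back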
if bm_axis == 0 and periods != 0:
return self.T.diff(periods, axis=0).T
new_data = self._mgr.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: Union[str, List[str]],
ndim: int,
subset: Optional[Union[Series, ABCDataFrame]] = None,
) -> Union[Series, ABCDataFrame]:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.EWM : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@Substitution(
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
versionadded="\n.. versionadded:: 0.20.0\n",
**_shared_doc_kwargs,
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result = None
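        # try the generic aggregation machinery first; if it cannot handle
        # `func` (TypeError / no result), fall back to apply()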
try:
result, how = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = self.T._aggregate(arg, *args, **kwargs)
result = result.T if result is not None else result
return result, how
return super()._aggregate(arg, *args, **kwargs)
agg = aggregate
@Appender(_shared_docs["transform"] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame":
axis = self._get_axis_number(axis)
if axis == 1:
return self.T.transform(func, *args, **kwargs).T
return super().transform(func, *args, **kwargs)
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing ``result_type='expand'`` will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwds=kwds,
)
return op.get_result()
def applymap(self, func) -> "DataFrame":
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Notes
-----
In the current implementation applymap calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self, other, ignore_index=False, verify_integrity=False, sort=False
) -> "DataFrame":
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, do not use the index labels.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
        The following, while not recommended ways of generating DataFrames,
        show two approaches to building a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
def join(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
) -> "DataFrame":
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
            * outer: form union of calling frame's index (or column if on is
              specified) with `other`'s index, and sort it
              lexicographically.
            * inner: form intersection of calling frame's index (or column if
              on is specified) with `other`'s index, preserving the order
              of the calling frame's index.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
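        # a single DataFrame (or named Series) is joined via merge; for a list
        # of frames, use one concat when every index is unique, otherwise fall
        # back to chained index-on-index merges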
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
) -> "DataFrame":
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs) -> "DataFrame":
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1) -> "DataFrame":
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Series.corr : Compute the correlation between two Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == "pearson":
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
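            # fill the correlation matrix pairwise over the upper triangle and
            # mirror it; pairs with fewer than `min_periods` overlapping finite
            # observations get NaN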
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None) -> "DataFrame":
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.EWM.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
        because the estimated covariance matrix is not guaranteed to be positive
        semi-definite. This could lead to estimated correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
        <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`__
        for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
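        # fast path: with no missing values, use np.cov on the transposed
        # values; otherwise use the pairwise NaN-aware routine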
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
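        Examples
        --------
        An illustrative sketch (the frames below are constructed so that every
        aligned column pair is perfectly correlated, hence the 1.0 values):
        >>> index = ["a", "b", "c", "d", "e"]
        >>> columns = ["one", "two", "three", "four"]
        >>> df1 = pd.DataFrame(np.arange(20).reshape(5, 4),
        ...                    index=index, columns=columns)
        >>> df2 = pd.DataFrame(np.arange(16).reshape(4, 4),
        ...                    index=index[:4], columns=columns)
        >>> df1.corrwith(df2)
        one      1.0
        two      1.0
        three    1.0
        four     1.0
        dtype: float64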
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
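            # adding the other frame times zero propagates its NaNs, so both
            # sides share the same missing-value pattern before demeaning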
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = Series(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype("int64")
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, ABCMultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
# Mask NaNs: Mask rows or columns where the index level is NaN, and all
# values in the DataFrame that are NaN
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
values_mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
values_mask = notna(frame.values)
index_mask = notna(count_axis.get_level_values(level=level))
if axis == 1:
mask = index_mask & values_mask
else:
mask = index_mask.reshape(-1, 1) & values_mask
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._shallow_copy(name=level_name)
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
result = DataFrame(counts, index=agg_axis, columns=level_index)
else:
result = DataFrame(counts, index=level_index, columns=agg_axis)
return result
def _reduce(
self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
):
assert filter_type is None or filter_type == "bool", filter_type
dtype_is_dt = np.array(
[
is_datetime64_any_dtype(values.dtype) or is_period_dtype(values.dtype)
for values in self._iter_column_arrays()
],
dtype=bool,
)
if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
"will include datetime64, datetime64tz, and PeriodDtype columns in a "
"future version.",
FutureWarning,
stacklevel=3,
)
cols = self.columns[~dtype_is_dt]
self = self[cols]
if axis is None and filter_type == "bool":
labels = None
constructor = None
else:
# TODO: Make other agg func handle axis=None properly
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
constructor = self._constructor
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
def _get_data(axis_matters):
if filter_type is None:
data = self._get_numeric_data()
elif filter_type == "bool":
if axis_matters:
# GH#25101, GH#24434
data = self._get_bool_data() if axis == 0 else self
else:
data = self._get_bool_data()
else: # pragma: no cover
msg = (
f"Generating numeric_only data with filter_type {filter_type} "
"not supported."
)
raise NotImplementedError(msg)
return data
if numeric_only is not None and axis in [0, 1]:
df = self
if numeric_only is True:
df = _get_data(axis_matters=True)
if axis == 1:
df = df.T
axis = 0
out_dtype = "bool" if filter_type == "bool" else None
def blk_func(values):
if values.ndim == 1 and not isinstance(values, np.ndarray):
# we can't pass axis=1
return op(values, axis=0, skipna=skipna, **kwds)
return op(values, axis=1, skipna=skipna, **kwds)
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager._reduce
res = df._mgr.reduce(blk_func)
assert isinstance(res, dict)
if len(res):
assert len(res) == max(list(res.keys())) + 1, res.keys()
out = df._constructor_sliced(res, index=range(len(res)), dtype=out_dtype)
out.index = df.columns
if axis == 0 and df.dtypes.apply(needs_i8_conversion).any():
# FIXME: needs_i8_conversion check is kludge, not sure
# why it is necessary in this case and this case alone
out[:] = coerce_to_dtypes(out.values, df.dtypes)
return out
if not self._is_homogeneous_type:
# try to avoid self.values call
if filter_type is None and axis == 0 and len(self) > 0:
# operate column-wise
# numeric_only must be None here, as other cases caught above
# require len(self) > 0 bc frame_apply messes up empty prod/sum
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
from pandas.core.apply import frame_apply
opa = frame_apply(
self, func=f, result_type="expand", ignore_failures=True
)
result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0].rename(None)
return result
data = self
if numeric_only is None:
data = self
values = data.values
try:
result = f(values)
except TypeError:
# e.g. in nanops trying to convert strs to float
                # TODO: why doesn't axis matter here?
data = _get_data(axis_matters=False)
labels = data._get_agg_axis(axis)
values = data.values
with np.errstate(all="ignore"):
result = f(values)
else:
if numeric_only:
data = _get_data(axis_matters=True)
labels = data._get_agg_axis(axis)
values = data.values
else:
data = self
values = data.values
result = f(values)
if filter_type == "bool" and is_object_dtype(values) and axis is None:
# work around https://github.com/numpy/numpy/issues/10489
            # TODO: can we de-duplicate parts of this with the next block?
result = np.bool_(result)
elif hasattr(result, "dtype") and is_object_dtype(result.dtype):
try:
if filter_type is None:
result = result.astype(np.float64)
elif filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, data.dtypes)
if constructor is not None:
result = self._constructor_sliced(result, index=labels)
return result
def nunique(self, axis=0, dropna=True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax : Return index of the maximum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the maximum value in each column.
>>> df.idxmax()
consumption Wheat Products
co2_emissions Beef
dtype: object
To return the index for the maximum value in each row, use ``axis="columns"``.
>>> df.idxmax(axis="columns")
Pork co2_emissions
Wheat Products consumption
Beef co2_emissions
dtype: object
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(self, axis=0, numeric_only=False, dropna=True) -> "DataFrame":
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
        By default, missing values are not considered, and the modes of
        ``wings`` are both 0.0 and 2.0. The second row of ``species`` and
        ``legs`` contains ``NaN``, because they have only one mode, but the
        DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
        Setting ``dropna=False``, ``NaN`` values are considered, and they can
        be the mode (as for ``wings``).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'}, default 0
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
result = data._mgr.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(
self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True
) -> "DataFrame":
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
return new_obj
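# A minimal sketch of to_timestamp (assuming a standard pandas install): the
# monthly PeriodIndex built below becomes a DatetimeIndex anchored at the
# start of each period.
import pandas as pd
pdf = pd.DataFrame({"sales": [10, 20]},
                   index=pd.period_range("2020-01", periods=2, freq="M"))
print(pdf.to_timestamp(how="start").index)
# DatetimeIndex(['2020-01-01', '2020-02-01'], dtype='datetime64[ns]', freq='MS')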
def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> "DataFrame":
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, default
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
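# The inverse direction as a minimal sketch (same assumptions as above): a
# month-start DatetimeIndex collapses to a monthly PeriodIndex, the frequency
# being inferred from the index because freq is not passed.
import pandas as pd
dtf = pd.DataFrame({"sales": [10, 20]},
                   index=pd.date_range("2020-01-01", periods=2, freq="MS"))
print(dtf.to_period().index)  # PeriodIndex(['2020-01', '2020-02'], dtype='period[M]', ...)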
def isin(self, values) -> "DataFrame":
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
return DataFrame(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
_AXIS_NUMBERS = {"index": 0, "columns": 1}
_AXIS_NAMES = {0: "index", 1: "columns"}
_AXIS_REVERSED = True
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
_info_axis_name = "columns"
index: "Index" = properties.AxisProperty(
axis=1, doc="The index (row labels) of the DataFrame."
)
columns: "Index" = properties.AxisProperty(
axis=0, doc="The column labels of the DataFrame."
)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
return new_data
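# A hedged sketch of what _from_nested_dict above computes (run in the context
# of this module): the outer and inner keys of a nested mapping are swapped,
# i.e. data[row][col] becomes new_data[col][row], which is the column-major
# layout that DataFrame construction expects.
nested = {"r1": {"a": 1, "b": 2}, "r2": {"a": 3, "b": 4}}
assert dict(_from_nested_dict(nested)) == {"a": {"r1": 1, "r2": 3},
                                           "b": {"r1": 2, "r2": 4}}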
| 34.411285
| 170
| 0.531263
|
f45d869f3863fa3f668180b94babdbe4cf2d6ea6
| 9,022
|
py
|
Python
|
RB_UTILITY/mcp2210/commands.py
|
calcite/ResetBuddy
|
864c1789aa8cf360124e96b15f0ab45057d52ecb
|
[
"MIT"
] | 1
|
2016-04-12T19:25:02.000Z
|
2016-04-12T19:25:02.000Z
|
sw/display/mcp/commands.py
|
misisnik/ExternalInterface
|
a936b5999a3441374f0af38f2b012d9ae8291dae
|
[
"MIT"
] | null | null | null |
sw/display/mcp/commands.py
|
misisnik/ExternalInterface
|
a936b5999a3441374f0af38f2b012d9ae8291dae
|
[
"MIT"
] | null | null | null |
from ctypes import Structure, c_ubyte, c_ushort, c_uint, c_char
class CommandHeader(Structure):
_fields_ = [('command', c_ubyte),
('subcommand', c_ubyte),
('reserved_1', c_ubyte),
('reserved_2', c_ubyte)]
class ResponseHeader(Structure):
_fields_ = [('command', c_ubyte),
('status', c_ubyte),
('subcommand', c_ubyte),
('reserved', c_ubyte)]
class Response(Structure):
pass
class EmptyResponse(Response):
_anonymous_ = ['header']
_fields_ = [('header', ResponseHeader)]
class Command(Structure):
def __init__(self, *args, **kwargs):
super(Command, self).__init__((self.COMMAND, self.SUBCOMMAND, 0x00, 0x00), *args, **kwargs)
class SetBootSettingsCommand(Command):
COMMAND = 0x60
RESPONSE = EmptyResponse
class ChipSettings(Structure):
_fields_ = [('pin_designations', c_ubyte * 9),
('gpio_outputs', c_ushort),
('gpio_directions', c_ushort),
('other_settings', c_ubyte),
('access_control', c_ubyte),
('new_password', c_char * 8)]
class SetBootChipSettingsCommand(SetBootSettingsCommand):
SUBCOMMAND = 0x20
_fields_ = [('header', CommandHeader),
('settings', ChipSettings)]
class SPISettings(Structure):
_fields_ = [('bit_rate', c_uint),
('idle_cs', c_ushort),
('active_cs', c_ushort),
('cs_data_delay', c_ushort),
('lb_cs_delay', c_ushort),
('interbyte_delay', c_ushort),
('spi_tx_size', c_ushort),
('spi_mode', c_ubyte)]
class SetBootSPISettingsCommand(SetBootSettingsCommand):
SUBCOMMAND = 0x10
_fields_ = [('header', CommandHeader),
('settings', SPISettings)]
class USBSettings(Structure):
_fields_ = [('vid', c_ushort),
('pid', c_ushort),
('power_option', c_ubyte),
('current_request', c_ubyte)]
class SetBootUSBSettingsCommand(SetBootSettingsCommand):
SUBCOMMAND = 0x30
_fields_ = [('header', CommandHeader),
('settings', USBSettings)]
class SetUSBStringCommand(SetBootSettingsCommand):
_fields_ = [('header', CommandHeader),
('str_len', c_ubyte),
('descriptor_id', c_ubyte),
('str', c_ubyte * 58)]
def __init__(self, s):
super(SetUSBStringCommand, self).__init__()
self.descriptor_id = 0x03
self.string = s
@property
def string(self):
return ''.join(chr(x) for x in self.str[:self.str_len - 2])#.decode('utf16')
@string.setter
def string(self, value):
for i, x in enumerate((value + '\0').encode('utf16')):
self.str[i] = ord(x)
self.str_len = len(value) * 2 + 4
class SetUSBManufacturerCommand(SetUSBStringCommand):
SUBCOMMAND = 0x50
class SetUSBProductCommand(SetUSBStringCommand):
SUBCOMMAND = 0x40
class GetBootSettingsCommand(Command):
COMMAND = 0x61
class GetChipSettingsResponse(Response):
_anonymous_ = ['header']
_fields_ = [('header', ResponseHeader),
('settings', ChipSettings)]
class GetBootChipSettingsCommand(GetBootSettingsCommand):
SUBCOMMAND = 0x20
RESPONSE = GetChipSettingsResponse
_fields_ = [('header', CommandHeader)]
class GetSPISettingsResponse(Response):
_anonymous_ = ['header']
_fields_ = [('header', ResponseHeader),
('settings', SPISettings)]
class GetBootSPISettingsCommand(GetBootSettingsCommand):
SUBCOMMAND = 0x10
RESPONSE = GetSPISettingsResponse
_fields_ = [('header', CommandHeader)]
class GetUSBSettingsResponse(Response):
_anonymous_ = ['header']
_fields_ = [('header', ResponseHeader),
('reserved', c_ubyte * 8),
('vid', c_ushort),
('pid', c_ushort),
('reserved_2', c_ubyte * 13),
('power_option', c_ubyte),
('current_request', c_ubyte)]
@property
def settings(self):
return USBSettings(self.vid, self.pid, self.power_option, self.current_request)
class GetBootUSBSettingsCommand(GetBootSettingsCommand):
SUBCOMMAND = 0x30
RESPONSE = GetUSBSettingsResponse
_fields_ = [('header', CommandHeader)]
class GetUSBStringResponse(Response):
_anonymous_ = ['header']
_fields_ = [('header', ResponseHeader),
('str_len', c_ubyte),
('descriptor_id', c_ubyte),
('str', c_ubyte * 58)]
@property
def string(self):
return ''.join(chr(x) for x in self.str[:self.str_len - 2])#.decode('utf16')
class GetUSBProductCommand(GetBootSettingsCommand):
SUBCOMMAND = 0x40
RESPONSE = GetUSBStringResponse
_fields_ = [('header', CommandHeader)]
class GetUSBManufacturerCommand(GetBootSettingsCommand):
SUBCOMMAND = 0x50
RESPONSE = GetUSBStringResponse
_fields_ = [('header', CommandHeader)]
class SendPasswordCommand(Command):
COMMAND = 0x70
SUBCOMMAND = 0x00
RESPONSE = EmptyResponse
_fields_ = [('header', CommandHeader),
('password', c_char * 8)]
class GetSPISettingsCommand(Command):
COMMAND = 0x41
SUBCOMMAND = 0x00
RESPONSE = GetSPISettingsResponse
_fields_ = [('header', CommandHeader)]
class SetSPISettingsCommand(Command):
COMMAND = 0x40
SUBCOMMAND = 0x00
RESPONSE = EmptyResponse
_fields_ = [('header', CommandHeader),
('settings', SPISettings)]
class GetChipSettingsCommand(Command):
COMMAND = 0x20
SUBCOMMAND = 0x00
RESPONSE = GetChipSettingsResponse
_fields_ = [('header', CommandHeader)]
class SetChipSettingsCommand(Command):
COMMAND = 0x21
SUBCOMMAND = 0x00
RESPONSE = EmptyResponse
_fields_ = [('header', CommandHeader),
('settings', ChipSettings)]
class GetGPIOResponse(Response):
_anonymous_ = ['header']
_fields_ = [('header', ResponseHeader),
('gpio', c_ushort)]
class GetGPIOCommand(Command):
SUBCOMMAND = 0x00
RESPONSE = GetGPIOResponse
_fields_ = [('header', CommandHeader)]
class GetGPIODirectionCommand(GetGPIOCommand):
COMMAND = 0x33
class SetGPIOCommand(Command):
SUBCOMMAND = 0x00
RESPONSE = EmptyResponse
_fields_ = [('header', CommandHeader),
('gpio', c_ushort)]
class SetGPIODirectionCommand(SetGPIOCommand):
COMMAND = 0x32
class GetGPIOValueCommand(GetGPIOCommand):
COMMAND = 0x31
class SetGPIOValueCommand(SetGPIOCommand):
COMMAND = 0x30
class ReadEEPROMResponse(Structure):
_fields_ = [('command', c_ubyte),
('status', c_ubyte),
('address', c_ubyte),
('data', c_ubyte)]
class ReadEEPROMCommand(Structure):
COMMAND = 0x50
RESPONSE = ReadEEPROMResponse
_fields_ = [('command', c_ubyte),
('address', c_ubyte),
('reserved', c_ubyte)]
def __init__(self, address):
super(ReadEEPROMCommand, self).__init__(self.COMMAND, address, 0x00)
class WriteEEPROMCommand(Structure):
COMMAND = 0x51
RESPONSE = EmptyResponse
_fields_ = [('command', c_ubyte),
('address', c_ubyte),
('value', c_ubyte)]
def __init__(self, address, value):
super(WriteEEPROMCommand, self).__init__(self.COMMAND, address, value)
SPIBuffer = c_ubyte * 60
class SPITransferResponse(Structure):
_fields_ = [('command', c_ubyte),
('status', c_ubyte),
('length', c_ubyte),
('engine_status', c_ubyte),
('_data', SPIBuffer)]
@property
def data(self):
return ''.join(chr(x) for x in self._data[:self.length])
class SPITransferCommand(Structure):
COMMAND = 0x42
RESPONSE = SPITransferResponse
_fields_ = [('command', c_ubyte),
('length', c_ubyte),
('reserved', c_ushort),
('data', SPIBuffer)]
    def __init__(self, data):
        # The transfer buffer is fixed at 60 bytes (SPIBuffer); only the first
        # 'length' bytes of it are meaningful for a given transfer.
        data_len = len(data)
        data = SPIBuffer(*(ord(x) for x in data))
        super(SPITransferCommand, self).__init__(self.COMMAND, data_len, 0x0000, data)
class DeviceStatusResponse(Response):
_fields_ = [('command', c_ubyte),
('status', c_ubyte),
('bus_release_status', c_ubyte),
('bus_owner', c_ubyte),
('password_attempts', c_ubyte),
('password_guessed', c_ubyte)]
class CancelTransferCommand(Command):
COMMAND = 0x11
SUBCOMMAND = 0x00
RESPONSE = DeviceStatusResponse
_fields_ = [('header', CommandHeader)]
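# A hedged usage sketch for the structures above (no real device is required;
# the transport that actually writes the HID report is assumed to live
# elsewhere in the package): a command object is built, then flattened to the
# raw bytes a 64-byte HID write to the MCP2210 would carry.
import ctypes
cmd = ReadEEPROMCommand(0x10)  # read EEPROM address 0x10
raw = ctypes.string_at(ctypes.addressof(cmd), ctypes.sizeof(cmd))
assert bytearray(raw) == bytearray([0x50, 0x10, 0x00])  # command, address, reserved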
| 26.692308
| 99
| 0.610397
|
1c1cdf32049e5474a7fd370b9f1c39d77a785955
| 29,600
|
py
|
Python
|
sql/query.py
|
jiangming1/archery
|
e05c06ee18eb2c967fd49b4b5a72d271abcccbe4
|
[
"Apache-2.0"
] | null | null | null |
sql/query.py
|
jiangming1/archery
|
e05c06ee18eb2c967fd49b4b5a72d271abcccbe4
|
[
"Apache-2.0"
] | null | null | null |
sql/query.py
|
jiangming1/archery
|
e05c06ee18eb2c967fd49b4b5a72d271abcccbe4
|
[
"Apache-2.0"
] | 1
|
2019-05-29T11:22:40.000Z
|
2019-05-29T11:22:40.000Z
|
# -*- coding: UTF-8 -*-
import datetime
import logging
import re
import time
import traceback
import simplejson as json
from django.contrib.auth.decorators import permission_required
from django.core import serializers
from django.db import connection
from django.db import transaction
from django.db.models import Q, Min
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from common.config import SysConfig
from common.utils.const import WorkflowDict
from common.utils.extend_json_encoder import ExtendJSONEncoder
from sql.utils.dao import Dao
from sql.utils.data_masking import Masking
from sql.utils.group import user_instances, user_groups
from sql.utils.workflow import Workflow
from .models import QueryPrivilegesApply, QueryPrivileges, QueryLog, SqlGroup
logger = logging.getLogger('default')
datamasking = Masking()
workflowOb = Workflow()
# Query privilege applications: callback used by the workflow audit
def query_audit_call_back(workflow_id, workflow_status):
    # Update the status on the business table
apply_info = QueryPrivilegesApply()
apply_info.apply_id = workflow_id
apply_info.status = workflow_status
apply_info.save(update_fields=['status'])
    # On approval, bulk-insert the privilege records to cut down the overhead
if workflow_status == WorkflowDict.workflow_status['audit_success']:
apply_queryset = QueryPrivilegesApply.objects.get(apply_id=workflow_id)
        # Database-level privilege
if apply_queryset.priv_type == 1:
insertlist = [QueryPrivileges(
user_name=apply_queryset.user_name,
user_display=apply_queryset.user_display,
instance_name=apply_queryset.instance_name, db_name=db_name,
table_name=apply_queryset.table_list, valid_date=apply_queryset.valid_date,
limit_num=apply_queryset.limit_num, priv_type=apply_queryset.priv_type) for db_name in
apply_queryset.db_list.split(',')]
        # Table-level privilege
elif apply_queryset.priv_type == 2:
insertlist = [QueryPrivileges(
user_name=apply_queryset.user_name,
user_display=apply_queryset.user_display,
instance_name=apply_queryset.instance_name, db_name=apply_queryset.db_list,
table_name=table_name, valid_date=apply_queryset.valid_date,
limit_num=apply_queryset.limit_num, priv_type=apply_queryset.priv_type) for table_name in
apply_queryset.table_list.split(',')]
QueryPrivileges.objects.bulk_create(insertlist)
# Query privilege check
def query_priv_check(user, instance_name, db_name, sql_content, limit_num):
result = {'status': 0, 'msg': 'ok', 'data': {'priv_check': 1, 'limit_num': 0}}
    # Check whether the user may query this database/table
if user.is_superuser:
if SysConfig().sys_config.get('admin_query_limit'):
user_limit_num = int(SysConfig().sys_config.get('admin_query_limit'))
else:
user_limit_num = 0
limit_num = int(user_limit_num) if int(limit_num) == 0 else min(int(limit_num), int(user_limit_num))
    # Viewing table structure or execution plans makes inception error out, so handle it separately; explain is skipped without validation
elif re.match(r"^show\s+create\s+table", sql_content.lower()):
tb_name = re.sub('^show\s+create\s+table', '', sql_content, count=1, flags=0).strip()
        # First check for a whole-database privilege
db_privileges = QueryPrivileges.objects.filter(user_name=user.username, instance_name=instance_name,
db_name=db_name, priv_type=1,
valid_date__gte=datetime.datetime.now(), is_deleted=0)
        # Without a whole-database privilege, verify the table privilege
if len(db_privileges) == 0:
tb_privileges = QueryPrivileges.objects.filter(user_name=user.username, instance_name=instance_name,
db_name=db_name, table_name=tb_name, priv_type=2,
valid_date__gte=datetime.datetime.now(), is_deleted=0)
if len(tb_privileges) == 0:
result['status'] = 1
result['msg'] = '你无' + db_name + '.' + tb_name + '表的查询权限!请先到查询权限管理进行申请'
return result
    # SQL query: can be validated down to table-level privileges
else:
        # First use inception's syntax-tree printing to get the tables the query touches
table_ref_result = datamasking.query_table_ref(sql_content + ';', instance_name, db_name)
        # Parsed correctly, so table-level privileges can be checked
if table_ref_result['status'] == 0:
table_ref = table_ref_result['data']
            # Fetch the grants and verify query privileges on every table involved
QueryPrivilegesOb = QueryPrivileges.objects.filter(user_name=user.username, instance_name=instance_name)
            # First check for a whole-database privilege
for table in table_ref:
db_privileges = QueryPrivilegesOb.filter(db_name=table['db'], priv_type=1,
valid_date__gte=datetime.datetime.now(),
is_deleted=0)
                # Without a whole-database privilege, verify the table privilege
if len(db_privileges) == 0:
tb_privileges = QueryPrivilegesOb.filter(db_name=table['db'], table_name=table['table'],
valid_date__gte=datetime.datetime.now(), is_deleted=0)
if len(tb_privileges) == 0:
result['status'] = 1
result['msg'] = '你无' + table['db'] + '.' + table['table'] + '表的查询权限!请先到查询权限管理进行申请'
return result
        # Table extraction failed; check whether the config allows continuing and fall back to database-level checks
else:
table_ref = None
            # Validate the database privilege so a failed inception parse does not skip even the database-level check
privileges = QueryPrivileges.objects.filter(user_name=user.username, instance_name=instance_name,
db_name=db_name,
valid_date__gte=datetime.datetime.now(),
is_deleted=0)
if len(privileges) == 0:
result['status'] = 1
result['msg'] = '你无' + db_name + '数据库的查询权限!请先到查询权限管理进行申请'
return result
if SysConfig().sys_config.get('query_check'):
return table_ref_result
else:
result['data']['priv_check'] = 2
        # Take the smallest limit granted across the tables the query touches
if table_ref:
db_list = [table_info['db'] for table_info in table_ref]
table_list = [table_info['table'] for table_info in table_ref]
user_limit_num = QueryPrivileges.objects.filter(user_name=user.username,
instance_name=instance_name,
db_name__in=db_list,
table_name__in=table_list,
valid_date__gte=datetime.datetime.now(),
is_deleted=0).aggregate(Min('limit_num'))['limit_num__min']
if user_limit_num is None:
                # No table-level grant found, so fall back to the smallest database-level limit
user_limit_num = QueryPrivileges.objects.filter(user_name=user.username,
instance_name=instance_name,
db_name=db_name,
valid_date__gte=datetime.datetime.now(), is_deleted=0
).aggregate(Min('limit_num'))['limit_num__min']
else:
            # No table-level grant found, so fall back to the smallest database-level limit
user_limit_num = QueryPrivileges.objects.filter(user_name=user.username,
instance_name=instance_name,
db_name=db_name,
valid_date__gte=datetime.datetime.now(),
is_deleted=0).aggregate(Min('limit_num'))['limit_num__min']
limit_num = int(user_limit_num) if int(limit_num) == 0 else min(int(limit_num), int(user_limit_num))
result['data']['limit_num'] = limit_num
return result
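# A hedged sketch of the contract implemented by query_priv_check above (the
# shapes are read off the code paths in this function, not a public API):
# callers branch on 'status' and, when it is 0, use 'data' for the effective
# row limit and the priv_check flag; the message text here is illustrative.
granted = {'status': 0, 'msg': 'ok', 'data': {'priv_check': 1, 'limit_num': 100}}
denied = {'status': 1, 'msg': 'no privilege on this table', 'data': {'priv_check': 1, 'limit_num': 0}}
assert granted['status'] == 0 and granted['data']['limit_num'] == 100
assert denied['status'] == 1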
# List query privilege applications
@permission_required('sql.menu_queryapplylist', raise_exception=True)
def getqueryapplylist(request):
    # Get the current user
user = request.user
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
search = request.POST.get('search', '')
    # Fetch the list: applicants see only their own applications, admins see everything, reviewers see what they review
if user.is_superuser:
lists = QueryPrivilegesApply.objects.all().filter(
Q(title__contains=search) | Q(user_display__contains=search)).order_by('-apply_id')[
offset:limit].values(
'apply_id', 'title', 'instance_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
'user_display', 'status', 'create_time', 'group_name'
)
count = QueryPrivilegesApply.objects.all().filter(title__contains=search).count()
elif user.has_perm('sql.query_review'):
        # First get the resource groups the user belongs to
group_list = user_groups(user)
group_ids = [group.group_id for group in group_list]
lists = QueryPrivilegesApply.objects.filter(group_id__in=group_ids).filter(
Q(title__contains=search) | Q(user_display__contains=search)).order_by('-apply_id')[offset:limit].values(
'apply_id', 'title', 'instance_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
'user_display', 'status', 'create_time', 'group_name'
)
count = QueryPrivilegesApply.objects.filter(group_id__in=group_ids).filter(
Q(title__contains=search) | Q(user_display__contains=search)).count()
else:
lists = QueryPrivilegesApply.objects.filter(user_name=user.username).filter(
Q(title__contains=search) | Q(user_display__contains=search)).order_by('-apply_id')[offset:limit].values(
'apply_id', 'title', 'instance_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
'user_display', 'status', 'create_time', 'group_name'
)
count = QueryPrivilegesApply.objects.filter(user_name=user.username).filter(
Q(title__contains=search) | Q(user_display__contains=search)).count()
    # Serialize the QuerySet
rows = [row for row in lists]
result = {"total": count, "rows": rows}
    # Return the query result
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
# Apply for query privileges
@permission_required('sql.query_applypriv', raise_exception=True)
def applyforprivileges(request):
title = request.POST['title']
instance_name = request.POST['instance_name']
group_name = request.POST['group_name']
group_id = SqlGroup.objects.get(group_name=group_name).group_id
priv_type = request.POST['priv_type']
db_name = request.POST['db_name']
valid_date = request.POST['valid_date']
limit_num = request.POST['limit_num']
try:
workflow_remark = request.POST['apply_remark']
except Exception:
workflow_remark = ''
    # Get the current user
user = request.user
    # Server-side parameter validation
result = {'status': 0, 'msg': 'ok', 'data': []}
if int(priv_type) == 1:
db_list = request.POST['db_list']
if title is None or instance_name is None or db_list is None or valid_date is None or limit_num is None:
result['status'] = 1
result['msg'] = '请填写完整'
return HttpResponse(json.dumps(result), content_type='application/json')
elif int(priv_type) == 2:
table_list = request.POST['table_list']
if title is None or instance_name is None or db_name is None or valid_date is None or table_list is None or limit_num is None:
result['status'] = 1
result['msg'] = '请填写完整'
return HttpResponse(json.dumps(result), content_type='application/json')
try:
user_instances(request.user, 'slave').get(instance_name=instance_name)
except Exception:
context = {'errMsg': '你所在组未关联该从库!'}
return render(request, 'error.html', context)
    # Decide whether the privilege must be restricted to table level
    # Database-level privilege
if int(priv_type) == 1:
db_list = db_list.split(',')
        # Check whether the applicant already holds query privileges on the whole database
own_dbs = QueryPrivileges.objects.filter(instance_name=instance_name, user_name=user.username,
db_name__in=db_list,
valid_date__gte=datetime.datetime.now(), priv_type=1,
is_deleted=0).values('db_name')
own_db_list = [table_info['db_name'] for table_info in own_dbs]
if own_db_list is None:
pass
else:
for db_name in db_list:
if db_name in own_db_list:
result['status'] = 1
result['msg'] = '你已拥有' + instance_name + '实例' + db_name + '库的全部查询权限,不能重复申请'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Table-level privilege
elif int(priv_type) == 2:
table_list = table_list.split(',')
        # Check whether the applicant already holds query privileges on these tables
own_tables = QueryPrivileges.objects.filter(instance_name=instance_name, user_name=user.username,
db_name=db_name,
table_name__in=table_list, valid_date__gte=datetime.datetime.now(),
priv_type=2, is_deleted=0).values('table_name')
own_table_list = [table_info['table_name'] for table_info in own_tables]
if own_table_list is None:
pass
else:
for table_name in table_list:
if table_name in own_table_list:
result['status'] = 1
result['msg'] = '你已拥有' + instance_name + '实例' + db_name + '.' + table_name + '表的查询权限,不能重复申请'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Use a transaction to keep the data consistent
try:
with transaction.atomic():
            # Save the application to the database
applyinfo = QueryPrivilegesApply()
applyinfo.title = title
applyinfo.group_id = group_id
applyinfo.group_name = group_name
applyinfo.audit_auth_groups = Workflow.auditsettings(group_id, WorkflowDict.workflow_type['query'])
applyinfo.user_name = user.username
applyinfo.user_display = user.display
applyinfo.instance_name = instance_name
if int(priv_type) == 1:
applyinfo.db_list = ','.join(db_list)
applyinfo.table_list = ''
elif int(priv_type) == 2:
applyinfo.db_list = db_name
applyinfo.table_list = ','.join(table_list)
applyinfo.priv_type = int(priv_type)
applyinfo.valid_date = valid_date
            applyinfo.status = WorkflowDict.workflow_status['audit_wait']  # pending review
applyinfo.limit_num = limit_num
applyinfo.create_user = user.username
applyinfo.save()
apply_id = applyinfo.apply_id
            # Register the audit via the workflow; query privilege applications use workflow_type=1
auditresult = workflowOb.addworkflowaudit(request, WorkflowDict.workflow_type['query'], apply_id)
if auditresult['status'] == 0:
                # Update the audit status on the business table and decide whether to insert the privileges
query_audit_call_back(apply_id, auditresult['data']['workflow_status'])
except Exception as msg:
logger.error(traceback.format_exc())
result['status'] = 1
result['msg'] = str(msg)
else:
result = auditresult
return HttpResponse(json.dumps(result), content_type='application/json')
# Manage a user's query privileges
def getuserprivileges(request):
user_name = request.POST.get('user_name')
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
search = request.POST.get('search', '')
    # Permission check: everyone except admins can only see their own privilege records
user = request.user
    # Fetch the user's privilege records
if user.is_superuser:
if user_name != 'all':
privilegeslist = QueryPrivileges.objects.all().filter(user_name=user_name,
is_deleted=0,
table_name__contains=search,
valid_date__gte=datetime.datetime.now()
).order_by('-privilege_id')[offset:limit]
privilegeslistCount = QueryPrivileges.objects.all().filter(user_name=user_name,
is_deleted=0,
table_name__contains=search,
valid_date__gte=datetime.datetime.now()).count()
else:
privilegeslist = QueryPrivileges.objects.all().filter(is_deleted=0,
table_name__contains=search,
valid_date__gte=datetime.datetime.now()
).order_by('-privilege_id')[offset:limit]
privilegeslistCount = QueryPrivileges.objects.all().filter(is_deleted=0,
table_name__contains=search,
valid_date__gte=datetime.datetime.now()
).count()
else:
privilegeslist = QueryPrivileges.objects.filter(user_name=user.username,
table_name__contains=search,
is_deleted=0,
valid_date__gte=datetime.datetime.now()
).order_by('-privilege_id')[offset:limit]
privilegeslistCount = QueryPrivileges.objects.filter(user_name=user.username,
table_name__contains=search,
is_deleted=0,
valid_date__gte=datetime.datetime.now()
).count()
    # Serialize the QuerySet
privilegeslist = serializers.serialize("json", privilegeslist)
privilegeslist = json.loads(privilegeslist)
privilegeslist_result = []
for i in range(len(privilegeslist)):
privilegeslist[i]['fields']['id'] = privilegeslist[i]['pk']
privilegeslist_result.append(privilegeslist[i]['fields'])
result = {"total": privilegeslistCount, "rows": privilegeslist_result}
    # Return the query result
return HttpResponse(json.dumps(result), content_type='application/json')
# Modify privilege records
@permission_required('sql.query_mgtpriv', raise_exception=True)
def modifyqueryprivileges(request):
privilege_id = request.POST.get('privilege_id')
type = request.POST.get('type')
result = {'status': 0, 'msg': 'ok', 'data': []}
    # type=1 deletes the privilege, type=2 modifies it
privileges = QueryPrivileges()
if int(type) == 1:
        # Delete the privilege
privileges.privilege_id = int(privilege_id)
privileges.is_deleted = 1
privileges.save(update_fields=['is_deleted'])
return HttpResponse(json.dumps(result), content_type='application/json')
elif int(type) == 2:
        # Modify the privilege
valid_date = request.POST.get('valid_date')
limit_num = request.POST.get('limit_num')
privileges.privilege_id = int(privilege_id)
privileges.valid_date = valid_date
privileges.limit_num = limit_num
privileges.save(update_fields=['valid_date', 'limit_num'])
return HttpResponse(json.dumps(result), content_type='application/json')
# Review query privilege applications
@permission_required('sql.query_review', raise_exception=True)
def queryprivaudit(request):
    # Get the current user
user = request.user
apply_id = int(request.POST['apply_id'])
audit_status = int(request.POST['audit_status'])
audit_remark = request.POST.get('audit_remark')
if audit_remark is None:
audit_remark = ''
if Workflow.can_review(request.user, apply_id, 1) is False:
context = {'errMsg': '你无权操作当前工单!'}
return render(request, 'error.html', context)
    # Use a transaction to keep the data consistent
try:
with transaction.atomic():
            # Get the audit_id
audit_id = Workflow.auditinfobyworkflow_id(workflow_id=apply_id,
workflow_type=WorkflowDict.workflow_type['query']).audit_id
            # Call the workflow API to record the review
auditresult = workflowOb.auditworkflow(request, audit_id, audit_status, user.username, audit_remark)
            # Update the business table's audit status according to the review result
auditInfo = Workflow.auditinfo(audit_id)
if auditInfo.workflow_type == WorkflowDict.workflow_type['query']:
                # Update the audit status on the business table and insert the privileges
query_audit_call_back(auditInfo.workflow_id, auditresult['data']['workflow_status'])
except Exception as msg:
logger.error(traceback.format_exc())
context = {'errMsg': msg}
return render(request, 'error.html', context)
return HttpResponseRedirect(reverse('sql:queryapplydetail', args=(apply_id,)))
# Run a SQL query and return the result
@permission_required('sql.query_submit', raise_exception=True)
def query(request):
instance_name = request.POST.get('instance_name')
sql_content = request.POST.get('sql_content')
db_name = request.POST.get('db_name')
limit_num = request.POST.get('limit_num')
result = {'status': 0, 'msg': 'ok', 'data': {}}
    # Server-side parameter validation
if sql_content is None or db_name is None or instance_name is None or limit_num is None:
result['status'] = 1
result['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(result), content_type='application/json')
sql_content = sql_content.strip()
    # Get the current user
user = request.user
    # Strip comment lines and reject statements that are not queries
sql_content = ''.join(
map(lambda x: re.compile(r'(^--\s+.*|^/\*.*\*/;\s*$)').sub('', x, count=1),
sql_content.splitlines(1))).strip()
    # Collapse blank lines
sql_content = re.sub('[\r\n\f]{2,}', '\n', sql_content)
sql_list = sql_content.strip().split('\n')
for sql in sql_list:
if re.match(r"^select|^show|^explain", sql.lower()):
break
else:
result['status'] = 1
result['msg'] = '仅支持^select|^show|^explain语法,请联系管理员!'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Split on semicolons and execute only the first valid statement
sql_content = sql_content.strip().split(';')[0]
try:
        # Query privilege check
priv_check_info = query_priv_check(user, instance_name, db_name, sql_content, limit_num)
if priv_check_info['status'] == 0:
limit_num = priv_check_info['data']['limit_num']
priv_check = priv_check_info['data']['priv_check']
else:
return HttpResponse(json.dumps(priv_check_info), content_type='application/json')
if re.match(r"^explain", sql_content.lower()):
limit_num = 0
        # Append a limit clause to select statements
if re.match(r"^select", sql_content.lower()):
if re.search(r"limit\s+(\d+)$", sql_content.lower()) is None:
if re.search(r"limit\s+\d+\s*,\s*(\d+)$", sql_content.lower()) is None:
sql_content = sql_content + ' limit ' + str(limit_num)
sql_content = sql_content + ';'
        # Execute the query and record the elapsed time
t_start = time.time()
sql_result = Dao(instance_name=instance_name).mysql_query(str(db_name), sql_content, limit_num)
t_end = time.time()
cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
sql_result['cost_time'] = cost_time
        # Data masking: also consult the config for whether masking is enabled and whether syntax-tree parse errors may be ignored
        hit_rule = 0 if re.match(r"^select", sql_content.lower()) else 2  # did the query hit a masking rule: 0 unknown, 1 hit, 2 miss
        masking = 2  # was the result masked correctly: 1 yes, 2 no
t_start = time.time()
        # Only mask select statements
if SysConfig().sys_config.get('data_masking') and re.match(r"^select", sql_content.lower()):
try:
masking_result = datamasking.data_masking(instance_name, db_name, sql_content, sql_result)
if masking_result['status'] != 0 and SysConfig().sys_config.get('query_check'):
return HttpResponse(json.dumps(masking_result), content_type='application/json')
else:
hit_rule = masking_result['data']['hit_rule']
masking = 1 if hit_rule == 1 else 2
except Exception:
logger.error(traceback.format_exc())
hit_rule = 0
masking = 2
if SysConfig().sys_config.get('query_check'):
result['status'] = 1
result['msg'] = '脱敏数据报错,请联系管理员'
return HttpResponse(json.dumps(result), content_type='application/json')
t_end = time.time()
masking_cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
sql_result['masking_cost_time'] = masking_cost_time
result['data'] = sql_result
        # Persist successful queries to the query log
if sql_result.get('Error'):
pass
else:
query_log = QueryLog()
query_log.username = user.username
query_log.user_display = user.display
query_log.db_name = db_name
query_log.instance_name = instance_name
query_log.sqllog = sql_content
if int(limit_num) == 0:
limit_num = int(sql_result['effect_row'])
else:
limit_num = min(int(limit_num), int(sql_result['effect_row']))
query_log.effect_row = limit_num
query_log.cost_time = cost_time
query_log.priv_check = priv_check
query_log.hit_rule = hit_rule
query_log.masking = masking
            # Guard against a timed-out connection
try:
query_log.save()
except:
connection.close()
query_log.save()
except Exception as e:
logger.error(traceback.format_exc())
result['status'] = 1
result['msg'] = str(e)
    # Return the query result
try:
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
except Exception:
return HttpResponse(json.dumps(result, default=str, bigint_as_string=True),
content_type='application/json')
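# A standalone, hedged sketch of the limit-appending rule used in query()
# above: a trailing "limit N" or "limit M, N" suppresses the rewrite,
# otherwise the per-user limit is appended before the closing semicolon.
import re
def apply_limit(sql, limit_num):
    sql = sql.strip().rstrip(';')
    if re.match(r"^select", sql.lower()):
        if re.search(r"limit\s+(\d+)$", sql.lower()) is None and \
                re.search(r"limit\s+\d+\s*,\s*(\d+)$", sql.lower()) is None:
            sql = sql + ' limit ' + str(limit_num)
    return sql + ';'
assert apply_limit('select * from t', 100) == 'select * from t limit 100;'
assert apply_limit('select * from t limit 5', 100) == 'select * from t limit 5;'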
# Fetch the SQL query history
@permission_required('sql.menu_sqlquery', raise_exception=True)
def querylog(request):
    # Get the current user
user = request.user
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
search = request.POST.get('search', '')
    # Regular users see their own history; superusers see everything
if user.is_superuser:
sql_log_count = QueryLog.objects.all().filter(
Q(sqllog__contains=search) | Q(user_display__contains=search)).count()
sql_log_list = QueryLog.objects.all().filter(
Q(sqllog__contains=search) | Q(user_display__contains=search)).order_by(
'-id')[offset:limit]
else:
sql_log_count = QueryLog.objects.filter(username=user.username).filter(sqllog__contains=search).count()
sql_log_list = QueryLog.objects.filter(username=user.username).filter(sqllog__contains=search).order_by('-id')[
offset:limit]
    # Serialize the QuerySet
sql_log_list = serializers.serialize("json", sql_log_list)
sql_log_list = json.loads(sql_log_list)
sql_log = [log_info['fields'] for log_info in sql_log_list]
result = {"total": sql_log_count, "rows": sql_log}
    # Return the query result
return HttpResponse(json.dumps(result), content_type='application/json')
# Get a SQL execution plan
@permission_required('sql.optimize_sqladvisor', raise_exception=True)
def explain(request):
sql_content = request.POST.get('sql_content')
instance_name = request.POST.get('instance_name')
db_name = request.POST.get('db_name')
result = {'status': 0, 'msg': 'ok', 'data': []}
    # Server-side parameter validation
if sql_content is None or instance_name is None:
result['status'] = 1
result['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(result), content_type='application/json')
sql_content = sql_content.strip()
    # Reject statements that are not explain queries
if re.match(r"^explain", sql_content.lower()):
pass
else:
result['status'] = 1
result['msg'] = '仅支持explain开头的语句,请检查'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Split on semicolons and execute only the first valid statement
sql_content = sql_content.strip().split(';')[0]
    # Execute the explain statement
sql_result = Dao(instance_name=instance_name).mysql_query(str(db_name), sql_content)
result['data'] = sql_result
    # Return the query result
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
| 45.259939
| 134
| 0.584291
|
cf8253e0a1e406047ec1798dab40a4d6f9b1a904
| 1,043
|
py
|
Python
|
app/core/migrations/0004_recipe.py
|
shivanshu1086/recipe-app-api
|
5c869b7493a42e66c8588c2d17a01b73a022ddb3
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
shivanshu1086/recipe-app-api
|
5c869b7493a42e66c8588c2d17a01b73a022ddb3
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
shivanshu1086/recipe-app-api
|
5c869b7493a42e66c8588c2d17a01b73a022ddb3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2021-12-04 20:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.965517
| 118
| 0.604027
|
b5643f24c05aeab30bade0cc7965d19ffa6edcf2
| 67,104
|
py
|
Python
|
chainer/graph_optimizations/static_graph.py
|
zaltoprofen/chainer
|
3b03f9afc80fd67f65d5e0395ef199e9506b6ee1
|
[
"MIT"
] | 2
|
2018-10-09T15:37:43.000Z
|
2019-04-28T02:45:22.000Z
|
chainer/graph_optimizations/static_graph.py
|
zaltoprofen/chainer
|
3b03f9afc80fd67f65d5e0395ef199e9506b6ee1
|
[
"MIT"
] | 1
|
2019-10-17T09:56:18.000Z
|
2019-10-17T09:56:18.000Z
|
chainer/graph_optimizations/static_graph.py
|
zaltoprofen/chainer
|
3b03f9afc80fd67f65d5e0395ef199e9506b6ee1
|
[
"MIT"
] | null | null | null |
import sys
import weakref
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.function_node
def _is_xp(x):
return isinstance(x, np.ndarray) or isinstance(x, cuda.ndarray)
class ScheduleInfo(object):
"""A callable wrapper for a function in the static schedule.
Args:
func (FunctionNode): A function in the static schedule.
args: Arguments to 'func'.
kwargs: Keyword arguments to 'func'.
inputs_hooks (list of tuples): A list of hooks that instruct how to
update the ndarray references in 'args' so that they
refer to the correct master array in 'unique_arrays'.
return_hooks (list of tuples): A list of hooks that instruct how
to update the ndarray references in 'unique_arrays' so that
they refer to the correct arrays that were dynamically
allocated and returned by 'func'. These run after
'func' is called.
unique_arrays (list of ndarray): The master list of all unique
ndarrays that appear in the static schedule.
func_name (str): An optional name of the static function. This is
the name (if any) that was used as a decorater argument to
`@static_code(func_name=name)`.
"""
def __init__(self, func, args, kwargs, inputs_hooks, outputs_hooks,
return_hooks, delete_hooks, unique_arrays, array_infos,
func_name=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.inputs_hooks = inputs_hooks
self.outputs_hooks = outputs_hooks
self.return_hooks = return_hooks
self.unique_arrays = unique_arrays
self.array_infos = array_infos
assert len(self.array_infos) == len(self.unique_arrays)
self.func_name = func_name
self.in_list = None
if self.inputs_hooks:
self.in_list = self.kwargs['inputs']
if self.outputs_hooks:
self.out_list = self.kwargs['outputs']
# Check if 'func' wraps code of a 'FunctionNode':
self.function_node = None
if self.args:
maybe_func = self.args[0]
if isinstance(maybe_func, chainer.FunctionNode):
self.function_node = maybe_func
# List of indices in unique_arrays to delete.
self.delete_hooks = delete_hooks
def run_pre_hooks(self):
"""Run hooks to set correct references.
This method is called from '__call__()'.
Process the list of hooks which will modify the array references in
the arguments list of the static function. This method must be
called before executing the static function.
The hooks specify that
each array argument points to a "master" array reference in the
unique_arrays list. If the reference in unique_arrays changes, then
we must also change the corresponding array reference in the arguments
list. The hooks specify the mapping and this method updates the
references in args to the corresponding values from unique_arrays.
"""
for hook in self.inputs_hooks:
(ind, unique_ind) = hook
self.in_list[ind] = self.unique_arrays[unique_ind]
for hook in self.outputs_hooks:
(ind, unique_ind) = hook
self.out_list[ind] = self.unique_arrays[unique_ind]
for ind in self.delete_hooks:
self.unique_arrays[ind] = None
def run_post_hooks(self, return_arrays):
"""Run post-hooks.
This method should be called after calling the static function
`self.func(*self.args)`. This method sets any array references that
appear in `self.args` to None. This is safe because the master
array reference is still kept in `self.unique_arrays`.
Also, process the list of post-hooks which will modify the array
references in
the unique_arrays list to refer to the new dynamically-allocated arrays
that were returned by 'func'.
Args:
return_arrays (list of ndarray or None): The list of arrays that
were returned by the schedule function, if not None.
"""
for hook in self.inputs_hooks:
(ind, unique_ind) = hook
self.in_list[ind] = None
for hook in self.outputs_hooks:
(ind, unique_ind) = hook
self.out_list[ind] = None
for hook in self.return_hooks:
# Update the array reference in unique_arrays to refer to the
# array in the results array.
(ret_index, unique_list_index) = hook
# Note: input/output variables to a FunctionNode that are
# retained using retain_inputs() or retain_outputs() are
# not currently explicitly used as input arguments to the
# auto-wrapped functions, and so their corresponding array
# reference could be used inside a function wrapped with
# @static_code without the array explicitly appearing in the
# 'inputs' argument. It is therefore not safe to change the
# reference of such arrays, and so for them, we must be
# sure to copy the dynamically-allocated array into the
# same array that was used in the define-by-run code and
# set 'need_copy' to True in such cases.
need_copy = self.array_infos[unique_list_index].retain
# todo: possible memory leak when need_copy False is allowed?
if need_copy:
# This must be used if the model used retain_inputs() or
# retain_outputs().
self.unique_arrays[unique_list_index][...] = \
return_arrays[ret_index]
else:
# This is preferred, when possible, since it should
# be faster than a copy to simply update the array
# reference.
self.unique_arrays[unique_list_index] = \
return_arrays[ret_index]
def __call__(self):
self.run_pre_hooks()
ret = self.func(*self.args, **self.kwargs)
self.run_post_hooks(ret)
def __repr__(self):
out = 'function: ' + str(self.func) + '\n'
out += 'name: ' + str(self.func_name) + '\n'
out += 'args: ' + str(self.args) + '\n'
out += 'kwargs: ' + str(self.args) + '\n'
return out
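# --- Hedged illustration (editor's sketch, not part of the scheduler) ---
# The pre-hook mechanism documented in ScheduleInfo.run_pre_hooks() boils
# down to index remapping between a scheduled function's argument list and
# the master 'unique_arrays' list. The uncalled helper below assumes only
# NumPy and hypothetical hook tuples of the form (arg_index, unique_index).
def _sketch_pre_hook_remapping():
    """Editor's illustrative sketch only; never called by the scheduler."""
    import numpy as np
    unique_arrays = [np.zeros(3), np.ones(3)]   # master array list
    kwargs = {'inputs': [None, None]}           # stale per-call references
    inputs_hooks = [(0, 1), (1, 0)]             # (arg index, unique index)
    for ind, unique_ind in inputs_hooks:
        # Point each argument slot at the current master array.
        kwargs['inputs'][ind] = unique_arrays[unique_ind]
    return kwargs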
class ArrayInfo(object):
"""Array information needed by the scheduler.
This contains information about one array used in the naive static
schedule corresponding to the define-by-run code.
"""
def __init__(self, array):
# Weak reference to the array in the define-by-run code.
self.weak_ref = weakref.ref(array)
self.id = id(array)
# The array (normal reference). Do not create in initializer.
self.array = None
self.shape = array.shape
self.dtype = array.dtype
# either numpy or cupy
self.ndarray_module = chainer.backend.get_array_module(array)
if self.ndarray_module is cuda.cupy:
# device id, if available.
self.device = cuda.get_device_from_array(array)
else:
# numpy (cpu)
self.device = -1
        # todo: save array order ('C', 'F') as well?
# It specifies the input variable corresponding
# to this array as the tuple (pass_depth, in_var_index).
self.in_var_index = None
# It specifies the output variable corresponding
# to this array as the tuple (pass_depth, out_var_index).
self.out_var_index = None
# todo: set in initializer as keyword arg?
self.dynamically_allocated = False
# If the array was returned as a dynamically allocated array
# in the define-by-run code, this specifies the location
# in the schedule as the tuple (pass_depth, sched_func_index)
# where sched_func_index is the index of the corresponding
# ScheduleInfo object in the StaticScheduleFunction's
# self.schedule_info_list
self.dynamic_allocation_index = None
self.dynamic_allocation_pass_depth = None
self.dynamic_deletion_index = None
self.dynamic_deletion_pass_depth = None
# This is the same as self.dynamic_allocation_index, but for the
# case where the array was statically allocated in the
# define-by-run code.
self.static_allocation_index = None
# If the array needs to be retained (was included in
# retain_inputs/retain_outputs),
# this will be set to True later.
self.retain = False
def was_deleted(self):
return self.weak_ref() is None
def get_new_empty_array(self):
"""Make and return a new empty ndarray.
Make and return a new empty ndarray that has the same shape,
dtype, and device as the array that was supplied to the
initializer.
"""
# todo: set device id
return self.ndarray_module.empty(self.shape, dtype=self.dtype)
def __repr__(self):
out = 'shape: {}\n'.format(self.shape)
if self.was_deleted():
out += 'Weak reference: dead\n'
else:
out += 'Weak reference: alive\n'
if self.retain:
out += 'Retained with retain_inputs()/retain_outputs().\n'
if self.dynamically_allocated:
out += 'Dynamically allocated at\n'
out += \
' pass_depth: {}\n'.format(self.dynamic_allocation_pass_depth)
out += ' sched_index: {}\n'.format(self.dynamic_allocation_index)
out += 'array id: {}'.format(self.id)
return out
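# --- Hedged illustration (editor's sketch, not part of the scheduler) ---
# ArrayInfo stores only a weak reference so that the bookkeeping does not
# keep intermediate arrays alive. The uncalled helper below, using plain
# NumPy and the standard-library weakref module, shows why was_deleted()
# flips to True once the last strong reference disappears.
def _sketch_weakref_tracking():
    """Editor's illustrative sketch only; never called by the scheduler."""
    import weakref
    import numpy as np
    x = np.empty((2, 2))
    ref = weakref.ref(x)
    alive_before = ref() is not None    # True: 'x' still holds the array
    del x                               # drop the only strong reference
    alive_after = ref() is not None     # False: the array was collected
    return alive_before, alive_after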
class StaticScheduleFunction(chainer.function_node.FunctionNode):
"""A function that executes the static schedule of a Chain.
An instance of this class executes the static schedule of computations
that are equivalent to executing the define-by-run code of a Chain.
This class is used by the `static_graph` decorator to wrap the
define-by-run
computations of a chain into two static schedules:
- The forward schedule corresponds to the computations that are executed by
the define-by-run code of the `__call__()` method of a chain. The static
schedule corresponding to these computations can be executed by calling the
`forward()` method of this class.
- The backward schedule corresponds to the computations that are executed
    by the sequence of calls to `Function.backward()` that occur when
backpropagating the gradients through the same chain. That is, for each
`Function.forward()` that was called during the forward propagation,
there will be a corresponding call to `Function.backward()` (of the
same Function object) in the backward schedule. This backward schedule
can be executed by calling the `backward()` method of this class.
Note the intended usage of this class:
    Recall that a "static chain" refers to a chain that is decorated by the
`static_graph` decorator.
During the first forward pass of a static chain, the define-by-run code
is executed. However,
for subsequent iterations, that define-by-run code is replaced by an
instance
of this function and this function will be called instead. Since the
static
schedules contained by this function perform the same computations, it is
safe (and potentially much more efficient) to simply execute the static
schedule instead
of the define-by-run code. See `static_graph` for details.
Args:
schedule_manager (ScheduleManager): The schedule manager of this
schedule instance.
in_vars (tuple of Variable): The flattened tuple of input variables
that is supplied to
`__call__()` method of the chain that this schedule corresponds to.
unique_arrays (list of ndarray): A list of all unique array references
deeply used in an StaticScheduleFunction instance. It is 'None'
for the StaticScheduleFunction that corresponds to the "forward"
schedule, but the contained StaticScheduleFunction for the
"backward" schedule should take the unique_arrays of the
"forward" schedule.
"""
def __init__(self, schedule_manager, verbosity_level=0,
enable_double_backprop=False):
# A pass depth of 0 corresponds to the schedule for the forward pass.
# A pass depth of 1 corresponds to the schedule for the backward pass.
# A pass depth of 2 corresponds to the schedule for the
# double-backward pass, and so on.
self.pass_depth = 0
self.schedule_manager = schedule_manager
# A list of ScheduleInfo objects, each of which contains one function
# in the static schedule. The order of functions in this list is
# the order they should be called in the schedule.
self.schedule_info_list = []
# A list of all unique ndarrays used in this schedule and any deeply
# contained schedules (backward, double-backward schedules).
# That is, it is shared among all pass depths.
# Note that this typically includes the ndarray attributes of the
# parameters of the chain, the input variables to the chain,
# and any intermediate arrays (activations, etc) created while
# executing the define-by-run code of the chain.
self.unique_arrays = []
# A list of UniqueArray objects, where
# each object contains information such as what the array corresponds
# to (variable, parameter.data, etc), weak or regular reference,
# whether
# it was dynamically allocated or read-only in the schedule.
# It is the same length as unique_arrays.
self.unique_array_infos = []
# Maps id(ndarray) to its position in self.unique_arrays
# This is shared by this schedule and all deeply-contained schedules.
self.array_id_to_unique_index = dict()
self.backward_schedule_func = None
self.verbosity_level = verbosity_level
self.enable_double_backprop = enable_double_backprop
self.in_vars = None
self.chain = None
self.schedule_built = False
        # A list of all parameters in the model (i.e., those that exist when
        # build_schedule() is called).
# This is shared among all deeply-contained schedules of this schedule.
self.params_list = []
# This list contains the grad_var corresponding to each variable
# in params_list. This is needed so that we can restore any grad_var
# that is set to None by outside code.
# This is shared among all deeply-contained schedules of this schedule.
self.grad_var_list = []
# Maps an array id (of a parameter) to its location.
# id(array) -> (index_in_self.params_list, attribute_location)
self.array_id_to_param_map = dict()
# Maps an array id (of an input variable for forward()) to its
# positional index.
# id(array) -> (index in inputs argument of forward())
self.array_id_to_input_var_map = dict()
# maps a Parameter id to the parameter's index in self.params_list
self.param_id_to_index = dict()
# A list of tuples that specify the mappings from static schedule
# arrays to parameter attributes.
# These are pre-hooks that are run before running the schedule.
self.param_hooks = []
# These are post hooks that are run after executing the schedule.
# They are used to update parameter attributes from dynamically-
# allocated arrays in the schedule.
self.param_post_hooks = []
# A list of tuples that specify the mappings from static schedule
# arrays to 'data' array attributes of the output variables.
self.out_var_hooks = []
# A list of tuples that specify the mapping from static schedule
# arrays to input variable index in the "inputs" argument of forward()
# This is used to update the array references in the static schedule
# that refer to the data attribute of input variables.
self.in_var_hooks = []
self.dynamically_allocated_unique_index = set()
# Maps an index in unique_arrays to the index in the returned
# output variables, if the index corresponds to an output
# variable.
self.unique_ind_to_out_var_ind = dict()
def get_unique_index_from_array(self, array):
"""Return the array index if it exists.
Return the index of the array in self.unique_array_infos if the
array already exists in self.unique_array_info with a valid
reference. Otherwise, return None.
"""
ar_id = id(array)
if ar_id in self.array_id_to_unique_index:
# It is possible that this id is stale if a previous
# array that had the same id has already been deleted.
# So, verify that the existing array with this id is
# still alive.
unique_ind = self.array_id_to_unique_index[ar_id]
info = self.unique_array_infos[unique_ind]
assert ar_id == info.id
if info.was_deleted():
# id was stale, so remove from the dict.
del self.array_id_to_unique_index[ar_id]
return None
else:
return self.array_id_to_unique_index[ar_id]
def get_contained_schedule(self):
# Make and return the backward schedule (relative to
# this schedule).
sched = StaticScheduleFunction(self.schedule_manager,
self.verbosity_level,
self.enable_double_backprop)
sched.pass_depth = self.pass_depth + 1
sched.unique_arrays = self.unique_arrays
sched.unique_array_infos = self.unique_array_infos
sched.array_id_to_unique_index = self.array_id_to_unique_index
sched.params_list = self.params_list
sched.grad_var_list = self.grad_var_list
sched.array_id_to_param_map = self.array_id_to_param_map
sched.param_hooks = self.param_hooks
sched.param_id_to_index = self.param_id_to_index
return sched
def is_empty(self):
"""Return True if this schedule is empty.
"""
return len(self.schedule_info_list) == 0
def append_function(self, func, args, kwargs, func_name=None):
"""Append a function to the static schedule.
Append a function `func` to the static schedule. `func` can
be any function that is decorated with `@static_code` and that
was called while executing the static chain's `__call___()`
method, which contains the define-by-run code. The code
in the `@static_code` decorator will call this method to
add the function to the schedule just after it executes in
the define-by-run code as follows:
`return_arrays = func(*args, **kwargs)`
During the next iteration when the static chain switches from define-
by-run to the static schedule, a corresponding `ScheduleInfo`
object will call `func` as above, except that the scheduler might
make modifications
to some of the arrays in `kwargs` before and after the function is
called to implement various memory optimizations.
Args:
func (function or method): The function to append to the schedule.
This is a function that was decorated with `@static_code`.
args: The arguments that were originally supplied to `func` in
the define-by-run code of the static chain.
kwargs: The keyword arguments that were originally supplied to
`func` in the define-by-run code of the static chain.
func_name (str): Optional name for `func`, for debugging
purposes.
        Returns:
            tuple of ndarray or None: The value that is returned by
                `func`, if any.
"""
# Check previous function in the schedule, if available.
# Check the arrays in the retained inputs/outputs and force them
# to remain statically allocated in the schedule.
# ids of any retained arrays.
retained_ids = set()
last_sched_info_ind = len(self.schedule_info_list) - 1
if last_sched_info_ind >= 0:
prev_sched_info = self.schedule_info_list[last_sched_info_ind]
if prev_sched_info.function_node is not None:
# get retained inputs/outputs.
retained_in_vars = \
prev_sched_info.function_node.get_retained_inputs()
retained_out_vars = \
prev_sched_info.function_node.get_retained_outputs()
if (retained_in_vars is not None and
retained_out_vars is not None):
retained_vars = retained_in_vars + retained_out_vars
elif retained_in_vars is not None:
retained_vars = retained_in_vars
elif retained_out_vars is not None:
retained_vars = retained_out_vars
else:
retained_vars = None
if retained_vars is not None:
for var in retained_vars:
retained_ids.add(id(var.data))
for keep_id in retained_ids:
unique_ind = self.array_id_to_unique_index[keep_id]
array_info = self.unique_array_infos[unique_ind]
array_info.retain = True
# Note: the following line is not actually needed.
# array_info.array = array_info.weak_ref()
delete_hooks = []
for unique_ind, ar_info in enumerate(self.unique_array_infos):
# todo: this is O(N^2) and maybe too slow for large graphs.
# Optimize it later.
if ar_info.was_deleted():
if ar_info.dynamic_deletion_index is None:
if self.verbosity_level >= 2:
print('Adding delete hook:')
delete_hooks.append(unique_ind)
ar_info.dynamic_deletion_index = last_sched_info_ind + 1
ar_info.dynamic_deletion_pass_depth = self.pass_depth
# Call the `@static_code`-decorated function.
ret = func(*args, **kwargs)
inputs_hooks = []
if 'inputs' in kwargs:
in_list = kwargs['inputs']
assert isinstance(in_list, list)
for ind, x in enumerate(in_list):
if _is_xp(x):
unique_ind = self.get_unique_index_from_array(x)
if unique_ind is None:
# Note: we append None here because we cannot store any
# additional reference to the array.
# Otherwise, it would
# prevent garbage collection. Note that a
# weak reference
# will be stored in the ArrayInfo below.
self.unique_arrays.append(None)
self.unique_array_infos.append(ArrayInfo(x))
unique_ind = len(self.unique_arrays) - 1
self.array_id_to_unique_index[id(x)] = unique_ind
inputs_hooks.append((ind, unique_ind))
# Now that the hook has been added, we can delete
# array reference from 'args'.
in_list[ind] = None
outputs_hooks = []
if 'outputs' in kwargs:
out_list = kwargs['outputs']
assert isinstance(out_list, list)
for ind, x in enumerate(out_list):
if _is_xp(x):
unique_ind = self.get_unique_index_from_array(x)
if unique_ind is None:
self.unique_arrays.append(x)
# todo: enable the following line instead once the
# auto-intializing hooks are added. This will further
# reduce memory usage.
# self.unique_arrays.append(None)
self.unique_array_infos.append(ArrayInfo(x))
unique_ind = len(self.unique_arrays) - 1
self.array_id_to_unique_index[id(x)] = unique_ind
outputs_hooks.append((ind, unique_ind))
# Now that the hook has been added, we can delete
# array reference from 'args'.
out_list[ind] = None
# A list of hooks (each is a tuple) that will be used to set
# correct array references in 'unique_arrays' after executing
# the static schedule function 'func'. These hooks update
# the references in 'unique_arrays' to refer to the arrays
# that were dynamically allocated in the return value of
# 'func'.
return_hooks = []
if ret is not None:
assert (isinstance(ret, list) or
isinstance(ret, tuple))
for ret_index, item in enumerate(ret):
if _is_xp(item):
# note: id might not be unique if objects have been
# garbage collected.
item_id = id(item)
unique_index = self.get_unique_index_from_array(item)
if unique_index is None:
# Note: Append None instead of 'item' to prevent an
# extra reference from being stored. Otherwise it
# would prevent garbage collection.
self.unique_arrays.append(None)
ar_info = ArrayInfo(item)
ar_info.dynamically_allocated = True
sched_info_ind = len(self.schedule_info_list)
ar_info.dynamic_allocation_index = sched_info_ind
ar_info.dynamic_allocation_pass_depth = self.pass_depth
self.unique_array_infos.append(ar_info)
unique_index = len(self.unique_arrays) - 1
self.array_id_to_unique_index[item_id] = \
unique_index
else:
# Since all of the return arrays are supposed to
# have been dynamically allocated inside 'func',
# they had better not already be in unique_arrays.
# If so, it is an error.
unique_index = self.array_id_to_unique_index[item_id]
print('the current id: ', item_id)
print('the unique_index: ', unique_index)
                        print('array info: ',
                              self.unique_array_infos[unique_index])
raise RuntimeError('Found result array from schedule '
'function already in '
'unique_arrays!')
return_hooks.append((ret_index, unique_index))
self.dynamically_allocated_unique_index.add(unique_index)
if self.verbosity_level >= 2:
print('Adding function to static schedule: ', func)
self.schedule_info_list.append(ScheduleInfo(func, args, kwargs,
inputs_hooks,
outputs_hooks,
return_hooks,
delete_hooks,
self.unique_arrays,
self.unique_array_infos,
func_name=func_name))
return ret
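    # --- Hedged illustration (editor's comment sketch) ---
    # append_function() receives calls that originate from code decorated
    # with `@static_code`. A user-defined side-effect function would look
    # roughly like the commented sketch below (names are assumptions):
    #
    #     @static_code
    #     def increment_counter(counter_holder):
    #         counter_holder[0] += 1
    #
    # Each call of such a function during the first (define-by-run) pass is
    # forwarded here and recorded as a ScheduleInfo entry.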
def __repr__(self):
out = 'StaticSchedule:\n'
if self.pass_depth == 0:
depth = 'forward pass'
elif self.pass_depth == 1:
depth = 'backward pass'
elif self.pass_depth == 2:
depth = 'double backward pass'
else:
depth = str(self.pass_depth)
out += 'Pass depth: ' + depth + '\n'
out += 'Length of unique_arrays: ' + \
str(len(self.unique_arrays)) + '\n'
for x in self.schedule_info_list:
out += str(x)
return out
def debug_print_ref_counts(self):
print('reference counts in unique_arrays:')
for ind in range(len(self.unique_arrays)):
print('index: ', ind)
print('reference count: ',
sys.getrefcount(self.unique_arrays[ind]))
def run_param_pre_hooks(self):
"""Run parameter reference updater hooks.
It also handles the case where the 'grad' attribute
was set to 'None' by outside Chainer code.
"""
for hook in self.param_hooks:
(unique_array_index, param_attribute_location) = hook
(params_list_index, attribute_location) = param_attribute_location
if attribute_location == 'data':
# This is the corresponding parameter array, which might
# have had its reference changed to a different array or set
# to None.
self.unique_arrays[unique_array_index] = \
self.params_list[params_list_index].data
elif attribute_location == 'grad':
# This is the corresponding parameter array, which might
# have had its reference changed to a different array or set
# to None.
self.params_list[params_list_index].grad = \
self.unique_arrays[unique_array_index]
def run_param_post_hooks(self):
"""Update parameter attributes after schedule is executed.
If any dynamically-allocated arrays in the schedule correspond to
a parameter attribute, it must be updated after the schedule is
run.
"""
if self.verbosity_level >= 2:
print('run_param_post_hooks()...')
for hook in self.param_post_hooks:
(unique_array_index, param_attribute_location) = hook
(params_list_index, attribute_location) = param_attribute_location
if attribute_location == 'data':
self.params_list[params_list_index].data = \
self.unique_arrays[unique_array_index]
elif attribute_location == 'grad':
self.params_list[params_list_index].grad = \
self.unique_arrays[unique_array_index]
def run_in_var_hooks(self, input_var_arrays):
"""Run hooks to update variable array references.
Args:
input_var_arrays (tuple of ndarray): The 'data' array attributes
of the input variables to this function.
"""
for hook in self.in_var_hooks:
(unique_array_index, in_var_ind) = hook
if self.verbosity_level >= 2:
print('input var hook:')
print('unique_array_index: ', unique_array_index)
print('in_var_ind: ', in_var_ind)
print('_run_in_var_hooks(): Using this input variable array '
'for forward pass: ', input_var_arrays[in_var_ind])
self.unique_arrays[unique_array_index] = \
input_var_arrays[in_var_ind]
def debug_print_unique_arrays_info(self):
for ind, item in enumerate(self.unique_arrays):
print('--- unique_arrays ---')
print('index: {0}; id: {1}'.format(ind, id(item)))
if item is not None:
print('shape: ', item.shape)
if ind in self.unique_ind_to_out_var_ind:
out_var_ind = self.unique_ind_to_out_var_ind[ind]
print('output variable at return index: ', out_var_ind)
if ind in self.dynamically_allocated_unique_index:
print('Dynamically allocated inside schedule.')
def run_out_var_hooks(self):
"""Run hooks to update output variable array references.
"""
for hook in self.out_var_hooks:
(out_var_ind, unique_list_index) = hook
out_var = self.out_vars[out_var_ind]
out_var.data = self.unique_arrays[unique_list_index]
if self.verbosity_level >= 2:
print('StaticScheduleFunction: running output variable hook: '
'out_var_ind, unique_list_index): ', hook)
def set_out_variables(self, out_vars):
"""Set output variables.
This should be called after the define-by-run code in the
chain's `__call__()` has already run but before running the
static schedule.
Args:
out_vars (list of Variable): The (flattened) list of output
variables obtained by performing a define-by-run
forward pass (or corresponding backward pass) on the
local sub-graph corresponding to the static chain.
"""
self.out_vars = out_vars
# Create output-variable update hooks.
for var_ind, var in enumerate(out_vars):
if var is not None:
key = id(var.data)
if key in self.array_id_to_unique_index:
unique_list_index = self.array_id_to_unique_index[key]
self.out_var_hooks.append((var_ind, unique_list_index))
self.unique_ind_to_out_var_ind[unique_list_index] = var_ind
else:
raise RuntimeError('Could not find output variable in '
'unique_arrays.')
def build_schedule(self, chain, in_vars):
"""Build the static schedule.
Perform one-time post-processing on the functions and arguments
that were
previously supplied in 'append_function()' to create the static
schedule.
This method must be called after the final call of 'append_function()'
and before calling 'forward()' for the first time.
Args:
            chain: The static chain that uses this schedule.
            in_vars (list of Variable): The input variables to this static
                schedule. These are the input variables (each having no
creator) of the local sub-graph corresponding to the
static chain.
"""
self.chain = chain
self.in_vars = in_vars
# Iterate through all array info objects and for any arrays that
# still have a valid reference, copy into unique_arrays.
if self.verbosity_level >= 2:
print('Building schedule for pass depth: ', self.pass_depth)
for ind, info in enumerate(self.unique_array_infos):
if self.verbosity_level >= 2:
print('unique array index: ', ind)
print('array info: ', info)
if not info.was_deleted():
self.unique_arrays[ind] = info.weak_ref()
# Verify that all array references are actually unique.
unique_ids = set()
for ar in self.unique_arrays:
if ar is not None:
assert id(ar) not in unique_ids
unique_ids.add(id(ar))
for param in chain.params():
param_key = id(param)
if param_key not in self.param_id_to_index:
self.params_list.append(param)
grad_var = param.grad_var
self.grad_var_list.append(grad_var)
param_index = len(self.params_list) - 1
self.param_id_to_index[param_key] = param_index
else:
# We have seen this parameter before.
param_index = self.param_id_to_index[param_key]
grad_var = param.grad_var
self.grad_var_list[param_index] = grad_var
if param.data is not None:
key = id(param.data)
if key not in self.array_id_to_param_map:
self.array_id_to_param_map[key] = (param_index, 'data')
if param.grad is not None:
key = id(param.grad)
if key not in self.array_id_to_param_map:
self.array_id_to_param_map[key] = (param_index, 'grad')
for var_ind, in_var in enumerate(self.in_vars):
assert in_var.data is not None
key = id(in_var.data)
self.array_id_to_input_var_map[key] = var_ind
# Iterate over all arrays used in the schedule and check which ones
# correspond to parameter arrays or input variables. When a match
# is found, create a corresponding hook function. This hook will
# run just before executing the schedule and set the array
# references used in the schedule to be consistent with the
# input variables and parameters.
assert len(self.unique_arrays) > 0
for unique_array_index, ar in enumerate(self.unique_arrays):
key = id(ar)
# Create pre-run parameter hooks.
if key in self.array_id_to_param_map:
param_attribute_location = self.array_id_to_param_map[key]
param_hook = (unique_array_index, param_attribute_location)
self.param_hooks.append(param_hook)
# Create pre-run input variable hooks.
if key in self.array_id_to_input_var_map:
in_var_ind = self.array_id_to_input_var_map[key]
in_var_hook = (unique_array_index, in_var_ind)
self.in_var_hooks.append(in_var_hook)
if self.verbosity_level >= 2:
print('build_schedule(): Adding input variable hook: ',
in_var_hook)
print('For input variable: ', ar)
# Create post-run hooks for any arrays that are dynamically
# allocated inside the schedule.
if unique_array_index in self.dynamically_allocated_unique_index:
if key in self.array_id_to_param_map:
param_attribute_location = self.array_id_to_param_map[key]
param_hook = (unique_array_index, param_attribute_location)
self.param_post_hooks.append(param_hook)
if self.verbosity_level >= 2:
print('self.param_hooks: ', self.param_hooks)
self.debug_print_unique_arrays_info()
# todo: We can potentially reduce memory usage by freeing memory
# of intermediate arrays in self.unique_arrays
# once they are no longer needed in the schedule or by
# parameters.
        if self.verbosity_level >= 2:
            print('end of build_schedule()')
self.schedule_built = True
def forward(self, inputs):
if self.verbosity_level >= 2:
print('Calling StaticScheduleFunction.forward()...')
# Note: This method will be invoked every iteration starting
# from the second
# iteration. That is because the corresponding define-by-run
# code runs instead
# during the first iteration.
if not self.schedule_built:
raise RuntimeError('forward() was called before '
'build_schedule()!')
self.run_param_pre_hooks()
self.run_in_var_hooks(inputs)
if self.verbosity_level >= 2:
print('Running static schedule...')
# Run each function in the static schedule.
for x in self.schedule_info_list:
x()
if self.verbosity_level >= 2:
self.debug_print_unique_arrays_info()
self.run_out_var_hooks()
self.run_param_post_hooks()
ret = []
for y in self.out_vars:
if y is None or y.data is None:
ret.append(None)
else:
# todo: add test case for an example where the following
# copy is required (evaluation mode, repeated calls of
# chain that reuse same schedule).
ret.append(y.data.copy())
return tuple(ret)
def backward(self, target_input_indexes, grad_outputs):
if self.verbosity_level >= 2:
print('Calling StaticScheduleFunction.backward()...')
# The first time this method is called, the define-by-run code is
# executed in order to create a static schedule.
self.schedule_manager.end_forward()
if self.backward_schedule_func is None:
            if self.verbosity_level >= 2:
                print('Creating new backward schedule...')
# Create backward schedule and run define-by-run backward code.
self.backward_schedule_func = self.get_contained_schedule()
# Make local copies of the variables in grad_outputs.
new_grad_outputs = []
for var in grad_outputs:
# Replace each input variable with a new variable having
# the same data.
new_grad_outputs.append(chainer.Variable(var.data))
with chainer.using_config('schedule_func',
self.backward_schedule_func):
with chainer.using_config('enable_backprop', True):
for ind, var in enumerate(new_grad_outputs):
# todo: possibly don't need the following:
self.out_vars[ind].grad = new_grad_outputs[ind].data
inputs = [param for param in self.chain.params()]
for var in self.in_vars:
inputs.append(var)
                    # Need a shorter name to avoid a "line too long" error.
ugh = self.enable_double_backprop
chainer.grad(self.out_vars,
inputs,
grad_outputs=new_grad_outputs,
set_grad=True,
enable_double_backprop=ugh)
# We no longer need the backward graph from self.out_vars, so
# unchain them.
# todo (vogel): enable this eventually. For now, it
# causes some needed variables to be set to None
# in some models such as CIFAR example.
# for var in self.out_vars:
# var.unchain_backward()
# Note: var.grad_var is allowed to be None below:
backward_out_vars = [var.grad_var for var in self.in_vars]
self.backward_schedule_func.set_out_variables(backward_out_vars)
for n in range(len(self.in_vars)):
self.in_vars[n] = None
if self.verbosity_level >= 2:
print('building backward schedule.')
self.backward_schedule_func.build_schedule(self.chain,
new_grad_outputs)
return self.backward_schedule_func.apply(grad_outputs)
class ScheduleManager(object):
"""A manager of static schedules for a static chain.
This is a container of the static schedules that are used by a static
chain.
Args:
minimize_cache_size (bool): If `True`, attempt to reduce memory
usage by clearing the cached schedules whenever the training
mode changes (that is, whenever `chainer.config.train` changes
value) or whenever the mini-batch size changes.
"""
def __init__(self, minimize_cache_size=True, verbosity_level=0):
# Maps a key string to a list of schedule functions.
self.schedules = dict()
self.minimize_cache_size = minimize_cache_size
self.in_use_count = dict()
self.forward_over = False
self.prev_train_config = None
self.max_in_use_train = 0
self.train_count = 0
self.verbosity_level = verbosity_level
def get_schedule(self, in_vars, enable_double_backprop=False):
"""Get a static schedule.
Return a static schedule object (that is, an instance of
``StaticScheduleFunction``) that is compatible with
the current configuration and input variables to the supplied chain.
If there is no existing schedule available, return an empty schedule
object.
During the usual "training mode" (that is, when both
`chainer.config.enable_backprop` and `chainer.config.train`
        are `True`), this method will always return a distinct static
schedule each time it is called within the same iteration.
It will also try to reuse
existing schedules across iterations. Therefore, any schedule that
is returned in a given iteration cannot be returned again until
the following iteration. However, if either of these flags is
'False', then this method may return the same schedule instance
multiple times within the same iteration, as long as it is
compatible with `in_vars`.
Note that in order to implement the above behavior, the schedule
manager must be informed when the current iteration has finished.
This is accomplished by calling `end_forward()` after the
iteration has finished. If a backward pass is performed, then
`end_forward()` will be automatically called. Otherwise, it
will not be called and the user will be responsible for calling
it.
Args:
in_vars (tuple of :class:`~chainer.Variable`): The input
variables to the chain.
Returns:
An instance of ``StaticScheduleFunction``.
"""
if self.forward_over:
self.forward_over = False
if self.minimize_cache_size:
if chainer.config.train != self.prev_train_config:
# Training config changed, so clear caches.
self.prev_train_config = chainer.config.train
if self.verbosity_level >= 2:
print('Clearing schedule cache...')
self.schedules.clear()
self.in_use_count.clear()
if (chainer.config.train is False or
chainer.config.enable_backprop is False):
key_str = 'test:' + \
''.join(str(x.shape) + str(x.dtype) for x in in_vars)
# If the maximum number of in-use schedules in any iteration
# during training mode was exactly 1, assume it should also
# be 1 for test mode.
if key_str in self.schedules:
sched_list = self.schedules[key_str]
sched = sched_list[0]
else:
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
self.schedules[key_str] = [sched]
return sched
else:
key_str = 'train:' + \
''.join(str(x.shape) + str(x.dtype) for x in in_vars)
self.train_count += 1
if key_str in self.schedules:
sched_list = self.schedules[key_str]
available_index = self.in_use_count[key_str]
if available_index >= len(sched_list):
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
sched_list.append(sched)
sched = sched_list[available_index]
self.in_use_count[key_str] = available_index + 1
else:
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
self.schedules[key_str] = [sched]
self.in_use_count[key_str] = 1
return sched
def end_forward(self):
"""Make in-use schedules available for use in next iteration.
Set the in-use status of all schedules to "not in use" so that
they can be reused in the next iteration.
In the case that test mode is active
(`chainer.config.train` is `False`) and the static chain corresponding
to this manager was not called more than once in any iteration during
training mode, then this method will be called automatically.
"""
if not self.forward_over:
for key in self.in_use_count:
self.in_use_count[key] = 0
self.forward_over = True
if self.train_count > self.max_in_use_train:
self.max_in_use_train = self.train_count
if self.verbosity_level >= 2:
print('Maximum in-use schedules per training iteration: ',
self.max_in_use_train)
self.train_count = 0
def __repr__(self):
out = 'ScheduleManager:\n'
for key_str in self.schedules:
out += 'key string: ' + key_str
sched_list = self.schedules[key_str]
out += ' -> schedule list of length: ' + \
str(len(sched_list)) + '\n'
for sched in sched_list:
out += str(sched)
return out
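# --- Hedged illustration (editor's sketch, not part of the scheduler) ---
# ScheduleManager.get_schedule() caches schedules per configuration using a
# key string built from the mode plus the shape and dtype of every input,
# as in the method above. The uncalled helper below is an assumption of how
# such a key could be computed for a plain sequence of ndarrays.
def _sketch_schedule_cache_key(arrays, train=True):
    """Editor's illustrative sketch only; never called by the scheduler."""
    prefix = 'train:' if train else 'test:'
    return prefix + ''.join(str(x.shape) + str(x.dtype) for x in arrays)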
def static_graph(*args, **kwargs):
"""Decorator to mark a Chain's ``__call__()`` as a static sub-graph.
This decorator marks the define-by-run code inside the `__call__()`
method of a Chain instance as corresponding to a static computation
graph or sub-graph. Such a chain will be referred to as a 'static chain'.
This allows various "static graph" optimizations to be performed, which
can result in significant speedups for some models.
When this decorator is used, the chain's define-by-run code executes
during the first iteration as usual. However, while the define-by-run
code is executing, a trace is also performed to incrementally create a
corresponding static schedule. This static schedule will only contain
the subset of the computations inside the define-by-run code that actually
needs to run every iteration. Specifically, this will contain the code
inside any functions called that were annotated with the `@static_code`
decorator, which will include all Chainer built-in functions, as well as
any user-defined functions that use `@static_code`. Then, starting
from the second iteration, when the static chain is called, its
static schedule code will be executed instead of its define-by-run code.
However, the user must also be careful of the following:
- The user is responsible for applying this decorator correctly. The
framework
does not check that the define-by-run code corresponds to a static
graph. The graph can be different between training and
evaluation mode (such as when dropout and/or batch normalization are
used), but should otherwise be static.
- When `chainer.config.enable_backprop` is enabled, if a backward pass
is not performed each iteration, then the user code must call a method
    `chain.schedule_manager.end_forward()` on the static chain each iteration.
- Static graphs allow tradeoffs between computation and memory usage.
For example, the `minimize_cache_size` argument will typically result in
    higher memory usage when set to `False` because all cached schedules
are retained.
- When this feature is enabled, only the Chainer function and/or link
calls inside the chain's `__call__()` method will be included in the
    static schedule by default. Any other code that the user puts in
`__call__()`, such as a print statement or code to increment a counter
for example, will not automatically get added. We will refer to such
code other than Chainer function/link calls as "side-effect" code.
Since side-effect code does not get included in the static schedule
    by default, this means that it will only ever execute once, during
the first iteration. There is a way to force side-effect code to be
    included in the static schedule, however: the user can wrap such
code inside a function that is decorated with
`@static_code` to ensure that it gets added to the static schedule.
For an example of this, refer to the documentation.
- This feature is experimental and advanced optimizations such
as kernel fusion and various memory optimizations are not implemented
yet.
Usage:
This decorator should only be applied
to define-by-run code that actually corresponds to a static subgraph.
    Refer to the documentation for additional details and examples of
correct usage.
This decorator should be applied to each of the largest static
subgraphs in the model; it can also be applied to a static subgraph
that is not the largest subgraph, but that could result in reduced
performance.
It is not currently allowed to
mark a chain as static if it is contained within
another chain that is also marked as being static.
For example, suppose a
static graph `A` contains a static sub-graph `B`. Then, only the chain
corresponding to `A` should be marked as static and the chain
corresponding
to `B` should not be marked as static.
The behavior of a static chain depends on the training mode flag,
`chainer.config.train`. If it is `True`, then a static chain that is
called multiple times will try to use a distinct static schedule object
(that is, call a distinct instance of a FunctionNode that implements
that static schedule) on each call. The same schedule instance cannot
be reused until the forward pass has completed, which is signaled by
performing a backward pass through the model. It is therefore important
that the backward pass be performed after each forward pass during
training. Since this is usually the case, most usages of static chain
    will not require any modifications to existing code other than applying
this decorator. However, if you would like to perform multiple forward
passes during training before performing a backward pass, then you
must call `chain.schedule_manager.end_forward()` after the end
of each forward pass.
If test mode is active (`chainer.config.train` is `False`) then it
is not necessary to inform the chain at the end of each forward pass
because in test mode, a static chain always attempts to reuse
existing static schedule objects. The same static schedule can be reused
during a single forward pass, because it is not necessary to compute
gradients.
    It is also possible to disable static optimizations while in test mode by
setting the decorator argument `force_test_define_by_run=True`.
Note: If either 'chainer.config.enable_backprop' or 'chainer.config.train'
is set to 'False', then cached static schedules will be reused when
possible to reduce memory usage.
Double-backprop:
Double-backpropagation is not enabled by default. It can be enabled by
supplying the keyword argument ``enable_double_backprop=True``
to this decorator. Note: this feature has not been tested yet.
Restrictions on input arguments and return values of a static chain:
    Recall that unlike a function, there are no restrictions on the
arguments to a chain. However, there currently are some restrictions
when a static chain is used. Specifically, the arguments to a static
chain must consist of a variable, list or tuple. In the case of a list
or tuple, the elements are required to be an instance of variable,
list, or tuple. There can be an arbitrary number of nested lists/
tuples. No other object types are allowed.
In addition, keyword arguments are not allowed.
The return value of a static chain must be a
variable, list, or tuple in which each element of the list or
tuple is also a variable, list, or tuple.
    This decorator can be supplied with the following optional keyword
    arguments. This is an experimental feature, and the API and arguments
    might change.
Args:
force_test_define_by_run (bool): If `True`, disable static graph
optimizations during test mode (that is, when
`chainer.config.train` is False). This may be needed in order
for some existing RNN links such as LSTM to work correctly,
since some existing links do not correspond to a static graph
in some cases.
The default is `False`.
minimize_cache_size (bool): If `True`, minimize the number of cached
static schedules in order to reduce memory usage. For example,
if the mini-batch size changes or the training mode changes,
the schedules will need to be recomputed, but memory is also
saved by not retaining all cached schedules.
The default value is `True`.
verbosity_level (int): Depending on the value, print additional
information:
0: Warnings only. (the default value)
1: Show only information that is collected during the first
iteration and when a new static schedule is created.
2: Detailed debugging information, possibly showing new
information every iteration.
enable_double_backprop (bool): If `True`, enable double-backprop.
The default value is `False` (not enabled).
Returns:
Wrapped ``__call__()`` method with static chain support.
"""
# todo: consider to allow nested use of this decorator.
force_test_define_by_run = False
# todo: enable eventually
minimize_cache_size = False
verbosity_level = 0
enable_double_backprop = False
zero_args = False
if len(args) == 1 and not kwargs and callable(args[0]):
callable_arg = args[0]
zero_args = True
elif kwargs:
if 'force_test_define_by_run' in kwargs:
force_test_define_by_run = kwargs['force_test_define_by_run']
if 'minimize_cache_size' in kwargs:
minimize_cache_size = kwargs['minimize_cache_size']
if 'verbosity_level' in kwargs:
verbosity_level = kwargs['verbosity_level']
if 'enable_double_backprop' in kwargs:
enable_double_backprop = kwargs['enable_double_backprop']
def wrap(func):
def wrapped_func(*inner_args, **inner_kwargs):
# The static subgraph optimization feature can be turned off using
# a configuration, in which case this decorator merely calls the
# wrapped function without introducing any side effects.
if not chainer.config.use_static_graph:
return func(*inner_args, **inner_kwargs)
if verbosity_level >= 2:
print('Calling static chain...')
chain = inner_args[0]
# The arguments to `__call__()` of the static chain.
# These could consist of any combination of nested lists and/or
# tuples of variables or arrays.
chain_args = inner_args[1:]
if chainer.config.train is False and force_test_define_by_run:
return func(*inner_args, **inner_kwargs)
chain_args_flat, in_unflatten_inds, __ = _flatten_args(chain_args)
# Since it is allowed for in_vars to be either variables or arrays,
# we force to variables.
flat_vars = []
for x in chain_args_flat:
# This assumes x is either a variable or ndarray.
# todo: check this and handle case when it is not.
if not isinstance(x, chainer.Variable):
flat_vars.append(chainer.Variable(x))
else:
flat_vars.append(x)
flat_vars = tuple(flat_vars)
if not hasattr(chain, 'schedule_manager'):
chain.schedule_manager = ScheduleManager(
minimize_cache_size=minimize_cache_size,
verbosity_level=verbosity_level)
schedule_manager = chain.schedule_manager
# To prevent "line too long" error
edb = enable_double_backprop
chain.static_schedule = \
schedule_manager.get_schedule(flat_vars,
enable_double_backprop=edb)
if verbosity_level >= 2:
print('Current schedule manager info: ', schedule_manager)
if not chain.static_schedule.is_empty():
# Call the static schedule code.
if verbosity_level >= 2:
print('This is the 2nd or greater iteration. Calling '
'the existing static schedule...')
chain.static_schedule.debug_print_ref_counts()
out_vars_flat = chain.static_schedule.apply(flat_vars)
out_vars = _unflatten_args(out_vars_flat,
chain._out_vars_unflatten_inds)
else:
# This is the first iteration. Calling the define-by-run code.
assert isinstance(chain, chainer.Chain)
if verbosity_level >= 2:
print('This is the first iteration. Calling the '
'define-by-run code.: ', func)
# First check that this chain is not called from inside another
# static chain because it is not allowed.
if chainer.config.schedule_func is not None:
raise RuntimeError('Not allowed to nest static chains: ',
chain)
new_args = []
new_args.append(chain)
new_flat_vars = []
for var in flat_vars:
# Replace each input variable with a new variable having
# the same data. This is needed so that the chain-local
# computation graph will be rooted at the input variables.
new_flat_vars.append(chainer.Variable(var.data))
unflat_in_args = _unflatten_args_as_list(new_flat_vars,
in_unflatten_inds)
for item in unflat_in_args:
new_args.append(item)
inner_args = tuple(new_args)
with chainer.using_config('schedule_func',
chain.static_schedule):
# Execute the chain's call() method. As the define-by-run
# code executes, the static schedule is constructed.
out_vars = func(*inner_args, **inner_kwargs)
out_vars_flat_dbr, chain._out_vars_unflatten_inds, __ = \
_flatten_args(out_vars)
sched_out_vars = list(out_vars_flat_dbr)
chain.static_schedule.set_out_variables(sched_out_vars)
# Mark the static schedule as complete.
chain.static_schedule.build_schedule(chain, new_flat_vars)
# Now that the static schedule is available, call it using the
# flattened input variables. This will cause the
# static schedule function node to be included in the
# computational graph.
out_vars_flat = chain.static_schedule.apply(flat_vars)
out_vars = _unflatten_args(out_vars_flat,
chain._out_vars_unflatten_inds)
if verbosity_level >= 2:
                print('Returning from 1st call of the static chain.')
return out_vars
return wrapped_func
if zero_args:
return wrap(callable_arg)
else:
return wrap
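# --- Hedged usage sketch (editor's addition; names and sizes are made up) ---
# A chain whose __call__() is a static sub-graph can be wrapped as shown
# below. This mirrors the intended usage described in the docstring above
# and is kept as comments so that this module gains no new imports.
#
#     import chainer
#     import chainer.functions as F
#     import chainer.links as L
#
#     class StaticMLP(chainer.Chain):
#         def __init__(self):
#             super(StaticMLP, self).__init__()
#             with self.init_scope():
#                 self.l1 = L.Linear(None, 100)
#                 self.l2 = L.Linear(100, 10)
#
#         @static_graph
#         def __call__(self, x):
#             # Define-by-run code: runs once, then the captured static
#             # schedule is executed on subsequent calls.
#             h = F.relu(self.l1(x))
#             return self.l2(h)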
def _flatten_args(xs):
"""Flatten the input into a tuple of variables.
In the typical case, `xs` is a tuple or list of objects where each
object is either a variable, list, or tuple. In the case where it is
a list of tuple, the objects in the list or tuple could also be either
a variable, list or tuple. Although the non-list and non-tuple items
are typically an instance of variable, any object other than list or
tuple is allowed.
This function simply flattens the hierarchical lists/tuples so that all
objects that are deeply contained in `xs` that are non-list and non-tuple
will be returned in a single tuple.
Args:
xs:
Returns:
        The flattened tuple, along with the indices and count so that the
        items can be unflattened later (i.e., by calling `_unflatten_args()`).
fixme: does not work if xs is a variable only.
"""
inds = []
ys = []
i = 0
if not isinstance(xs, (list, tuple)):
inds.append(('s', ))
return (xs,), inds, 0
for x in xs:
if isinstance(x, (list, tuple)):
x, sub_inds, total = _flatten_args(x, )
inds.append(('i', i, i+total, sub_inds))
i += total
else:
x = [x]
inds.append(('f', i))
i += 1
ys.extend([y for y in x])
return tuple(ys), inds, i
# todo: this only outputs tuples of tuples. Any list in the original input
# will be converted to a tuple, changing the types of the input arguments
# to the static chain.
def _unflatten_args(xs, inds):
ys = []
for ind in inds:
code = ind[0]
if code == 's':
return xs[0]
elif code == 'i':
i_start, i_end, sub_inds = ind[1:]
y = _unflatten_args(xs[i_start:i_end], sub_inds)
else:
i = ind[1]
y = xs[i]
ys.append(y)
return tuple(ys)
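# --- Hedged illustration (editor's sketch) of the two helpers above ---
# The uncalled function below shows the flatten/unflatten round trip on an
# arbitrary nested structure; as noted above _unflatten_args, inner lists
# come back as tuples.
def _sketch_flatten_roundtrip():
    """Editor's illustrative sketch only; never called by the scheduler."""
    nested = (1, [2, 3], (4, (5,)))
    flat, inds, count = _flatten_args(nested)
    # flat == (1, 2, 3, 4, 5) and count == 5
    rebuilt = _unflatten_args(flat, inds)
    # rebuilt == (1, (2, 3), (4, (5,)))
    return flat, rebuilt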
def _unflatten_args_as_list(xs, inds):
ys = []
for ind in inds:
code = ind[0]
if code == 's':
return xs[0]
elif code == 'i':
i_start, i_end, sub_inds = ind[1:]
y = _unflatten_args(xs[i_start:i_end], sub_inds)
else:
i = ind[1]
y = xs[i]
ys.append(y)
return ys
| 45.617947 | 79 | 0.613093 |
b9bb710b6094c5454943d86e9a2ee38b24dfed6d | 25,167 | py | Python | idaes/gas_solid_contactors/properties/oxygen_iron_OC_oxidation/solid_phase_thermo.py | eslickj/idaes-pse | 328ed07ffb0b4d98c03e972675ea32c41dd2531a | ["RSA-MD"] | 112 | 2019-02-11T23:16:36.000Z | 2022-03-23T20:59:57.000Z | idaes/gas_solid_contactors/properties/oxygen_iron_OC_oxidation/solid_phase_thermo.py | eslickj/idaes-pse | 328ed07ffb0b4d98c03e972675ea32c41dd2531a | ["RSA-MD"] | 621 | 2019-03-01T14:44:12.000Z | 2022-03-31T19:49:25.000Z | idaes/gas_solid_contactors/properties/oxygen_iron_OC_oxidation/solid_phase_thermo.py | eslickj/idaes-pse | 328ed07ffb0b4d98c03e972675ea32c41dd2531a | ["RSA-MD"] | 154 | 2019-02-01T23:46:33.000Z | 2022-03-23T15:07:10.000Z |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
This package provides the necessary constraints for solid phase properties of
an iron-based oxygen carrier
Components - Fe2O3, Fe3O4, Al2O3
Equations written in this model were primarily derived from:
National Institute of Standards and Technology, NIST Chemistry WebBook,
https://webbook.nist.gov/chemistry/ (accessed March 10, 2018).
"""
# Import Pyomo libraries
from pyomo.environ import (Constraint,
Param,
Reals,
value,
Var,
units as pyunits)
from pyomo.util.calc_var_value import calculate_variable_from_constraint
# Import IDAES cores
from idaes.core import (declare_process_block_class,
MaterialFlowBasis,
PhysicalParameterBlock,
StateBlockData,
StateBlock,
MaterialBalanceType,
EnergyBalanceType,
Component,
SolidPhase)
from idaes.core.util.initialization import (fix_state_vars,
revert_state_vars,
solve_indexed_blocks)
from idaes.core.util.misc import add_object_reference
from idaes.core.util import get_solver
from idaes.core.util.model_statistics import (
degrees_of_freedom,
number_unfixed_variables_in_activated_equalities)
import idaes.logger as idaeslog
# Some more information about this module
__author__ = "Chinedu Okoli"
# Set up logger
_log = idaeslog.getLogger(__name__)
@declare_process_block_class("SolidPhaseParameterBlock")
class PhysicalParameterData(PhysicalParameterBlock):
"""
Property Parameter Block Class
Contains parameters and indexing sets associated with properties for
methane CLC.
"""
def build(self):
'''
Callable method for Block construction.
'''
super(PhysicalParameterData, self).build()
self._state_block_class = SolidPhaseStateBlock
# Create Phase object
self.Sol = SolidPhase()
# Create Component objects
self.Fe2O3 = Component()
self.Fe3O4 = Component()
self.Al2O3 = Component()
# -------------------------------------------------------------------------
""" Pure solid component properties"""
# Mol. weights of solid components - units = kg/mol. ref: NIST webbook
mw_comp_dict = {'Fe2O3': 0.15969, 'Fe3O4': 0.231533, 'Al2O3': 0.10196}
self.mw_comp = Param(
self.component_list,
mutable=False,
initialize=mw_comp_dict,
doc="Molecular weights of solid components [kg/mol]",
            units=pyunits.kg/pyunits.mol)
# Skeletal density of solid components - units = kg/m3. ref: NIST
dens_mass_comp_skeletal_dict = {
'Fe2O3': 5250, 'Fe3O4': 5000, 'Al2O3': 3987}
self.dens_mass_comp_skeletal = Param(
self.component_list,
mutable=False,
initialize=dens_mass_comp_skeletal_dict,
doc='Skeletal density of solid components [kg/m3]',
units=pyunits.kg/pyunits.m**3)
# Ideal gas spec. heat capacity parameters(Shomate) of
# components - ref: NIST webbook. Shomate equations from NIST.
# Parameters A-E are used for cp calcs while A-H are used for enthalpy
# calc.
# 1e3*cp_comp = A + B*T + C*T^2 + D*T^3 + E/(T^2)
# where T = Temperature (K)/1000, and cp_comp = (kJ/mol.K)
# H_comp = H - H(298.15) = A*T + B*T^2/2 + C*T^3/3 +
# D*T^4/4 - E/T + F - H where T = Temp (K)/1000 and H_comp = (kJ/mol)
cp_param_dict = {
('Al2O3', 1): 102.4290,
('Al2O3', 2): 38.74980,
('Al2O3', 3): -15.91090,
('Al2O3', 4): 2.628181,
('Al2O3', 5): -3.007551,
('Al2O3', 6): -1717.930,
('Al2O3', 7): 146.9970,
('Al2O3', 8): -1675.690,
('Fe3O4', 1): 200.8320000,
('Fe3O4', 2): 1.586435e-7,
('Fe3O4', 3): -6.661682e-8,
('Fe3O4', 4): 9.452452e-9,
('Fe3O4', 5): 3.18602e-8,
('Fe3O4', 6): -1174.1350000,
('Fe3O4', 7): 388.0790000,
('Fe3O4', 8): -1120.8940000,
('Fe2O3', 1): 110.9362000,
('Fe2O3', 2): 32.0471400,
('Fe2O3', 3): -9.1923330,
('Fe2O3', 4): 0.9015060,
('Fe2O3', 5): 5.4336770,
('Fe2O3', 6): -843.1471000,
('Fe2O3', 7): 228.3548000,
('Fe2O3', 8): -825.5032000}
self.cp_param = Param(self.component_list,
range(1, 10),
mutable=False,
initialize=cp_param_dict,
doc="Shomate equation heat capacity parameters")
# Std. heat of formation of comp. - units = kJ/(mol comp) - ref: NIST
enth_mol_form_comp_dict = {'Fe2O3': -825.5032, 'Fe3O4': -1120.894,
'Al2O3': -1675.690}
self.enth_mol_form_comp = Param(
self.component_list,
mutable=False,
initialize=enth_mol_form_comp_dict,
doc="Component molar heats of formation [kJ/mol]",
units=pyunits.kJ/pyunits.mol)
# -------------------------------------------------------------------------
""" Mixed solid properties"""
# These are setup as fixed vars to allow for parameter estimation
# Particle size
self.particle_dia = Var(domain=Reals,
initialize=1.5e-3,
doc='Diameter of solid particles [m]',
units=pyunits.m)
self.particle_dia.fix()
# TODO -provide reference
# Minimum fluidization velocity - EPAT value used for Davidson model
self.velocity_mf = Var(domain=Reals,
initialize=0.039624,
doc='Velocity at minimum fluidization [m/s]',
units=pyunits.m/pyunits.s)
self.velocity_mf.fix()
        # Minimum fluidization voidage - educated guess used as a rough
        # estimate, because the Ergun equation result (0.4) is suspect
self.voidage_mf = Var(domain=Reals,
initialize=0.45,
doc='Voidage at minimum fluidization [-]',
units=pyunits.m**3/pyunits.m**3)
self.voidage_mf.fix()
# Particle thermal conductivity
self.therm_cond_sol = Var(
domain=Reals,
initialize=12.3e-3,
doc='Thermal conductivity of solid particles [kJ/m.K.s]',
units=pyunits.kJ/pyunits.m/pyunits.K/pyunits.s)
self.therm_cond_sol.fix()
@classmethod
def define_metadata(cls, obj):
obj.add_properties({
'flow_mass': {'method': None, 'units': 'kg/s'},
'particle_porosity': {'method': None, 'units': None},
'temperature': {'method': None, 'units': 'K'},
'mass_frac_comp': {'method': None, 'units': None},
'dens_mass_skeletal': {'method': '_dens_mass_skeletal',
'units': 'kg/m3'},
'dens_mass_particle': {'method': '_dens_mass_particle',
'units': 'kg/m3'},
'cp_mol_comp': {'method': '_cp_mol_comp',
'units': 'kJ/mol.K'},
'cp_mass': {'method': '_cp_mass', 'units': 'kJ/kg.K'},
'enth_mass': {'method': '_enth_mass', 'units': 'kJ/kg'},
'enth_mol_comp': {'method': '_enth_mol_comp',
'units': 'kJ/mol'}})
obj.add_default_units({'time': pyunits.s,
'length': pyunits.m,
'mass': pyunits.kg,
'amount': pyunits.mol,
'temperature': pyunits.K})
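# --- Hedged illustration (editor's sketch, not part of the IDAES API) ---
# The pure-component heat capacity above follows the Shomate form
#     cp*1e3 = A + B*t + C*t**2 + D*t**3 + E/t**2,   t = T[K]/1000,
# with parameters indexed 1..5 in cp_param. The uncalled helper below is a
# plain-Python evaluation of that form (not a Pyomo expression), assuming a
# dict laid out like cp_param_dict.
def _sketch_cp_shomate(params, comp, temperature_K):
    """Editor's illustrative sketch: returns 1e3*cp_mol_comp, i.e. J/mol/K."""
    t = temperature_K / 1000.0
    return (params[(comp, 1)] + params[(comp, 2)] * t +
            params[(comp, 3)] * t ** 2 + params[(comp, 4)] * t ** 3 +
            params[(comp, 5)] / t ** 2)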
class _SolidPhaseStateBlock(StateBlock):
"""
This Class contains methods which should be applied to State Blocks as a
whole, rather than individual elements of indexed State Blocks.
"""
def initialize(blk, state_args=None, hold_state=False,
state_vars_fixed=False, outlvl=idaeslog.NOTSET,
solver="ipopt", optarg={"tol": 1e-8}):
"""
Initialization routine for property package.
Keyword Arguments:
state_args : Dictionary with initial guesses for the state vars
chosen. Note that if this method is triggered
through the control volume, and if initial guesses
were not provided at the unit model level, the
control volume passes the inlet values as initial
guess.
Keys for the state_args dictionary are:
flow_mass, temperature, and mass_frac_comp
outlvl : sets output level of initialization routine
optarg : solver options dictionary object (default=None)
            solver : str indicating which solver to use during
initialization (default = "ipopt")
hold_state : flag indicating whether the initialization routine
should unfix any state variables fixed during
initialization (default=False).
- True - state variables are not unfixed, and
a dict is returned containing flags for
which states were fixed during
initialization.
- False - state variables are unfixed after
initialization by calling the
release_state method
Returns:
If hold_states is True, returns a dict containing flags for
which states were fixed during initialization.
"""
init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="properties")
solve_log = idaeslog.getSolveLogger(blk.name, outlvl, tag="properties")
init_log.info_high('Starting initialization')
# Deactivate the constraints specific for outlet block i.e.
# when defined state is False
for k in blk.keys():
if blk[k].config.defined_state is False:
blk[k].sum_component_eqn.deactivate()
# Fix state variables if not already fixed
if state_vars_fixed is False:
flags = fix_state_vars(blk, state_args)
else:
# Check that the already-fixed state vars result in zero degrees of freedom
for k in blk.keys():
if degrees_of_freedom(blk[k]) != 0:
raise Exception("State vars fixed but degrees of freedom "
"for state block is not zero during "
"initialization.")
# ---------------------------------------------------------------------
# Initialise values
for k in blk.keys():
if hasattr(blk[k], "density_skeletal_constraint"):
calculate_variable_from_constraint(
blk[k].dens_mass_skeletal,
blk[k].density_skeletal_constraint)
if hasattr(blk[k], "mixture_heat_capacity_eqn"):
calculate_variable_from_constraint(
blk[k].cp_mass,
blk[k].mixture_heat_capacity_eqn)
if hasattr(blk[k], "mixture_enthalpy_eqn"):
calculate_variable_from_constraint(
blk[k].enth_mass,
blk[k].mixture_enthalpy_eqn)
for j in blk[k]._params.component_list:
if hasattr(blk[k], "cp_shomate_eqn"):
calculate_variable_from_constraint(blk[k].cp_mol_comp[j],
blk[k].cp_shomate_eqn[j]
)
if hasattr(blk[k], "enthalpy_shomate_eqn"):
calculate_variable_from_constraint(
blk[k].enth_mol_comp[j],
blk[k].enthalpy_shomate_eqn[j])
# Solve property block if non-empty
free_vars = 0
for k in blk.keys():
free_vars += number_unfixed_variables_in_activated_equalities(
blk[k])
if free_vars > 0:
# Create solver
opt = get_solver(solver, optarg)
with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
res = solve_indexed_blocks(opt, [blk], tee=slc.tee)
else:
res = ""
init_log.info_high("Initialization complete {}.".format(
idaeslog.condition(res))
)
# ---------------------------------------------------------------------
if state_vars_fixed is False:
if hold_state is True:
return flags
else:
blk.release_state(flags)
def release_state(blk, flags, outlvl=0):
"""
Method to release state variables fixed during initialization.
Keyword Arguments:
flags : dict containing information of which state variables
were fixed during initialization, and should now be
unfixed. This dict is returned by initialize if
hold_state=True.
outlvl : sets output level of logging
"""
if flags is None:
return
# Unfix state variables
revert_state_vars(blk, flags)
# Activate state variable related constraints
for k in blk.keys():
if blk[k].config.defined_state is False:
blk[k].sum_component_eqn.activate()
init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="properties")
init_log.info_high('States released.')
@declare_process_block_class("SolidPhaseStateBlock",
block_class=_SolidPhaseStateBlock)
class SolidPhaseStateBlockData(StateBlockData):
"""
Property package for solid phase properties of methane combustion in CLC FR
"""
def build(self):
"""
Callable method for Block construction
"""
super(SolidPhaseStateBlockData, self).build()
# Object reference for molecular weight if needed by CV1D
# Molecular weights
add_object_reference(self, "mw_comp",
self.config.parameters.mw_comp)
self._make_state_vars()
def _make_state_vars(self):
"""List the necessary state variable objects."""
self.flow_mass = Var(initialize=1.0,
domain=Reals,
doc='Component mass flowrate [kg/s]',
units=pyunits.kg/pyunits.s)
self.particle_porosity = Var(domain=Reals,
initialize=0.27,
doc='Porosity of oxygen carrier [-]',
units=pyunits.m**3/pyunits.m**3)
self.mass_frac_comp = Var(
self._params.component_list,
initialize=1 / len(self._params.component_list),
doc='State component mass fractions [-]',
units=pyunits.kg/pyunits.kg)
self.temperature = Var(initialize=298.15,
domain=Reals,
doc='State temperature [K]',
units=pyunits.K)
# Create standard constraints
# Sum mass fractions if not inlet block
if self.config.defined_state is False:
def sum_component_eqn(b):
return 1e2 == 1e2 * sum(b.mass_frac_comp[j]
for j in b._params.component_list)
self.sum_component_eqn = Constraint(rule=sum_component_eqn)
def _dens_mass_skeletal(self):
# Skeletal density of OC solid particles
self.dens_mass_skeletal = Var(domain=Reals,
initialize=3251.75,
doc='Skeletal density of OC [kg/m3]',
units=pyunits.kg/pyunits.m**3)
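# The skeletal density is the mass-fraction-weighted harmonic mean of the
# pure-component skeletal densities:
#     1 / rho_skeletal = sum_j (x_j / rho_j)
# which the constraint below writes in residual form as
#     rho_skeletal * sum_j (x_j / rho_j) = 1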
def density_skeletal_constraint(b):
return (b.dens_mass_skeletal * sum(
b.mass_frac_comp[j] /
b._params.dens_mass_comp_skeletal[j]
for j in b._params.component_list) ==
1)
try:
# Try to build constraint
self.density_skeletal_constraint = Constraint(
rule=density_skeletal_constraint)
except AttributeError:
# If constraint fails, clean up so that DAE can try again later
self.del_component(self.dens_mass_skeletal)
self.del_component(self.density_skeletal_constraint)
raise
def _dens_mass_particle(self):
# Particle density of OC (includes the OC pores)
self.dens_mass_particle = Var(
domain=Reals,
initialize=3251.75,
doc='Particle density of oxygen carrier [kg/m3]',
units=pyunits.kg/pyunits.m**3)
def density_particle_constraint(b):
return (b.dens_mass_particle == (1 - b.particle_porosity) *
b.dens_mass_skeletal)
try:
# Try to build constraint
self.density_particle_constraint = Constraint(
rule=density_particle_constraint)
except AttributeError:
# If constraint fails, clean up so that DAE can try again later
self.del_component(self.dens_mass_particle)
self.del_component(self.density_particle_constraint)
raise
def _cp_mol_comp(self):
# Pure component solid heat capacities
self.cp_mol_comp = Var(
self._params.component_list,
domain=Reals,
initialize=1.0,
doc="Pure component solid heat capacities [kJ/mol.K]",
units=pyunits.kJ/pyunits.mol/pyunits.K)
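# Shomate heat capacity correlation (NIST form), with t = T/1000 in kK:
#     Cp [J/mol.K] = A + B*t + C*t^2 + D*t^3 + E/t^2
# The leading 1e-3 factor converts J/mol.K to kJ/mol.K.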
def pure_component_cp_mol(b, j):
return b.cp_mol_comp[j] == 1e-3*(
b._params.cp_param[j, 1] +
b._params.cp_param[j, 2]*(b.temperature*1e-3) +
b._params.cp_param[j, 3]*(b.temperature*1e-3)**2 +
b._params.cp_param[j, 4]*(b.temperature*1e-3)**3 +
b._params.cp_param[j, 5]/((b.temperature*1e-3)**2))
try:
# Try to build constraint
self.cp_shomate_eqn = Constraint(self._params.component_list,
rule=pure_component_cp_mol)
except AttributeError:
# If constraint fails, clean up so that DAE can try again later
self.del_component(self.cp_mol_comp)
self.del_component(self.cp_shomate_eqn)
raise
def _cp_mass(self):
# Mixture heat capacities
self.cp_mass = Var(domain=Reals,
initialize=1.0,
doc="Mixture heat capacity, mass-basis [kJ/kg.K]",
units=pyunits.kJ/pyunits.kg/pyunits.K)
def cp_mass(b):
return b.cp_mass == sum(b.cp_mol_comp[j]*b.mass_frac_comp[j]
* (1/b._params.mw_comp[j])
for j in b._params.component_list)
try:
# Try to build constraint
self.mixture_heat_capacity_eqn = Constraint(rule=cp_mass)
except AttributeError:
# If constraint fails, clean up so that DAE can try again later
self.del_component(self.cp_mass)
self.del_component(self.mixture_heat_capacity_eqn)
raise
def _enth_mol_comp(self):
# Pure component vapour enthalpies
self.enth_mol_comp = Var(
self._params.component_list,
domain=Reals,
initialize=1.0,
doc="Pure component enthalpies [kJ/mol]",
units=pyunits.kJ/pyunits.mol)
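# Shomate enthalpy correlation (NIST form), with t = T/1000 in kK:
#     H(T) - H(298.15 K) [kJ/mol] =
#         A*t + B*t^2/2 + C*t^3/3 + D*t^4/4 - E/t + F - H
# i.e. the component enthalpy is expressed relative to the 298.15 K
# reference state.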
def pure_comp_enthalpy(b, j):
return b.enth_mol_comp[j] == (
b._params.cp_param[j, 1]*(b.temperature*1e-3) +
b._params.cp_param[j, 2]*((b.temperature*1e-3)**2)/2 +
b._params.cp_param[j, 3]*((b.temperature*1e-3)**3)/3 +
b._params.cp_param[j, 4]*((b.temperature*1e-3)**4)/4 -
b._params.cp_param[j, 5]/(b.temperature*1e-3) +
b._params.cp_param[j, 6] -
b._params.cp_param[j, 8])
try:
# Try to build constraint
self.enthalpy_shomate_eqn = Constraint(self._params.component_list,
rule=pure_comp_enthalpy)
except AttributeError:
# If constraint fails, clean up so that DAE can try again later
self.del_component(self.enth_mol_comp)
self.del_component(self.enthalpy_shomate_eqn)
raise
def _enth_mass(self):
# Mixture mass enthalpy
self.enth_mass = Var(domain=Reals,
initialize=0.0,
doc='Mixture specific enthalpy [kJ/kg]',
units=pyunits.kJ/pyunits.kg)
try:
# Try to build constraint
self.mixture_enthalpy_eqn = Constraint(expr=(
self.enth_mass == sum(
self.mass_frac_comp[j] *
self.enth_mol_comp[j]
* (1/self._params.mw_comp[j])
for j in self._params.component_list
)))
except AttributeError:
# If constraint fails, clean up so that DAE can try again later
self.del_component(self.enth_mass)
self.del_component(self.mixture_enthalpy_eqn)
raise
def get_material_flow_terms(b, p, j):
return b.flow_mass*b.mass_frac_comp[j]
def get_enthalpy_flow_terms(b, p):
return b.flow_mass*b.enth_mass
def get_material_density_terms(b, p, j):
return b.dens_mass_particle * b.mass_frac_comp[j]
def get_energy_density_terms(b, p):
return b.dens_mass_particle * b.enth_mass
def define_state_vars(b):
return {"flow_mass": b.flow_mass,
'particle_porosity': b.particle_porosity,
"temperature": b.temperature,
"mass_frac_comp": b.mass_frac_comp}
def get_material_flow_basis(b):
return MaterialFlowBasis.mass
def model_check(blk):
"""
Model checks for property block
"""
# Check temperature bounds
if value(blk.temperature) < blk.temperature.lb:
_log.error('{} Temperature set below lower bound.'
.format(blk.name))
if value(blk.temperature) > blk.temperature.ub:
_log.error('{} Temperature set above upper bound.'
.format(blk.name))
def default_material_balance_type(blk):
return MaterialBalanceType.componentTotal
def default_energy_balance_type(blk):
return EnergyBalanceType.enthalpyTotal
| 43.391379
| 81
| 0.524655
|
82a5f1df01716b0c83a70610a9644f9257031d32
| 26,748
|
py
|
Python
|
pysurvival/models/multi_task.py
|
msloma144/pysurvival
|
4f8107f368b3865a3ed763f952dc6ee504bda599
|
[
"Apache-2.0"
] | null | null | null |
pysurvival/models/multi_task.py
|
msloma144/pysurvival
|
4f8107f368b3865a3ed763f952dc6ee504bda599
|
[
"Apache-2.0"
] | null | null | null |
pysurvival/models/multi_task.py
|
msloma144/pysurvival
|
4f8107f368b3865a3ed763f952dc6ee504bda599
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import torch
import numpy as np
import copy
import multiprocessing
from pysurvival import HAS_GPU
from pysurvival import utils
from pysurvival.utils import neural_networks as nn
from pysurvival.utils import optimization as opt
from pysurvival.models import BaseModel
import sys
# %matplotlib inline
class BaseMultiTaskModel(BaseModel):
""" Base class for all Multi-Task estimators:
* Multi-Task Logistic Regression model (MTLR)
* Neural Multi-Task Logistic Regression model (N-MTLR)
BaseMultiTaskModel shouldn't be used as is.
The underlying model is written in PyTorch.
The original Multi-Task model, a.k.a the Multi-Task Logistic Regression
model (MTLR), was first introduced by Chun-Nam Yu et al. in
*Learning Patient-Specific Cancer Survival Distributions as a Sequence of
Dependent Regressors*
The Neural Multi-Task Logistic Regression model (N-MTLR) was developed
by S. Fotso in the paper *Deep Neural Networks for Survival
Analysis Based on a Multi-Task Framework*, allowing the use of
Neural Networks within the original design.
Parameters
----------
* `structure`: **list of dictionaries** --
Provides the structure of the MLP built within the N-MTLR.
ex: `structure = [ {'activation': 'ReLU', 'num_units': 128}, ]`.
Each dictionary corresponds to a fully connected hidden layer:
* `num_units` is the number of hidden units in this layer
* `activation` is the activation function that will be used.
The list of all available activation functions can be found below:
* Atan
* BentIdentity
* BipolarSigmoid
* CosReLU
* ELU
* Gaussian
* Hardtanh
* Identity
* InverseSqrt
* LeakyReLU
* LeCunTanh
* LogLog
* LogSigmoid
* ReLU
* SELU
* Sigmoid
* Sinc
* SinReLU
* Softmax
* Softplus
* Softsign
* Swish
* Tanh
In case there are more than one dictionary,
each hidden layer will be applied in the resulting MLP,
using the order it is provided in the structure:
ex: structure = [ {'activation': 'relu', 'num_units': 128},
{'activation': 'tanh', 'num_units': 128}, ]
* `bins`: **int** *(default=100)* --
Number of subdivisions of the time axis
* `auto_scaler`: **boolean** *(default=True)* --
Determines whether a sklearn scaler should be automatically applied
"""
def __init__(self, structure, bins=100, auto_scaler=True):
# Saving the attributes
self.loss_values = []
self.bins = bins
self.structure = structure
# Initializing the elements from BaseModel
super(BaseMultiTaskModel, self).__init__(auto_scaler)
def get_times(self, T, is_min_time_zero=True, extra_pct_time=0.1):
""" Building the time axis (self.times) as well as the time intervals
( all the [ t(k-1), t(k) ) in the time axis.
"""
# Setting the min_time and max_time
max_time = max(T)
if is_min_time_zero:
min_time = 0.
else:
min_time = min(T)
# Setting optional extra percentage time
if 0. <= extra_pct_time <= 1.:
p = extra_pct_time
else:
raise Exception("extra_pct_time has to be between [0, 1].")
# Building time points and time buckets
self.times = np.linspace(min_time, max_time * (1. + p), self.bins)
self.get_time_buckets()
self.num_times = len(self.time_buckets)
def compute_XY(self, X, T, E, is_min_time_zero, extra_pct_time):
""" Given the survival_times, events and time_points vectors,
it returns a ndarray of the encodings for all units
such that:
Y = [[0, 0, 1, 0, 0], # unit experienced an event at t = 3
[0, 1, 0, 0, 0], # unit experienced an event at t = 2
[0, 1, 1, 1, 1],] # unit was censored at t = 2
"""
# building times axis
self.get_times(T, is_min_time_zero, extra_pct_time)
n_units = T.shape[0]
# Initializing the output variable
Y_cens, Y_uncens = [], []
X_cens, X_uncens = [], []
if isinstance(X, list):
for input_ in X:
X_cens.append([])
X_uncens.append([])
# Building the output variable
for i, (t, e) in enumerate(zip(T, E)):
y = np.zeros(self.num_times + 1)
min_abs_value = [abs(a_j_1 - t) for (a_j_1, a_j) in self.time_buckets]
index = np.argmin(min_abs_value)
if e == 1:
y[index] = 1.
if isinstance(X, list):
for j, input_ in enumerate(X):
X_uncens[j].append(input_[i, :].tolist())
else:
X_uncens.append(X[i, :].tolist())
Y_uncens.append(y.tolist())
else:
y[(index):] = 1.
if isinstance(X, list):
for j, input_ in enumerate(X):
X_cens[j].append(input_[i, :].tolist())
else:
X_cens.append(X[i, :].tolist())
Y_cens.append(y.tolist())
# Transform into torch.Tensor
if isinstance(X, list):
for j, input_ in enumerate(X_cens):
X_cens[j] = torch.FloatTensor(input_)
if torch.cuda.is_available():
X_cens[j] = X_cens[j].cuda()
for j, input_ in enumerate(X_uncens):
X_uncens[j] = torch.FloatTensor(input_)
if torch.cuda.is_available():
X_uncens[j] = X_uncens[j].cuda()
else:
X_cens = torch.FloatTensor(X_cens)
X_uncens = torch.FloatTensor(X_uncens)
if torch.cuda.is_available():
X_cens = X_cens.cuda()
X_uncens = X_uncens.cuda()
Y_cens = torch.FloatTensor(Y_cens)
Y_uncens = torch.FloatTensor(Y_uncens)
if torch.cuda.is_available():
Y_cens = Y_cens.cuda()
Y_uncens = Y_uncens.cuda()
return X_cens, X_uncens, Y_cens, Y_uncens
def loss_function(self, model, X_cens, X_uncens, Y_cens, Y_uncens,
Triangle, l2_reg, l2_smooth, min_clamp_value=1e-8,
max_clamp_value=torch.finfo(torch.float32).max-1):
""" Computes the loss function of the any MTLR model.
All the operations have been vectorized to ensure optimal speed
"""
score_cens = model(X_cens)
score_uncens = model(X_uncens)
# Likelihood Calculations -- Uncensored
temp = torch.clamp(torch.mm(score_uncens, Triangle), min=1e-8, max=88.5)
phi_uncens = torch.clamp(torch.exp(temp), max=max_clamp_value)
reduc_phi_uncens = torch.sum(phi_uncens * Y_uncens, dim=1)
# Likelihood Calculations -- Censored
temp = torch.clamp(torch.mm(score_cens, Triangle), min=1e-8, max=88.5)
phi_cens = torch.clamp(torch.exp(temp), max=max_clamp_value)
reduc_phi_cens = torch.sum(phi_cens * Y_cens, dim=1)
# Likelihood Calculations -- Normalization
temp = torch.clamp(torch.mm(score_uncens, Triangle), min=1e-8, max=88.5)
z_uncens = torch.clamp(torch.exp(temp), max=max_clamp_value)
reduc_z_uncens = torch.sum(z_uncens, dim=1)
temp = torch.clamp(torch.mm(score_cens, Triangle), min=1e-8, max=88.5)
z_cens = torch.clamp(torch.exp(temp), max=max_clamp_value)
reduc_z_cens = torch.sum(z_cens, dim=1)
reduc_phi_uncens = torch.clamp(reduc_phi_uncens, min=min_clamp_value, max=max_clamp_value)
reduc_phi_cens = torch.clamp(reduc_phi_cens, min=min_clamp_value, max=max_clamp_value)
reduc_z_uncens = torch.clamp(reduc_z_uncens, min=min_clamp_value, max=max_clamp_value)
reduc_z_cens = torch.clamp(reduc_z_cens, min=min_clamp_value, max=max_clamp_value)
# MTLR cost function
loss = - (
torch.sum(torch.log(reduc_phi_uncens))
+ torch.sum(torch.log(reduc_phi_cens))
- torch.sum(torch.log(reduc_z_uncens))
- torch.sum(torch.log(reduc_z_cens))
)
# print(f"loss_loss: {loss}")
# Adding the regularized loss
nb_set_parameters = len(list(model.parameters()))
for i, w in enumerate(model.parameters()):
loss += l2_reg * torch.sum(w * w) / 2.
if i >= nb_set_parameters - 2:
loss += l2_smooth * norm_diff(w)
return loss
def fit(self, X, T, E, init_method='glorot_uniform', optimizer='adam',
lr=1e-4, num_epochs=1000, dropout=0.2, l2_reg=1e-2,
l2_smooth=1e-2, batch_normalization=False, bn_and_dropout=False,
verbose=True, extra_pct_time=0.1, is_min_time_zero=True, max_norm=1.0,
min_clamp_value=1e-8, max_clamp_value=torch.finfo(torch.float32).max -1):
""" Fit the estimator based on the given parameters.
Parameters:
-----------
* `X` : **array-like**, *shape=(n_samples, n_features)* --
The input samples.
* `T` : **array-like** --
The target values describing when the event of interest or censoring
occurred.
* `E` : **array-like** --
The values that indicate if the event of interest occurred i.e.:
E[i]=1 corresponds to an event, and E[i] = 0 means censoring,
for all i.
* `init_method` : **str** *(default = 'glorot_uniform')* --
Initialization method to use. Here are the possible options:
* `glorot_uniform`: Glorot/Xavier uniform initializer
* `he_uniform`: He uniform variance scaling initializer
* `uniform`: Initializing tensors with uniform (-1, 1) distribution
* `glorot_normal`: Glorot normal initializer,
* `he_normal`: He normal initializer.
* `normal`: Initializing tensors with standard normal distribution
* `ones`: Initializing tensors to 1
* `zeros`: Initializing tensors to 0
* `orthogonal`: Initializing tensors with a orthogonal matrix,
* `optimizer`: **str** *(default = 'adam')* --
iterative method for optimizing a differentiable objective function.
Here are the possible options:
- `adadelta`
- `adagrad`
- `adam`
- `adamax`
- `rmsprop`
- `sparseadam`
- `sgd`
* `lr`: **float** *(default=1e-4)* --
learning rate used in the optimization
* `num_epochs`: **int** *(default=1000)* --
The number of iterations in the optimization
* `dropout`: **float** *(default=0.2)* --
Randomly sets a fraction of the input units to 0
at each update during training time, which helps prevent overfitting.
* `l2_reg`: **float** *(default=1e-2)* --
L2 regularization parameter for the model coefficients
* `l2_smooth`: **float** *(default=1e-2)* --
Second L2 regularizer that ensures the parameters vary smoothly
across consecutive time points.
* `batch_normalization`: **bool** *(default=False)* --
Whether or not to apply Batch Normalization
* `bn_and_dropout`: **bool** *(default=False)* --
Whether to apply Batch Normalization and Dropout at the same time
* `display_loss`: **bool** *(default=True)* --
Whether or not to show the loss function values at each update
* `verbose`: **bool** *(default=True)* --
Whether or not to produce detailed logging about the modeling
* `extra_pct_time`: **float** *(default=0.1)* --
Providing an extra fraction of time in the time axis
* `is_min_time_zero`: **bool** *(default=True)* --
Whether the time axis starts at 0
* `max_norm`: **float** *(default=1.0)* --
Max l2 norm for gradient clipping
**Returns:**
* self : object
Example:
--------
#### 1 - Importing packages
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from pysurvival.models.simulations import SimulationModel
from pysurvival.models.multi_task import LinearMultiTaskModel
from pysurvival.utils.metrics import concordance_index
#%matplotlib inline # To use with Jupyter notebooks
#### 2 - Generating the dataset from a Weibull parametric model
# Initializing the simulation model
sim = SimulationModel( survival_distribution = 'Weibull',
risk_type = 'linear',
censored_parameter = 10.0,
alpha = .01, beta = 3.0 )
# Generating N random samples
N = 1000
dataset = sim.generate_data(num_samples = N, num_features = 3)
# Showing a few data-points
time_column = 'time'
event_column = 'event'
dataset.head(2)
#### 3 - Creating the modeling dataset
# Defining the features
features = sim.features
# Building training and testing sets #
index_train, index_test = train_test_split( range(N), test_size = 0.2)
data_train = dataset.loc[index_train].reset_index( drop = True )
data_test = dataset.loc[index_test].reset_index( drop = True )
# Creating the X, T and E input
X_train, X_test = data_train[features], data_test[features]
T_train, T_test = data_train['time'].values, data_test['time'].values
E_train, E_test = data_train['event'].values, data_test['event'].values
#### 4 - Initializing a MTLR model and fitting the data.
# Building a Linear model
mtlr = LinearMultiTaskModel(bins=50)
mtlr.fit(X_train, T_train, E_train, lr=5e-3, init_method='orthogonal')
# Building a Neural MTLR
# structure = [ {'activation': 'Swish', 'num_units': 150}, ]
# mtlr = NeuralMultiTaskModel(structure=structure, bins=150)
# mtlr.fit(X_train, T_train, E_train, lr=5e-3, init_method='orthogonal')
#### 5 - Cross Validation / Model Performances
c_index = concordance_index(mtlr, X_test, T_test, E_test) #0.95
print('C-index: {:.2f}'.format(c_index))
"""
# Checking data format (i.e.: transforming into numpy array)
X, T, E = utils.check_data(X, T, E)
input_shape = []
# Extracting data parameters
if isinstance(X, list):
nb_inputs = len(X)
for data in X:
nb_units, num_vars = data.shape
input_shape.append(num_vars)
# Scaling data
if self.auto_scaler:
for index, data in enumerate(X):
X[index] = self.scaler.fit_transform(data)
else:
nb_inputs = 1
nb_units, self.num_vars = X.shape
input_shape.append(self.num_vars)
# Scaling data
if self.auto_scaler:
X = self.scaler.fit_transform(X)
# Building the time axis, time buckets and output Y
X_cens, X_uncens, Y_cens, Y_uncens \
= self.compute_XY(X, T, E, is_min_time_zero, extra_pct_time)
# Initializing the model
model = nn.NeuralNet(input_shape, self.num_times, self.structure,
init_method, dropout, batch_normalization,
bn_and_dropout)
# Creating the Triangular matrix
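# np.tri gives a lower-triangular 0/1 matrix, so multiplying the
# per-interval scores by it produces, for each interval, the sum of the
# scores for that interval and all later ones - the cumulative quantities
# required by the MTLR likelihood in loss_function above.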
Triangle = np.tri(self.num_times, self.num_times + 1, dtype=np.float32)
Triangle = torch.FloatTensor(Triangle)
if torch.cuda.is_available():
model = model.cuda()
Triangle = Triangle.cuda()
# Performing order 1 optimization
model, loss_values = opt.optimize(self.loss_function, model, optimizer,
lr, num_epochs, verbose, X_cens=X_cens, X_uncens=X_uncens,
Y_cens=Y_cens, Y_uncens=Y_uncens, Triangle=Triangle,
l2_reg=l2_reg, l2_smooth=l2_smooth, max_norm=max_norm,
min_clamp_value=min_clamp_value, max_clamp_value=max_clamp_value)
# Saving attributes
self.model = model.eval()
self.loss_values = loss_values
return self
def predict(self, x, t=None):
""" Predicting the hazard, density and survival functions
Parameters:
----------
* `x` : **array-like** *shape=(n_samples, n_features)* --
array-like representing the datapoints.
x should not be standardized before, the model
will take care of it
* `t`: **double** *(default=None)* --
time at which the prediction should be performed.
If None, then return the function for all available t.
"""
# Convert x into the right format
x = utils.check_data(x)
# Scaling the data
if self.auto_scaler:
if x.ndim == 1:
x = self.scaler.transform(x.reshape(1, -1))
elif x.ndim == 2:
x = self.scaler.transform(x)
x = torch.FloatTensor(x)
if torch.cuda.is_available():
x = x.cuda()
else:
# Ensuring x has 2 dimensions
if isinstance(x, list):
for index in range(len(x)):
if x[index].ndim == 1:
x[index] = np.reshape(x[index], (1, -1))
# Transforming into pytorch objects
x[index] = torch.FloatTensor(x[index])
if torch.cuda.is_available():
x[index] = x[index].cuda()
else:
if x.ndim == 1:
x = np.reshape(x, (1, -1))
# Transforming into pytorch objects
x = torch.FloatTensor(x)
if torch.cuda.is_available():
x = x.cuda()
# Predicting using linear/nonlinear function
score_torch = self.model(x)
score = score_torch.data.cpu().numpy()
# Creating the time triangles
Triangle1 = np.tri(self.num_times, self.num_times + 1)
Triangle2 = np.tri(self.num_times + 1, self.num_times + 1)
# Calculating the score, density, hazard and Survival
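# phi[:, j] is proportional to the probability of the event falling in
# interval j; dividing by the row sum (div) normalizes it into a density.
# Multiplying the density by Triangle2 sums it over the current and all
# later intervals, giving the survival curve, and the hazard is the
# density divided by the survival of the following interval.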
phi = np.exp(np.dot(score, Triangle1))
div = np.repeat(np.sum(phi, 1).reshape(-1, 1), phi.shape[1], axis=1)
density = (phi / div)
Survival = np.dot(density, Triangle2)
hazard = density[:, :-1] / Survival[:, 1:]
# Returning the full functions or just one time point
if t is None:
return hazard, density, Survival
else:
min_abs_value = [abs(a_j_1 - t) for (a_j_1, a_j) in self.time_buckets]
index = np.argmin(min_abs_value)
return hazard[:, index], density[:, index], Survival[:, index]
def predict_risk(self, x, use_log=False):
""" Computing the risk score
Parameters:
-----------
* `x` : **array-like** *shape=(n_samples, n_features)* --
array-like representing the datapoints.
x should not be standardized before, the model
will take care of it
* `use_log`: **bool** *(default=False)* --
Applies the log function to the risk values
"""
risk = super(BaseMultiTaskModel, self).predict_risk(x)
if use_log:
return np.log(risk)
else:
return risk
class LinearMultiTaskModel(BaseMultiTaskModel):
""" LinearMultiTaskModel is the original Multi-Task model,
a.k.a the Multi-Task Logistic Regression model (MTLR).
It was first introduced by Chun-Nam Yu et al. in
Learning Patient-Specific Cancer Survival Distributions
as a Sequence of Dependent Regressors
Reference:
----------
* http://www.cs.cornell.edu/~cnyu/papers/nips11_survival.pdf
Parameters:
----------
* bins: int
Number of subdivisions of the time axis
* auto_scaler: boolean (default=True)
Determines whether a sklearn scaler should be automatically
applied
"""
def __init__(self, bins=100, auto_scaler=True):
super(LinearMultiTaskModel, self).__init__(
structure=None, bins=bins, auto_scaler=auto_scaler)
def fit(self, X, T, E, init_method='glorot_uniform', optimizer='adam',
lr=1e-4, num_epochs=1000, l2_reg=1e-2, l2_smooth=1e-2,
verbose=True, extra_pct_time=0.1, is_min_time_zero=True, max_norm=1.0,
min_clamp_value=1e-8, max_clamp_value=torch.finfo(torch.float32).max-1):
super(LinearMultiTaskModel, self).fit(X=X, T=T, E=E,
init_method=init_method, optimizer=optimizer,
lr=lr, num_epochs=num_epochs, dropout=None, l2_reg=l2_reg,
l2_smooth=l2_smooth, batch_normalization=False,
bn_and_dropout=False, verbose=verbose,
extra_pct_time=extra_pct_time, is_min_time_zero=is_min_time_zero,
max_norm=max_norm,
min_clamp_value=min_clamp_value, max_clamp_value=max_clamp_value)
return self
class NeuralMultiTaskModel(BaseMultiTaskModel):
""" NeuralMultiTaskModel is the Neural Multi-Task Logistic Regression
model (N-MTLR), developed by S. Fotso in
Deep Neural Networks for Survival Analysis Based on a
Multi-Task Framework,
allowing the use of Neural Networks within the original design.
Reference:
----------
* https://arxiv.org/pdf/1801.05512
Parameters:
----------
* `structure`: **list of dictionaries** --
Provides the structure of the MLP built within the N-MTLR.
ex: `structure = [ {'activation': 'ReLU', 'num_units': 128}, ]`.
Each dictionary corresponds to a fully connected hidden layer:
* `num_units` is the number of hidden units in this layer
* `activation` is the activation function that will be used.
The list of all available activation functions can be found below:
* Atan
* BentIdentity
* BipolarSigmoid
* CosReLU
* ELU
* Gaussian
* Hardtanh
* Identity
* InverseSqrt
* LeakyReLU
* LeCunTanh
* LogLog
* LogSigmoid
* ReLU
* SELU
* Sigmoid
* Sinc
* SinReLU
* Softmax
* Softplus
* Softsign
* Swish
* Tanh
In case there are more than one dictionary,
each hidden layer will be applied in the resulting MLP,
using the order it is provided in the structure:
ex: structure = [ {'activation': 'relu', 'num_units': 128},
{'activation': 'tanh', 'num_units': 128}, ]
* `bins`: **int** *(default=100)* --
Number of subdivisions of the time axis
* `auto_scaler`: **boolean** *(default=True)* --
Determines whether a sklearn scaler should be automatically applied
"""
def __init__(self, structure, bins=100, auto_scaler=True):
# Checking the validity of structure
structure = nn.check_mlp_structure(structure)
# print(structure)
# Initializing the instance
super(NeuralMultiTaskModel, self).__init__(
structure=structure, bins=bins, auto_scaler=auto_scaler)
def __repr__(self):
""" Representing the class object """
if self.structure is None:
super(NeuralMultiTaskModel, self).__repr__()
return self.name
else:
S = len(self.structure)
self.name = self.__class__.__name__
empty = len(self.name)
self.name += '( '
for i, s in enumerate(self.structure):
if isinstance(s, list):
for s_ in s:
n = 'Layer({}): '.format(i + 1)
activation = nn.activation_function(s_['activation'],
return_text=True)
n += 'activation = {}, '.format(s_['activation'])
if 'num_units' in s_.keys():
n += 'units = {} '.format(s_['num_units'])
if i != S - 1:
self.name += n + '; \n'
self.name += empty * ' ' + ' '
else:
self.name += n
self.name = self.name + ')'
else:
n = 'Layer({}): '.format(i + 1)
activation = nn.activation_function(s['activation'],
return_text=True)
n += 'activation = {}, '.format(s['activation'])
if 'num_units' in s.keys():
n += 'units = {} '.format(s['num_units'])
if i != S - 1:
self.name += n + '; \n'
self.name += empty * ' ' + ' '
else:
self.name += n
self.name = self.name + ')'
return self.name
def norm_diff(W):
""" Special norm function for the last layer of the MTLR """
dims = len(W.shape)
if dims == 1:
diff = W[1:] - W[:-1]
elif dims == 2:
diff = W[1:, :] - W[:-1, :]
return torch.sum(diff * diff)
| 38.320917
| 111
| 0.5532
|
a3b241dc602e766afd44be5031adafb64eabf4a7
| 14,388
|
py
|
Python
|
train.py
|
miliadis/DeepVideoCS
|
760851192d6b5a7b21ea05b3c202db02f39276f5
|
[
"BSD-2-Clause"
] | 65
|
2017-12-16T09:53:17.000Z
|
2021-12-22T12:30:00.000Z
|
train.py
|
miliadis/DeepVideoCS
|
760851192d6b5a7b21ea05b3c202db02f39276f5
|
[
"BSD-2-Clause"
] | 4
|
2019-07-28T03:42:44.000Z
|
2021-03-03T02:11:37.000Z
|
train.py
|
miliadis/DeepVideoCS
|
760851192d6b5a7b21ea05b3c202db02f39276f5
|
[
"BSD-2-Clause"
] | 25
|
2018-01-19T13:32:06.000Z
|
2021-10-04T06:21:58.000Z
|
import argparse
import os
import time
import logging
from utils.log import setup_logging, ResultsLog, save_checkpoint, results_add
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim.lr_scheduler as sc
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import models
import datasets
import numpy as np
import utils.metrics as metrics
import random
from torch.autograd import Variable
import layers.loss_functions as loss
from bisect import bisect_right
parser = argparse.ArgumentParser(
description='PyTorch Video Compressive Sensing - Training')
parser.add_argument('data_train', help='path to training dataset')
parser.add_argument('data_val', help='path to validation dataset')
parser.add_argument('--hdf5', action='store_true', default=False)
parser.add_argument('--mean', default=None, help='Mean file')
parser.add_argument('--std', default=None, help='Standard deviation file')
parser.add_argument('--workers', default=0, type=int,
help='number of data loading workers (default: 0)')
parser.add_argument('--gpus', type=int, nargs='+',
help='GPUs list: e.g., 0 1', default=[0])
# Model params
parser.add_argument('arch', help='choose model name', default='fcnet')
parser.add_argument('layers_k', type=int, default=7,
help='number of FC layers in decoder')
parser.add_argument('--pretrained_net', help='pre-trained model path')
parser.add_argument('--mask_path', default=None,
help='provide a pre-defined compressive sensing mask')
parser.add_argument('--bernoulli_p', type=int, default=40,
help='percentage of 1s for creating mask')
parser.add_argument('--block_opts', type=int, nargs='+',
help='Item order: (temporal size, spatial size, video chunks)', default=[16, 8, 1])
parser.add_argument('--block_overlap', action='store_false',
help='overlapping blocks or not')
parser.add_argument('--noise', type=int,
help='Noise Level in dB: e.g., 20, 30, 40', default=None)
parser.add_argument('--seed', type=int, default=5347, help='random seed')
# Optimization
parser.add_argument('--epochs', default=1000, type=int,
help='number of total epochs to run')
parser.add_argument('--batch-size', default=200, type=int,
help='mini-batch size (default: 200)')
parser.add_argument('--encoder_lr', default=0.1, type=float,
help='initial learning rate for encoder')
parser.add_argument('--decoder_lr', default=0.01, type=float,
help='initial learning rate for decoder')
parser.add_argument('--encoder_annual', type=float, nargs='+',
help='Item order: (divide by, for every # epochs, until epoch #, then lr=0)', default=[0.5, 10, 400])
parser.add_argument('--decoder_annual', type=float, nargs='+',
help='Item order: (divide by, at epoch [#])', default=[0.1, 400])
parser.add_argument('--gradient_clipping', default=10, type=int,
help='gradient clipping to prevent explosion')
parser.add_argument('--momentum', default=0.9, type=float,
help='momentum')
parser.add_argument('--weight-decay', default=0, type=float,
help='weight decay (default: 0)')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
# Monitoring
parser.add_argument('--print-freq', default=1000, type=int,
help='print frequency (default: 1000)')
parser.add_argument('--results_dir', default='./results', help='results dir')
parser.add_argument('--save', default='', help='folder to save checkpoints')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
best_psnr = 0
def main():
global args, best_psnr
args = parser.parse_args()
# massage args
block_opts = []
block_opts = args.block_opts
block_opts.append(args.block_overlap)
time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if args.save == '':
args.save = time_stamp
save_path = os.path.join(args.results_dir, args.save)
if not os.path.exists(save_path):
os.makedirs(save_path)
setup_logging(os.path.join(save_path, 'log_%s.txt' % time_stamp))
results_file = os.path.join(save_path, 'results.%s')
results = ResultsLog(results_file % 'csv', results_file % 'html')
logging.info("saving to %s", save_path)
logging.debug("run arguments: %s", args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if args.encoder_lr > 0:
encoder_learn = True
else:
encoder_learn = False
# create model
if args.pretrained_net is not None:
logging.info("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](
block_opts, pretrained=args.pretrained_net, mask_path=args.mask_path, mean=args.mean, std=args.std,
noise=args.noise, encoder_learn=encoder_learn, p=args.bernoulli_p, K=args.layers_k)
else:
logging.info("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](
block_opts, mask_path=args.mask_path, mean=args.mean, std=args.std,
noise=args.noise, encoder_learn=encoder_learn, p=args.bernoulli_p, K=args.layers_k)
model = torch.nn.DataParallel(model, device_ids=args.gpus).cuda()
# define loss function (criterion) and optimizer
mseloss = loss.EuclideanDistance(args.batch_size)
# annual schedule
if encoder_learn:
optimizer = torch.optim.SGD([
{'params': model.module.measurements.parameters(), 'lr': args.encoder_lr},
{'params': model.module.reconstruction.parameters()}],
args.decoder_lr, momentum=args.momentum, weight_decay=args.weight_decay)
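# With the default *_annual settings, lambda1 multiplies the encoder LR by
# encoder_annual[0] every encoder_annual[1] epochs and sets it to zero from
# epoch encoder_annual[2] onward; lambda2 multiplies the decoder LR by
# decoder_annual[0] once epoch decoder_annual[1] is reached.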
def lambda1(epoch): return 0.0 if epoch >= args.encoder_annual[2] else (
args.encoder_annual[0] ** bisect_right(range(args.encoder_annual[1], args.encoder_annual[2], args.encoder_annual[1]), epoch))
def lambda2(epoch): return args.decoder_annual[0] ** bisect_right([args.decoder_annual[1]], epoch)
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=[lambda1, lambda2])
else:
optimizer = torch.optim.SGD([
{'params': model.module.reconstruction.parameters()}],
args.decoder_lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[args.decoder_annual[1]], gamma=args.decoder_annual[0])
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
logging.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_psnr = checkpoint['best_psnr']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logging.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logging.info("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
train_loader = torch.utils.data.DataLoader(
datasets.videocs.VideoCS(args.data_train, args.block_opts, transforms.Compose([
transforms.ToTensor(),
]), hdf5=args.hdf5),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.videocs.VideoCS(args.data_val, args.block_opts, transforms.Compose([
transforms.ToTensor(),
]), hdf5=False),
batch_size=1, shuffle=False,
num_workers=0, pin_memory=True)
# Save initial mask
if encoder_learn:
initial_weights = binarization(
model.module.measurements.weight.clone())
perc_1 = initial_weights.mean().cpu().item()
logging.info('Percentage of 1: {}'.format(perc_1))
np.save(save_path + '/initial_mask.npy',
model.module.measurements.weight.clone())
else:
# binarize weights
model.module.measurements.binarization()
perc_1 = model.module.measurements.weight.clone().mean().cpu().item()
logging.info('Percentage of 1: {}'.format(perc_1))
# perform first validation
validate(val_loader, model, encoder_learn)
for epoch in range(args.start_epoch, args.epochs):
logging.info(scheduler.get_last_lr())
if encoder_learn:
save_binary_weights_before = binarization(
model.module.measurements.weight.clone())
# train for one epoch
train_loss = train(train_loader, model, optimizer, epoch,
mseloss, encoder_learn, args.gradient_clipping)
# Annual schedule enforcement
scheduler.step()
if encoder_learn:
save_binary_weights_after = binarization(
model.module.measurements.weight.clone())
diff = int(torch.abs(save_binary_weights_after -
save_binary_weights_before).sum().cpu().item())
perc_1 = save_binary_weights_after.mean().cpu().item()
logging.info(
'Binary Weights Changed: {} - Percentage of 1: {}'.format(diff, perc_1))
else:
perc_1 = model.module.measurements.weight.clone().mean().cpu().item()
logging.info('Percentage of 1: {}'.format(perc_1))
# evaluate on validation set
psnr = validate(val_loader, model, encoder_learn)
# remember best psnr and save checkpoint
is_best = psnr > best_psnr
best_psnr = max(psnr, best_psnr)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_psnr': best_psnr,
'optimizer': optimizer.state_dict(),
}, is_best, path=save_path)
results_add(epoch, results, train_loss, psnr)
if encoder_learn:
model.module.measurements.restore()
def binarization(weights):
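# Deterministic binarization of the sensing mask: clamp the real-valued
# weights to [-1, 1], map their sign to {0, 1}, and resolve exact zeros
# (which would land on 0.5) to 1.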
weights = weights.clamp(-1.0, 1.0)
weights = 0.5 * (weights.sign() + 1)
weights[weights == 0.5] = 1
return weights
def train(train_loader, model, optimizer, epoch, mseloss, encoder_learn, gradient_clip):
batch_time = metrics.AverageMeter()
data_time = metrics.AverageMeter()
losses = metrics.AverageMeter()
psnr = metrics.AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (video_blocks, pad_block_size, block_shape) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = video_blocks.cuda()
input_var = Variable(video_blocks.cuda())
target_var = Variable(target)
# compute output
model.module.pad_frame_size = pad_block_size.numpy()
model.module.patch_shape = block_shape.numpy()
if encoder_learn:
model.module.measurements.binarization()
output, y = model(input_var)
loss = mseloss.compute_loss(output, target_var)
# record loss
losses.update(loss.item(), video_blocks.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
if encoder_learn:
# restore real-valued weights
model.module.measurements.restore()
nn.utils.clip_grad_norm_(model.module.parameters(), gradient_clip)
else:
nn.utils.clip_grad_norm_(
model.module.reconstruction.parameters(), gradient_clip)
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
logging.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
return losses.avg
def validate(val_loader, model, encoder_learn):
batch_time = metrics.AverageMeter()
psnr = metrics.AverageMeter()
# switch to evaluate mode
model.cuda()
model.eval()
# binarize weights
if encoder_learn:
model.module.measurements.binarization()
end = time.time()
for i, (video_frames, pad_frame_size, patch_shape) in enumerate(val_loader):
video_input = video_frames.cuda()
print(val_loader.dataset.videos[i])
# compute output
model.module.pad_frame_size = pad_frame_size.numpy()
model.module.patch_shape = patch_shape.numpy()
reconstructed_video, y = model(video_input)
# original video
reconstructed_video = reconstructed_video.cpu().data.numpy()
original_video = video_input.cpu().data.numpy()
# measure accuracy and record loss
psnr_video = metrics.psnr_accuracy(reconstructed_video, original_video)
psnr.update(psnr_video, video_frames.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
logging.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'PSNR {psnr.val:.3f} ({psnr.avg:.3f})'.format(
i + 1, len(val_loader), batch_time=batch_time,
psnr=psnr))
# restore real-valued weights
if encoder_learn:
model.module.measurements.restore()
print(' * PSNR {psnr.avg:.3f}'.format(psnr=psnr))
return psnr.avg
if __name__ == '__main__':
main()
| 38.470588
| 137
| 0.637962
|
265e1931e75c859082cf39be72f7d57b0148131b
| 464
|
py
|
Python
|
dev_01.py
|
soundmaking/led8x8m
|
383fe39c9e328951a25fd23298a4a4c11e8c964e
|
[
"MIT"
] | null | null | null |
dev_01.py
|
soundmaking/led8x8m
|
383fe39c9e328951a25fd23298a4a4c11e8c964e
|
[
"MIT"
] | null | null | null |
dev_01.py
|
soundmaking/led8x8m
|
383fe39c9e328951a25fd23298a4a4c11e8c964e
|
[
"MIT"
] | null | null | null |
from time import sleep
from random import randint
from led8x8m import LedMatrix
ledmx = LedMatrix()
def rand_xy():
x = randint(0, 7)
y = randint(0, 7)
return x, y
def rand_list():
my_list = []
for _ in range(25):
x, y = rand_xy()
my_list.append((x, y))
return my_list
while 1:
this_list = rand_list()
for _ in range(15):
for x, y in this_list:
ledmx.xy_on(x, y)
sleep(0.00125)
| 16.571429
| 30
| 0.571121
|
8cac9d798a05b0d5ae5f00b7ecc6be6b3a3b82b4
| 5,565
|
py
|
Python
|
Data Visualization/School/code.py
|
ALDOR99/Python
|
a76f37bb3e573cd3fdcfc19f4f73494cafa9140e
|
[
"MIT"
] | 2
|
2021-05-27T19:13:02.000Z
|
2021-06-02T13:26:35.000Z
|
Data Visualization/School/code.py
|
ALDOR99/Python
|
a76f37bb3e573cd3fdcfc19f4f73494cafa9140e
|
[
"MIT"
] | null | null | null |
Data Visualization/School/code.py
|
ALDOR99/Python
|
a76f37bb3e573cd3fdcfc19f4f73494cafa9140e
|
[
"MIT"
] | 1
|
2021-06-07T18:17:35.000Z
|
2021-06-07T18:17:35.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 13 17:27:37 2021
@author: ali_d
"""
#school
import numpy as np
import pandas as pd
# plotly
from plotly.offline import init_notebook_mode, iplot, plot
import plotly as py
init_notebook_mode(connected=True)
import plotly.graph_objs as go
from wordcloud import WordCloud
# matplotlib
import matplotlib.pyplot as plt
data = pd.read_csv("cwurData.csv")
#data1 = pd.read_csv("education_expenditure_supplementary_data.csv")
data2 = pd.read_csv("educational_attainment_supplementary_data.csv")
data3 = pd.read_csv("school_and_country_table.csv")
data4 = pd.read_csv("shanghaiData.csv")
data5 = pd.read_csv("timesData.csv")
# world_rank = world ranking
# university_name = university name
# country = country
# teaching = teaching
# international = international
# research = research
# citations = citations
# income = income
# total_score = total score
# num_students = number of students
# student_staff_ratio = student-to-staff ratio
# international_students = international students
# female_male_ratio = female-to-male ratio
# year = year
print(data5.head())
print(data5.info())
print()
#%% Line Charts
df = data5.iloc[:100,:]
trace1 = go.Scatter(
x = df.world_rank,
y = df.citations,
mode = "lines",
name = "citations",
marker = dict(color = 'rgba(16, 112, 2, 0.8)'),
text= df.university_name)
trace2 = go.Scatter(
x = df.world_rank,
y = df.teaching,
mode = "lines+markers",
name = "teaching",
marker = dict(color = 'rgba(80, 26, 80, 0.8)'),
text= df.university_name)
data=[trace1,trace2]
layout = dict(title ="Citation and Teaching vs World Rank of Top 100 Universities",
xaxis=dict(title="World Rank",ticklen=5,zeroline =False)
)
fig = dict(data=data,layout=layout)
plot(fig)
#%%
df2 = data5.iloc[:100,:]
a = go.Scatter(
x = df2.world_rank,
y = df2.research,
mode = "lines",
name = "world_rank",
marker = dict(color = 'rgba(3, 111, 20, 0.9)'),
text= df.university_name)
b =go.Scatter(
x = df2.world_rank,
y = df2.teaching,
mode = "lines+markers",
name = "teaching",
marker = dict(color = 'rgba(8, 12, 30, 0.8)'),
text= df.university_name)
data =[a,b]
layout = dict(title ="Teaching and World rank vs research of Top 100 Universities",
xaxis=dict(title="research ",ticklen=5,zeroline =False))
fig = dict(data =data ,layout = layout)
plot(fig)
#%%
df2014 = data5[data5.year == 2014].iloc[:100,:]
df2015 = data5[data5.year == 2015].iloc[:100,:]
df2016 = data5[data5.year == 2016].iloc[:100,:]
trace1 =go.Scatter(
x = df2014.world_rank,
y = df2014.citations,
mode = "markers",
name = "2014",
marker = dict(color = 'rgba(255, 128, 255, 0.8)'),
text= df2014.university_name)
trace2 =go.Scatter(
x = df2015.world_rank,
y = df2015.citations,
mode = "markers",
name = "2015",
marker = dict(color = 'rgba(255, 128, 2, 0.8)'),
text= df2015.university_name)
# trace3
trace3 =go.Scatter(
x = df2016.world_rank,
y = df2016.citations,
mode = "markers",
name = "2016",
marker = dict(color = 'rgba(0, 255, 200, 0.8)'),
text= df2016.university_name)
data = [trace1,trace2,trace3]
layout = dict(title = 'Citation vs world rank of top 100 universities with 2014, 2015 and 2016 years',
xaxis= dict(title= 'World Rank',ticklen= 5,zeroline= False),
yaxis= dict(title= 'Citation',ticklen= 5,zeroline= False)
)
fig = dict(data = data, layout = layout)
plot(fig)
#%%
print(data5.columns)
df2014 = data5[data5.year == 2014].iloc[:100,:]
df2015 = data5[data5.year == 2015].iloc[:100,:]
df2016 = data5[data5.year == 2016].iloc[:100,:]
trace1 =go.Scatter(
x = df2014.world_rank,
y = df2014.num_students,
mode = "markers",
name = "2014",
marker = dict(color = 'rgba(255, 128, 255, 0.8)'),
text= df2014.university_name)
trace2 =go.Scatter(
x = df2015.world_rank,
y = df2015.num_students,
mode = "markers",
name = "2015",
marker = dict(color = 'rgba(255, 128, 2, 0.8)'),
text= df2015.university_name)
# trace3
trace3 =go.Scatter(
x = df2016.world_rank,
y = df2016.num_students,
mode = "markers",
name = "2016",
marker = dict(color = 'rgba(0, 255, 200, 0.8)'),
text= df2016.university_name)
data = [trace1,trace2,trace3]
layout = dict(title = 'num_students vs world rank of top 100 universities with 2014, 2015 and 2016 years',
xaxis= dict(title= 'World Rank',ticklen= 5,zeroline= False),
yaxis= dict(title= 'Citation',ticklen= 5,zeroline= False)
)
fig = dict(data = data, layout = layout)
plot(fig)
| 20.384615
| 106
| 0.548967
|
71c558c80445c65bb4d4b7a3b5b72fb2fd49d838
| 4,614
|
py
|
Python
|
t4k/transformer/time_series_target_encoder.py
|
Yoshiki-Takahashi/tools4kaggle
|
eb2779687867e876f6beec1351140cfec046b152
|
[
"MIT"
] | null | null | null |
t4k/transformer/time_series_target_encoder.py
|
Yoshiki-Takahashi/tools4kaggle
|
eb2779687867e876f6beec1351140cfec046b152
|
[
"MIT"
] | null | null | null |
t4k/transformer/time_series_target_encoder.py
|
Yoshiki-Takahashi/tools4kaggle
|
eb2779687867e876f6beec1351140cfec046b152
|
[
"MIT"
] | null | null | null |
from sklearn.base import BaseEstimator, TransformerMixin
import category_encoders as ce
import numpy as np
import pandas as pd
class TimeSeriesTargetEncoder(BaseEstimator, TransformerMixin):
"""
Encode category to target value frequency on past data.
Parameters
----------
cols: list
a list of columns to encode, if None, all columns will be encoded
time_col: str
a name of time series column used for identity the time of that row
handle_unknown: float
a value used for unknown category
"""
def __init__(self,cols=None, time_col=None, handle_unknown=None, valid_appearance=0.05, ignore_first=0.1):
self.cols = cols
self.time_col = time_col
self.handle_unknown = handle_unknown
self.valid_appearance = valid_appearance
self.ignore_first = ignore_first
def fit(self, X, y):
"""
Encode category data.
Parameters
----------
X : Pandas.DataFrame
Training data.
y : None
None
Returns
-------
self : encoder
Returns self.
"""
order_series = X[self.time_col]
features = X[self.cols]
# filter_rare category
self.valid_category_dict = {}
for col in self.cols:
freq = features[col].value_counts() / features.shape[0]
self.valid_category_dict[col] = freq[freq > self.valid_appearance].index.values
# sort by order column
ordered = features.assign(order_series=order_series) \
.assign(target_series=y).sort_values('order_series') \
.apply(lambda col:col.cat.add_categories(['na']) if col.dtype.name=='category' else col) \
.fillna('na')
ordered_y = ordered['target_series']
ohe = ce.OneHotEncoder(cols=self.cols, use_cat_names=True, handle_unknown='ignore')
one_hot_encoded = ohe.fit_transform(ordered.drop(['target_series'],axis=1))
time_incre_count = one_hot_encoded.cumsum()
one_hot_target = one_hot_encoded * np.repeat(ordered_y.values[:, np.newaxis], one_hot_encoded.shape[1], axis=1)
time_incre_target = one_hot_target.cumsum()
te_table_all = (time_incre_target / time_incre_count).drop(['order_series'],axis=1)
te_table_all['order_series'] = one_hot_encoded['order_series']
self.te_table = te_table_all.groupby(by='order_series').mean()
self.te_table['unknown_category'] = np.nan
self.te_table_columns = list(self.te_table.columns)
return self
def transform(self, X):
order_series = X[self.time_col]
features = X[self.cols]
# filter rare appearance
features = features.apply(lambda column: column.apply(\
lambda x:x if x in self.valid_category_dict[column.name] else 'na_test'
), axis=0)
ordered = features.assign(order_series=order_series).sort_values('order_series') \
.apply(lambda col:col.cat.add_categories(['na_test']) if col.dtype.name=='category' else col) \
.fillna('na_test')
result_list = []
def convert_for_te(row):
concat_col_name = [col + '_' + str(val) for col, val in zip(self.cols, row)]
return [elem if elem in self.te_table_columns else 'unknown_category' for elem in concat_col_name]
te_colname_list_array = ordered[self.cols].apply(convert_for_te, axis=1).values
order_array = ordered.order_series.values
# search corresponded te_table index
corresponded_id = np.searchsorted(self.te_table.index.values, order_array, side='left')
corresponded_id = corresponded_id - 1
corresponded_id = np.clip(corresponded_id, a_min=0, a_max=None)
corresponded_order = self.te_table.index.values[corresponded_id]
# correspond index and cols df
corresponded_df = pd.DataFrame({
'corr_order':corresponded_order,
'corr_te_cols':te_colname_list_array
})
corresponded_df = corresponded_df.apply(lambda row:\
pd.Series(self.te_table.loc[row['corr_order'],row['corr_te_cols']].values, index=self.cols),
axis=1)
corresponded_df.index = ordered.index
# ignore earlier time row
threshold_version = self.te_table.index[int(self.ignore_first * self.te_table.shape[0])]
corresponded_df[order_array < threshold_version] = np.nan
return corresponded_df.sort_index()
def predict(self, X):
return X
| 39.435897
| 111
| 0.640225
|
38f00aeade41815fa0ef310ce4a6484bb7e1cc5a
| 23,284
|
py
|
Python
|
Main/modules/lin_decoder.py
|
XIAO-LI-UPF/Natural-Language-ParagraphGeneration
|
7d13e087bd2baa9a7f393fcc63e84e6aee1a3e34
|
[
"MIT"
] | 6
|
2019-11-20T14:12:25.000Z
|
2021-01-04T13:23:30.000Z
|
Main/modules/lin_decoder.py
|
XIAO-LI-UPF/Natural-Language-ParagraphGeneration
|
7d13e087bd2baa9a7f393fcc63e84e6aee1a3e34
|
[
"MIT"
] | 2
|
2020-02-17T21:24:40.000Z
|
2020-07-21T11:24:07.000Z
|
Main/modules/lin_decoder.py
|
XIAO-LI-UPF/Natural-Language-ParagraphGeneration
|
7d13e087bd2baa9a7f393fcc63e84e6aee1a3e34
|
[
"MIT"
] | null | null | null |
import dynet as dy
import dynet_modules as dm
import numpy as np
import random
from utils import *
from time import time
from collections import defaultdict
from modules.seq_encoder import SeqEncoder
from modules.bag_encoder import BagEncoder
from modules.tree_encoder import TreeEncoder
class LinDecoder(Decoder):
def __init__(self, args, model):
super().__init__(args, model)
self.train_input_key = 'input_tokens'
self.train_output_key = 'gold_linearized_tokens'
self.pred_input_key = 'input_tokens'
self.pred_output_key = 'linearized_tokens'
if 'seq' in self.args.tree_vecs:
self.seq_encoder = SeqEncoder(self.args, self.model, 'lin_seq')
if 'bag' in self.args.tree_vecs:
self.bag_encoder = BagEncoder(self.args, self.model, 'lin_bag')
if 'tree' in self.args.tree_vecs:
self.tree_encoder = TreeEncoder(self.args, self.model, 'lin_tree')
self.l2r_linearizer = L2RLinearizer(self.args, self.model) if 'l2r' in self.args.lin_decoders else None
self.r2l_linearizer = R2LLinearizer(self.args, self.model) if 'r2l' in self.args.lin_decoders else None
self.h2d_linearizer = H2DLinearizer(self.args, self.model) if 'h2d' in self.args.lin_decoders else None
self.log(f'Initialized <{self.__class__.__name__}>, params = {self.model.parameter_count()}')
def encode(self, sent):
# encode
if 'seq' in self.args.tree_vecs:
self.seq_encoder.encode(sent, 'linearized_tokens' if self.args.pred_seq else 'gold_linearized_tokens')
if 'bag' in self.args.tree_vecs:
self.bag_encoder.encode(sent)
if 'tree' in self.args.tree_vecs:
self.tree_encoder.encode(sent, self.args.pred_tree)
sum_vecs(sent, 'lin_vec', ['feat', 'lin_seq', 'lin_bag', 'lin_tree'])
def predict(self, sent, pipeline=False):
# top-down traverse to sort each domain
sent_agenda = [SentSequence(sent)]
self.encode(sent)
for token in traverse_topdown(sent.root):
all_agendas = []
ranks = {}
if 'l2r' in self.args.lin_decoders:
init_seq = self.l2r_linearizer.init_seq(token)
agenda, _ = self.l2r_linearizer.decode(init_seq)
all_agendas.append(agenda)
if 'r2l' in self.args.lin_decoders:
init_seq = self.r2l_linearizer.init_seq(token)
agenda, _ = self.r2l_linearizer.decode(init_seq)
all_agendas.append(agenda)
if 'h2d' in self.args.lin_decoders:
init_seq = self.h2d_linearizer.init_seq(token)
agenda, _ = self.h2d_linearizer.decode(init_seq)
all_agendas.append(agenda)
best_seqs = self.vote_best_seq(sent, all_agendas, self.args.beam_size)
token['linearized_domain'] = [t for t in best_seqs[0].linearized_tokens()] # remove <$$$>
new_agenda = []
for sent_seq in sent_agenda:
for seq in best_seqs:
new_seq = sent_seq.append(seq)
new_agenda.append(new_seq)
new_agenda.sort(key=lambda x: -x.score)
sent_agenda = new_agenda[:self.args.beam_size]
sent['nbest_linearized_tokens'] = [seq.get_sorted_tokens() for seq in sent_agenda]
sent['linearized_tokens'] = sent['nbest_linearized_tokens'][0]
def train_one_step(self, sent):
domain_total = domain_correct = loss_value = 0
t0 = time()
errs = []
self.encode(sent)
sent_agenda = [SentSequence(sent)]
for token in traverse_topdown(sent.root):
all_agendas = []
# training left-to-right
if 'l2r' in self.args.lin_decoders:
gold_seq = self.l2r_linearizer.init_seq(token)
while not self.l2r_linearizer.finished(gold_seq):
agenda, gold_seq = self.l2r_linearizer.decode(gold_seq, True)
all_agendas.append(agenda)
if gold_seq is not agenda[0]:
scores = [gold_seq.score_expr] + [seq.score_expr for seq in agenda if seq is not gold_seq]
errs.append(dy.hinge(dy.concatenate(scores), 0))
# right-to-left
if 'r2l' in self.args.lin_decoders:
gold_seq = self.r2l_linearizer.init_seq(token)
while not self.r2l_linearizer.finished(gold_seq):
agenda, gold_seq = self.r2l_linearizer.decode(gold_seq, True)
all_agendas.append(agenda)
if gold_seq is not agenda[0]:
scores = [gold_seq.score_expr] + [seq.score_expr for seq in agenda if seq is not gold_seq]
errs.append(dy.hinge(dy.concatenate(scores), 0))
# head-to-dep
if 'h2d' in self.args.lin_decoders:
gold_seq = self.h2d_linearizer.init_seq(token)
agenda = [gold_seq]
if self.h2d_linearizer.finished(gold_seq):
all_agendas.append(agenda)
else:
while not self.h2d_linearizer.finished(gold_seq):
agenda, gold_seq = self.h2d_linearizer.decode(gold_seq, True)
all_agendas.append(agenda)
# update only against all incorrect sequences (exclude lower scoring gold seq)
if gold_seq is not agenda[0]:
scores = [gold_seq.score_expr] + [seq.score_expr for seq in agenda if not seq.correct]
errs.append(dy.hinge(dy.concatenate(scores), 0))
new_agenda = []
best_seqs = self.vote_best_seq(sent, all_agendas, self.args.beam_size)
for sent_seq in sent_agenda:
for seq in best_seqs:
new_seq = sent_seq.append(seq)
new_agenda.append(new_seq)
new_agenda.sort(key=lambda x: -x.score)
sent_agenda = new_agenda[:self.args.beam_size]
if token['deps']:
domain_total += 1
domain_correct += agenda[0].correct
sent['nbest_linearized_tokens'] = [seq.get_sorted_tokens() for seq in sent_agenda]
# random sequence from the beam to give the downstream training set more realistic input
sent['linearized_tokens'] = random.choice(sent['nbest_linearized_tokens'])
loss = dy.esum(errs) if errs else 0
loss_value = loss.value() if loss else 0
return {'time': time()-t0,
'loss': loss_value,
'loss_expr': loss,
'total': domain_total,
'correct': domain_correct
}
def evaluate(self, sents):
gold_seqs = [sent[self.train_output_key] for sent in sents]
pred_seqs = [sent[self.pred_output_key] for sent in sents]
pred_bleu = eval_all(gold_seqs, pred_seqs)
if 'nbest_linearized_tokens' in sents[0]:
rand_seqs = [random.choice(sent['nbest_linearized_tokens']) for sent in sents]
orac_seqs = [max([(sent_bleu(gs, ps), ps) for ps in sent['nbest_linearized_tokens']], key=lambda x: x[0])[1] \
for gs, sent in zip(gold_seqs, sents)]
rand_bleu = eval_all(gold_seqs, rand_seqs)
orac_bleu = eval_all(gold_seqs, orac_seqs)
self.log(f'<PRED>{pred_bleu*100:.2f}')
self.log(f'<RAND>{rand_bleu*100:.2f}')
self.log(f'<ORAC>{orac_bleu*100:.2f}')
return pred_bleu
def vote_best_seq(self, sent, all_agendas, top=1):
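        # Vote across linearizers: hypotheses with the same token order accumulate
        # their score margins over the agenda minimum; return the top-voted
        # orderings, each represented by its highest-scoring sequence.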
all_seqs = defaultdict(float)
ids2seq = {}
for agenda in all_agendas:
min_score = min(seq.score for seq in agenda)
for seq in agenda:
ids = seq.ids()
all_seqs[ids] += (seq.score - min_score)
if ids not in ids2seq or seq.score > ids2seq[ids].score:
ids2seq[ids] = seq
        sorted_ids = sorted(ids2seq, key=lambda x: -all_seqs[x])  # rank orderings by accumulated votes
sorted_seqs = [ids2seq[ids] for ids in sorted_ids]
return sorted_seqs[:top]
class L2RLinearizer:
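    """
    Builds a head's domain left-to-right: an LSTM encodes the partial sequence
    and a pointer network scores which remaining token to append next.
    """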
def __init__(self, args, model):
print('<L2RLinearizer>')
self.args = args
Pointer = {'simple': dm.SimplePointer, 'glimpse':dm.GlimpsePointer, 'self':dm.SelfPointer}[self.args.pointer_type]
self.pointer = Pointer(model, self.args.token_dim)
self.seq_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
self.init_vec = model.add_parameters(self.args.token_dim)
def init_seq(self, token):
return SequenceL2R(self.seq_lstm.initial_state().add_input(self.init_vec), token, [], token['domain'])
def finished(self, seq):
return len(seq.rest) == 0
def decode(self, gold_seq, train_mode=False):
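        # Beam search over append decisions; in training, track the gold extension
        # and stop early once it falls off the beam.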
agenda = [gold_seq]
steps = len(gold_seq.rest)
for i in range(steps):
new_agenda = []
for seq in agenda:
cand_mat = dy.concatenate_cols([t.vecs['lin_vec'] for t in seq.rest])
scores = self.pointer.point(seq.state.output(), cand_mat)
# scores = dy.log_softmax(scores)
for t, s in zip(seq.rest, scores):
if self.args.no_lin_constraint or seq.check_order(t):
new_seq = seq.append(t, s)
new_agenda.append(new_seq)
if train_mode and new_seq.is_gold():
gold_seq = new_seq
new_agenda.sort(key=lambda x: -x.score)
agenda = new_agenda[:self.args.beam_size]
if train_mode and gold_seq not in agenda:
break
return agenda, gold_seq
class R2LLinearizer:
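    """
    Mirror of L2RLinearizer: builds the domain right-to-left, prepending one
    remaining token at a time.
    """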
def __init__(self, args, model):
print('<R2LLinearizer>')
self.args = args
Pointer = {'simple': dm.SimplePointer, 'glimpse':dm.GlimpsePointer, 'self':dm.SelfPointer}[self.args.pointer_type]
self.pointer = Pointer(model, self.args.token_dim)
self.seq_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
self.init_vec = model.add_parameters(self.args.token_dim)
def init_seq(self, token):
return SequenceR2L(self.seq_lstm.initial_state().add_input(self.init_vec), token, [], token['domain'])
def finished(self, seq):
return len(seq.rest) == 0
def decode(self, gold_seq, train_mode=False):
agenda = [gold_seq]
steps = len(gold_seq.rest)
for i in range(steps):
new_agenda = []
for seq in agenda:
cand_mat = dy.concatenate_cols([t.vecs['lin_vec'] for t in seq.rest])
scores = self.pointer.point(seq.state.output(), cand_mat)
# scores = dy.log_softmax(scores)
for t, s in zip(seq.rest, scores):
if self.args.no_lin_constraint or seq.check_order(t):
new_seq = seq.append(t, s)
# print(new_seq, 'g' if new_seq.is_gold() else 'w')
new_agenda.append(new_seq)
if train_mode and new_seq.is_gold():
gold_seq = new_seq
new_agenda.sort(key=lambda x: -x.score)
agenda = new_agenda[:self.args.beam_size]
if train_mode and gold_seq not in agenda:
break
return agenda, gold_seq
class H2DLinearizer:
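    """
    Builds a head's domain outward from the head: separate LSTMs track the
    left and right context, and two pointers decide which remaining dependent
    to attach next on the left or on the right.
    """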
def __init__(self, args, model):
print('<H2DLinearizer>')
self.args = args
Pointer = {'simple': dm.SimplePointer, 'glimpse':dm.GlimpsePointer, 'self':dm.SelfPointer}[self.args.pointer_type]
self.l_pointer = Pointer(model, self.args.token_dim, self.args.token_dim)
self.r_pointer = Pointer(model, self.args.token_dim, self.args.token_dim)
self.h2l_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
self.h2r_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
def finished(self, seq):
return len(seq.rest) == 0
def init_seq(self, token):
lstate = self.h2l_lstm.initial_state().add_input(token.vecs['lin_vec'])
rstate = self.h2r_lstm.initial_state().add_input(token.vecs['lin_vec'])
return SequenceH2D(lstate, rstate, token, [t for t in token['deps'] if t.not_empty()])
# return SequenceH2D(lstate, rstate, token, token['deps'])
def decode(self, gold_seq, train_mode=False):
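        # Beam search over left/right attachment decisions; hypotheses realizing the
        # same ordering are merged (keeping the higher score), and training stops
        # early once the tracked gold sequence falls off the beam.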
agenda = [gold_seq]
steps = len(gold_seq.rest)
for i in range(steps):
new_agenda = []
gold_seq = None
ids2seq = {}
for seq in agenda:
cand_mat = dy.concatenate_cols([t.vecs['lin_vec'] for t in seq.rest])
l_scores = self.l_pointer.point(seq.lstate.output(), cand_mat)
r_scores = self.r_pointer.point(seq.rstate.output(), cand_mat)
for t, s in zip(seq.rest, l_scores):
if self.args.no_lin_constraint or t not in seq.l_order or t is seq.l_order[-1]:
new_seq = seq.append_left(t, s)
ids = new_seq.ids()
if ids not in ids2seq or new_seq.score > ids2seq[ids].score:
ids2seq[ids] = new_seq
if train_mode and new_seq.is_gold() and (not gold_seq or new_seq.score > gold_seq.score):
gold_seq = new_seq
for t, s in zip(seq.rest, r_scores):
if self.args.no_lin_constraint or t not in seq.r_order or t is seq.r_order[0]:
new_seq = seq.append_right(t, s)
ids = new_seq.ids()
if ids not in ids2seq or new_seq.score > ids2seq[ids].score:
ids2seq[ids] = new_seq
if train_mode and new_seq.is_gold() and (not gold_seq or new_seq.score > gold_seq.score):
gold_seq = new_seq
new_agenda = list(ids2seq.values())
new_agenda.sort(key=lambda x: -x.score)
agenda = new_agenda[:self.args.beam_size]
if train_mode and gold_seq not in agenda:
break
return agenda, gold_seq
class SequenceL2R:
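    """
    Beam item for L2RLinearizer: the ordered tokens so far, the remaining
    candidates, the running score (scalar and expression), and the bookkeeping
    needed to check whether the partial order is still gold.
    """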
def __init__(self, state, head, tokens, rest, lost_rest=[], prev=None):
self.state = state
self.head = head
self.tokens = tokens
self.rest = rest
self.gold_lost_rest = lost_rest
self.prev = prev
if prev is None:
self.score = 0
self.score_expr = 0
self.correct = True
self.gold_lost_rest = self.head['lost']
self.required_order = self.head['order'][:]
else:
self.score = prev.score
self.score_expr = prev.score_expr
self.required_order = prev.required_order[:]
def __repr__(self):
return ' '.join(str(t['original_id']) for t in self.tokens) + '(' +' '.join(str(t['original_id']) for t in self.rest) + ')'
def ids(self):
return tuple(t['tid'] for t in self.tokens)
def lemmas(self):
return tuple(t['lemma'] for t in self.tokens)
def oids(self):
return [t['original_id'] for t in self.tokens]
def linearized_tokens(self):
return self.tokens
def check_order(self, tk):
return tk not in self.required_order or tk is self.required_order[0]
def append(self, tk, s):
state = self.state.add_input(tk.vecs['lin_vec'])
lost_rest = [t for t in self.gold_lost_rest if t['original_id'] != tk['original_id']] # non-empty only in training
seq = SequenceL2R(state, self.head, self.tokens+[tk], [t for t in self.rest if t is not tk], lost_rest, self)
seq.score_expr += s
seq.score += s.value()
if tk in seq.required_order:
seq.required_order.remove(tk)
return seq
def is_gold(self, lost=False):
rest_ids = [t['original_id'] for t in self.rest + (self.gold_lost_rest if lost else [])]
self.correct = self.prev.correct and all(self.tokens[-1]['original_id'] < i for i in rest_ids)
return self.correct
class SequenceR2L:
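    """
    Beam item for R2LLinearizer; like SequenceL2R, but the sequence grows from
    the right end towards the left.
    """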
def __init__(self, state, head, tokens, rest, prev=None):
self.state = state
self.head = head
self.tokens = tokens
self.rest = rest
self.prev = prev
if prev is None:
self.score = 0
self.score_expr = 0
self.correct = True
self.required_order = self.head['order'][:]
else:
self.score = prev.score
self.score_expr = prev.score_expr
self.required_order = prev.required_order[:]
def __repr__(self):
return ' '.join(str(t['original_id']) for t in self.tokens) + '(' +' '.join(str(t['original_id']) for t in self.rest) + ')'
def linearized_tokens(self):
return self.tokens
def ids(self):
return tuple(t['tid'] for t in self.tokens)
def lemmas(self):
return tuple(t['lemma'] for t in self.tokens)
def oids(self):
return [t['original_id'] for t in self.tokens]
def check_order(self, tk):
return tk not in self.required_order or tk is self.required_order[-1]
def append(self, tk, s):
state = self.state.add_input(tk.vecs['lin_vec'])
seq = SequenceR2L(state, self.head, [tk]+self.tokens, [t for t in self.rest if t is not tk], self)
seq.score_expr += s
seq.score += s.value()
if tk in seq.required_order:
seq.required_order.remove(tk)
return seq
def is_gold(self):
self.correct = self.prev.correct and all(self.tokens[0]['original_id'] > t['original_id'] for t in self.rest)
return self.correct
class SequenceH2D:
"""
    Double-ended sequence: starts with the head token,
    appends dependents on both sides from near to far, and
    allows spurious ambiguity in how the gold order can be built.
"""
def __init__(self, lstate, rstate, head, rest, lost_rest=[], ldeps=[], rdeps=[], prev=None):
self.lstate = lstate
self.rstate = rstate
self.head = head
self.ldeps = ldeps # grow inside-out
self.rdeps = rdeps # grow inside-out
self.rest = rest
self.gold_lost_rest = lost_rest
self.prev = prev
if prev is None:
self.score = 0
self.score_expr = 0
self.correct = True
self.l_order = self.head['l_order'][:]
self.r_order = self.head['r_order'][:]
self.gold_lost_rest = self.head['lost']
# print('lost', [t['original_id'] for t in self.head['lost']])
else:
self.score = prev.score
self.score_expr = prev.score_expr
self.l_order = prev.l_order[:]
self.r_order = prev.r_order[:]
self.correct = prev.correct
def ids(self):
return tuple(t['tid'] for t in self.ldeps + [self.head] + self.rdeps)
# return tuple(t['tid'] for t in self.linearized_tokens())
def oids(self):
return [t['original_id'] for t in self.ldeps + [self.head] + self.rdeps]
def linearized_tokens(self):
# all content tokens (excluding <$$$>)
return [t for t in (self.ldeps + [self.head] + self.rdeps) if t['lemma'] != '<$$$>']
def __repr__(self):
return ' '.join(str(t) for t in self.ldeps) + \
'<' + str(self.head) + '>' + \
' '.join(str(t) for t in self.rdeps) + \
' [' + ' '.join(str(t) for t in self.rest) + ']' +\
' {' + ' '.join(str(t) for t in self.gold_lost_rest) + '}'
def lmost(self):
return self.ldeps[0] if self.ldeps else self.head
def rmost(self):
return self.rdeps[-1] if self.rdeps else self.head
def append_left(self, tk, s):
lstate = self.lstate.add_input(tk.vecs['lin_vec'])
rstate = self.rstate
ldeps = [tk] + self.ldeps
rdeps = self.rdeps
lost_rest = [t for t in self.gold_lost_rest if t['original_id'] != tk['original_id']] # non-empty only in training
seq = SequenceH2D(lstate, rstate, self.head, [t for t in self.rest if t is not tk], lost_rest, ldeps, rdeps, self)
seq.score_expr += s
seq.score += s.value()
if tk in seq.l_order:
seq.l_order.remove(tk)
return seq
def append_right(self, tk, s):
lstate = self.lstate
rstate = self.rstate.add_input(tk.vecs['lin_vec'])
ldeps = self.ldeps
rdeps = self.rdeps + [tk]
lost_rest = [t for t in self.gold_lost_rest if t['original_id'] != tk['original_id']] # non-empty only in training
seq = SequenceH2D(lstate, rstate, self.head, [t for t in self.rest if t is not tk], lost_rest, ldeps, rdeps, self)
seq.score_expr += s
seq.score += s.value()
if tk in seq.r_order:
seq.r_order.remove(tk)
return seq
def is_gold(self, lost=False):
lmost, rmost = self.lmost(), self.rmost()
rest_ids = [t['original_id'] for t in self.rest + (self.gold_lost_rest if lost else [])]
ids = [t['original_id'] for t in self.linearized_tokens()]
if lmost['lemma'] == '<$$$>' and rest_ids and min(rest_ids) < lmost['original_id']:
self.correct = False
elif rmost['lemma'] == '<$$$>' and rest_ids and max(rest_ids) > rmost['original_id']:
self.correct = False
else:
self.correct = self.prev.correct and len(ids) == len(set(ids)) and ids == sorted(ids) and \
not any(min(ids) < tid < max(ids) for tid in rest_ids)
return self.correct
class SentSequence:
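    """
    Beam item over the whole sentence: maps each head's tid to its chosen
    domain ordering and flattens the per-domain orders into a full token
    sequence.
    """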
    def __init__(self, sent, domain_seqs=None):
self.sorted_tokens = []
self.sent = sent
self.score_expr = 0
self.score = 0
self.inv_num = None
        self.domain_seqs = domain_seqs if domain_seqs is not None else {}  # each instance gets its own dict unless one is passed in
if not self.domain_seqs:
self.domain_seqs[0] = self.sent.root['deps']
def append(self, domain_seq):
new_seq = SentSequence(self.sent, copy(self.domain_seqs))
new_seq.domain_seqs[domain_seq.head['tid']] = domain_seq.linearized_tokens()
new_seq.score_expr = self.score_expr + domain_seq.score_expr
new_seq.score = self.score + domain_seq.score
return new_seq
def is_gold(self):
return all(seq.correct for seq in self.domain_seqs)
def get_sorted_tokens(self):
if not self.sorted_tokens:
self.sorted_tokens = self.flatten(self.sent.root)
return self.sorted_tokens
def get_inv_num(self):
if self.inv_num is None:
self.inv_num = inverse_num(self.get_sorted_tokens()) ** 0.5
return self.inv_num
def flatten(self, head):
return sum([(self.flatten(tk) if (tk is not head) else ([tk] if tk['tid'] else []) ) \
for tk in self.domain_seqs[head['tid']]], [])
| 41.357016
| 131
| 0.587528
|