hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d23a830cb4e13b7e73b64bf53b38781e2f971948 | 3,133 | py | Python | auth_service.py | open-bms/open-bms-core | 36d9e2992f9dedeb5794f49993bfdb8da3fe4be7 | [
"MIT"
] | null | null | null | auth_service.py | open-bms/open-bms-core | 36d9e2992f9dedeb5794f49993bfdb8da3fe4be7 | [
"MIT"
] | 2 | 2021-12-14T04:07:24.000Z | 2021-12-15T23:28:54.000Z | auth_service.py | open-bms/open-bms-core | 36d9e2992f9dedeb5794f49993bfdb8da3fe4be7 | [
"MIT"
] | null | null | null | """The auth service module configures the flask app for the OpenBMS auth service.
The auth service provides an API for managing and authenticating user accounts. Users
may authenticate through a number of supported identity provides using SAML or through a
native OpenBMS account using an email address and password. The authentication service
also maintains user roles and permissions.
The auth service can be run in a development environment with the following command:
$ poetry run python auth_service.py
The auth service can be run in a production environment using gunicorn:
$ poetry run gunicorn auth:app
The auth_service.py script should not be run directly in a production environment due to
security and performance concerns.
"""
import sys
from os import environ
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
from flask_mongoengine import MongoEngine
from auth.api import auth_api_v1
from util.logstash import configure_logstash_handler
# create new flask app
app = Flask(__name__)
"""The WSGI Flask application."""
configure_logstash_handler(app)
# expose the auth API
app.register_blueprint(auth_api_v1)
with app.app_context():
# establish a connection to the database
app.config["SQLALCHEMY_DATABASE_URI"] = environ.get("SQLALCHEMY_DATABASE_URI")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
postgres = SQLAlchemy(app)
"""Provides access to the PostgreSQL database."""
try:
# verify the database connection
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
app.logger.info("Connected to the PostgreSQL database.")
except Exception as e:
sys.exit(f"Failed to connect to the PostgreSQL database: {e}")
# establish a connection to the document store
app.config["MONGODB_HOST"] = environ.get("MONGODB_HOST")
mongo = MongoEngine(app)
"""Provides access to the MongoDB database."""
try:
# verify the document store connection
mongo.connection.server_info()
app.logger.info("Connected to the MongoDB database.")
except Exception as e:
sys.exit(f"Failed to connect to the MongoDB database: {e}")
@app.route("/health")
def health_check():
"""Attempt to ping the database and respond with a status code 200.
This endpoint is verify that the server is running and that the database is
accessible.
"""
response = {"service": "OK"}
try:
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
response["database"] = "OK"
except Exception as e:
app.logger.error(e)
response["database"] = "ERROR"
try:
mongo.connection.server_info()
response["document_store"] = "OK"
except Exception as e:
app.logger.error(e)
response["document_store"] = "ERROR"
return response
if __name__ == "__main__" and environ.get("FLASK_ENV") == "development":
app.run(host="0.0.0.0", port=5000, debug=True) # nosec
elif __name__ == "__main__":
sys.exit("Development server can only be run in development mode.")
| 33.688172 | 88 | 0.721673 | """The auth service module configures the flask app for the OpenBMS auth service.
The auth service provides an API for managing and authenticating user accounts. Users
may authenticate through a number of supported identity provides using SAML or through a
native OpenBMS account using an email address and password. The authentication service
also maintains user roles and permissions.
The auth service can be run in a development environment with the following command:
$ poetry run python auth_service.py
The auth service can be run in a production environment using gunicorn:
$ poetry run gunicorn auth:app
The auth_service.py script should not be run directly in a production environment due to
security and performance concerns.
"""
import sys
from os import environ
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
from flask_mongoengine import MongoEngine
from auth.api import auth_api_v1
from util.logstash import configure_logstash_handler
# create new flask app
app = Flask(__name__)
"""The WSGI Flask application."""
configure_logstash_handler(app)
# expose the auth API
app.register_blueprint(auth_api_v1)
with app.app_context():
# establish a connection to the database
app.config["SQLALCHEMY_DATABASE_URI"] = environ.get("SQLALCHEMY_DATABASE_URI")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
postgres = SQLAlchemy(app)
"""Provides access to the PostgreSQL database."""
try:
# verify the database connection
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
app.logger.info("Connected to the PostgreSQL database.")
except Exception as e:
sys.exit(f"Failed to connect to the PostgreSQL database: {e}")
# establish a connection to the document store
app.config["MONGODB_HOST"] = environ.get("MONGODB_HOST")
mongo = MongoEngine(app)
"""Provides access to the MongoDB database."""
try:
# verify the document store connection
mongo.connection.server_info()
app.logger.info("Connected to the MongoDB database.")
except Exception as e:
sys.exit(f"Failed to connect to the MongoDB database: {e}")
@app.route("/health")
def health_check():
"""Attempt to ping the database and respond with a status code 200.
This endpoint is verify that the server is running and that the database is
accessible.
"""
response = {"service": "OK"}
try:
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
response["database"] = "OK"
except Exception as e:
app.logger.error(e)
response["database"] = "ERROR"
try:
mongo.connection.server_info()
response["document_store"] = "OK"
except Exception as e:
app.logger.error(e)
response["document_store"] = "ERROR"
return response
if __name__ == "__main__" and environ.get("FLASK_ENV") == "development":
app.run(host="0.0.0.0", port=5000, debug=True) # nosec
elif __name__ == "__main__":
sys.exit("Development server can only be run in development mode.")
| 0 | 0 | 0 |
11482d584e2b935307aae8ac905f448fa6b0334e | 1,119 | py | Python | cnn_code/download.py | neurocaience/deepfreeze | 2a8c7da7519df2bacb640917695bd7d226e8d4f4 | [
"MIT"
] | 1 | 2020-11-17T06:41:10.000Z | 2020-11-17T06:41:10.000Z | cnn_code/download.py | neurocaience/DeepFreeze | 2a8c7da7519df2bacb640917695bd7d226e8d4f4 | [
"MIT"
] | null | null | null | cnn_code/download.py | neurocaience/DeepFreeze | 2a8c7da7519df2bacb640917695bd7d226e8d4f4 | [
"MIT"
] | 1 | 2020-06-18T04:25:48.000Z | 2020-06-18T04:25:48.000Z | """=============================================================================
Download experimental directory.
============================================================================="""
import argparse
import os
# ------------------------------------------------------------------------------
def mkdir(directory):
"""Make directory if it does not exist. Void return.
"""
if not os.path.exists(directory):
os.makedirs(directory)
# ------------------------------------------------------------------------------
def download(directory):
"""Download directory and save locally.
"""
remote = '/scratch/gpfs/gwg3/fe/experiments/%s' % directory
local = '/Users/gwg/fe/experiments/'
mkdir(local)
cmd = 'rsync --progress -r ' \
'gwg3@tigergpu.princeton.edu:%s %s' % (remote, local)
os.system(cmd)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('--directory', type=str, required=True)
args = p.parse_args()
download(args.directory)
| 31.971429 | 80 | 0.416443 | """=============================================================================
Download experimental directory.
============================================================================="""
import argparse
import os
# ------------------------------------------------------------------------------
def mkdir(directory):
"""Make directory if it does not exist. Void return.
"""
if not os.path.exists(directory):
os.makedirs(directory)
# ------------------------------------------------------------------------------
def download(directory):
"""Download directory and save locally.
"""
remote = '/scratch/gpfs/gwg3/fe/experiments/%s' % directory
local = '/Users/gwg/fe/experiments/'
mkdir(local)
cmd = 'rsync --progress -r ' \
'gwg3@tigergpu.princeton.edu:%s %s' % (remote, local)
os.system(cmd)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('--directory', type=str, required=True)
args = p.parse_args()
download(args.directory)
| 0 | 0 | 0 |
79370707ea19958677a1687502db406a21323fe7 | 576 | py | Python | test/tablature_test.py | illume/eyestabs | 9ce717743a6a4fe7b561c68599e9352da3acf080 | [
"Unlicense"
] | null | null | null | test/tablature_test.py | illume/eyestabs | 9ce717743a6a4fe7b561c68599e9352da3acf080 | [
"Unlicense"
] | null | null | null | test/tablature_test.py | illume/eyestabs | 9ce717743a6a4fe7b561c68599e9352da3acf080 | [
"Unlicense"
] | null | null | null | ################################################################################
# User Libs
import test_utils
import test.unittest as unittest
import tablature as tab
# Std Libs
import os
################################################################################
################################################################################
if __name__ == '__main__':
unittest.main() | 27.428571 | 81 | 0.401042 | ################################################################################
# User Libs
import test_utils
import test.unittest as unittest
import tablature as tab
# Std Libs
import os
################################################################################
class TestTablature(unittest.TestCase):
def test_fixtures_work(self):
self.assert_(os.path.exists(test_utils.fixture_path('tab/tab1.txt')))
################################################################################
if __name__ == '__main__':
unittest.main() | 87 | 18 | 52 |
758fa9c537d5f3560f051905e3a75eea1d8a820b | 77 | py | Python | ofcourse/tests/__main__.py | liam-middlebrook/ofCourse | e93dc1b7fa825ad130a2b2a6eb8b5048e2c4005d | [
"Apache-2.0"
] | null | null | null | ofcourse/tests/__main__.py | liam-middlebrook/ofCourse | e93dc1b7fa825ad130a2b2a6eb8b5048e2c4005d | [
"Apache-2.0"
] | null | null | null | ofcourse/tests/__main__.py | liam-middlebrook/ofCourse | e93dc1b7fa825ad130a2b2a6eb8b5048e2c4005d | [
"Apache-2.0"
] | null | null | null | import test_yaml
import test_new
test_yaml.run_tests()
test_new.run_tests()
| 12.833333 | 21 | 0.831169 | import test_yaml
import test_new
test_yaml.run_tests()
test_new.run_tests()
| 0 | 0 | 0 |
0915537fd83ad76cc12260da6e25dbff6438a263 | 1,718 | py | Python | ietf/utils/models.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 25 | 2022-03-05T08:26:52.000Z | 2022-03-30T15:45:42.000Z | ietf/utils/models.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 219 | 2022-03-04T17:29:12.000Z | 2022-03-31T21:16:14.000Z | ietf/utils/models.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 22 | 2022-03-04T15:34:34.000Z | 2022-03-28T13:30:59.000Z | # Copyright The IETF Trust 2015-2020, All Rights Reserved
import itertools
from django.db import models
class ForeignKey(models.ForeignKey):
"A local ForeignKey proxy which provides the on_delete value required under Django 2.0."
class OneToOneField(models.OneToOneField):
"A local OneToOneField proxy which provides the on_delete value required under Django 2.0."
def object_to_dict(instance):
"""
Similar to django.forms.models.model_to_dict() but more comprehensive.
Taken from https://stackoverflow.com/questions/21925671/#answer-29088221
with a minor tweak: .id --> .pk
"""
opts = instance._meta
data = {}
for f in itertools.chain(opts.concrete_fields, opts.private_fields):
data[f.name] = f.value_from_object(instance)
for f in opts.many_to_many:
data[f.name] = [i.pk for i in f.value_from_object(instance)]
return data
| 38.177778 | 95 | 0.708382 | # Copyright The IETF Trust 2015-2020, All Rights Reserved
import itertools
from django.db import models
class DumpInfo(models.Model):
date = models.DateTimeField()
host = models.CharField(max_length=128)
tz = models.CharField(max_length=32, default='UTC')
class VersionInfo(models.Model):
time = models.DateTimeField(auto_now=True)
command = models.CharField(max_length=32)
switch = models.CharField(max_length=16)
version = models.CharField(max_length=64)
used = models.BooleanField(default=True)
class Meta:
verbose_name_plural = 'VersionInfo'
class ForeignKey(models.ForeignKey):
"A local ForeignKey proxy which provides the on_delete value required under Django 2.0."
def __init__(self, to, on_delete=models.CASCADE, **kwargs):
return super(ForeignKey, self).__init__(to, on_delete=on_delete, **kwargs)
class OneToOneField(models.OneToOneField):
"A local OneToOneField proxy which provides the on_delete value required under Django 2.0."
def __init__(self, to, on_delete=models.CASCADE, **kwargs):
return super(OneToOneField, self).__init__(to, on_delete=on_delete, **kwargs)
def object_to_dict(instance):
"""
Similar to django.forms.models.model_to_dict() but more comprehensive.
Taken from https://stackoverflow.com/questions/21925671/#answer-29088221
with a minor tweak: .id --> .pk
"""
opts = instance._meta
data = {}
for f in itertools.chain(opts.concrete_fields, opts.private_fields):
data[f.name] = f.value_from_object(instance)
for f in opts.many_to_many:
data[f.name] = [i.pk for i in f.value_from_object(instance)]
return data
| 245 | 451 | 102 |
0a6321c2240f9ea7f8a93b45daf5ef2dc3b73b95 | 11,227 | py | Python | tests/test_config.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | null | null | null | tests/test_config.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | null | null | null | tests/test_config.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | null | null | null | """Test config utils."""
# pylint: disable=too-many-public-methods,protected-access
import os
import tempfile
import unittest
import unittest.mock as mock
import pytest
from voluptuous import MultipleInvalid
from homeassistant.core import DOMAIN, HomeAssistantError, Config
import homeassistant.config as config_util
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_TEMPERATURE_UNIT, CONF_NAME,
CONF_TIME_ZONE, CONF_ELEVATION, CONF_CUSTOMIZE, __version__,
TEMP_FAHRENHEIT)
from homeassistant.util import location as location_util, dt as dt_util
from homeassistant.helpers.entity import Entity
from tests.common import (
get_test_config_dir, get_test_home_assistant)
CONFIG_DIR = get_test_config_dir()
YAML_PATH = os.path.join(CONFIG_DIR, config_util.YAML_CONFIG_FILE)
VERSION_PATH = os.path.join(CONFIG_DIR, config_util.VERSION_FILE)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
def create_file(path):
"""Create an empty file."""
with open(path, 'w'):
pass
class TestConfig(unittest.TestCase):
"""Test the configutils."""
def tearDown(self): # pylint: disable=invalid-name
"""Clean up."""
dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE
if os.path.isfile(YAML_PATH):
os.remove(YAML_PATH)
if os.path.isfile(VERSION_PATH):
os.remove(VERSION_PATH)
if hasattr(self, 'hass'):
self.hass.stop()
def test_create_default_config(self):
"""Test creation of default config."""
config_util.create_default_config(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
def test_find_config_file_yaml(self):
"""Test if it finds a YAML config file."""
create_file(YAML_PATH)
self.assertEqual(YAML_PATH, config_util.find_config_file(CONFIG_DIR))
@mock.patch('builtins.print')
def test_ensure_config_exists_creates_config(self, mock_print):
"""Test that calling ensure_config_exists.
If not creates a new config file.
"""
config_util.ensure_config_exists(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
self.assertTrue(mock_print.called)
def test_ensure_config_exists_uses_existing_config(self):
"""Test that calling ensure_config_exists uses existing config."""
create_file(YAML_PATH)
config_util.ensure_config_exists(CONFIG_DIR, False)
with open(YAML_PATH) as f:
content = f.read()
# File created with create_file are empty
self.assertEqual('', content)
def test_load_yaml_config_converts_empty_files_to_dict(self):
"""Test that loading an empty file returns an empty dict."""
create_file(YAML_PATH)
self.assertIsInstance(
config_util.load_yaml_config_file(YAML_PATH), dict)
def test_load_yaml_config_raises_error_if_not_dict(self):
"""Test error raised when YAML file is not a dict."""
with open(YAML_PATH, 'w') as f:
f.write('5')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_malformed_yaml(self):
"""Test error raised if invalid YAML."""
with open(YAML_PATH, 'w') as f:
f.write(':')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_unsafe_yaml(self):
"""Test error raised if unsafe YAML."""
with open(YAML_PATH, 'w') as f:
f.write('hello: !!python/object/apply:os.system')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_preserves_key_order(self):
"""Test removal of library."""
with open(YAML_PATH, 'w') as f:
f.write('hello: 0\n')
f.write('world: 1\n')
self.assertEqual(
[('hello', 0), ('world', 1)],
list(config_util.load_yaml_config_file(YAML_PATH).items()))
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=location_util.LocationInfo(
'0.0.0.0', 'US', 'United States', 'CA', 'California',
'San Diego', '92122', 'America/Los_Angeles', 32.8594,
-117.2073, True))
@mock.patch('homeassistant.util.location.elevation', return_value=101)
@mock.patch('builtins.print')
def test_create_default_config_detect_location(self, mock_detect,
mock_elev, mock_print):
"""Test that detect location sets the correct config keys."""
config_util.ensure_config_exists(CONFIG_DIR)
config = config_util.load_yaml_config_file(YAML_PATH)
self.assertIn(DOMAIN, config)
ha_conf = config[DOMAIN]
expected_values = {
CONF_LATITUDE: 32.8594,
CONF_LONGITUDE: -117.2073,
CONF_ELEVATION: 101,
CONF_TEMPERATURE_UNIT: 'F',
CONF_NAME: 'Home',
CONF_TIME_ZONE: 'America/Los_Angeles'
}
assert expected_values == ha_conf
assert mock_print.called
@mock.patch('builtins.print')
def test_create_default_config_returns_none_if_write_error(self,
mock_print):
"""Test the writing of a default configuration.
Non existing folder returns None.
"""
self.assertIsNone(
config_util.create_default_config(
os.path.join(CONFIG_DIR, 'non_existing_dir/'), False))
self.assertTrue(mock_print.called)
def test_entity_customization(self):
"""Test entity customization through configuration."""
self.hass = get_test_home_assistant()
config = {CONF_LATITUDE: 50,
CONF_LONGITUDE: 50,
CONF_NAME: 'Test',
CONF_CUSTOMIZE: {'test.test': {'hidden': True}}}
config_util.process_ha_core_config(self.hass, config)
entity = Entity()
entity.entity_id = 'test.test'
entity.hass = self.hass
entity.update_ha_state()
state = self.hass.states.get('test.test')
assert state.attributes['hidden']
def test_remove_lib_on_upgrade(self):
"""Test removal of library on upgrade."""
with tempfile.TemporaryDirectory() as config_dir:
version_path = os.path.join(config_dir, '.HA_VERSION')
lib_dir = os.path.join(config_dir, 'deps')
check_file = os.path.join(lib_dir, 'check')
with open(version_path, 'wt') as outp:
outp.write('0.7.0')
os.mkdir(lib_dir)
with open(check_file, 'w'):
pass
self.hass = get_test_home_assistant()
self.hass.config.config_dir = config_dir
assert os.path.isfile(check_file)
config_util.process_ha_config_upgrade(self.hass)
assert not os.path.isfile(check_file)
def test_not_remove_lib_if_not_upgrade(self):
"""Test removal of library with no upgrade."""
with tempfile.TemporaryDirectory() as config_dir:
version_path = os.path.join(config_dir, '.HA_VERSION')
lib_dir = os.path.join(config_dir, 'deps')
check_file = os.path.join(lib_dir, 'check')
with open(version_path, 'wt') as outp:
outp.write(__version__)
os.mkdir(lib_dir)
with open(check_file, 'w'):
pass
self.hass = get_test_home_assistant()
self.hass.config.config_dir = config_dir
config_util.process_ha_config_upgrade(self.hass)
assert os.path.isfile(check_file)
def test_loading_configuration(self):
"""Test loading core config onto hass object."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {
'latitude': 60,
'longitude': 50,
'elevation': 25,
'name': 'Huis',
'temperature_unit': 'F',
'time_zone': 'America/New_York',
})
assert config.latitude == 60
assert config.longitude == 50
assert config.elevation == 25
assert config.location_name == 'Huis'
assert config.temperature_unit == TEMP_FAHRENHEIT
assert config.time_zone.zone == 'America/New_York'
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=location_util.LocationInfo(
'0.0.0.0', 'US', 'United States', 'CA', 'California',
'San Diego', '92122', 'America/Los_Angeles', 32.8594,
-117.2073, True))
@mock.patch('homeassistant.util.location.elevation', return_value=101)
def test_discovering_configuration(self, mock_detect, mock_elevation):
"""Test auto discovery for missing core configs."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {})
assert config.latitude == 32.8594
assert config.longitude == -117.2073
assert config.elevation == 101
assert config.location_name == 'San Diego'
assert config.temperature_unit == TEMP_FAHRENHEIT
assert config.time_zone.zone == 'America/Los_Angeles'
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=None)
@mock.patch('homeassistant.util.location.elevation', return_value=0)
def test_discovering_configuration_auto_detect_fails(self, mock_detect,
mock_elevation):
"""Test config remains unchanged if discovery fails."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {})
blankConfig = Config()
assert config.latitude == blankConfig.latitude
assert config.longitude == blankConfig.longitude
assert config.elevation == blankConfig.elevation
assert config.location_name == blankConfig.location_name
assert config.temperature_unit == blankConfig.temperature_unit
assert config.time_zone == blankConfig.time_zone
| 35.528481 | 77 | 0.625367 | """Test config utils."""
# pylint: disable=too-many-public-methods,protected-access
import os
import tempfile
import unittest
import unittest.mock as mock
import pytest
from voluptuous import MultipleInvalid
from homeassistant.core import DOMAIN, HomeAssistantError, Config
import homeassistant.config as config_util
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_TEMPERATURE_UNIT, CONF_NAME,
CONF_TIME_ZONE, CONF_ELEVATION, CONF_CUSTOMIZE, __version__,
TEMP_FAHRENHEIT)
from homeassistant.util import location as location_util, dt as dt_util
from homeassistant.helpers.entity import Entity
from tests.common import (
get_test_config_dir, get_test_home_assistant)
CONFIG_DIR = get_test_config_dir()
YAML_PATH = os.path.join(CONFIG_DIR, config_util.YAML_CONFIG_FILE)
VERSION_PATH = os.path.join(CONFIG_DIR, config_util.VERSION_FILE)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
def create_file(path):
"""Create an empty file."""
with open(path, 'w'):
pass
class TestConfig(unittest.TestCase):
"""Test the configutils."""
def tearDown(self): # pylint: disable=invalid-name
"""Clean up."""
dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE
if os.path.isfile(YAML_PATH):
os.remove(YAML_PATH)
if os.path.isfile(VERSION_PATH):
os.remove(VERSION_PATH)
if hasattr(self, 'hass'):
self.hass.stop()
def test_create_default_config(self):
"""Test creation of default config."""
config_util.create_default_config(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
def test_find_config_file_yaml(self):
"""Test if it finds a YAML config file."""
create_file(YAML_PATH)
self.assertEqual(YAML_PATH, config_util.find_config_file(CONFIG_DIR))
@mock.patch('builtins.print')
def test_ensure_config_exists_creates_config(self, mock_print):
"""Test that calling ensure_config_exists.
If not creates a new config file.
"""
config_util.ensure_config_exists(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
self.assertTrue(mock_print.called)
def test_ensure_config_exists_uses_existing_config(self):
"""Test that calling ensure_config_exists uses existing config."""
create_file(YAML_PATH)
config_util.ensure_config_exists(CONFIG_DIR, False)
with open(YAML_PATH) as f:
content = f.read()
# File created with create_file are empty
self.assertEqual('', content)
def test_load_yaml_config_converts_empty_files_to_dict(self):
"""Test that loading an empty file returns an empty dict."""
create_file(YAML_PATH)
self.assertIsInstance(
config_util.load_yaml_config_file(YAML_PATH), dict)
def test_load_yaml_config_raises_error_if_not_dict(self):
"""Test error raised when YAML file is not a dict."""
with open(YAML_PATH, 'w') as f:
f.write('5')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_malformed_yaml(self):
"""Test error raised if invalid YAML."""
with open(YAML_PATH, 'w') as f:
f.write(':')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_unsafe_yaml(self):
"""Test error raised if unsafe YAML."""
with open(YAML_PATH, 'w') as f:
f.write('hello: !!python/object/apply:os.system')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_preserves_key_order(self):
"""Test removal of library."""
with open(YAML_PATH, 'w') as f:
f.write('hello: 0\n')
f.write('world: 1\n')
self.assertEqual(
[('hello', 0), ('world', 1)],
list(config_util.load_yaml_config_file(YAML_PATH).items()))
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=location_util.LocationInfo(
'0.0.0.0', 'US', 'United States', 'CA', 'California',
'San Diego', '92122', 'America/Los_Angeles', 32.8594,
-117.2073, True))
@mock.patch('homeassistant.util.location.elevation', return_value=101)
@mock.patch('builtins.print')
def test_create_default_config_detect_location(self, mock_detect,
mock_elev, mock_print):
"""Test that detect location sets the correct config keys."""
config_util.ensure_config_exists(CONFIG_DIR)
config = config_util.load_yaml_config_file(YAML_PATH)
self.assertIn(DOMAIN, config)
ha_conf = config[DOMAIN]
expected_values = {
CONF_LATITUDE: 32.8594,
CONF_LONGITUDE: -117.2073,
CONF_ELEVATION: 101,
CONF_TEMPERATURE_UNIT: 'F',
CONF_NAME: 'Home',
CONF_TIME_ZONE: 'America/Los_Angeles'
}
assert expected_values == ha_conf
assert mock_print.called
@mock.patch('builtins.print')
def test_create_default_config_returns_none_if_write_error(self,
mock_print):
"""Test the writing of a default configuration.
Non existing folder returns None.
"""
self.assertIsNone(
config_util.create_default_config(
os.path.join(CONFIG_DIR, 'non_existing_dir/'), False))
self.assertTrue(mock_print.called)
def test_core_config_schema(self):
for value in (
{'temperature_unit': 'K'},
{'time_zone': 'non-exist'},
{'latitude': '91'},
{'longitude': -181},
{'customize': 'bla'},
{'customize': {'invalid_entity_id': {}}},
{'customize': {'light.sensor': 100}},
):
with pytest.raises(MultipleInvalid):
config_util.CORE_CONFIG_SCHEMA(value)
config_util.CORE_CONFIG_SCHEMA({
'name': 'Test name',
'latitude': '-23.45',
'longitude': '123.45',
'temperature_unit': 'c',
'customize': {
'sensor.temperature': {
'hidden': True,
},
},
})
def test_entity_customization(self):
"""Test entity customization through configuration."""
self.hass = get_test_home_assistant()
config = {CONF_LATITUDE: 50,
CONF_LONGITUDE: 50,
CONF_NAME: 'Test',
CONF_CUSTOMIZE: {'test.test': {'hidden': True}}}
config_util.process_ha_core_config(self.hass, config)
entity = Entity()
entity.entity_id = 'test.test'
entity.hass = self.hass
entity.update_ha_state()
state = self.hass.states.get('test.test')
assert state.attributes['hidden']
def test_remove_lib_on_upgrade(self):
"""Test removal of library on upgrade."""
with tempfile.TemporaryDirectory() as config_dir:
version_path = os.path.join(config_dir, '.HA_VERSION')
lib_dir = os.path.join(config_dir, 'deps')
check_file = os.path.join(lib_dir, 'check')
with open(version_path, 'wt') as outp:
outp.write('0.7.0')
os.mkdir(lib_dir)
with open(check_file, 'w'):
pass
self.hass = get_test_home_assistant()
self.hass.config.config_dir = config_dir
assert os.path.isfile(check_file)
config_util.process_ha_config_upgrade(self.hass)
assert not os.path.isfile(check_file)
def test_not_remove_lib_if_not_upgrade(self):
"""Test removal of library with no upgrade."""
with tempfile.TemporaryDirectory() as config_dir:
version_path = os.path.join(config_dir, '.HA_VERSION')
lib_dir = os.path.join(config_dir, 'deps')
check_file = os.path.join(lib_dir, 'check')
with open(version_path, 'wt') as outp:
outp.write(__version__)
os.mkdir(lib_dir)
with open(check_file, 'w'):
pass
self.hass = get_test_home_assistant()
self.hass.config.config_dir = config_dir
config_util.process_ha_config_upgrade(self.hass)
assert os.path.isfile(check_file)
def test_loading_configuration(self):
"""Test loading core config onto hass object."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {
'latitude': 60,
'longitude': 50,
'elevation': 25,
'name': 'Huis',
'temperature_unit': 'F',
'time_zone': 'America/New_York',
})
assert config.latitude == 60
assert config.longitude == 50
assert config.elevation == 25
assert config.location_name == 'Huis'
assert config.temperature_unit == TEMP_FAHRENHEIT
assert config.time_zone.zone == 'America/New_York'
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=location_util.LocationInfo(
'0.0.0.0', 'US', 'United States', 'CA', 'California',
'San Diego', '92122', 'America/Los_Angeles', 32.8594,
-117.2073, True))
@mock.patch('homeassistant.util.location.elevation', return_value=101)
def test_discovering_configuration(self, mock_detect, mock_elevation):
"""Test auto discovery for missing core configs."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {})
assert config.latitude == 32.8594
assert config.longitude == -117.2073
assert config.elevation == 101
assert config.location_name == 'San Diego'
assert config.temperature_unit == TEMP_FAHRENHEIT
assert config.time_zone.zone == 'America/Los_Angeles'
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=None)
@mock.patch('homeassistant.util.location.elevation', return_value=0)
def test_discovering_configuration_auto_detect_fails(self, mock_detect,
mock_elevation):
"""Test config remains unchanged if discovery fails."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {})
blankConfig = Config()
assert config.latitude == blankConfig.latitude
assert config.longitude == blankConfig.longitude
assert config.elevation == blankConfig.elevation
assert config.location_name == blankConfig.location_name
assert config.temperature_unit == blankConfig.temperature_unit
assert config.time_zone == blankConfig.time_zone
| 761 | 0 | 27 |
b6635285c7d00ae82d0c261aeba71329a67efc08 | 14,586 | py | Python | ipf/glue2/compute.py | pauldalewilliams/ipf | 2bee1746d74724105a88b6b152bab4318ff32bfd | [
"Apache-2.0"
] | 1 | 2018-03-16T23:25:10.000Z | 2018-03-16T23:25:10.000Z | ipf/glue2/compute.py | pauldalewilliams/ipf | 2bee1746d74724105a88b6b152bab4318ff32bfd | [
"Apache-2.0"
] | 2 | 2020-07-26T02:42:48.000Z | 2022-03-23T16:37:49.000Z | ipf/glue2/compute.py | pauldalewilliams/ipf | 2bee1746d74724105a88b6b152bab4318ff32bfd | [
"Apache-2.0"
] | 3 | 2020-06-15T18:20:15.000Z | 2021-05-25T15:50:35.000Z |
###############################################################################
# Copyright 2011-2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import json
import os
from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from ipf.dt import *
from ipf.error import NoMoreInputsError, StepError
from ipf.sysinfo import ResourceName
from ipf.step import Step
from ipf.ipfinfo import IPFInformation, IPFInformationJson, IPFInformationTxt
from .computing_activity import ComputingActivities, ComputingActivityTeraGridXml, ComputingActivityOgfJson
from .computing_manager import ComputingManager, ComputingManagerTeraGridXml, ComputingManagerOgfJson
from .computing_manager_accel_info import ComputingManagerAcceleratorInfo, ComputingManagerAcceleratorInfoOgfJson
from .computing_service import ComputingService, ComputingServiceTeraGridXml, ComputingServiceOgfJson
from .computing_share import ComputingShares, ComputingShareTeraGridXml, ComputingShareOgfJson
from .computing_share_accel_info import ComputingShareAcceleratorInfo, ComputingShareAcceleratorInfoOgfJson
from .execution_environment import ExecutionEnvironments, ExecutionEnvironmentTeraGridXml
from .execution_environment import ExecutionEnvironmentTeraGridXml
from .execution_environment import ExecutionEnvironmentOgfJson
from .accelerator_environment import AcceleratorEnvironments
from .accelerator_environment import AcceleratorEnvironmentsOgfJson
from .accelerator_environment import AcceleratorEnvironment
from .accelerator_environment import AcceleratorEnvironmentOgfJson
from .location import Location, LocationOgfJson, LocationTeraGridXml
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
| 44.066465 | 189 | 0.593857 |
###############################################################################
# Copyright 2011-2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import json
import os
from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from ipf.dt import *
from ipf.error import NoMoreInputsError, StepError
from ipf.sysinfo import ResourceName
from ipf.step import Step
from ipf.ipfinfo import IPFInformation, IPFInformationJson, IPFInformationTxt
from .computing_activity import ComputingActivities, ComputingActivityTeraGridXml, ComputingActivityOgfJson
from .computing_manager import ComputingManager, ComputingManagerTeraGridXml, ComputingManagerOgfJson
from .computing_manager_accel_info import ComputingManagerAcceleratorInfo, ComputingManagerAcceleratorInfoOgfJson
from .computing_service import ComputingService, ComputingServiceTeraGridXml, ComputingServiceOgfJson
from .computing_share import ComputingShares, ComputingShareTeraGridXml, ComputingShareOgfJson
from .computing_share_accel_info import ComputingShareAcceleratorInfo, ComputingShareAcceleratorInfoOgfJson
from .execution_environment import ExecutionEnvironments, ExecutionEnvironmentTeraGridXml
from .execution_environment import ExecutionEnvironmentTeraGridXml
from .execution_environment import ExecutionEnvironmentOgfJson
from .accelerator_environment import AcceleratorEnvironments
from .accelerator_environment import AcceleratorEnvironmentsOgfJson
from .accelerator_environment import AcceleratorEnvironment
from .accelerator_environment import AcceleratorEnvironmentOgfJson
from .location import Location, LocationOgfJson, LocationTeraGridXml
#######################################################################################################################
class PublicStep(Step):
    """Collects every non-sensitive compute-related input into one Public document."""

    def __init__(self):
        Step.__init__(self)
        self.description = "creates a single data containing all nonsensitive compute-related information"
        self.time_out = 5
        self.requires = [IPFInformation, ResourceName, Location,
                         ComputingService, ComputingShares, ComputingManager, ExecutionEnvironments, AcceleratorEnvironments, ComputingManagerAcceleratorInfo, ComputingShareAcceleratorInfo]
        self.produces = [Public]

    def run(self):
        """Assemble a Public document from all required inputs and emit it."""
        public = Public()
        resource_name = self._getInput(ResourceName).resource_name
        public.resource_name = resource_name
        public.ipfinfo = [self._getInput(IPFInformation)]
        # The old TeraGrid XML wants a site name; derive it by stripping the
        # first label of the resource name.
        public.site_name = resource_name[resource_name.find(".") + 1:]
        public.location = [self._getInput(Location)]
        public.service = [self._getInput(ComputingService)]
        public.share = self._getInput(ComputingShares).shares
        public.manager = [self._getInput(ComputingManager)]
        public.manager_accel_info = [
            self._getInput(ComputingManagerAcceleratorInfo)]
        public.share_accel_info = [
            self._getInput(ComputingShareAcceleratorInfo)]
        public.environment = self._getInput(ExecutionEnvironments).exec_envs
        public.accelenvironment = self._getInput(
            AcceleratorEnvironments).accel_envs
        public.id = resource_name
        self._output(public)
#######################################################################################################################
class Public(Data):
    """Aggregate document of all non-sensitive compute-related entities."""

    def __init__(self):
        Data.__init__(self)
        self.ipfinfo = []
        self.location = []
        self.service = []
        self.share = []
        self.manager = []
        self.environment = []
        self.accelenvironment = []
        # Defaulted here (normally set by PublicStep.run) so that a bare
        # Public() can be serialized by PublicOgfJson without AttributeError.
        self.manager_accel_info = []
        self.share_accel_info = []

    def fromJson(self, doc):
        """Populate this document from a GLUE2 JSON dictionary."""
        self.ipfinfo = []
        for idoc in doc.get("Ipfinfo", []):
            # Fix: was `ipfinfo()` -- an undefined lowercase name (NameError).
            # IPFInformation is the imported publisher-info class; confirm it
            # exposes fromJson().
            self.ipfinfo.append(IPFInformation().fromJson(idoc))
        self.location = []
        for ldoc in doc.get("Location", []):
            self.location.append(Location().fromJson(ldoc))
        self.service = []
        # Fix: default added -- a document without the key previously raised
        # TypeError from iterating None.
        for sdoc in doc.get("ComputingService", []):
            self.service.append(ComputingService().fromJson(sdoc))
        self.share = []
        for sdoc in doc.get("ComputingShare", []):
            # NOTE(review): ComputingShare (singular) is not imported by this
            # module -- verify the intended class name.
            self.share.append(ComputingShare().fromJson(sdoc))
        self.manager = []
        for mdoc in doc.get("ComputingManager", []):  # Fix: default added
            self.manager.append(ComputingManager().fromJson(mdoc))
        self.environment = []
        for edoc in doc.get("ExecutionEnvironment", []):
            # NOTE(review): ExecutionEnvironment (singular) is not imported by
            # this module -- verify the intended class name.
            self.environment.append(ExecutionEnvironment().fromJson(edoc))
        # Fix: the attribute was misspelled `accleenvironment` and the parsed
        # accelerator environments were appended to self.environment, mixing
        # them with execution environments.
        self.accelenvironment = []
        for edoc in doc.get("AcceleratorEnvironment", []):
            self.accelenvironment.append(AcceleratorEnvironment().fromJson(edoc))
#######################################################################################################################
class PublicTeraGridXml(Representation):
    """Serializes a Public document to the legacy TeraGrid XML format."""

    data_cls = Public

    def __init__(self, data):
        Representation.__init__(self, Representation.MIME_TEXT_XML, data)

    def get(self):
        return self.toDom().toxml()

    def toDom(self):
        """Build and return the TeraGrid XML DOM for this document."""
        doc = getDOMImplementation().createDocument("http://info.teragrid.org/2009/03/ctss",
                                                    "V4glue2RP", None)
        # hack - minidom doesn't output name spaces
        doc.documentElement.setAttribute(
            "xmlns", "http://info.teragrid.org/2009/03/ctss")
        glue2 = doc.createElementNS(
            "http://info.teragrid.org/glue/2009/02/spec_2.0_r02", "glue2")
        doc.documentElement.appendChild(glue2)
        # WS-MDS doesn't want a namespace on glue2
        # glue2.setAttribute("xmlns","http://info.teragrid.org/glue/2009/02/spec_2.0_r02")
        # Fix: the setAttribute/appendChild calls below had no receiver
        # (NameError at runtime); they target the glue2 element, matching the
        # commented-out namespace call above -- confirm against upstream ipf.
        glue2.setAttribute("Timestamp", dateTimeToText(
            self.data.manager[0].CreationTime))
        glue2.setAttribute("UniqueID", ""+self.data.resource_name)
        resource = doc.createElement("ResourceID")
        resource.appendChild(doc.createTextNode(self.data.resource_name))
        glue2.appendChild(resource)
        site = doc.createElement("SiteID")
        site.appendChild(doc.createTextNode(self.data.site_name))
        glue2.appendChild(site)
        entities = doc.createElement("Entities")
        glue2.appendChild(entities)
        for location in self.data.location:
            entities.appendChild(LocationTeraGridXml(
                location).toDom().documentElement.firstChild)
        for service in self.data.service:
            entities.appendChild(ComputingServiceTeraGridXml(
                service).toDom().documentElement.firstChild)
        for share in self.data.share:
            entities.appendChild(ComputingShareTeraGridXml(
                share).toDom().documentElement.firstChild)
        for manager in self.data.manager:
            entities.appendChild(ComputingManagerTeraGridXml(
                manager).toDom().documentElement.firstChild)
        for environment in self.data.environment:
            entities.appendChild(ExecutionEnvironmentTeraGridXml(
                environment).toDom().documentElement.firstChild)
        return doc
#######################################################################################################################
class PublicOgfJson(Representation):
    """Serializes a Public document to OGF GLUE2 JSON."""

    data_cls = Public

    def __init__(self, data):
        Representation.__init__(
            self, Representation.MIME_APPLICATION_JSON, data)

    def get(self):
        return json.dumps(self.toJson(), indent=4)

    def toJson(self):
        """Return a dict with one key per non-empty entity collection."""
        data = self.data
        doc = {}
        if data.ipfinfo is not None:
            doc["PublisherInfo"] = [IPFInformationJson(info).toJson()
                                    for info in data.ipfinfo]
        if len(data.location) > 0:
            doc["Location"] = [LocationOgfJson(loc).toJson()
                               for loc in data.location]
        if data.service is not None:
            doc["ComputingService"] = [ComputingServiceOgfJson(svc).toJson()
                                       for svc in data.service]
        if len(data.share) > 0:
            doc["ComputingShare"] = [ComputingShareOgfJson(shr).toJson()
                                     for shr in data.share]
        if len(data.share_accel_info) > 0:
            # Only emit entries whose serialization is non-empty.
            share_entries = [ComputingShareAcceleratorInfoOgfJson(info).toJson()
                             for info in data.share_accel_info]
            share_entries = [entry for entry in share_entries if entry]
            if len(share_entries) > 0:
                doc["ComputingShareAcceleratorInfo"] = share_entries
        if len(data.manager) > 0:
            doc["ComputingManager"] = [ComputingManagerOgfJson(mgr).toJson()
                                       for mgr in data.manager]
        if len(data.environment) > 0:
            doc["ExecutionEnvironment"] = [ExecutionEnvironmentOgfJson(env).toJson()
                                           for env in data.environment]
        if data.accelenvironment:
            if len(data.accelenvironment) > 0:
                doc["AcceleratorEnvironment"] = [AcceleratorEnvironmentOgfJson(env).toJson()
                                                 for env in data.accelenvironment]
        if len(data.manager_accel_info) > 0:
            # Only emit entries whose serialization is non-empty.
            manager_entries = [ComputingManagerAcceleratorInfoOgfJson(info).toJson()
                               for info in data.manager_accel_info]
            manager_entries = [entry for entry in manager_entries if entry]
            if len(manager_entries) > 0:
                doc["ComputingManagerAcceleratorInfo"] = manager_entries
        return doc
#######################################################################################################################
class PrivateStep(Step):
    """Collects every sensitive compute-related input into one Private document."""

    def __init__(self):
        Step.__init__(self)
        self.description = "creates a single data containing all sensitive compute-related information"
        self.time_out = 5
        self.requires = [IPFInformation, ResourceName, ComputingActivities]
        self.produces = [Private]

    def run(self):
        """Assemble a Private document from the required inputs and emit it."""
        private = Private()
        private.ipfinfo = [self._getInput(IPFInformation)]
        resource_name = self._getInput(ResourceName).resource_name
        private.resource_name = resource_name
        # The old TeraGrid XML wants a site name; derive it by stripping the
        # first label of the resource name.
        private.site_name = resource_name[resource_name.find(".") + 1:]
        private.activity = self._getInput(ComputingActivities).activities
        private.id = resource_name
        self._output(private)
#######################################################################################################################
class Private(Data):
    """Aggregate document of sensitive (per-activity) compute information."""

    def __init__(self):
        Data.__init__(self)
        self.activity = []

    def fromJson(self, doc):
        """Populate this document from a GLUE2 JSON dictionary."""
        self.activity = []
        for adoc in doc.get("ComputingActivity", []):
            # Fix: was `self.location.append(...)` -- Private has no
            # `location` attribute, so this raised AttributeError and left
            # `activity` empty.  NOTE(review): ComputingActivity (singular)
            # is not imported by this module -- verify the intended class.
            self.activity.append(ComputingActivity().fromJson(adoc))
#######################################################################################################################
class PrivateTeraGridXml(Representation):
    """Serializes a Private document to the legacy TeraGrid XML format."""

    data_cls = Private

    def __init__(self, data):
        Representation.__init__(self, Representation.MIME_TEXT_XML, data)

    def get(self):
        return self.toDom().toxml()

    def toDom(self):
        """Build and return the TeraGrid XML DOM for this document."""
        doc = getDOMImplementation().createDocument("http://info.teragrid.org/2009/03/ctss",
                                                    "V4glue2RP", None)
        # hack - minidom doesn't output name spaces
        doc.documentElement.setAttribute(
            "xmlns", "http://info.teragrid.org/2009/03/ctss")
        glue2 = doc.createElementNS(
            "http://info.teragrid.org/glue/2009/02/spec_2.0_r02", "glue2")
        doc.documentElement.appendChild(glue2)
        # WS-MDS doesn't want a namespace on glue2
        # glue2.setAttribute("xmlns","http://info.teragrid.org/glue/2009/02/spec_2.0_r02")
        # Fix: the setAttribute/appendChild calls below had no receiver
        # (NameError at runtime); they target the glue2 element, matching the
        # commented-out namespace call above -- confirm against upstream ipf.
        if len(self.data.activity) > 0:
            glue2.setAttribute("Timestamp", dateTimeToText(
                self.data.activity[0].CreationTime))
        else:
            glue2.setAttribute("Timestamp", dateTimeToText(
                datetime.datetime.now(tzoffset(0))))
        glue2.setAttribute("UniqueID", ""+self.data.resource_name)
        resource = doc.createElement("ResourceID")
        resource.appendChild(doc.createTextNode(self.data.resource_name))
        glue2.appendChild(resource)
        site = doc.createElement("SiteID")
        site.appendChild(doc.createTextNode(self.data.site_name))
        glue2.appendChild(site)
        entities = doc.createElement("Entities")
        glue2.appendChild(entities)
        for activity in self.data.activity:
            entities.appendChild(ComputingActivityTeraGridXml(
                activity).toDom().documentElement.firstChild)
        return doc
#######################################################################################################################
class PrivateOgfJson(Representation):
    """Serializes a Private document to OGF GLUE2 JSON."""

    data_cls = Private

    def __init__(self, data):
        Representation.__init__(
            self, Representation.MIME_APPLICATION_JSON, data)

    def get(self):
        return json.dumps(self.toJson(), indent=4)

    def toJson(self):
        """Return a dict of activities (when present) plus publisher info."""
        data = self.data
        doc = {}
        if len(data.activity) > 0:
            doc["ComputingActivity"] = [ComputingActivityOgfJson(act).toJson()
                                        for act in data.activity]
        doc["PublisherInfo"] = [IPFInformationJson(info).toJson()
                                for info in data.ipfinfo]
        return doc
#######################################################################################################################
| 9,904 | 486 | 396 |
7268716acc82c2e94387b7f98b37eb3a235bba97 | 488 | py | Python | src/azure_keyvault_browser/widgets/__init__.py | samdobson/azure-keyvault-browser | 7e7200dad34f668e477229fe3698e59195b68a78 | [
"MIT"
] | 5 | 2021-12-17T00:18:44.000Z | 2021-12-29T05:18:47.000Z | src/azure_keyvault_browser/widgets/__init__.py | samdobson/azure-keyvault-browser | 7e7200dad34f668e477229fe3698e59195b68a78 | [
"MIT"
] | 6 | 2021-12-20T17:57:21.000Z | 2021-12-29T10:29:04.000Z | src/azure_keyvault_browser/widgets/__init__.py | samdobson/azure-keyvault-browser | 7e7200dad34f668e477229fe3698e59195b68a78 | [
"MIT"
] | 1 | 2021-12-20T15:06:03.000Z | 2021-12-20T15:06:03.000Z | from .filter import FilterWidget
from .flash import FlashWidget, ShowFlashNotification
from .header import HeaderWidget
from .help import HelpWidget
from .secret_properties import SecretPropertiesWidget
from .secret_versions import SecretVersionsWidget
from .secrets import SecretsWidget
# Public names re-exported by ``from ...widgets import *``.
__all__ = (
    "SecretsWidget",
    "ShowFlashNotification",
    "FilterWidget",
    "FlashWidget",
    "HeaderWidget",
    "SecretVersionsWidget",
    "SecretPropertiesWidget",
    "HelpWidget",
)
| 25.684211 | 53 | 0.776639 | from .filter import FilterWidget
from .flash import FlashWidget, ShowFlashNotification
from .header import HeaderWidget
from .help import HelpWidget
from .secret_properties import SecretPropertiesWidget
from .secret_versions import SecretVersionsWidget
from .secrets import SecretsWidget
# Public names re-exported by ``from ...widgets import *``.
__all__ = (
    "SecretsWidget",
    "ShowFlashNotification",
    "FilterWidget",
    "FlashWidget",
    "HeaderWidget",
    "SecretVersionsWidget",
    "SecretPropertiesWidget",
    "HelpWidget",
)
| 0 | 0 | 0 |
97cd577d5266057515b84c623a4e2cb5632a6417 | 382 | py | Python | Section 18/4.Document-scope-of-the-variables.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | 18 | 2020-04-13T03:14:06.000Z | 2022-03-09T18:54:41.000Z | Section 18/4.Document-scope-of-the-variables.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | null | null | null | Section 18/4.Document-scope-of-the-variables.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | 22 | 2020-04-29T21:12:42.000Z | 2022-03-17T18:19:54.000Z |
main()
def myfunction1():
    """Print a greeting and the value of a variable local to this function."""
    local_x = 60  # local variable; not visible outside this function
    print("Welcome to functions")
    print("x value from fun1: ", local_x)
    return None
def myfunction2(y):
    """Print a thank-you message followed by the received argument ``y``."""
    print("Thank you!!")
    print("x value from fun2: ", y)
    return None
def main():
    """Drive the demo: call both functions, passing main's local ``x``."""
    # A ``global x`` declaration here would make ``x`` module-level instead.
    x = 10
    myfunction1()
    myfunction2(x)  # x is handed over as an argument
    return None


main()
| 273 | 0 | 73 |
ea4e6724ee9153b7da1be30b271755785ac1a14b | 3,196 | py | Python | pycausal_explorer/meta/_xlearner.py | gotolino/pycausal-explorer | 250309674c0657b9ccd318aea0893827da1badfe | [
"MIT"
] | 3 | 2022-01-28T12:32:43.000Z | 2022-02-12T23:26:52.000Z | pycausal_explorer/meta/_xlearner.py | gotolino/pycausal-explorer | 250309674c0657b9ccd318aea0893827da1badfe | [
"MIT"
] | 8 | 2022-02-06T19:34:47.000Z | 2022-03-11T17:24:23.000Z | pycausal_explorer/meta/_xlearner.py | gotolino/pycausal-explorer | 250309674c0657b9ccd318aea0893827da1badfe | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.base import clone
from sklearn.ensemble import RandomForestRegressor
from sklearn.utils.validation import check_is_fitted, check_X_y
from pycausal_explorer.base import BaseCausalModel
from ..reweight import PropensityScore
class XLearner(BaseCausalModel):
"""
Implementation of the X-learner.
It consists of estimating heterogeneous treatment effect using four machine learning models.
Details of X-learner theory are available at Kunzel et al. (2018) (https://arxiv.org/abs/1706.03461).
Parameters
----------
learner: base learner to use in all models. Either leaner or (u0, u1, te_u0, te_u1) must be filled
u0: model used to estimate outcome in the control group
u1: model used to estimate outcome in the treatment group
te_u0: model used to estimate treatment effect in the control group
te_u1: model used to estimate treatment effect in the treatment group group
random_state: random state
"""
| 31.96 | 105 | 0.604506 | import numpy as np
from sklearn.base import clone
from sklearn.ensemble import RandomForestRegressor
from sklearn.utils.validation import check_is_fitted, check_X_y
from pycausal_explorer.base import BaseCausalModel
from ..reweight import PropensityScore
class XLearner(BaseCausalModel):
    """
    Implementation of the X-learner.

    It estimates heterogeneous treatment effects using four machine learning
    models plus a propensity-score model.
    Details of X-learner theory are available in Kunzel et al. (2018)
    (https://arxiv.org/abs/1706.03461).

    Parameters
    ----------
    learner: base learner cloned into all four models. Either ``learner`` or
        all of (u0, u1, te_u0, te_u1) must be provided; to use the latter,
        ``learner`` must be passed explicitly as None (it defaults to a
        RandomForestRegressor instance).
    u0: model used to estimate the outcome in the control group
    u1: model used to estimate the outcome in the treatment group
    te_u0: model used to estimate the treatment effect in the control group
    te_u1: model used to estimate the treatment effect in the treatment group
    random_state: random state
    """
    def __init__(
        self,
        learner=RandomForestRegressor(),
        u0=None,
        u1=None,
        te_u0=None,
        te_u1=None,
        random_state=42,
    ):
        # NOTE(review): the default RandomForestRegressor() is created once at
        # import time and shared by all XLearner instances; it is always
        # clone()d below so no fitted state leaks, but its hyper-parameters
        # are shared across instances.
        self.learner = learner
        if learner is not None and all(
            [model is None for model in [u0, u1, te_u0, te_u1]]
        ):
            # One base learner supplied: clone it into all four roles.
            self.u0 = clone(learner)
            self.u1 = clone(learner)
            self.te_u0 = clone(learner)
            self.te_u1 = clone(learner)
        elif learner is None and all(
            [model is not None for model in [u0, u1, te_u0, te_u1]]
        ):
            # Four explicit models supplied (learner explicitly None).
            self.u0 = clone(u0)
            self.u1 = clone(u1)
            self.te_u0 = clone(te_u0)
            self.te_u1 = clone(te_u1)
        else:
            raise ValueError("Either learner or (u0, u1, te_u0, te_u1) must be passed")
        # Mirror sklearn's estimator type (regressor/classifier) of u0.
        self._estimator_type = self.u0._estimator_type
        self.g = PropensityScore()  # propensity model, used in predict_ite
        self.random_state = random_state
    def fit(self, X, y, *, treatment):
        """Fit the outcome, imputed-effect, and propensity models.

        X: covariates; y: observed outcomes; treatment: binary indicator
        (1 = treated, 0 = control).  Returns self.
        """
        X, y = check_X_y(X, y)
        X, w = check_X_y(X, treatment)
        self.g.fit(X, w)
        X_treat = X[w == 1].copy()
        X_control = X[w == 0].copy()
        y1 = y[w == 1].copy()
        y0 = y[w == 0].copy()
        # Stage 1: per-group outcome models.
        self.u0 = self.u0.fit(X_control, y0)
        self.u1 = self.u1.fit(X_treat, y1)
        # Stage 2: impute individual treatment effects for each group.
        y1_pred = self.u1.predict(X_control)
        y0_pred = self.u0.predict(X_treat)
        te_imp_control = y1_pred - y0
        te_imp_treat = y1 - y0_pred
        self.te_u0 = self.te_u0.fit(X_control, te_imp_control)
        self.te_u1 = self.te_u1.fit(X_treat, te_imp_treat)
        self.is_fitted_ = True
        return self
    def predict(self, X, w):
        """Predict outcomes using the outcome model of each unit's group.

        Returns an array of shape (n_samples, 1).  Entries whose ``w`` value
        is neither 0 nor 1 are left uninitialized (np.empty).
        """
        check_is_fitted(self)
        predictions = np.empty(shape=[X.shape[0], 1])
        if 1 in w:
            predictions[w == 1] = self.u1.predict(X[w == 1]).reshape(-1, 1)
        if 0 in w:
            predictions[w == 0] = self.u0.predict(X[w == 0]).reshape(-1, 1)
        return predictions
    def predict_ite(self, X):
        """Predict individual treatment effects.

        Combines the two effect models weighted by the estimated propensity:
        tau(x) = g(x) * tau_control(x) + (1 - g(x)) * tau_treat(x).
        """
        check_is_fitted(self)
        g_x = self.g.predict_proba(X)[:, 1]
        result = g_x * self.te_u0.predict(X) + (1 - g_x) * self.te_u1.predict(X)
        return result
| 2,099 | 0 | 108 |
cbbef13972885989e5977242841f369812ccf86f | 5,437 | py | Python | tests/fixtures/test_funding_awards_json/content_05_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 9 | 2015-04-16T08:13:31.000Z | 2020-05-18T14:03:06.000Z | tests/fixtures/test_funding_awards_json/content_05_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 310 | 2015-02-11T00:30:09.000Z | 2021-07-14T23:58:50.000Z | tests/fixtures/test_funding_awards_json/content_05_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 9 | 2015-02-04T01:21:28.000Z | 2021-06-15T12:50:47.000Z | # coding=utf-8
from collections import OrderedDict
def _ordered_person(preferred, index):
    """Recipient entry in the OrderedDict form used by some expected entries."""
    return OrderedDict(
        [
            ("type", "person"),
            ("name", OrderedDict([("preferred", preferred), ("index", index)])),
        ]
    )


def _plain_person(preferred, index):
    """Recipient entry in the plain-dict form used by other expected entries."""
    return {"type": "person", "name": {"index": index, "preferred": preferred}}


def _award(award_group_id, funder_id, funder_names, award_id, recipients):
    """One expected funding-award entry, with keys in canonical order."""
    return OrderedDict(
        [
            ("id", award_group_id),
            (
                "source",
                OrderedDict([("funderId", funder_id), ("name", funder_names)]),
            ),
            ("awardId", award_id),
            ("recipients", recipients),
        ]
    )


expected = [
    _award(
        u"par-1",
        u"10.13039/100006978",
        [
            u"University of California Berkeley (University of California, Berkeley)"
        ],
        u"AWS in Education grant",
        [_ordered_person(u"Eric Jonas", u"Jonas, Eric")],
    ),
    _award(
        u"par-2",
        u"10.13039/100000001",
        [u"National Science Foundation"],
        u"NSF CISE Expeditions Award CCF-1139158",
        [_plain_person("Eric Jonas", "Jonas, Eric")],
    ),
    _award(
        u"par-3",
        u"10.13039/100006235",
        [u"Lawrence Berkely National Laboratory"],
        u"Award 7076018",
        [_plain_person("Eric Jonas", "Jonas, Eric")],
    ),
    _award(
        u"par-4",
        u"10.13039/100000185",
        [u"Defense Advanced Research Projects Agency"],
        u"XData Award FA8750-12-2-0331",
        [_plain_person("Eric Jonas", "Jonas, Eric")],
    ),
    _award(
        u"par-5",
        u"10.13039/100000002",
        [u"National Institutes of Health"],
        u"R01NS074044",
        [_ordered_person(u"Konrad Kording", u"Kording, Konrad")],
    ),
    _award(
        u"par-6",
        u"10.13039/100000002",
        [u"National Institutes of Health"],
        u"R01NS063399",
        [_ordered_person(u"Konrad Kording", u"Kording, Konrad")],
    ),
]
| 29.389189 | 105 | 0.22586 | # coding=utf-8
from collections import OrderedDict
def _ordered_person(preferred, index):
    """Recipient entry in the OrderedDict form used by some expected entries."""
    return OrderedDict(
        [
            ("type", "person"),
            ("name", OrderedDict([("preferred", preferred), ("index", index)])),
        ]
    )


def _plain_person(preferred, index):
    """Recipient entry in the plain-dict form used by other expected entries."""
    return {"type": "person", "name": {"index": index, "preferred": preferred}}


def _award(award_group_id, funder_id, funder_names, award_id, recipients):
    """One expected funding-award entry, with keys in canonical order."""
    return OrderedDict(
        [
            ("id", award_group_id),
            (
                "source",
                OrderedDict([("funderId", funder_id), ("name", funder_names)]),
            ),
            ("awardId", award_id),
            ("recipients", recipients),
        ]
    )


expected = [
    _award(
        u"par-1",
        u"10.13039/100006978",
        [
            u"University of California Berkeley (University of California, Berkeley)"
        ],
        u"AWS in Education grant",
        [_ordered_person(u"Eric Jonas", u"Jonas, Eric")],
    ),
    _award(
        u"par-2",
        u"10.13039/100000001",
        [u"National Science Foundation"],
        u"NSF CISE Expeditions Award CCF-1139158",
        [_plain_person("Eric Jonas", "Jonas, Eric")],
    ),
    _award(
        u"par-3",
        u"10.13039/100006235",
        [u"Lawrence Berkely National Laboratory"],
        u"Award 7076018",
        [_plain_person("Eric Jonas", "Jonas, Eric")],
    ),
    _award(
        u"par-4",
        u"10.13039/100000185",
        [u"Defense Advanced Research Projects Agency"],
        u"XData Award FA8750-12-2-0331",
        [_plain_person("Eric Jonas", "Jonas, Eric")],
    ),
    _award(
        u"par-5",
        u"10.13039/100000002",
        [u"National Institutes of Health"],
        u"R01NS074044",
        [_ordered_person(u"Konrad Kording", u"Kording, Konrad")],
    ),
    _award(
        u"par-6",
        u"10.13039/100000002",
        [u"National Institutes of Health"],
        u"R01NS063399",
        [_ordered_person(u"Konrad Kording", u"Kording, Konrad")],
    ),
]
| 0 | 0 | 0 |
1859eec29c6660f3e68c133f3169370664e0a82a | 114 | py | Python | apps/web/admin.py | fabioanderegg/code_annotate | 671c5f2b1eee30dffb85e58ce961e18d3344bc94 | [
"MIT"
] | null | null | null | apps/web/admin.py | fabioanderegg/code_annotate | 671c5f2b1eee30dffb85e58ce961e18d3344bc94 | [
"MIT"
] | null | null | null | apps/web/admin.py | fabioanderegg/code_annotate | 671c5f2b1eee30dffb85e58ce961e18d3344bc94 | [
"MIT"
] | null | null | null | from django.contrib import admin
from apps.web.models import CodeAnnotation
admin.site.register(CodeAnnotation)
| 19 | 42 | 0.842105 | from django.contrib import admin
from apps.web.models import CodeAnnotation

# Expose CodeAnnotation in the Django admin with the default ModelAdmin.
admin.site.register(CodeAnnotation)
| 0 | 0 | 0 |
d1a1ee0a1ae8da5ce6bbf899e475145bdd0f5451 | 2,011 | py | Python | PuppeteerLibrary/keywords/mockresponse.py | sdvicorp/robotframework-puppeteer | af6fa68b04c3cdac3a7662cffda6da2a5ace38d1 | [
"Apache-2.0"
] | 37 | 2019-10-28T01:35:43.000Z | 2022-03-31T04:11:49.000Z | PuppeteerLibrary/keywords/mockresponse.py | sdvicorp/robotframework-puppeteer | af6fa68b04c3cdac3a7662cffda6da2a5ace38d1 | [
"Apache-2.0"
] | 61 | 2020-07-16T00:18:22.000Z | 2022-03-24T07:12:05.000Z | PuppeteerLibrary/keywords/mockresponse.py | sdvicorp/robotframework-puppeteer | af6fa68b04c3cdac3a7662cffda6da2a5ace38d1 | [
"Apache-2.0"
] | 10 | 2020-03-03T05:28:05.000Z | 2022-02-14T10:03:44.000Z | from PuppeteerLibrary.ikeywords.imockresponse_async import iMockResponseAsync
from PuppeteerLibrary.base.robotlibcore import keyword
from PuppeteerLibrary.base.librarycomponent import LibraryComponent
| 40.22 | 140 | 0.611636 | from PuppeteerLibrary.ikeywords.imockresponse_async import iMockResponseAsync
from PuppeteerLibrary.base.robotlibcore import keyword
from PuppeteerLibrary.base.librarycomponent import LibraryComponent
class MockResponseKeywords(LibraryComponent):
    """Robot Framework keywords for mocking API responses on the current page."""

    def __init__(self, ctx):
        # ctx: shared library context passed through by LibraryComponent.
        super().__init__(ctx)

    def get_async_keyword_group(self) -> iMockResponseAsync:
        # Resolve the async implementation registered under this class's name
        # in the currently active library context.
        return self.ctx.get_current_library_context().get_async_keyword_group(type(self).__name__)

    @keyword
    def mock_current_page_api_response(self, url, mock_response, method='GET', body=None):
        """
        Mock current page api response.

        The ``mock_response`` is a dictionary which can have the following fields:

        - ``status`` (int): Response status code, defaults to 200.
        - ``headers`` (dict): Optional response headers.
        - ``contentType`` (str): If set, equals to setting ``Content-Type`` response header.
        - ``body`` (str|bytes): Optional response body.

        The ``url`` is request url. url can be partial url match using regexp

        Match Options:
        | Options            | Url value                                        |
        | Exact match        | ^http://127.0.0.1:7272/ajax_info.json\\?count=3$ |
        | Partial match      | /ajax_info.json\\?count=3                        |
        | Regular expression | .*?/ajax_info.json\\?count=3                     |

        The ``method`` is HTTP Request Methods:

        - GET (default)
        - POST
        - PUT
        - HEAD
        - DELETE
        - PATCH

        The ``body`` is request body message. body can match using regexp

        Example:
        | &{response} | Create Dictionary | body=I'm a mock response |
        | Mock Current Page Api Response | /ajax_info.json\\?count=3 | ${response} |
        """
        # Blocks on the library event loop until the route mock is installed.
        return self.loop.run_until_complete(self.get_async_keyword_group().mock_current_page_api_response(url, mock_response, method, body))
| 167 | 1,619 | 23 |
a29fc96079d39943724101eb7ecfb452bcb65d11 | 254 | py | Python | splintr/__init__.py | shreykshah/splintr | 1fc2580606c1ccfe36ad13be68794e69c450ed05 | [
"Apache-2.0"
] | 2 | 2021-01-18T07:12:28.000Z | 2021-01-18T07:12:43.000Z | splintr/__init__.py | vsrin1/splintr | 218a268dd8cc3aa02e1adc69ab556922f6e01a11 | [
"Apache-2.0"
] | null | null | null | splintr/__init__.py | vsrin1/splintr | 218a268dd8cc3aa02e1adc69ab556922f6e01a11 | [
"Apache-2.0"
] | 2 | 2020-07-18T15:38:19.000Z | 2020-07-18T20:35:10.000Z | __all__ = ['DataParallel', 'ModelParallel', 'benchmarks', 'dataparallel', 'modelparallel']
from .DataParallel import DataParallel
from .ModelParallel import ModelParallel
import splintr.benchmarks
import splintr.dataparallel
import splintr.modelparallel
| 36.285714 | 90 | 0.830709 | __all__ = ['DataParallel', 'ModelParallel', 'benchmarks', 'dataparallel', 'modelparallel']
from .DataParallel import DataParallel
from .ModelParallel import ModelParallel
import splintr.benchmarks
import splintr.dataparallel
import splintr.modelparallel
| 0 | 0 | 0 |
7fca4dacf6508d4beaf221a6e17c5e956d2bb365 | 141 | py | Python | test/__init__.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | test/__init__.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | test/__init__.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | #!/usr/bin/env/python3
# -*- coding:utf-8 -*-
"""
@project: apiAutoTest
@author: cjw
@file: __init__.py.py
@ide: PyCharm
@time: 2020/7/31
""" | 15.666667 | 22 | 0.638298 | #!/usr/bin/env/python3
# -*- coding:utf-8 -*-
"""
@project: apiAutoTest
@author: cjw
@file: __init__.py.py
@ide: PyCharm
@time: 2020/7/31
""" | 0 | 0 | 0 |
9a7b5fda502bc31c6581fe21da183c411caabf7c | 8,331 | py | Python | purge-user.py | appaegis/api-script-samples | f5445b351411fe858e2130e47b28befccc6262e8 | [
"MIT"
] | null | null | null | purge-user.py | appaegis/api-script-samples | f5445b351411fe858e2130e47b28befccc6262e8 | [
"MIT"
] | null | null | null | purge-user.py | appaegis/api-script-samples | f5445b351411fe858e2130e47b28befccc6262e8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import logging
import argparse
import pydash
from lib.common import USER_EMAIL
from lib.common import API_KEY
from lib.common import API_SECRET
from lib.common import USER_API
from lib.common import TEAM_API
from lib.common import ROLE_API
from lib.common import POLICY_API
from lib.common import APP_API
from lib.common import getToken
from lib.common import booleanString
from lib.purge import getResource
from lib.purge import getResources
from lib.purge import updateResource
from lib.purge import purgeResource
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remove existing user and associated objects')
parser.add_argument('--dryrun', dest='dryrun', type=booleanString, default=True,
required=True, help='In dryrun mode, no objects will be deleted')
parser.add_argument('--debug', dest='debug', type=booleanString, default=False,
required=False, help='Output verbose log')
args = parser.parse_args()
main(vars(args))
| 42.28934 | 121 | 0.723443 | #!/usr/bin/env python
# coding: utf-8
import logging
import argparse
import pydash
from lib.common import USER_EMAIL
from lib.common import API_KEY
from lib.common import API_SECRET
from lib.common import USER_API
from lib.common import TEAM_API
from lib.common import ROLE_API
from lib.common import POLICY_API
from lib.common import APP_API
from lib.common import getToken
from lib.common import booleanString
from lib.purge import getResource
from lib.purge import getResources
from lib.purge import updateResource
from lib.purge import purgeResource
def main(argsdict):
    """Purge USER_EMAIL and every object that exists only for that user.

    argsdict keys:
        dryrun : bool - forwarded to the purge/update helpers; when truthy
                 they should only report what would be done.
        debug : bool - when truthy, raise the root logger to DEBUG.

    The purge runs in phases: map apps to their policies, delete policies
    and apps tied exclusively to this user's roles, detach the user from
    teams and roles, delete now-empty teams and roles, clean up orphaned
    policies/teams/roles, and finally delete the user entry itself.
    """
    dryrun = argsdict.get('dryrun')
    debug = argsdict.get('debug')
    if debug:
        logging.getLogger().setLevel(logging.DEBUG)
    userId = USER_EMAIL
    logging.warning(f'Remove user: {userId}, Dryrun: {dryrun}')
    idToken = getToken(apiSecret=API_SECRET, apiKey=API_KEY)
    user = getResource(id=userId, idToken=idToken, url=USER_API)
    # TODO: check the team contains only this user
    # also need to skip "groups"
    teamIds = user.get('teamIds', [])
    accessRoleIds = user.get('accessRoleIds')
    # NOTE: Fetch all apps and build a policyId -> {'appId': [...]} mapping
    policyAppMapper = {}
    apps = getResources(idToken=idToken, url=APP_API)
    for app in apps:
        appId = app.get('id')
        policyId = app.get('policyId')
        # policy exists
        if bool(policyId):
            appIds = pydash.get(policyAppMapper, f'{policyId}.appId', [])
            appIds.append(appId)
            pydash.set_(policyAppMapper, f'{policyId}.appId', appIds)
    # NOTE: Check each policy if it's deletable or not. It only handles ruleRoleLink except Role
    policyIds = list(policyAppMapper.keys())
    for policyId in policyIds:
        policyRoleIds = []
        policy = getResource(id=policyId, idToken=idToken, url=POLICY_API)
        rules = pydash.objects.get(policy, 'rules')
        for rule in rules:
            roleIds = pydash.objects.get(rule, 'accessRoleIds')
            policyRoleIds.append(roleIds)
        policyRoleIds = pydash.flatten_deep(policyRoleIds)
        # NOTE: In case the policyRoleIds is totally equal with userRoleIds, we will delete it.
        if set(policyRoleIds) <= set(accessRoleIds):
            pydash.set_(policyAppMapper, f'{policyId}.deletable', True)
        else:
            pydash.set_(policyAppMapper, f'{policyId}.deletable', False)
    deletablePolicyMapper = pydash.pick_by(policyAppMapper, lambda item: pydash.get(item, 'deletable') == True)
    deletablePolicyIds = list(deletablePolicyMapper.keys())
    deletableAppIds = pydash.flatten_deep([pydash.get(deletablePolicyMapper, f'{i}.appId') for i in deletablePolicyMapper])
    # NOTE: delete app if its policy will be deleted.
    for appId in deletableAppIds:
        purgeResource(dryrun, id=appId, idToken=idToken, url=APP_API)
    # NOTE: delete policy something like policyEntry, policyRole relationship and ruleEntry
    for policyId in deletablePolicyIds:
        purgeResource(dryrun, id=policyId, idToken=idToken, url=POLICY_API)
    # NOTE: remove relationship something like userTeamLink, userRoleLink, teamRoleLink.
    for teamId in teamIds:
        purgeResource(dryrun, id=teamId, idToken=idToken, url=f'{TEAM_API}/{teamId}/users/', data=[userId])
    for roleId in accessRoleIds:
        purgeResource(dryrun, id=roleId, idToken=idToken, url=f'{ROLE_API}/{roleId}/users/', data=[userId])
        purgeResource(dryrun, id=roleId, idToken=idToken, url=f'{ROLE_API}/{roleId}/teams/', data=teamIds)
    # NOTE: remove teams that would be left empty
    deletableTeamIds = []
    for teamId in teamIds:
        team = getResource(id=teamId, idToken=idToken, url=TEAM_API)
        teamEmails = pydash.get(team, 'emails')
        teamRoleIds = pydash.get(team, 'accessRoleIds')
        # NOTE: check the team contains only this user and this user's roles
        if len(set(teamEmails) - set([userId])) == 0 and len(set(teamRoleIds) - set(accessRoleIds)) == 0:
            deletableTeamIds.append(teamId)
    # NOTE: remove roles that would be left empty
    deletableRoleIds = []
    for roleId in accessRoleIds:
        role = getResource(id=roleId, idToken=idToken, url=ROLE_API)
        roleEmails = pydash.get(role, 'emails')
        roleTeamIds = pydash.get(role, 'teamIds')
        # NOTE: check the role contains only this user and teams
        if len(set(roleEmails) - set([userId])) == 0 and len(set(roleTeamIds) - set(teamIds)) == 0:
            deletableRoleIds.append(roleId)
    for teamId in deletableTeamIds:
        purgeResource(dryrun, id=teamId, idToken=idToken, url=TEAM_API)
    for roleId in deletableRoleIds:
        purgeResource(dryrun, id=roleId, idToken=idToken, url=ROLE_API)
    # NOTE: handle orphan policy once app was deleted before
    # updatablePolicyDataSet maps policyId -> edited policy; deletablePolicyIds
    # (re-bound here) is a dict policyId -> policy despite the "Ids" name.
    updatablePolicyDataSet = {}
    deletablePolicyIds = {}
    policies = getResources(idToken=idToken, url=POLICY_API)
    for policy in policies:
        policyId = policy.get('id')
        policyRoleIds = []
        rules = pydash.objects.get(policy, 'rules')
        for ruleIdx, rule in enumerate(rules):
            ruleRoleIds = rule.get('accessRoleIds')
            policyRoleIds.append(ruleRoleIds)
            # NOTE: Handle the detail Configure policy
            remainingRuleRoleIds = set(ruleRoleIds) - set(accessRoleIds)
            remainingRuleRoleIds = list(remainingRuleRoleIds)
            if len(remainingRuleRoleIds) > 0 and len(ruleRoleIds) != len(remainingRuleRoleIds):
                newPolicy = pydash.get(updatablePolicyDataSet, policyId, pydash.clone_deep(policy))
                pydash.set_(newPolicy, f'rules.{ruleIdx}.accessRoleIds', remainingRuleRoleIds)
                pydash.set_(updatablePolicyDataSet, policyId, newPolicy)
            elif len(remainingRuleRoleIds) == 0:
                newPolicy = pydash.get(updatablePolicyDataSet, policyId, pydash.clone_deep(policy))
                pydash.set_(newPolicy, f'rules.{ruleIdx}.accessRoleIds', [])
                pydash.set_(updatablePolicyDataSet, policyId, newPolicy)
        policyRoleIds = pydash.flatten_deep(policyRoleIds)
        # NOTE: In case the policyRoleIds is totally equal with userRoleIds, we will delete it.
        if set(policyRoleIds) <= set(accessRoleIds):
            pydash.set_(deletablePolicyIds, policyId, policy)
        elif len(policyRoleIds) == 0:
            # NOTE: Relationship was removed previously
            pydash.set_(deletablePolicyIds, policyId, policy)
    # NOTE: Handle Configure policy - push edited rule lists, dropping empty rules
    for policyId in updatablePolicyDataSet:
        policy = pydash.get(updatablePolicyDataSet, policyId)
        if pydash.get(deletablePolicyIds, policyId, None) != None:
            continue
        rules = policy.get('rules', [])
        newRules = [rule for rule in rules if len(rule.get('accessRoleIds', [])) > 0]
        pydash.set_(policy, 'rules', newRules)
        updateResource(dryrun, id=policyId, idToken=idToken, url=POLICY_API, data = policy)
    for policyId in deletablePolicyIds:
        purgeResource(dryrun, id=policyId, idToken=idToken, url=POLICY_API)
    # NOTE: handle orphan team once app was deleted before
    orphanTeamIds = []
    teams = getResources(idToken=idToken, url=TEAM_API)
    for team in teams:
        teamId = team.get('id')
        teamEmails = pydash.get(team, 'emails')
        if teamEmails == [userId]:
            # NOTE: Other case will be handled by user deleting
            orphanTeamIds.append(teamId)
    for teamId in orphanTeamIds:
        purgeResource(dryrun, id=teamId, idToken=idToken, url=f'{TEAM_API}/{teamId}/users/', data=[userId])
    # NOTE: handle orphan role once app was deleted before
    orphanRoleIds = []
    roles = getResources(idToken=idToken, url=ROLE_API)
    for role in roles:
        roleId = role.get('id')
        roleEmails = pydash.get(role, 'emails')
        roleTeamIds = pydash.get(role, 'teamIds')
        # NOTE: skip this team including others relationship
        if roleEmails == [userId] and len(set(roleTeamIds) - set(teamIds)) == 0:
            orphanRoleIds.append(roleId)
    for roleId in orphanRoleIds:
        purgeResource(dryrun, id=roleId, idToken=idToken, url=ROLE_API)
    # NOTE: remove userEntry, and his relationship team, rule link, etc
    # TODO: check the team contains only this user
    # also need to skip "groups"
    purgeResource(dryrun, id=userId, idToken=idToken, url=USER_API)
if __name__ == '__main__':
    # Command-line entry point: parse flags and hand them to main() as a dict.
    arg_parser = argparse.ArgumentParser(
        description='Remove existing user and associated objects')
    arg_parser.add_argument(
        '--dryrun', dest='dryrun', type=booleanString, default=True, required=True,
        help='In dryrun mode, no objects will be deleted')
    arg_parser.add_argument(
        '--debug', dest='debug', type=booleanString, default=False, required=False,
        help='Output verbose log')
    parsed = arg_parser.parse_args()
    main(vars(parsed))
| 7,260 | 0 | 23 |
7d59636da25ea632171a4002c1c95bd69b16a857 | 946 | py | Python | hanoi.py | PanzeriT/hanoi | 7817bda536d059f438c9268c5f0c6a40ef78ca94 | [
"MIT"
] | null | null | null | hanoi.py | PanzeriT/hanoi | 7817bda536d059f438c9268c5f0c6a40ef78ca94 | [
"MIT"
] | null | null | null | hanoi.py | PanzeriT/hanoi | 7817bda536d059f438c9268c5f0c6a40ef78ca94 | [
"MIT"
] | null | null | null | from typing import TypeVar, Generic, List
T = TypeVar('T')
if __name__ == '__main__':
discs: int = 5
tower_a: Stack[int] = Stack()
tower_b: Stack[int] = Stack()
tower_c: Stack[int] = Stack()
for i in range(discs, 0, -1):
tower_a.push(i)
print(tower_a, tower_b, tower_c)
hanoi(tower_a, tower_c, tower_b, discs)
| 23.073171 | 80 | 0.581395 | from typing import TypeVar, Generic, List
T = TypeVar('T')
class Stack(Generic[T]):
    """A minimal LIFO stack backed by a list; the top is the list's end."""

    def __init__(self) -> None:
        self._items: List[T] = []

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        self._items.append(item)

    def pop(self) -> T:
        """Remove and return the top item (IndexError when empty)."""
        return self._items.pop()

    def __repr__(self) -> str:
        return repr(self._items)
def hanoi(begin: Stack[int], end: Stack[int], temp: Stack[int], n: int) -> None:
    """Move the top *n* discs from *begin* to *end*, using *temp* as scratch.

    Prints the three stacks after every single-disc move.  Fix: the
    original printed the module-level globals ``tower_a``/``tower_b``/
    ``tower_c``, which raised NameError whenever the function was imported
    or applied to other stacks; it now prints the stacks it was given,
    in argument order (begin, end, temp).
    """
    if n == 1:
        # Base case: move one disc directly from begin to end.
        end.push(begin.pop())
        print(begin, end, temp)
    else:
        hanoi(begin, temp, end, n - 1)  # park the n-1 smaller discs on temp
        hanoi(begin, end, temp, 1)      # move the largest disc to end
        hanoi(temp, end, begin, n - 1)  # stack the n-1 discs back on top
if __name__ == '__main__':
    # Solve Towers of Hanoi for five discs.
    discs: int = 5
    tower_a: Stack[int] = Stack()  # source peg
    tower_b: Stack[int] = Stack()  # spare peg
    tower_c: Stack[int] = Stack()  # destination peg
    # Push discs largest-first so the smallest (1) ends up on top.
    for i in range(discs, 0, -1):
        tower_a.push(i)
    # Show the starting layout, then move everything from A to C via B.
    print(tower_a, tower_b, tower_c)
    hanoi(tower_a, tower_c, tower_b, discs)
| 437 | 3 | 154 |
96e121827578905e3826a776de058f40e9f17b21 | 1,076 | py | Python | code/tests/test_prepare/test_utils.py | evolaemp/svmcc | c57c92c6b97f57ab8f7bc20ac06c1c77d96c5143 | [
"MIT"
] | 1 | 2020-07-16T05:01:16.000Z | 2020-07-16T05:01:16.000Z | code/tests/test_prepare/test_utils.py | evolaemp/svmcc | c57c92c6b97f57ab8f7bc20ac06c1c77d96c5143 | [
"MIT"
] | null | null | null | code/tests/test_prepare/test_utils.py | evolaemp/svmcc | c57c92c6b97f57ab8f7bc20ac06c1c77d96c5143 | [
"MIT"
] | 2 | 2017-04-29T07:29:53.000Z | 2020-07-16T16:48:42.000Z | import os.path
from unittest import TestCase
from code.cli import PARAMS_DIR, TESTS_DIR
from code.prepare.base import load_data
from code.prepare.params import load_params
from code.prepare.utils import *
FIXTURE_DATASET = os.path.join(TESTS_DIR, 'fixtures/GER.tsv')
FIXTURE_DATASET_ASJP = os.path.join(TESTS_DIR, 'fixtures/Afrasian.tsv')
| 26.9 | 71 | 0.754647 | import os.path
from unittest import TestCase
from code.cli import PARAMS_DIR, TESTS_DIR
from code.prepare.base import load_data
from code.prepare.params import load_params
from code.prepare.utils import *
FIXTURE_DATASET = os.path.join(TESTS_DIR, 'fixtures/GER.tsv')
FIXTURE_DATASET_ASJP = os.path.join(TESTS_DIR, 'fixtures/Afrasian.tsv')
class UtilsTestCase(TestCase):
    """Unit tests for the helpers in code.prepare.utils."""

    def setUp(self):
        # Transcription parameters are required by ipa_to_asjp below.
        self.params = load_params(PARAMS_DIR)

    def test_make_sample_id(self):
        sample_id = make_sample_id('98', 'English', 'German', 1, 1)
        self.assertEqual(sample_id, '98/English,German/1,1')

    def test_ipa_to_asjp(self):
        expected_conversions = [
            ('at͡lir', 'atir'),
            ('oːɾ', 'or'),
            ('ɔːl', 'ol'),
            ('ũ', 'u'),
        ]
        for ipa, asjp in expected_conversions:
            self.assertEqual(ipa_to_asjp(ipa, self.params), asjp)
        # Unknown symbols are rejected with an AssertionError.
        with self.assertRaises(AssertionError):
            ipa_to_asjp('XXX', self.params)

    def test_is_asjp_data(self):
        non_asjp = load_data(FIXTURE_DATASET)
        self.assertFalse(is_asjp_data(non_asjp))
        asjp = load_data(FIXTURE_DATASET_ASJP)
        self.assertTrue(is_asjp_data(asjp))
| 603 | 9 | 123 |
96f83df3d849dd9cc0fc7a9ffc76ce58dd3d7421 | 6,356 | py | Python | python/uw/data/timed_data.py | tburnett/pointlike | a556f07650c2f17d437c86fdafe9f9a33f59758e | [
"BSD-3-Clause"
] | 1 | 2019-03-19T14:45:28.000Z | 2019-03-19T14:45:28.000Z | python/uw/data/timed_data.py | tburnett/pointlike | a556f07650c2f17d437c86fdafe9f9a33f59758e | [
"BSD-3-Clause"
] | null | null | null | python/uw/data/timed_data.py | tburnett/pointlike | a556f07650c2f17d437c86fdafe9f9a33f59758e | [
"BSD-3-Clause"
] | 1 | 2018-08-24T18:58:27.000Z | 2018-08-24T18:58:27.000Z | """
Process time data set
see create_timed_data to generate files with times for all
Extract a single data set around a cone with TimedData
"""
import os, glob, pickle
import healpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time, TimeDelta
from . import binned_data
mission_start = Time('2001-01-01T00:00:00', scale='utc')
class TimeInfo(object):
"""Read in, process a file generated by binned_data.ConvertFT1.time_record
"""
def select(self, l, b, radius=5, nside=1024):
"""create DataFrame with times, band id, distance from center
parameters:
l,b : position in Galactic
radius : cone radius, deg
nside : for healpy
returns:
DataFrame with columns:
band : from input, energy and event type
time : Mission Elapsed Time in s. (double)
delta : distance from input position (deg, float32)
"""
df = self.df
cart = lambda l,b: healpy.dir2vec(l,b, lonlat=True)
# use query_disc to get photons within given radius of position
center = cart(l,b)
ipix = healpy.query_disc(nside, cart(l,b), np.radians(radius), nest=False)
incone = np.isin(self.df.hpindex, ipix)
# times: convert to double, add to start
t = np.array(df.time[incone],float)+self.tstart
# convert position info to just distance from center
ll,bb = healpy.pix2ang(nside, self.df.hpindex[incone], nest=False, lonlat=True)
t2 = np.array(np.sqrt((1.-np.dot(center, cart(ll,bb)))*2), np.float32)
return pd.DataFrame(np.rec.fromarrays(
[df.band[incone], t, np.degrees(t2)], names='band time delta'.split()))
class TimedData(object):
"""Create a data set at a given position
"""
plt.rc('font', size=12)
def __init__(self, position, name='', radius=5,
file_pattern='$FERMI/data/P8_P305/time_info/month_*.pkl'):
"""Set up combined data from set of monthly files
position : l,b in degrees
name : string, optional name to describe source
radius : float, cone radius for selection
file_pattern : string for glob use
"""
assert hasattr(position, '__len__') and len(position)==2, 'expect position to be (l,b)'
files = sorted(glob.glob(os.path.expandvars(file_pattern)))
assert len(files)>0, 'No files found using pattern {}'.format(file_pattern)
self.name = name
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/2**30
print 'Opening {} files, with {} GB total'.format(len(files), gbtotal)
dflist=[]
for filename in files:
dflist.append(TimeInfo(filename).select(*position))
print '.',
self.df = pd.concat(dflist)
print 'Selected {} photons'.format(len(self.df))
def plot_time(self, delta_max=2, delta_t=1, xlim=None):
"""
"""
df = self.df
t = timed_data.MJD(df.time)
ta,tb=t[0],t[-1]
Nbins = int((tb-ta)/float(delta_t))
fig,ax= plt.subplots(figsize=(15,5))
hkw = dict(bins = np.linspace(ta,tb,Nbins), histtype='step')
ax.hist(t, label='E>100 MeV', **hkw)
ax.hist(t[(df.delta<delta_max) & (df.band>0)], label='delta<{} deg'.format(delta_max), **hkw);
ax.set(xlabel=r'$\mathsf{MJD}$', ylabel='counts per {:.0f} day'.format(delta_t))
if xlim is not None: ax.set(xlim=xlim)
ax.legend()
ax.set_title('{} counts vs. time'.format(self.name))
def create_timed_data(
monthly_ft1_files='/afs/slac/g/glast/groups/catalog/P8_P305/zmax105/*.fits',
outfolder='$FERMI/data/P8_P305/time_info/',
overwrite=False,
test=False,
verbose=1):
"""
"""
files=sorted(glob.glob(monthly_ft1_files))
assert len(files)>0, 'No ft1 files found at {}'.format(monthly_ft1_files)
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/2**30
if verbose>0:
print '{} monthly FT1 files found at {}\n\t {} GB total'.format(len(files), monthly_ft1_files, gbtotal)
outfolder = os.path.expandvars(outfolder)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
os.chdir(outfolder)
if verbose>0:
print 'Writing time files to folder {}\n\toverwrite={}'.format(outfolder, overwrite)
for filename in files:
m = filename.split('_')[-2]
outfile = 'month_{}.pkl'.format(m)
if not overwrite and os.path.exists(outfile) :
if verbose>1:
print 'exists: {}'.format(outfile)
else:
print '.',
continue
tr = binned_data.ConvertFT1(filename).time_record()
if not test:
if verbose>1:
print 'writing {}'.format(outfile),
elif verbose>0:
print '+',
pickle.dump(tr, open(outfile, 'wr'))
else:
if verbose>0:
print 'Test: would have written {}'.format(outfile)
# check how many exist
files=sorted(glob.glob(outfolder+'/*.pkl'))
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/float(2**30)
print '\nThere are {} timed data files, {:.1f} GB total'.format(len(files), gbtotal)
| 38.993865 | 111 | 0.599434 | """
Process time data set
see create_timed_data to generate files with times for all
Extract a single data set around a cone with TimedData
"""
import os, glob, pickle
import healpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time, TimeDelta
from . import binned_data
mission_start = Time('2001-01-01T00:00:00', scale='utc')
def MJD(met):
    """Convert Mission Elapsed Time in seconds to Modified Julian Date."""
    elapsed = TimeDelta(met, format='sec')
    return (mission_start + elapsed).mjd
class TimeInfo(object):
    """Read in, process a file generated by binned_data.ConvertFT1.time_record
    """
    def __init__(self, filename):
        """Load a pickled time record.

        filename : path to a pickle holding a dict with keys 'tstart'
            (MET offset, s) and 'timerec' (per-photon records with
            band/time/hpindex fields).
        """
        # Fix: use a context manager and binary mode.  The previous
        # pickle.load(open(filename)) leaked the file handle, and text
        # mode fails for binary pickle protocols under Python 3.
        with open(filename, 'rb') as pkl_file:
            d = pickle.load(pkl_file)
        self.tstart = d['tstart']
        self.df = pd.DataFrame(d['timerec'])
    def select(self, l, b, radius=5, nside=1024):
        """create DataFrame with times, band id, distance from center
        parameters:
            l,b : position in Galactic
            radius : cone radius, deg
            nside : for healpy
        returns:
            DataFrame with columns:
                band : from input, energy and event type
                time : Mission Elapsed Time in s. (double)
                delta : distance from input position (deg, float32)
        """
        df = self.df
        cart = lambda l,b: healpy.dir2vec(l,b, lonlat=True)
        # use query_disc to get photons within given radius of position
        center = cart(l,b)
        ipix = healpy.query_disc(nside, cart(l,b), np.radians(radius), nest=False)
        incone = np.isin(self.df.hpindex, ipix)
        # times: convert to double, add to start
        t = np.array(df.time[incone],float)+self.tstart
        # convert position info to just distance from center
        ll,bb = healpy.pix2ang(nside, self.df.hpindex[incone], nest=False, lonlat=True)
        # chord length -> angle: sqrt(2*(1-cos)) is the chord for small angles
        t2 = np.array(np.sqrt((1.-np.dot(center, cart(ll,bb)))*2), np.float32)
        return pd.DataFrame(np.rec.fromarrays(
            [df.band[incone], t, np.degrees(t2)], names='band time delta'.split()))
class TimedData(object):
"""Create a data set at a given position
"""
plt.rc('font', size=12)
def __init__(self, position, name='', radius=5,
file_pattern='$FERMI/data/P8_P305/time_info/month_*.pkl'):
"""Set up combined data from set of monthly files
position : l,b in degrees
name : string, optional name to describe source
radius : float, cone radius for selection
file_pattern : string for glob use
"""
assert hasattr(position, '__len__') and len(position)==2, 'expect position to be (l,b)'
files = sorted(glob.glob(os.path.expandvars(file_pattern)))
assert len(files)>0, 'No files found using pattern {}'.format(file_pattern)
self.name = name
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/2**30
print 'Opening {} files, with {} GB total'.format(len(files), gbtotal)
dflist=[]
for filename in files:
dflist.append(TimeInfo(filename).select(*position))
print '.',
self.df = pd.concat(dflist)
print 'Selected {} photons'.format(len(self.df))
def plot_time(self, delta_max=2, delta_t=1, xlim=None):
"""
"""
df = self.df
t = timed_data.MJD(df.time)
ta,tb=t[0],t[-1]
Nbins = int((tb-ta)/float(delta_t))
fig,ax= plt.subplots(figsize=(15,5))
hkw = dict(bins = np.linspace(ta,tb,Nbins), histtype='step')
ax.hist(t, label='E>100 MeV', **hkw)
ax.hist(t[(df.delta<delta_max) & (df.band>0)], label='delta<{} deg'.format(delta_max), **hkw);
ax.set(xlabel=r'$\mathsf{MJD}$', ylabel='counts per {:.0f} day'.format(delta_t))
if xlim is not None: ax.set(xlim=xlim)
ax.legend()
ax.set_title('{} counts vs. time'.format(self.name))
def plot_delta(self, cumulative=False, squared=True):
plt.rc('font', size=12)
df = self.df
fig,ax = plt.subplots(figsize=(6,6))
x = df.delta**2 if squared else df.delta
hkw = dict(bins=np.linspace(0, 25 if squared else 5, 100), histtype='step',lw=2,cumulative=cumulative)
ax.hist(x, label='E>100 MeV', **hkw)
ax.hist(x[df.band>8], label='E>1 GeV', **hkw)
ax.set(yscale='log', xlabel='delta**2 [deg^2]' if squared else 'delta [deg]',
ylabel='cumulative counts' if cumulative else 'counts');
ax.legend(loc='upper left' if cumulative else 'upper right');
def create_timed_data(
monthly_ft1_files='/afs/slac/g/glast/groups/catalog/P8_P305/zmax105/*.fits',
outfolder='$FERMI/data/P8_P305/time_info/',
overwrite=False,
test=False,
verbose=1):
"""
"""
files=sorted(glob.glob(monthly_ft1_files))
assert len(files)>0, 'No ft1 files found at {}'.format(monthly_ft1_files)
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/2**30
if verbose>0:
print '{} monthly FT1 files found at {}\n\t {} GB total'.format(len(files), monthly_ft1_files, gbtotal)
outfolder = os.path.expandvars(outfolder)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
os.chdir(outfolder)
if verbose>0:
print 'Writing time files to folder {}\n\toverwrite={}'.format(outfolder, overwrite)
for filename in files:
m = filename.split('_')[-2]
outfile = 'month_{}.pkl'.format(m)
if not overwrite and os.path.exists(outfile) :
if verbose>1:
print 'exists: {}'.format(outfile)
else:
print '.',
continue
tr = binned_data.ConvertFT1(filename).time_record()
if not test:
if verbose>1:
print 'writing {}'.format(outfile),
elif verbose>0:
print '+',
pickle.dump(tr, open(outfile, 'wr'))
else:
if verbose>0:
print 'Test: would have written {}'.format(outfile)
# check how many exist
files=sorted(glob.glob(outfolder+'/*.pkl'))
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/float(2**30)
print '\nThere are {} timed data files, {:.1f} GB total'.format(len(files), gbtotal)
| 821 | 0 | 80 |
aaa5afe2a6b97519f2ef121d1b8310c6670e70c9 | 399 | py | Python | bot.py | AdrianoBinhara/RegisterBot | 234fe49de32f8449ee30059e65fd0523ae9e16f4 | [
"MIT"
] | 8 | 2021-09-24T18:48:59.000Z | 2021-11-09T18:54:44.000Z | bot.py | AdrianoBinhara/RegisterBot | 234fe49de32f8449ee30059e65fd0523ae9e16f4 | [
"MIT"
] | null | null | null | bot.py | AdrianoBinhara/RegisterBot | 234fe49de32f8449ee30059e65fd0523ae9e16f4 | [
"MIT"
] | 2 | 2021-09-25T12:49:06.000Z | 2021-09-29T04:39:00.000Z | from discord.ext import commands
import os
from decouple import config
bot = commands.Bot("!")
load_cogs(bot)
TOKEN = config("TOKEN")
bot.run(TOKEN)
| 18.136364 | 49 | 0.646617 | from discord.ext import commands
import os
from decouple import config
bot = commands.Bot("!")
def load_cogs(bot):
    """Load the core extensions, then every cog module found in commands/."""
    for extension in ("manager", "tasks.purge"):
        bot.load_extension(extension)
    for entry in os.listdir("commands"):
        if not entry.endswith(".py"):
            continue
        module_name = entry[:-3]
        bot.load_extension(f"commands.{module_name}")
load_cogs(bot)
TOKEN = config("TOKEN")
bot.run(TOKEN)
| 222 | 0 | 23 |
ae3d873948253fda96cab2ffa3da72359b1782fc | 87 | py | Python | chocolate/sample/__init__.py | Intelecy/chocolate | 0ba4f6f0130eab851d32d5534241c8cac3f6666e | [
"BSD-3-Clause"
] | 105 | 2017-10-27T02:14:22.000Z | 2022-01-13T12:57:05.000Z | chocolate/sample/__init__.py | Intelecy/chocolate | 0ba4f6f0130eab851d32d5534241c8cac3f6666e | [
"BSD-3-Clause"
] | 31 | 2017-10-03T13:41:35.000Z | 2021-08-20T21:01:29.000Z | chocolate/sample/__init__.py | areeh/chocolate | 5f946cb9daf42c3ab44508648917d46bc105c2fc | [
"BSD-3-Clause"
] | 38 | 2017-10-05T20:19:42.000Z | 2022-03-28T11:34:04.000Z | from .grid import Grid
from .random import Random
from .quasirandom import QuasiRandom
| 21.75 | 36 | 0.827586 | from .grid import Grid
from .random import Random
from .quasirandom import QuasiRandom
| 0 | 0 | 0 |
ca8bad046acf42a1b6463df94d5faf8e7e548b29 | 219 | py | Python | game/__init__.py | Randomneo/python_game | b5a4f399e092ff84a813509380156d0f91a761fa | [
"WTFPL"
] | null | null | null | game/__init__.py | Randomneo/python_game | b5a4f399e092ff84a813509380156d0f91a761fa | [
"WTFPL"
] | null | null | null | game/__init__.py | Randomneo/python_game | b5a4f399e092ff84a813509380156d0f91a761fa | [
"WTFPL"
] | null | null | null | from enum import Enum
from .core.vector2 import Vector2
screen_size = width, height = 1040, 480
map_size = Vector2(x=10000, y=1000)
gravity = 1.5
| 18.25 | 39 | 0.671233 | from enum import Enum
from .core.vector2 import Vector2
screen_size = width, height = 1040, 480
map_size = Vector2(x=10000, y=1000)
gravity = 1.5
class Colors(Enum):
    """Named RGB color tuples (one 0-255 value per channel)."""
    black = (0, 0, 0)
    white = (255, 255, 255)
be9918c89550f09504e0af7a94d005b3d72c1c51 | 5,052 | py | Python | ingesters/youtube/search.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 13 | 2019-12-09T07:56:13.000Z | 2021-08-03T01:45:53.000Z | ingesters/youtube/search.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 1 | 2020-04-29T00:00:14.000Z | 2021-07-09T14:24:19.000Z | ingesters/youtube/search.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 3 | 2020-04-27T15:36:36.000Z | 2021-03-29T17:52:35.000Z | # -*- coding: utf-8 -*-
# Search API docs: https://developers.google.com/youtube/v3/docs/search/list
# Search API Python docs: https://developers.google.com/resources/api-libraries/documentation/youtube/v3/python/latest/youtube_v3.search.html
# Examples: https://github.com/youtube/api-samples/tree/master/python
import argparse
import inspect
import math
import os
from pprint import pprint
import sys
try:
#python2
from urllib import urlencode
except ImportError:
#python3
from urllib.parse import urlencode
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
# add parent directory to sys path to import relative modules
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir)
sys.path.insert(0,parentdir)
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-key', dest="API_KEY", default="", help="Your API Key. See: https://google-developers.appspot.com/youtube/v3/getting-started")
parser.add_argument('-query', dest="QUERY", default=" location=40.903125,-73.85062&locationRadius=10km&videoLicense=creativeCommon", help="Search query parameters as a query string")
parser.add_argument('-in', dest="INPUT_FILE", default="", help="Input .csv file containing one or more queries; will override individual query")
parser.add_argument('-sort', dest="SORT_BY", default="", help="Sort by string")
parser.add_argument('-lim', dest="LIMIT", default=100, type=int, help="Limit results")
parser.add_argument('-out', dest="OUTPUT_FILE", default="tmp/yt-search/%s.json", help="JSON output file pattern")
parser.add_argument('-verbose', dest="VERBOSE", action="store_true", help="Display search result details")
a = parser.parse_args()
aa = vars(a)
makeDirectories([a.OUTPUT_FILE])
aa["QUERY"] = a.QUERY.strip()
MAX_YT_RESULTS_PER_PAGE = 50
if len(a.API_KEY) <= 0:
print("You must pass in your developer API key. See more at https://google-developers.appspot.com/youtube/v3/getting-started")
sys.exit()
if len(a.QUERY) <= 0:
print("Please pass in a query.")
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=a.API_KEY)
# Build the list of search queries: either one per row of the input CSV,
# or a single query parsed from the -query command-line string.
queries = []
if len(a.INPUT_FILE) > 0:
    queryKeys, queries = readCsv(a.INPUT_FILE, doParseNumbers=False)
else:
    queries = [parseQueryString(a.QUERY)]
queryCount = len(queries)
for i, q in enumerate(queries):
    # Copy the user query and force the fields every search needs.
    ytQuery = q.copy()
    ytQuery["part"] = "id,snippet"
    ytQuery["type"] = "video" # Always get videos back
    ytQuery["videoDimension"] = "2d" # exclude 3d videos
    if len(a.SORT_BY) > 0:
        ytQuery["order"] = a.SORT_BY
    # Paginate: maxResults is only set when a limit was given (LIMIT > 0).
    pages = 1
    if a.LIMIT > 0:
        pages = ceilInt(1.0 * a.LIMIT / MAX_YT_RESULTS_PER_PAGE)
        ytQuery["maxResults"] = min(a.LIMIT, MAX_YT_RESULTS_PER_PAGE)
    print("Query %s of %s: %s" % (i+1, queryCount, urlencode(ytQuery)))
    for page in range(pages):
        print("- Page %s..." % (page+1))
        # Make one query to retrieve ids
        try:
            search_response = youtube.search().list(**ytQuery).execute()
        except HttpError as e:
            print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
            sys.exit()
        nextPageToken = search_response.get('nextPageToken', "")
        # pprint(search_response.get('items', []))
        # sys.exit()
        ids = []
        for r in search_response.get('items', []):
            ids.append(r['id']['videoId'])
        print("-- %s results found." % (len(ids)))
        # Find the ids whose detail JSON has not been written to disk yet.
        missingIds = []
        for id in ids:
            outfile = a.OUTPUT_FILE % id
            if not os.path.isfile(outfile):
                missingIds.append(id)
        if len(missingIds) > 0:
            print("-- Getting details for %s videos..." % (len(missingIds)))
            # Make another query to retrieve stats
            # NOTE(review): the detail request below fetches ALL page ids,
            # not just missingIds -- possibly an intentional refresh of
            # existing files; confirm before changing.
            idString = ",".join(ids)
            try:
                search_response = youtube.videos().list(id=idString, part="id,statistics,snippet").execute()
            except HttpError as e:
                print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
                sys.exit()
            if a.VERBOSE:
                print("-----\nResults: ")
            # Write one JSON file per video id with its stats and snippet.
            for r in search_response.get('items', []):
                outfile = a.OUTPUT_FILE % r['id']
                writeJSON(outfile, r, verbose=a.VERBOSE)
                # pprint(r['id'])
                # pprint(r['statistics'])
                # pprint(r['snippet'])
                if a.VERBOSE:
                    print("%s: %s (%s views)" % (r['id'], r['snippet']['title'], r['statistics']['viewCount']))
            if a.VERBOSE:
                print("-----")
        # Retrieve the next page
        if len(nextPageToken) < 1:
            break
        ytQuery["pageToken"] = nextPageToken
print("Done.")
| 37.422222 | 182 | 0.64034 | # -*- coding: utf-8 -*-
# Search API docs: https://developers.google.com/youtube/v3/docs/search/list
# Search API Python docs: https://developers.google.com/resources/api-libraries/documentation/youtube/v3/python/latest/youtube_v3.search.html
# Examples: https://github.com/youtube/api-samples/tree/master/python
import argparse
import inspect
import math
import os
from pprint import pprint
import sys
try:
#python2
from urllib import urlencode
except ImportError:
#python3
from urllib.parse import urlencode
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
# add parent directory to sys path to import relative modules
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir)
sys.path.insert(0,parentdir)
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-key', dest="API_KEY", default="", help="Your API Key. See: https://google-developers.appspot.com/youtube/v3/getting-started")
parser.add_argument('-query', dest="QUERY", default=" location=40.903125,-73.85062&locationRadius=10km&videoLicense=creativeCommon", help="Search query parameters as a query string")
parser.add_argument('-in', dest="INPUT_FILE", default="", help="Input .csv file containing one or more queries; will override individual query")
parser.add_argument('-sort', dest="SORT_BY", default="", help="Sort by string")
parser.add_argument('-lim', dest="LIMIT", default=100, type=int, help="Limit results")
parser.add_argument('-out', dest="OUTPUT_FILE", default="tmp/yt-search/%s.json", help="JSON output file pattern")
parser.add_argument('-verbose', dest="VERBOSE", action="store_true", help="Display search result details")
a = parser.parse_args()
aa = vars(a)
makeDirectories([a.OUTPUT_FILE])
aa["QUERY"] = a.QUERY.strip()
MAX_YT_RESULTS_PER_PAGE = 50
if len(a.API_KEY) <= 0:
print("You must pass in your developer API key. See more at https://google-developers.appspot.com/youtube/v3/getting-started")
sys.exit()
if len(a.QUERY) <= 0:
print("Please pass in a query.")
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=a.API_KEY)
queries = []
if len(a.INPUT_FILE) > 0:
queryKeys, queries = readCsv(a.INPUT_FILE, doParseNumbers=False)
else:
queries = [parseQueryString(a.QUERY)]
queryCount = len(queries)
for i, q in enumerate(queries):
ytQuery = q.copy()
ytQuery["part"] = "id,snippet"
ytQuery["type"] = "video" # Always get videos back
ytQuery["videoDimension"] = "2d" # exclude 3d videos
if len(a.SORT_BY) > 0:
ytQuery["order"] = a.SORT_BY
pages = 1
if a.LIMIT > 0:
pages = ceilInt(1.0 * a.LIMIT / MAX_YT_RESULTS_PER_PAGE)
ytQuery["maxResults"] = min(a.LIMIT, MAX_YT_RESULTS_PER_PAGE)
print("Query %s of %s: %s" % (i+1, queryCount, urlencode(ytQuery)))
for page in range(pages):
print("- Page %s..." % (page+1))
# Make one query to retrieve ids
try:
search_response = youtube.search().list(**ytQuery).execute()
except HttpError as e:
print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
sys.exit()
nextPageToken = search_response.get('nextPageToken', "")
# pprint(search_response.get('items', []))
# sys.exit()
ids = []
for r in search_response.get('items', []):
ids.append(r['id']['videoId'])
print("-- %s results found." % (len(ids)))
missingIds = []
for id in ids:
outfile = a.OUTPUT_FILE % id
if not os.path.isfile(outfile):
missingIds.append(id)
if len(missingIds) > 0:
print("-- Getting details for %s videos..." % (len(missingIds)))
# Make another query to retrieve stats
idString = ",".join(ids)
try:
search_response = youtube.videos().list(id=idString, part="id,statistics,snippet").execute()
except HttpError as e:
print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
sys.exit()
if a.VERBOSE:
print("-----\nResults: ")
for r in search_response.get('items', []):
outfile = a.OUTPUT_FILE % r['id']
writeJSON(outfile, r, verbose=a.VERBOSE)
# pprint(r['id'])
# pprint(r['statistics'])
# pprint(r['snippet'])
if a.VERBOSE:
print("%s: %s (%s views)" % (r['id'], r['snippet']['title'], r['statistics']['viewCount']))
if a.VERBOSE:
print("-----")
# Retrieve the next page
if len(nextPageToken) < 1:
break
ytQuery["pageToken"] = nextPageToken
print("Done.")
| 0 | 0 | 0 |
b33e830b1007fc54e85f27112bd5b96f20551d3c | 8,472 | py | Python | bubuku/controller.py | zalando-nakadi/bubuku | 5738cc9309ed46e86fcad41b6fb580ddd69af8fd | [
"MIT"
] | 32 | 2017-10-17T09:59:46.000Z | 2022-01-23T11:39:31.000Z | bubuku/controller.py | zalando-nakadi/bubuku | 5738cc9309ed46e86fcad41b6fb580ddd69af8fd | [
"MIT"
] | 91 | 2017-07-13T15:43:15.000Z | 2022-02-21T13:06:35.000Z | bubuku/controller.py | zalando-nakadi/bubuku | 5738cc9309ed46e86fcad41b6fb580ddd69af8fd | [
"MIT"
] | 3 | 2018-04-19T13:13:00.000Z | 2018-09-11T05:59:38.000Z | import logging
from time import time
from typing import Tuple, Optional
from bubuku.broker import BrokerManager
from bubuku.communicate import sleep_and_operate
from bubuku.env_provider import EnvProvider
from bubuku.zookeeper import BukuExhibitor
_LOG = logging.getLogger('bubuku.controller')
#
# Returns a flag indicating if the change should continue running (True).
# In that case time_till_next_run() is called to determine when to schedule the next run.
#
| 40.535885 | 118 | 0.608357 | import logging
from time import time
from typing import Tuple, Optional
from bubuku.broker import BrokerManager
from bubuku.communicate import sleep_and_operate
from bubuku.env_provider import EnvProvider
from bubuku.zookeeper import BukuExhibitor
_LOG = logging.getLogger('bubuku.controller')
class Change(object):
def get_name(self) -> str:
raise NotImplementedError('Not implemented yet')
def can_run(self, current_actions) -> bool:
raise NotImplementedError('Not implemented yet')
#
# Returns a flag indicating if the change should continue running (True).
# In that case time_till_next_run() is called to determine when to schedule the next run.
#
def run(self, current_actions) -> bool:
raise NotImplementedError('Not implemented')
def time_till_next_run(self) -> float:
return 0.5
def can_run_at_exit(self) -> bool:
return False
def on_remove(self):
pass
class Check(object):
def __init__(self, check_interval_s=5):
self.check_interval_s = check_interval_s
self.__last_check_timestamp_s = 0
def check_if_time(self) -> Change:
if self.time_till_check() <= 0:
self.__last_check_timestamp_s = time()
_LOG.info('Executing check {}'.format(self))
return self.check()
return None
def time_till_check(self):
return self.__last_check_timestamp_s + self.check_interval_s - time()
def check(self) -> Change:
raise NotImplementedError('Not implemented')
def _exclude_self(provider_id, name, running_actions):
return [k for k, v in running_actions.items() if k != name or v != provider_id]
class Controller(object):
def __init__(self, broker_manager: BrokerManager, zk: BukuExhibitor, env_provider: EnvProvider):
self.broker_manager = broker_manager
self.zk = zk
self.env_provider = env_provider
self.checks = []
self.changes = {} # Holds mapping from change name to array of pending changes
self.running = True
self.provider_id = None # provider id must not be requested on initialization
def enumerate_changes(self):
with self.zk.lock(self.provider_id):
running_changes = self.zk.get_running_changes()
result = []
for name, change_list in self.changes.items():
running = running_changes.get(name) == self.provider_id
first = True
for change in change_list:
result.append({
'type': name,
'description': str(change),
'running': bool(first and running)
})
first = False
return result
def cancel_changes(self, name):
result = len(self.changes.get(name, {}))
if result:
if name in self.zk.get_running_changes():
for change in self.changes[name]:
change.on_remove()
with self.zk.lock(self.provider_id):
self.zk.unregister_change(name)
del self.changes[name]
return result
def add_check(self, check):
_LOG.info('Adding check {}'.format(str(check)))
self.checks.append(check)
def _register_running_changes(self) -> dict:
if not self.changes:
return {} # Do not take lock if there are no changes to register
_LOG.debug('Taking lock for processing')
with self.zk.lock(self.provider_id):
_LOG.debug('Lock is taken')
# Get list of current running changes
running_changes = self.zk.get_running_changes()
if running_changes:
_LOG.info("Running changes: {}".format(running_changes))
# Register changes to run
for name, change_list in self.changes.items():
# Only first change is able to run
first_change = change_list[0]
if first_change.can_run(_exclude_self(self.provider_id, name, running_changes)):
if name not in running_changes:
self.zk.register_change(name, self.provider_id)
running_changes[name] = self.provider_id
else:
_LOG.info('Change {} is waiting for others: {}'.format(name, running_changes))
return running_changes
def _run_changes(self, running_changes: dict) -> Tuple[list, Optional[float]]:
changes_to_remove = []
min_time_till_next_change_run = None
for name, change_list in self.changes.copy().items():
if name in running_changes and running_changes[name] == self.provider_id:
change = change_list[0]
_LOG.info('Executing action {} step'.format(change))
if self.running or change.can_run_at_exit():
try:
if not change.run(_exclude_self(self.provider_id, change.get_name(), running_changes)):
_LOG.info('Action {} completed'.format(change))
changes_to_remove.append(change.get_name())
else:
_LOG.info('Action {} will be executed on next loop step'.format(change))
time_till_next_run = change.time_till_next_run()
if min_time_till_next_change_run is None:
min_time_till_next_change_run = time_till_next_run
else:
min_time_till_next_change_run = min(time_till_next_run, min_time_till_next_change_run)
except Exception as e:
_LOG.error('Failed to execute change {} because of exception, removing'.format(change),
exc_info=e)
changes_to_remove.append(change.get_name())
else:
_LOG.info(
'Action {} can not be run while stopping, forcing to stop it'.format(change))
changes_to_remove.append(change.get_name())
return changes_to_remove, min_time_till_next_change_run
def _release_changes_lock(self, changes_to_remove):
if changes_to_remove:
for change_name in changes_to_remove:
removed_change = self.changes[change_name][0]
del self.changes[change_name][0]
if not self.changes[change_name]:
del self.changes[change_name]
removed_change.on_remove()
with self.zk.lock():
for name in changes_to_remove:
self.zk.unregister_change(name)
def loop(self, change_on_init=None):
self.provider_id = self.env_provider.get_id()
if change_on_init:
self._add_change_to_queue(change_on_init)
while self.running or self.changes:
time_till_next_step = self.make_step()
timeouts = [check.time_till_check() for check in self.checks]
timeouts.append(time_till_next_step or 5.0)
sleep_and_operate(self, min(timeouts))
def make_step(self) -> Optional[float]:
# register running changes
running_changes = self._register_running_changes()
# apply changes without holding lock
changes_to_remove, time_till_next_run = self._run_changes(running_changes)
# remove processed actions
self._release_changes_lock(changes_to_remove)
if self.running:
for check in self.checks:
change = check.check_if_time()
if change:
self._add_change_to_queue(change)
# prioritize newly appearing change run
time_till_next_run = 0.5
return time_till_next_run
def _add_change_to_queue(self, change):
_LOG.info('Adding change {} to pending changes'.format(change.get_name()))
if change.get_name() not in self.changes:
self.changes[change.get_name()] = []
self.changes[change.get_name()].append(change)
def stop(self, change: Change):
_LOG.info('Stopping controller with additional change: {}'.format(change.get_name() if change else None))
# clear all pending changes
if change:
self._add_change_to_queue(change)
self.running = False
| 7,329 | 3 | 655 |
ea7f69405d55960dd53034f6c6f8fca3da210fe6 | 1,312 | py | Python | roman-to-integer/solution.py | thehimel/problem-solving | b7cd019e50895a0d2438947a0a826774eb7ce82f | [
"MIT"
] | null | null | null | roman-to-integer/solution.py | thehimel/problem-solving | b7cd019e50895a0d2438947a0a826774eb7ce82f | [
"MIT"
] | null | null | null | roman-to-integer/solution.py | thehimel/problem-solving | b7cd019e50895a0d2438947a0a826774eb7ce82f | [
"MIT"
] | null | null | null | def integer(roman):
"""
Function to convert a roman numeral to integer.
:type roman: str
:rtype: int
"""
# Initialize a dictionary of symbol and values
symbol_value = {
'M': 1000,
'D': 500,
'C': 100,
'L': 50,
'X': 10,
'V': 5,
'I': 1
}
second_last_index = len(roman) - 1
result = 0
# Now traverse the roman string from index 0 to the second last index.
# Compare value of the present symbol with the value of the next symbol.
# If the present value is smaller than the next value, reduce the
# present value from the result. Else add it with the result.
for i in range(second_last_index):
present_value = symbol_value[roman[i]]
next_value = symbol_value[roman[i+1]]
if present_value < next_value:
result -= present_value
else:
result += present_value
# At last, add the value of the last symbol.
result += symbol_value[roman[-1]]
return result
if __name__ == '__main__':
test_set = [
('XLV', 45),
('MMMMMCMXCV', 5995),
('XCV', 95),
('DCCC', 800),
('CDLXXXII', 482),
]
for roman, output in test_set:
assert output == integer(roman)
print('Test Passed.')
| 23.428571 | 76 | 0.567073 | def integer(roman):
"""
Function to convert a roman numeral to integer.
:type roman: str
:rtype: int
"""
# Initialize a dictionary of symbol and values
symbol_value = {
'M': 1000,
'D': 500,
'C': 100,
'L': 50,
'X': 10,
'V': 5,
'I': 1
}
second_last_index = len(roman) - 1
result = 0
# Now traverse the roman string from index 0 to the second last index.
# Compare value of the present symbol with the value of the next symbol.
# If the present value is smaller than the next value, reduce the
# present value from the result. Else add it with the result.
for i in range(second_last_index):
present_value = symbol_value[roman[i]]
next_value = symbol_value[roman[i+1]]
if present_value < next_value:
result -= present_value
else:
result += present_value
# At last, add the value of the last symbol.
result += symbol_value[roman[-1]]
return result
if __name__ == '__main__':
test_set = [
('XLV', 45),
('MMMMMCMXCV', 5995),
('XCV', 95),
('DCCC', 800),
('CDLXXXII', 482),
]
for roman, output in test_set:
assert output == integer(roman)
print('Test Passed.')
| 0 | 0 | 0 |
c700279aa1df8709d4b0dabb418cb7afd030f998 | 1,098 | py | Python | agbot/core/plugin/plugin_manager_static.py | chinapnr/agbot | 9739ce1c2198e50111629db2d1de785edd06876e | [
"MIT"
] | 2 | 2018-06-23T06:48:46.000Z | 2018-06-23T10:11:50.000Z | agbot/core/plugin/plugin_manager_static.py | chinapnr/agbot | 9739ce1c2198e50111629db2d1de785edd06876e | [
"MIT"
] | 5 | 2020-01-03T09:33:02.000Z | 2021-06-02T00:49:52.000Z | agbot/core/plugin/plugin_manager_static.py | chinapnr/agbot | 9739ce1c2198e50111629db2d1de785edd06876e | [
"MIT"
] | 1 | 2021-07-07T07:17:27.000Z | 2021-07-07T07:17:27.000Z | from fishbase import logger
class PluginsManagerStatic(object):
"""
1. 现阶段插件是用来进行请求或者响应参数的处理
2. 暂时规定插件必须实现 run 方法
3. 使用实例:
pm = PluginsManager()
pm.run_plugin('demo.demo_md5',
{'sign_type':'md5','data_sign_params':'param1, param2'}, {'param1':'1','param2':'2','param3':'3'})
"""
| 33.272727 | 106 | 0.604736 | from fishbase import logger
class PluginsManagerStatic(object):
"""
1. 现阶段插件是用来进行请求或者响应参数的处理
2. 暂时规定插件必须实现 run 方法
3. 使用实例:
pm = PluginsManager()
pm.run_plugin('demo.demo_md5',
{'sign_type':'md5','data_sign_params':'param1, param2'}, {'param1':'1','param2':'2','param3':'3'})
"""
def __init__(self, package):
self.__plugin_dict = {}
try:
pr = __import__(package)
pr.register(self)
except Exception as e:
logger.exception('plugins_path not found: %s; cause: %s', package, str(e))
raise RuntimeError('plugins_path not found: {}; cause: {}'.format(package, str(e))) from e
def run_plugin(self, plugin_name, plugin_conf_dict, ctx):
try:
plugin = self.__plugin_dict[plugin_name]
return plugin.run(plugin_conf_dict, ctx)
except Exception as e:
logger.exception('run plugin error: %s; cause: %s', plugin_name, str(e))
raise e
def add_plugin(self, plugin_dict):
self.__plugin_dict.update(plugin_dict)
| 692 | 0 | 81 |
6b3f4acf49e4b6dc2a76d42d703ee2157fa96ee9 | 1,057 | py | Python | problems/test_0088.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | 1 | 2017-06-17T23:47:17.000Z | 2017-06-17T23:47:17.000Z | problems/test_0088.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | null | null | null | problems/test_0088.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | null | null | null | import unittest
if __name__ == '__main__':
unittest.main()
| 23.488889 | 75 | 0.434248 | import unittest
class Solution:
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
hi = m + n - 1
m -= 1
n -= 1
while m >= 0 and n >= 0:
if nums1[m] > nums2[n]:
nums1[hi] = nums1[m]
m -= 1
else:
nums1[hi] = nums2[n]
n -= 1
hi -= 1
if n >= 0:
nums1[:n + 1] = nums2[:n + 1]
class Test(unittest.TestCase):
def test(self):
self._test(
[10, 20],
[1, 2, 11, 12, 21, 22],
[1, 2, 10, 11, 12, 20, 21, 22])
def _test(self, nums1, nums2, expected):
m = len(nums1)
n = len(nums2)
nums1 += [0] * n
actual = Solution().merge(nums1, m, nums2, n)
self.assertEqual(expected, nums1)
if __name__ == '__main__':
unittest.main()
| 302 | 589 | 99 |
8ff3e9267e1da09adc5fb8a5ff21ae54a517201e | 64,575 | py | Python | samap/analysis.py | vishalbelsare/SAMap | 2a2170496019f37dda113bb52bb55169825f7e05 | [
"MIT"
] | 20 | 2021-03-20T05:06:41.000Z | 2022-02-16T08:25:46.000Z | samap/analysis.py | vishalbelsare/SAMap | 2a2170496019f37dda113bb52bb55169825f7e05 | [
"MIT"
] | 47 | 2021-01-29T21:04:57.000Z | 2022-03-18T11:53:44.000Z | samap/analysis.py | vishalbelsare/SAMap | 2a2170496019f37dda113bb52bb55169825f7e05 | [
"MIT"
] | 6 | 2021-02-12T18:07:05.000Z | 2022-03-09T01:02:06.000Z | import sklearn.utils.sparsefuncs as sf
from . import q, ut, pd, sp, np, warnings, sc
from .utils import to_vo, to_vn, substr, df_to_dict, sparse_knn, prepend_var_prefix
from samalg import SAM
from scipy.stats import rankdata
def GOEA(target_genes,GENE_SETS,df_key='GO',goterms=None,fdr_thresh=0.25,p_thresh=1e-3):
"""Performs GO term Enrichment Analysis using the hypergeometric distribution.
Parameters
----------
target_genes - array-like
List of target genes from which to find enriched GO terms.
GENE_SETS - dictionary or pandas.DataFrame
Dictionary where the keys are GO terms and the values are lists of genes associated with each GO term.
Ex: {'GO:0000001': ['GENE_A','GENE_B'],
'GO:0000002': ['GENE_A','GENE_C','GENE_D']}
Make sure to include all available genes that have GO terms in your dataset.
---OR---
Pandas DataFrame with genes as the index and GO terms values.
Ex: 'GENE_A','GO:0000001',
'GENE_A','GO:0000002',
'GENE_B','GO:0000001',
'GENE_B','GO:0000004',
...
If `GENE_SETS` is a pandas DataFrame, the `df_key` parameter should be the name of the column in which
the GO terms are stored.
df_key - str, optional, default 'GO'
The name of the column in which GO terms are stored. Only used if `GENE_SETS` is a DataFrame.
goterms - array-list, optional, default None
If provided, only these GO terms will be tested.
fdr_thresh - float, optional, default 0.25
Filter out GO terms with FDR q value greater than this threshold.
p_thresh - float, optional, default 1e-3
Filter out GO terms with p value greater than this threshold.
Returns:
-------
enriched_goterms - pandas.DataFrame
A Pandas DataFrame of enriched GO terms with FDR q values, p values, and associated genes provided.
"""
# identify all genes found in `GENE_SETS`
if isinstance(GENE_SETS,pd.DataFrame):
print('Converting DataFrame into dictionary')
genes = np.array(list(GENE_SETS.index))
agt = np.array(list(GENE_SETS[df_key].values))
idx = np.argsort(agt)
genes = genes[idx]
agt = agt[idx]
bounds = np.where(agt[:-1]!=agt[1:])[0]+1
bounds = np.append(np.append(0,bounds),agt.size)
bounds_left=bounds[:-1]
bounds_right=bounds[1:]
genes_lists = [genes[bounds_left[i]:bounds_right[i]] for i in range(bounds_left.size)]
GENE_SETS = dict(zip(np.unique(agt),genes_lists))
all_genes = np.unique(np.concatenate(list(GENE_SETS.values())))
all_genes = np.array(all_genes)
# if goterms is None, use all the goterms found in `GENE_SETS`
if goterms is None:
goterms = np.unique(list(GENE_SETS.keys()))
else:
goterms = goterms[np.in1d(goterms,np.unique(list(GENE_SETS.keys())))]
# ensure that target genes are all present in `all_genes`
_,ix = np.unique(target_genes,return_index=True)
target_genes=target_genes[np.sort(ix)]
target_genes = target_genes[np.in1d(target_genes,all_genes)]
# N -- total number of genes
N = all_genes.size
probs=[]
probs_genes=[]
counter=0
# for each go term,
for goterm in goterms:
if counter%1000==0:
pass; #print(counter)
counter+=1
# identify genes associated with this go term
gene_set = np.array(GENE_SETS[goterm])
# B -- number of genes associated with this go term
B = gene_set.size
# b -- number of genes in target associated with this go term
gene_set_in_target = gene_set[np.in1d(gene_set,target_genes)]
b = gene_set_in_target.size
if b != 0:
# calculate the enrichment probability as the cumulative sum of the tail end of a hypergeometric distribution
# with parameters (N,B,n,b)
n = target_genes.size
num_iter = min(n,B)
rng = np.arange(b,num_iter+1)
probs.append(sum([np.exp(_log_binomial(n,i)+_log_binomial(N-n,B-i) - _log_binomial(N,B)) for i in rng]))
else:
probs.append(1.0)
#append associated genes to a list
probs_genes.append(gene_set_in_target)
probs = np.array(probs)
probs_genes = np.array([';'.join(x) for x in probs_genes])
# adjust p value to correct for multiple testing
fdr_q_probs = probs.size*probs / rankdata(probs,method='ordinal')
# filter out go terms based on the FDR q value and p value thresholds
filt = np.logical_and(fdr_q_probs<fdr_thresh,probs<p_thresh)
enriched_goterms = goterms[filt]
p_values = probs[filt]
fdr_q_probs = fdr_q_probs[filt]
probs_genes=probs_genes[filt]
# construct the Pandas DataFrame
gns = probs_genes
enriched_goterms = pd.DataFrame(data=fdr_q_probs,index=enriched_goterms,columns=['fdr_q_value'])
enriched_goterms['p_value'] = p_values
enriched_goterms['genes'] = gns
# sort in ascending order by the p value
enriched_goterms = enriched_goterms.sort_values('p_value')
return enriched_goterms
_KOG_TABLE = dict(A = "RNA processing and modification",
B = "Chromatin structure and dynamics",
C = "Energy production and conversion",
D = "Cell cycle control, cell division, chromosome partitioning",
E = "Amino acid transport and metabolism",
F = "Nucleotide transport and metabolism",
G = "Carbohydrate transport and metabolism",
H = "Coenzyme transport and metabolism",
I = "Lipid transport and metabolism",
J = "Translation, ribosomal structure and biogenesis",
K = "Transcription",
L = "Replication, recombination, and repair",
M = "Cell wall membrane/envelope biogenesis",
N = "Cell motility",
O = "Post-translational modification, protein turnover, chaperones",
P = "Inorganic ion transport and metabolism",
Q = "Secondary metabolites biosynthesis, transport and catabolism",
R = "General function prediction only",
S = "Function unknown",
T = "Signal transduction mechanisms",
U = "Intracellular trafficking, secretion, and vesicular transport",
V = "Defense mechanisms",
W = "Extracellular structures",
Y = "Nuclear structure",
Z = "Cytoskeleton")
import gc
from collections.abc import Iterable
def sankey_plot(M,species_order=None,align_thr=0.1,**params):
"""Generate a sankey plot
Parameters
----------
M: pandas.DataFrame
Mapping table output from `get_mapping_scores` (second output).
align_thr: float, optional, default 0.1
The alignment score threshold below which to remove cell type mappings.
species_order: list, optional, default None
Specify the order of species (left-to-right) in the sankey plot.
For example, `species_order=['hu','le','ms']`.
Keyword arguments
-----------------
Keyword arguments will be passed to `sankey.opts`.
"""
if species_order is not None:
ids = np.array(species_order)
else:
ids = np.unique([x.split('_')[0] for x in M.index])
if len(ids)>2:
d = M.values.copy()
d[d<align_thr]=0
x,y = d.nonzero()
x,y = np.unique(np.sort(np.vstack((x,y)).T,axis=1),axis=0).T
values = d[x,y]
nodes = q(M.index)
node_pairs = nodes[np.vstack((x,y)).T]
sn1 = q([xi.split('_')[0] for xi in node_pairs[:,0]])
sn2 = q([xi.split('_')[0] for xi in node_pairs[:,1]])
filt = np.logical_or(
np.logical_or(np.logical_and(sn1==ids[0],sn2==ids[1]),np.logical_and(sn1==ids[1],sn2==ids[0])),
np.logical_or(np.logical_and(sn1==ids[1],sn2==ids[2]),np.logical_and(sn1==ids[2],sn2==ids[1]))
)
x,y,values=x[filt],y[filt],values[filt]
d=dict(zip(ids,list(np.arange(len(ids)))))
depth_map = dict(zip(nodes,[d[xi.split('_')[0]] for xi in nodes]))
data = nodes[np.vstack((x,y))].T
for i in range(data.shape[0]):
if d[data[i,0].split('_')[0]] > d[data[i,1].split('_')[0]]:
data[i,:]=data[i,::-1]
R = pd.DataFrame(data = data,columns=['source','target'])
R['Value'] = values
else:
d = M.values.copy()
d[d<align_thr]=0
x,y = d.nonzero()
x,y = np.unique(np.sort(np.vstack((x,y)).T,axis=1),axis=0).T
values = d[x,y]
nodes = q(M.index)
R = pd.DataFrame(data = nodes[np.vstack((x,y))].T,columns=['source','target'])
R['Value'] = values
depth_map=None
try:
from holoviews import dim
#from bokeh.models import Label
import holoviews as hv
hv.extension('bokeh',logo=False)
hv.output(size=100)
except:
raise ImportError('Please install holoviews-samap with `!pip install holoviews-samap`.')
sankey1 = hv.Sankey(R, kdims=["source", "target"])#, vdims=["Value"])
cmap = params.get('cmap','Colorblind')
label_position = params.get('label_position','outer')
edge_line_width = params.get('edge_line_width',0)
show_values = params.get('show_values',False)
node_padding = params.get('node_padding',4)
node_alpha = params.get('node_alpha',1.0)
node_width = params.get('node_width',40)
node_sort = params.get('node_sort',True)
frame_height = params.get('frame_height',1000)
frame_width = params.get('frame_width',800)
bgcolor = params.get('bgcolor','snow')
apply_ranges = params.get('apply_ranges',True)
sankey1.opts(cmap=cmap,label_position=label_position, edge_line_width=edge_line_width, show_values=show_values,
node_padding=node_padding,depth_map=depth_map, node_alpha=node_alpha, node_width=node_width,
node_sort=node_sort,frame_height=frame_height,frame_width=frame_width,bgcolor=bgcolor,
apply_ranges=apply_ranges,hooks=[f])
return sankey1
def chord_plot(A,align_thr=0.1):
"""Generate a chord plot
Parameters
----------
A: pandas.DataFrame
Mapping table output from `get_mapping_scores` (second output).
align_thr: float, optional, default 0.1
The alignment score threshold below which to remove cell type mappings.
"""
try:
from holoviews import dim, opts
import holoviews as hv
hv.extension('bokeh',logo=False)
hv.output(size=300)
except:
raise ImportError('Please install holoviews-samap with `!pip install holoviews-samap`.')
xx=A.values.copy()
xx[xx<align_thr]=0
x,y = xx.nonzero()
z=xx[x,y]
x,y = A.index[x],A.columns[y]
links=pd.DataFrame(data=np.array([x,y,z]).T,columns=['source','target','value'])
links['edge_grp'] = [x.split('_')[0]+y.split('_')[0] for x,y in zip(links['source'],links['target'])]
links['value']*=100
f = links['value'].values
z=((f-f.min())/(f.max()-f.min())*0.99+0.01)*100
links['value']=z
links['value']=np.round([x for x in links['value'].values]).astype('int')
clu=np.unique(A.index)
clu = clu[np.in1d(clu,np.unique(np.array([x,y])))]
links = hv.Dataset(links)
nodes = hv.Dataset(pd.DataFrame(data=np.array([clu,clu,np.array([x.split('_')[0] for x in clu])]).T,columns=['index','name','group']),'index')
chord = hv.Chord((links, nodes),kdims=["source", "target"], vdims=["value","edge_grp"])#.select(value=(5, None))
chord.opts(
opts.Chord(cmap='Category20', edge_cmap='Category20',edge_color=dim('edge_grp'),
labels='name', node_color=dim('group').str()))
return chord
def find_cluster_markers(sam, key, inplace=True):
""" Finds differentially expressed genes for provided cell type labels.
Parameters
----------
sam - SAM object
key - str
Column in `sam.adata.obs` for which to identifying differentially expressed genes.
inplace - bool, optional, default True
If True, deposits enrichment scores in `sam.adata.varm[f'{key}_scores']`
and p-values in `sam.adata.varm[f'{key}_pvals']`.
Otherwise, returns three pandas.DataFrame objects (genes x clusters).
NAMES - the gene names
PVALS - the p-values
SCORES - the enrichment scores
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
a,c = np.unique(q(sam.adata.obs[key]),return_counts=True)
t = a[c==1]
adata = sam.adata[np.in1d(q(sam.adata.obs[key]),a[c==1],invert=True)].copy()
sc.tl.rank_genes_groups(
adata,
key,
method="wilcoxon",
n_genes=sam.adata.shape[1],
use_raw=False,
layer=None,
)
sam.adata.uns['rank_genes_groups'] = adata.uns['rank_genes_groups']
NAMES = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["names"])
PVALS = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["pvals"])
SCORES = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["scores"])
if not inplace:
return NAMES, PVALS, SCORES
dfs1 = []
dfs2 = []
for i in range(SCORES.shape[1]):
names = NAMES.iloc[:, i]
scores = SCORES.iloc[:, i]
pvals = PVALS.iloc[:, i]
pvals[scores < 0] = 1.0
scores[scores < 0] = 0
pvals = q(pvals)
scores = q(scores)
dfs1.append(pd.DataFrame(
data=scores[None, :], index = [SCORES.columns[i]], columns=names
)[sam.adata.var_names].T)
dfs2.append(pd.DataFrame(
data=pvals[None, :], index = [SCORES.columns[i]], columns=names
)[sam.adata.var_names].T)
df1 = pd.concat(dfs1,axis=1)
df2 = pd.concat(dfs2,axis=1)
try:
sam.adata.varm[key+'_scores'] = df1
sam.adata.varm[key+'_pvals'] = df2
except:
sam.adata.varm.dim_names = sam.adata.var_names
sam.adata.varm.dim_names = sam.adata.var_names
sam.adata.varm[key+'_scores'] = df1
sam.adata.varm[key+'_pvals'] = df2
for i in range(t.size):
sam.adata.varm[key+'_scores'][t[i]]=0
sam.adata.varm[key+'_pvals'][t[i]]=1
def ParalogSubstitutions(sm, ortholog_pairs, paralog_pairs=None, psub_thr = 0.3):
    """Identify paralog substitutions.

    For all genes in `ortholog_pairs` and `paralog_pairs`, this function expects the genes to
    be prepended with their corresponding species IDs (e.g. 'hu_GENE').

    Parameters
    ----------
    sm - SAMAP object
    ortholog_pairs - n x 2 numpy array of ortholog pairs
    paralog_pairs - n x 2 numpy array of paralog pairs, optional, default None
        If None, assumes every pair in the homology graph that is not an ortholog is a paralog.
        Note that this would essentially result in the more generic 'homolog substitutions' rather
        than paralog substitutions.
        The paralogs can be either cross-species, within-species, or a mix of both.
    psub_thr - float, optional, default 0.3
        Threshold for correlation difference between paralog pairs and ortholog pairs.
        Paralog pairs that do not have greater than `psub_thr` correlation than their
        corresponding ortholog pairs are filtered out.

    Returns
    -------
    RES - pandas.DataFrame
        A table of paralog substitutions with columns 'ortholog pairs', 'paralog pairs',
        'ortholog corrs', 'paralog corrs', and 'corr diff', sorted by 'corr diff'.
    """
    if paralog_pairs is not None:
        # Split the input paralogs into within-species (same ID prefix) and
        # cross-species pairs.
        ids1 = np.array([x.split('_')[0] for x in paralog_pairs[:,0]])
        ids2 = np.array([x.split('_')[0] for x in paralog_pairs[:,1]])
        ix = np.where(ids1==ids2)[0]
        ixnot = np.where(ids1!=ids2)[0]
        if ix.size > 0:
            pps = paralog_pairs[ix]
            # Build a symmetric gene -> within-species-paralogs adjacency dict.
            ZZ1 = {}
            ZZ2 = {}
            for i in range(pps.shape[0]):
                L = ZZ1.get(pps[i,0],[])
                L.append(pps[i,1])
                ZZ1[pps[i,0]]=L
                L = ZZ2.get(pps[i,1],[])
                L.append(pps[i,0])
                ZZ2[pps[i,1]]=L
            keys = list(ZZ1.keys())
            for k in keys:
                L = ZZ2.get(k,[])
                L.extend(ZZ1[k])
                ZZ2[k] = list(np.unique(L))
            ZZ = ZZ2
            # Turn within-species paralogs into cross-species pairs by pairing
            # each gene's paralogs with that gene's ortholog partner.
            L1=[]
            L2=[]
            for i in range(ortholog_pairs.shape[0]):
                # Was a bare `except:`; narrowed to KeyError (gene has no
                # within-species paralogs) to match the lookup style used below.
                try:
                    x = ZZ[ortholog_pairs[i,0]]
                except KeyError:
                    x = []
                L1.extend([ortholog_pairs[i,1]]*len(x))
                L2.extend(x)
                try:
                    x = ZZ[ortholog_pairs[i,1]]
                except KeyError:
                    x = []
                L1.extend([ortholog_pairs[i,0]]*len(x))
                L2.extend(x)
            L = np.vstack((L2,L1)).T
            pps = np.unique(np.sort(L,axis=1),axis=0)
            # Merge the derived cross-species pairs with the original ones.
            paralog_pairs = np.unique(np.sort(np.vstack((pps,paralog_pairs[ixnot])),axis=1),axis=0)
    smp = sm.samap
    gnnm = smp.adata.varp["homology_graph_reweighted"]
    gn = q(smp.adata.var_names)
    # Keep only pairs whose genes exist in the homology graph.
    ortholog_pairs = np.sort(ortholog_pairs,axis=1)
    ortholog_pairs = ortholog_pairs[np.logical_and(np.in1d(ortholog_pairs[:,0],gn),np.in1d(ortholog_pairs[:,1],gn))]
    if paralog_pairs is None:
        # Fall back to every edge in the unweighted homology graph.
        paralog_pairs = gn[np.vstack(smp.adata.varp["homology_graph"].nonzero()).T]
    else:
        paralog_pairs = paralog_pairs[np.logical_and(np.in1d(paralog_pairs[:,0],gn),np.in1d(paralog_pairs[:,1],gn))]
    paralog_pairs = np.sort(paralog_pairs,axis=1)
    # Exclude pairs that are already orthologs (checked in both orders).
    paralog_pairs = paralog_pairs[
        np.in1d(to_vn(paralog_pairs), np.append(to_vn(ortholog_pairs),to_vn(ortholog_pairs[:,::-1])), invert=True)
    ]
    # Map gene names to integer indices; symmetrize both pair sets.
    A = pd.DataFrame(data=np.arange(gn.size)[None, :], columns=gn)
    xp, yp = (
        A[paralog_pairs[:, 0]].values.flatten(),
        A[paralog_pairs[:, 1]].values.flatten(),
    )
    xp, yp = np.unique(
        np.vstack((np.vstack((xp, yp)).T, np.vstack((yp, xp)).T)), axis=0
    ).T
    xo, yo = (
        A[ortholog_pairs[:, 0]].values.flatten(),
        A[ortholog_pairs[:, 1]].values.flatten(),
    )
    xo, yo = np.unique(
        np.vstack((np.vstack((xo, yo)).T, np.vstack((yo, xo)).T)), axis=0
    ).T
    A = pd.DataFrame(data=np.vstack((xp, yp)).T, columns=["x", "y"])
    pairdict = df_to_dict(A, key_key="x", val_key="y")
    # For each ortholog pair, collect every paralog partner of its first gene.
    Xp = []
    Yp = []
    Xo = []
    Yo = []
    for i in range(xo.size):
        try:
            y = pairdict[xo[i]]
        except KeyError:
            y = np.array([])
        Yp.extend(y)
        Xp.extend([xo[i]] * y.size)
        Xo.extend([xo[i]] * y.size)
        Yo.extend([yo[i]] * y.size)
    orths = to_vn(gn[np.vstack((np.array(Xo), np.array(Yo))).T])
    paras = to_vn(gn[np.vstack((np.array(Xp), np.array(Yp))).T])
    # Correlations come from the reweighted homology graph.
    orth_corrs = gnnm[Xo, Yo].A.flatten()
    par_corrs = gnnm[Xp, Yp].A.flatten()
    diff_corrs = par_corrs - orth_corrs
    RES = pd.DataFrame(
        data=np.vstack((orths, paras)).T, columns=["ortholog pairs", "paralog pairs"]
    )
    RES["ortholog corrs"] = orth_corrs
    RES["paralog corrs"] = par_corrs
    RES["corr diff"] = diff_corrs
    RES = RES.sort_values("corr diff", ascending=False)
    # Keep only paralogs substantially more correlated than their orthologs.
    RES = RES[RES["corr diff"] > psub_thr]
    # Require the species sets of each ortholog pair and its paralog pair to match.
    orths = RES['ortholog pairs'].values.flatten()
    paras = RES['paralog pairs'].values.flatten()
    orthssp = np.vstack([np.array([x.split('_')[0] for x in xx]) for xx in to_vo(orths)])
    parassp = np.vstack([np.array([x.split('_')[0] for x in xx]) for xx in to_vo(paras)])
    filt=[]
    for i in range(orthssp.shape[0]):
        filt.append(np.in1d(orthssp[i],parassp[i]).mean()==1.0)
    filt=np.array(filt)
    return RES[filt]
def convert_eggnog_to_homologs(sm, EGGs, og_key = 'eggNOG_OGs', taxon=2759):
    """Gets an n x 2 array of homologs at some taxonomic level based on Eggnog results.

    Parameters
    ----------
    sm: SAMAP object
    EGGs: dict of pandas.DataFrame, Eggnog output tables keyed by species IDs
    og_key: str, optional, default 'eggNOG_OGs'
        The column name of the orthology group mapping results in the Eggnog output tables.
    taxon: int, optional, default 2759
        Taxonomic ID corresponding to the level at which genes with overlapping orthology groups
        will be considered homologs. Defaults to the Eukaryotic level.

    Returns
    -------
    homolog_pairs: n x 2 numpy array of homolog pairs.
    """
    smp = sm.samap
    taxon = str(taxon)
    # Rebuild the dict and deep-copy each table so the caller's DataFrames
    # are not mutated by the index rewriting below.
    EGGs = dict(zip(list(EGGs.keys()),list(EGGs.values()))) #copying
    for k in EGGs.keys():
        EGGs[k] = EGGs[k].copy()
    # Prepend species IDs to gene names to match the stitched object's naming.
    Es=[]
    for k in EGGs.keys():
        A=EGGs[k]
        A.index=k+"_"+A.index
        Es.append(A)
    A = pd.concat(Es, axis=0)
    # Restrict to genes present in the combined SAMAP object.
    gn = q(smp.adata.var_names)
    A = A[np.in1d(q(A.index), gn)]
    orthology_groups = A[og_key]
    og = q(orthology_groups)
    # Assign each unique OG identifier an integer column index.
    x = np.unique(",".join(og).split(","))
    D = pd.DataFrame(data=np.arange(x.size)[None, :], columns=x)
    for i in range(og.size):
        n = orthology_groups[i].split(",")
        # Extract the text between '@' and '|' from each OG entry (the taxon ID).
        taxa = substr(substr(n, "@", 1),'|',0)
        if (taxa == "2759").sum() > 1 and taxon == '2759':
            # Ambiguous: more than one Eukaryota-level OG for this gene — drop it.
            og[i] = ""
        else:
            # Keep only the OG(s) matching the requested taxonomic level.
            og[i] = "".join(np.array(n)[taxa == taxon])
    A[og_key] = og
    # Align OG assignments with the SAMAP gene ordering; genes absent from the
    # Eggnog tables become the string 'nan' after conversion and are blanked out.
    og = q(A[og_key].reindex(gn))
    og[og == "nan"] = ""
    # Build (gene index, OG index) coordinate lists for the incidence matrix.
    X = []
    Y = []
    for i in range(og.size):
        x = og[i]
        if x != "":
            X.extend(D[x].values.flatten())
            Y.extend([i])
    X = np.array(X)
    Y = np.array(Y)
    # Sparse genes x OGs incidence matrix.
    B = sp.sparse.lil_matrix((og.size, D.size))
    B[Y, X] = 1
    B = B.tocsr()
    # B @ B.T connects any two genes that share at least one OG; binarize it.
    B = B.dot(B.T)
    B.data[:] = 1
    pairs = gn[np.vstack((B.nonzero())).T]
    # Keep only cross-species pairs, sorted within each pair and deduplicated.
    pairssp = np.vstack([q([x.split('_')[0] for x in xx]) for xx in pairs])
    return np.unique(np.sort(pairs[pairssp[:,0]!=pairssp[:,1]],axis=1),axis=0)
def CellTypeTriangles(sm,keys, align_thr=0.1):
    """Outputs a table of cell type triangles.

    Parameters
    ----------
    sm: SAMAP object - assumed to contain at least three species.
    keys: dictionary of annotation keys (`.adata.obs[key]`) keyed by species.
    align_thr: float, optional, default, 0.1
        Only keep triangles with minimum `align_thr` alignment score.

    Returns
    -------
    DF - pandas.DataFrame with one row per cell-type triangle and one column
        per species ID.
    """
    # Pairwise cell-type alignment scores across all species.
    D,A = get_mapping_scores(sm,keys=keys)
    x,y = A.values.nonzero()
    all_pairsf = np.array([A.index[x],A.columns[y]]).T.astype('str')
    alignmentf = A.values[x,y].flatten()
    alignment = alignmentf.copy()
    all_pairs = all_pairsf.copy()
    # Filter out weakly aligned cell-type pairs.
    all_pairs = all_pairs[alignment > align_thr]
    alignment = alignment[alignment > align_thr]
    all_pairs = to_vn(np.sort(all_pairs, axis=1))
    # Build a symmetric sparse alignment matrix over the unique cell types.
    x, y = substr(all_pairs, ";")
    ctu = np.unique(np.concatenate((x, y)))
    Z = pd.DataFrame(data=np.arange(ctu.size)[None, :], columns=ctu)
    nnm = sp.sparse.lil_matrix((ctu.size,) * 2)
    nnm[Z[x].values.flatten(), Z[y].values.flatten()] = alignment
    nnm[Z[y].values.flatten(), Z[x].values.flatten()] = alignment
    nnm = nnm.tocsr()
    import networkx as nx
    # Treat aligned cell-type pairs as graph edges; triangles are 3-cliques.
    G = nx.Graph()
    gps=ctu[np.vstack(nnm.nonzero()).T]
    G.add_edges_from(gps)
    # Series mapping 'ct1;ct2' pair names to their alignment scores.
    alignment = pd.Series(index=to_vn(gps),data=nnm.data)
    all_cliques = nx.enumerate_all_cliques(G)
    all_triangles = [x for x in all_cliques if len(x) == 3]
    Z = np.sort(np.vstack(all_triangles), axis=1)
    # Column names are the species prefixes of the first triangle's members.
    DF = pd.DataFrame(data=Z, columns=[x.split("_")[0] for x in Z[0]])
    # Annotate each triangle edge with its pairwise alignment score.
    for i,sid1 in enumerate(sm.ids):
        for sid2 in sm.ids[i:]:
            if sid1!=sid2:
                DF[sid1+';'+sid2] = [alignment[x] for x in DF[sid1].values.astype('str').astype('object')+';'+DF[sid2].values.astype('str').astype('object')]
    DF = DF[sm.ids]
    return DF
def GeneTriangles(sm,orth,keys=None,compute_markers=True,corr_thr=0.3, psub_thr = 0.3, pval_thr=1e-10):
    """Outputs a table of gene triangles.

    Parameters
    ----------
    sm: SAMAP object which contains at least three species
    orth: (n x 2) ortholog pairs
    keys: dict of strings corresponding to each species annotation column keyed by species, optional, default None
        If you'd like to include information about where each gene is differentially expressed, you can specify the
        annotation column to compute differential expressivity from for each species.
    compute_markers: bool, optional, default True
        Set this to False if you already precomputed differential expression for the input keys.
    corr_thr: float, optional, default 0.3
        Only keep triangles with minimum `corr_thr` correlation.
    psub_thr: float, optional, default 0.3
        Correlation-difference threshold forwarded to `ParalogSubstitutions`.
    pval_thr: float, optional, default 1e-10
        Consider cell types as differentially expressed if their p-values are less than `pval_thr`.

    Returns
    -------
    FINAL - pandas.DataFrame
        One row per gene triangle with substitution annotations, pairwise
        correlations, pair categories, and (if `keys` given) enriched cell types.
    """
    FINALS = []
    # Sort each ortholog pair alphabetically; record each gene's species prefix.
    orth = np.sort(orth,axis=1)
    orthsp = np.vstack([q([x.split('_')[0] for x in xx]) for xx in orth])
    # Paralog substitutions across all species pairs.
    RES = ParalogSubstitutions(sm, orth, psub_thr = psub_thr)
    op = to_vo(q(RES['ortholog pairs']))
    pp = to_vo(q(RES['paralog pairs']))
    ops = np.vstack([q([x.split('_')[0] for x in xx]) for xx in op])
    pps = np.vstack([q([x.split('_')[0] for x in xx]) for xx in pp])
    gnnm = sm.samap.adata.varp["homology_graph_reweighted"]
    gn = q(sm.samap.adata.var_names)
    gnsp = q([x.split('_')[0] for x in gn])
    import itertools
    # Process each 3-species combination independently.
    combs = list(itertools.combinations(sm.ids,3))
    for comb in combs:
        A,B,C = comb
        # NOTE(review): smp1/smp2/smp3 appear unused below — candidates for removal.
        smp1 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==A,sm.samap.adata.obs['species']==B)])
        smp2 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==A,sm.samap.adata.obs['species']==C)])
        smp3 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==B,sm.samap.adata.obs['species']==C)])
        sam1=sm.sams[A]
        sam2=sm.sams[B]
        sam3=sm.sams[C]
        # The three species pairs of this triangle: (A,B), (A,C), (B,C).
        A1,A2=A,B
        B1,B2=A,C
        C1,C2=B,C
        # Substitutions whose ortholog AND paralog pairs both span this species pair.
        f1 = np.logical_and(((ops[:,0]==A1) * (ops[:,1]==A2) + (ops[:,0]==A2) * (ops[:,1]==A1)) > 0,
                            ((pps[:,0]==A1) * (pps[:,1]==A2) + (pps[:,0]==A2) * (pps[:,1]==A1)) > 0)
        f2 = np.logical_and(((ops[:,0]==B1) * (ops[:,1]==B2) + (ops[:,0]==B2) * (ops[:,1]==B1)) > 0,
                            ((pps[:,0]==B1) * (pps[:,1]==B2) + (pps[:,0]==B2) * (pps[:,1]==B1)) > 0)
        f3 = np.logical_and(((ops[:,0]==C1) * (ops[:,1]==C2) + (ops[:,0]==C2) * (ops[:,1]==C1)) > 0,
                            ((pps[:,0]==C1) * (pps[:,1]==C2) + (pps[:,0]==C2) * (pps[:,1]==C1)) > 0)
        RES1=RES[f1]
        RES2=RES[f2]
        RES3=RES[f3]
        # Orthologs restricted to each species pair.
        f1 = ((orthsp[:,0]==A1) * (orthsp[:,1]==A2) + (orthsp[:,0]==A2) * (orthsp[:,1]==A1)) > 0
        f2 = ((orthsp[:,0]==B1) * (orthsp[:,1]==B2) + (orthsp[:,0]==B2) * (orthsp[:,1]==B1)) > 0
        f3 = ((orthsp[:,0]==C1) * (orthsp[:,1]==C2) + (orthsp[:,0]==C2) * (orthsp[:,1]==C1)) > 0
        orth1 = orth[f1]
        orth2 = orth[f2]
        orth3 = orth[f3]
        op1 = to_vo(q(RES1["ortholog pairs"]))
        op2 = to_vo(q(RES2["ortholog pairs"]))
        op3 = to_vo(q(RES3["ortholog pairs"]))
        pp1 = to_vo(q(RES1["paralog pairs"]))
        pp2 = to_vo(q(RES2["paralog pairs"]))
        pp3 = to_vo(q(RES3["paralog pairs"]))
        # Bipartite sub-blocks of the homology graph for each species pair,
        # padded with zero blocks so rows/cols are [species1 genes, species2 genes].
        gnnm1 = sp.sparse.vstack((
            sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==A1).sum(),)*2),gnnm[gnsp==A1,:][:,gnsp==A2])),
            sp.sparse.hstack((gnnm[gnsp==A2,:][:,gnsp==A1],sp.sparse.csr_matrix(((gnsp==A2).sum(),)*2)))
        )).tocsr()
        gnnm2 = sp.sparse.vstack((
            sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==B1).sum(),)*2),gnnm[gnsp==B1,:][:,gnsp==B2])),
            sp.sparse.hstack((gnnm[gnsp==B2,:][:,gnsp==B1],sp.sparse.csr_matrix(((gnsp==B2).sum(),)*2)))
        )).tocsr()
        gnnm3 = sp.sparse.vstack((
            sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==C1).sum(),)*2),gnnm[gnsp==C1,:][:,gnsp==C2])),
            sp.sparse.hstack((gnnm[gnsp==C2,:][:,gnsp==C1],sp.sparse.csr_matrix(((gnsp==C2).sum(),)*2)))
        )).tocsr()
        gn1 = np.append(gn[gnsp==A1],gn[gnsp==A2])
        gn2 = np.append(gn[gnsp==B1],gn[gnsp==B2])
        gn3 = np.append(gn[gnsp==C1],gn[gnsp==C2])
        # suppress warning (sparse structure-change warnings during assignment)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            T1 = pd.DataFrame(data=np.arange(gn1.size)[None, :], columns=gn1)
            x, y = T1[op1[:, 0]].values.flatten(), T1[op1[:, 1]].values.flatten()
            gnnm1[x, y] = gnnm1[x, y]
            gnnm1[y, x] = gnnm1[y, x]
            T1 = pd.DataFrame(data=np.arange(gn2.size)[None, :], columns=gn2)
            x, y = T1[op2[:, 0]].values.flatten(), T1[op2[:, 1]].values.flatten()
            gnnm2[x, y] = gnnm2[x, y]
            gnnm2[y, x] = gnnm2[y, x]
            T1 = pd.DataFrame(data=np.arange(gn3.size)[None, :], columns=gn3)
            x, y = T1[op3[:, 0]].values.flatten(), T1[op3[:, 1]].values.flatten()
            gnnm3[x, y] = gnnm3[x, y]
            gnnm3[y, x] = gnnm3[y, x]
        # Replace explicit zeros with a tiny value so they survive as edges.
        gnnm1.data[gnnm1.data==0]=1e-4
        gnnm2.data[gnnm2.data==0]=1e-4
        gnnm3.data[gnnm3.data==0]=1e-4
        pairs1 = gn1[np.vstack(gnnm1.nonzero()).T]
        pairs2 = gn2[np.vstack(gnnm2.nonzero()).T]
        pairs3 = gn3[np.vstack(gnnm3.nonzero()).T]
        data = np.concatenate((gnnm1.data, gnnm2.data, gnnm3.data))
        # Lookup tables: pair name -> correlation, per species pair.
        CORR1 = pd.DataFrame(data=gnnm1.data[None, :], columns=to_vn(pairs1))
        CORR2 = pd.DataFrame(data=gnnm2.data[None, :], columns=to_vn(pairs2))
        CORR3 = pd.DataFrame(data=gnnm3.data[None, :], columns=to_vn(pairs3))
        # Combine all pairs into one graph and enumerate its 3-cliques.
        pairs = np.vstack((pairs1, pairs2, pairs3))
        all_genes = np.unique(pairs.flatten())
        Z = pd.DataFrame(data=np.arange(all_genes.size)[None, :], columns=all_genes)
        x, y = Z[pairs[:, 0]].values.flatten(), Z[pairs[:, 1]].values.flatten()
        GNNM = sp.sparse.lil_matrix((all_genes.size,) * 2)
        GNNM[x, y] = data
        import networkx as nx
        G = nx.from_scipy_sparse_matrix(GNNM, create_using=nx.Graph)
        all_cliques = nx.enumerate_all_cliques(G)
        all_triangles = [x for x in all_cliques if len(x) == 3]
        Z = all_genes[np.sort(np.vstack(all_triangles), axis=1)]
        DF = pd.DataFrame(data=Z, columns=[x.split("_")[0] for x in Z[0]])
        DF = DF[[A, B, C]]
        # Per-species-pair tables of orthologs and (sorted) paralog substitutions.
        orth1DF = pd.DataFrame(data=orth1, columns=[x.split("_")[0] for x in orth1[0]])[
            [A, B]
        ]
        orth2DF = pd.DataFrame(data=orth2, columns=[x.split("_")[0] for x in orth2[0]])[
            [A, C]
        ]
        orth3DF = pd.DataFrame(data=orth3, columns=[x.split("_")[0] for x in orth3[0]])[
            [B, C]
        ]
        ps1DF = pd.DataFrame(
            data=np.sort(pp1, axis=1),
            columns=[x.split("_")[0] for x in np.sort(pp1, axis=1)[0]],
        )[[A, B]]
        ps2DF = pd.DataFrame(
            data=np.sort(pp2, axis=1),
            columns=[x.split("_")[0] for x in np.sort(pp2, axis=1)[0]],
        )[[A, C]]
        ps3DF = pd.DataFrame(
            data=np.sort(pp3, axis=1),
            columns=[x.split("_")[0] for x in np.sort(pp3, axis=1)[0]],
        )[[B, C]]
        # Lookup: paralog pair name -> its corresponding ortholog pair name.
        A_AB = pd.DataFrame(data=to_vn(op1)[None, :], columns=to_vn(ps1DF.values))
        A_AC = pd.DataFrame(data=to_vn(op2)[None, :], columns=to_vn(ps2DF.values))
        A_BC = pd.DataFrame(data=to_vn(op3)[None, :], columns=to_vn(ps3DF.values))
        AB = to_vn(DF[[A, B]].values)
        AC = to_vn(DF[[A, C]].values)
        BC = to_vn(DF[[B, C]].values)
        # Classify each triangle edge as ortholog / substitution / generic homolog.
        AVs = []
        CATs = []
        CORRs = []
        for i, X, O, P, Z, R in zip(
            [0, 1, 2],
            [AB, AC, BC],
            [orth1DF, orth2DF, orth3DF],
            [ps1DF, ps2DF, ps3DF],
            [A_AB, A_AC, A_BC],
            [CORR1, CORR2, CORR3],
        ):
            cat = q(["homolog"] * X.size).astype("object")
            cat[np.in1d(X, to_vn(O.values))] = "ortholog"
            ff = np.in1d(X, to_vn(P.values))
            cat[ff] = "substitution"
            z = Z[X[ff]] #problem line here
            x = X[ff]
            av = np.zeros(x.size, dtype="object")
            for ai in range(x.size):
                v=pd.DataFrame(z[x[ai]]) #get ortholog pairs - paralog pairs dataframe
                vd=v.values.flatten() #get ortholog pairs
                vc=q(';'.join(v.columns).split(';')) # get paralogous genes
                temp = np.unique(q(';'.join(vd).split(';'))) #get orthologous genes
                av[ai] = ';'.join(temp[np.in1d(temp,vc,invert=True)]) #get orthologous genes not present in paralogous genes
            AV = np.zeros(X.size, dtype="object")
            AV[ff] = av
            corr = R[X].values.flatten()
            AVs.append(AV)
            CATs.append(cat)
            CORRs.append(corr)
        tri_pairs = np.vstack((AB, AC, BC)).T
        cat_pairs = np.vstack(CATs).T
        corr_pairs = np.vstack(CORRs).T
        homology_triangles = DF.values
        substituted_genes = np.vstack(AVs).T
        substituted_genes[substituted_genes == 0] = "N.S."
        # Assemble the per-triangle table for this species combination.
        data = np.hstack(
            (
                homology_triangles.astype("object"),
                substituted_genes.astype("object"),
                tri_pairs.astype("object"),
                corr_pairs.astype("object"),
                cat_pairs.astype("object"),
            )
        )
        FINAL = pd.DataFrame(data = data, columns = [f'{A} gene',f'{B} gene',f'{C} gene',
                                                     f'{A}/{B} subbed',f'{A}/{C} subbed',f'{B}/{C} subbed',
                                                     f'{A}/{B}',f'{A}/{C}',f'{B}/{C}',
                                                     f'{A}/{B} corr',f'{A}/{C} corr',f'{B}/{C} corr',
                                                     f'{A}/{B} type',f'{A}/{C} type',f'{B}/{C} type'])
        FINAL['#orthologs'] = (cat_pairs=='ortholog').sum(1)
        FINAL['#substitutions'] = (cat_pairs=='substitution').sum(1)
        # Keep triangles whose three edges are all orthologs or substitutions.
        FINAL = FINAL[(FINAL['#orthologs']+FINAL['#substitutions'])==3]
        x = FINAL[[f'{A}/{B} corr',f'{A}/{C} corr',f'{B}/{C} corr']].min(1)
        FINAL['min_corr'] = x
        FINAL = FINAL[x>corr_thr]
        # NOTE(review): this rebinds `keys` (dict) to a list, which would break
        # later iterations of the `combs` loop when more than 3 species are
        # present — confirm intended usage.
        if keys is not None:
            keys = [keys[A],keys[B],keys[C]]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            if keys is not None:
                # Annotate each gene with the cell types it is enriched in.
                for i,sam,n in zip([0,1,2],[sam1,sam2,sam3],[A,B,C]):
                    if compute_markers:
                        find_cluster_markers(sam,keys[i])
                    a = sam.adata.varm[keys[i]+'_scores'].T[q(FINAL[n+' gene'])].T
                    p = sam.adata.varm[keys[i]+'_pvals'].T[q(FINAL[n+' gene'])].T.values
                    # Binarize p-values: 1 where significant, 0 otherwise.
                    p[p>pval_thr]=1
                    p[p<1]=0
                    p=1-p
                    f = a.columns[a.values.argmax(1)]
                    res=[]
                    for i in range(p.shape[0]):
                        res.append(';'.join(np.unique(np.append(f[i],a.columns[p[i,:]==1]))))
                    FINAL[n+' cell type'] = res
        FINAL = FINAL.sort_values('min_corr',ascending=False)
        FINALS.append(FINAL)
    FINAL = pd.concat(FINALS,axis=0)
    return FINAL
def transfer_annotations(sm,reference_id=None, keys=[],num_iters=5, inplace = True):
    """ Transfer annotations across species using label propagation along the combined manifold.

    Parameters
    ----------
    sm - SAMAP object
    reference_id - str, optional, default None
        The species ID of the reference species from which the annotations will be transferred.
    keys - str or list, optional, default []
        The `obs` key or list of keys corresponding to the labels to be propagated.
        If passed an empty list, all keys in the reference species' `obs` dataframe
        will be propagated.
    num_iters - int, optional, default 5
        The number of steps to run the diffusion propagation.
    inplace - bool, optional, default True
        If True, deposit propagated labels and uncertainties in the stitched object's
        `obs` DataFrame (`sm.samap.adata.obs`). Otherwise, just return the
        soft-membership DataFrame.

    Returns
    -------
    A Pandas DataFrame with soft membership scores for each cluster in each cell.
    NOTE(review): when multiple keys are propagated, only the table for the LAST
    key is returned — confirm whether this is intended.
    """
    stitched = sm.samap
    # Row-normalize the kNN connectivities to get a transition matrix.
    NNM = stitched.adata.obsp['connectivities'].copy()
    NNM = NNM.multiply(1/NNM.sum(1).A).tocsr()
    if type(keys) is str:
        keys = [keys]
    elif len(keys) == 0:
        # Default to all annotation columns of the reference species.
        try:
            keys = list(sm.sams[reference_id].adata.obs.keys())
        except KeyError:
            raise ValueError(f'`reference` must be one of {sm.ids}.')
    for key in keys:
        samref = sm.sams[reference_id]
        ANN = stitched.adata.obs
        ANNr = samref.adata.obs
        cl = ANN[key].values.astype('object').astype('str')
        # Reference labels carry the species-ID prefix in the stitched object.
        clr = reference_id+'_'+ANNr[key].values.astype('object')
        # Blank out labels of non-reference cells; they will be filled by diffusion.
        cl[np.invert(np.in1d(cl,clr))]=''
        clu,clui = np.unique(cl,return_inverse=True)
        # P: one-hot label matrix; Pmask: 1 for rows whose labels may change.
        P = np.zeros((NNM.shape[0],clu.size))
        Pmask = np.ones((NNM.shape[0],clu.size))
        P[np.arange(clui.size),clui]=1.0
        Pmask[stitched.adata.obs['species']==reference_id]=0
        # Drop the first column (the empty, unlabeled class).
        Pmask=Pmask[:,1:]
        P=P[:,1:]
        Pinit = P.copy()
        for j in range(num_iters):
            # One diffusion step along the manifold.
            P_new = NNM.dot(P)
            if np.max(np.abs(P_new - P)) < 5e-3:
                # Converged: renormalize rows and stop.
                P = P_new
                s=P.sum(1)[:,None]
                s[s==0]=1
                P = P/s
                break
            else:
                P = P_new
                s=P.sum(1)[:,None]
                s[s==0]=1
                P = P/s
            # Clamp reference cells back to their original one-hot labels.
            P = P * Pmask + Pinit
        # Uncertainty = 1 - max soft membership; 1.0 means no label reached the cell.
        uncertainty = 1-P.max(1)
        labels = clu[1:][np.argmax(P,axis=1)]
        labels[uncertainty==1.0]='NAN'
        # Saturate the top ~1% most-uncertain cells at 1.
        uncertainty[uncertainty>=uncertainty.max()*0.99] = 1
        if inplace:
            stitched.adata.obs[key+'_transfer'] = pd.Series(labels,index = stitched.adata.obs_names)
            stitched.adata.obs[key+'_uncertainty'] = pd.Series(uncertainty,index=stitched.adata.obs_names)
        res = pd.DataFrame(data=P,index=stitched.adata.obs_names,columns=clu[1:])
        res['labels'] = labels
    return res
def get_mapping_scores(sm, keys, n_top = 0):
    """Compute cell-type mapping scores between species.

    Parameters
    ----------
    sm: SAMAP object
    keys: dict, annotation vector keys for at least two species with species identifiers as the keys
        e.g. {'pl':'tissue','sc':'tissue'}
    n_top: int, optional, default 0
        If 0, the alignment scores of all cells in a pair of clusters are averaged.
        Otherwise, only the top `n_top` cells in each pair of clusters are averaged.
        Use a non-zero value if subpopulations of your cell types may map to
        distinct cell types in the other species.

    Returns
    -------
    D - table of highest mapping scores for cell types
    A - pairwise table of mapping scores between cell types across species
    """
    # If only a subset of species is requested, restrict the stitched object.
    if len(list(keys.keys())) < len(list(sm.sams.keys())):
        species_mask = np.in1d(sm.samap.adata.obs['species'], list(keys.keys()))
        samap = SAM(counts=sm.samap.adata[species_mask])
    else:
        samap = sm.samap

    # Species IDs in order of first appearance in the stitched object.
    first_occurrence = np.unique(samap.adata.obs['species'], return_index=True)[1]
    skeys = q(samap.adata.obs['species'])[np.sort(first_occurrence)]

    # Prefix each cluster label with its species ID and store the combined
    # annotation vector on the stitched object.
    labels = np.concatenate([
        q([sid + '_' + str(x) for x in sm.sams[sid].adata.obs[keys[sid]]])
        for sid in skeys
    ])
    label_key = "{}_mapping_scores".format(';'.join([keys[sid] for sid in skeys]))
    samap.adata.obs[label_key] = pd.Categorical(labels)

    # Pairwise alignment scores between every pair of cell types.
    CSIMth, clu = _compute_csim(samap, label_key, n_top=n_top, prepend=False)
    A = pd.DataFrame(data=CSIMth, index=clu, columns=clu)

    # Rank cell types by their best alignment score (descending) and, for each,
    # list all partners sorted by score.
    order = np.argsort(-A.values.max(0).flatten())
    ranked_blocks = []
    top_level_labels = []
    for col in order:
        ranked = A.iloc[:, col].sort_values(ascending=False)
        ranked_blocks.append(np.vstack((ranked.index, ranked.values)).T)
        # Each cell type contributes two sub-columns (partner name, score),
        # so its label is repeated twice in the MultiIndex.
        top_level_labels.extend([A.columns[col]] * 2)
    stacked = np.hstack(ranked_blocks)
    D = pd.DataFrame(
        data=stacked,
        columns=[top_level_labels, ["Cluster", "Alignment score"] * (stacked.shape[1] // 2)],
    )
    return D, A
| 39.065336 | 157 | 0.556392 | import sklearn.utils.sparsefuncs as sf
from . import q, ut, pd, sp, np, warnings, sc
from .utils import to_vo, to_vn, substr, df_to_dict, sparse_knn, prepend_var_prefix
from samalg import SAM
from scipy.stats import rankdata
def _log_factorial(n):
return np.log(np.arange(1,n+1)).sum()
def _log_binomial(n,k):
return _log_factorial(n) - (_log_factorial(k) + _log_factorial(n-k))
def GOEA(target_genes,GENE_SETS,df_key='GO',goterms=None,fdr_thresh=0.25,p_thresh=1e-3):
"""Performs GO term Enrichment Analysis using the hypergeometric distribution.
Parameters
----------
target_genes - array-like
List of target genes from which to find enriched GO terms.
GENE_SETS - dictionary or pandas.DataFrame
Dictionary where the keys are GO terms and the values are lists of genes associated with each GO term.
Ex: {'GO:0000001': ['GENE_A','GENE_B'],
'GO:0000002': ['GENE_A','GENE_C','GENE_D']}
Make sure to include all available genes that have GO terms in your dataset.
---OR---
Pandas DataFrame with genes as the index and GO terms values.
Ex: 'GENE_A','GO:0000001',
'GENE_A','GO:0000002',
'GENE_B','GO:0000001',
'GENE_B','GO:0000004',
...
If `GENE_SETS` is a pandas DataFrame, the `df_key` parameter should be the name of the column in which
the GO terms are stored.
df_key - str, optional, default 'GO'
The name of the column in which GO terms are stored. Only used if `GENE_SETS` is a DataFrame.
goterms - array-list, optional, default None
If provided, only these GO terms will be tested.
fdr_thresh - float, optional, default 0.25
Filter out GO terms with FDR q value greater than this threshold.
p_thresh - float, optional, default 1e-3
Filter out GO terms with p value greater than this threshold.
Returns:
-------
enriched_goterms - pandas.DataFrame
A Pandas DataFrame of enriched GO terms with FDR q values, p values, and associated genes provided.
"""
# identify all genes found in `GENE_SETS`
if isinstance(GENE_SETS,pd.DataFrame):
print('Converting DataFrame into dictionary')
genes = np.array(list(GENE_SETS.index))
agt = np.array(list(GENE_SETS[df_key].values))
idx = np.argsort(agt)
genes = genes[idx]
agt = agt[idx]
bounds = np.where(agt[:-1]!=agt[1:])[0]+1
bounds = np.append(np.append(0,bounds),agt.size)
bounds_left=bounds[:-1]
bounds_right=bounds[1:]
genes_lists = [genes[bounds_left[i]:bounds_right[i]] for i in range(bounds_left.size)]
GENE_SETS = dict(zip(np.unique(agt),genes_lists))
all_genes = np.unique(np.concatenate(list(GENE_SETS.values())))
all_genes = np.array(all_genes)
# if goterms is None, use all the goterms found in `GENE_SETS`
if goterms is None:
goterms = np.unique(list(GENE_SETS.keys()))
else:
goterms = goterms[np.in1d(goterms,np.unique(list(GENE_SETS.keys())))]
# ensure that target genes are all present in `all_genes`
_,ix = np.unique(target_genes,return_index=True)
target_genes=target_genes[np.sort(ix)]
target_genes = target_genes[np.in1d(target_genes,all_genes)]
# N -- total number of genes
N = all_genes.size
probs=[]
probs_genes=[]
counter=0
# for each go term,
for goterm in goterms:
if counter%1000==0:
pass; #print(counter)
counter+=1
# identify genes associated with this go term
gene_set = np.array(GENE_SETS[goterm])
# B -- number of genes associated with this go term
B = gene_set.size
# b -- number of genes in target associated with this go term
gene_set_in_target = gene_set[np.in1d(gene_set,target_genes)]
b = gene_set_in_target.size
if b != 0:
# calculate the enrichment probability as the cumulative sum of the tail end of a hypergeometric distribution
# with parameters (N,B,n,b)
n = target_genes.size
num_iter = min(n,B)
rng = np.arange(b,num_iter+1)
probs.append(sum([np.exp(_log_binomial(n,i)+_log_binomial(N-n,B-i) - _log_binomial(N,B)) for i in rng]))
else:
probs.append(1.0)
#append associated genes to a list
probs_genes.append(gene_set_in_target)
probs = np.array(probs)
probs_genes = np.array([';'.join(x) for x in probs_genes])
# adjust p value to correct for multiple testing
fdr_q_probs = probs.size*probs / rankdata(probs,method='ordinal')
# filter out go terms based on the FDR q value and p value thresholds
filt = np.logical_and(fdr_q_probs<fdr_thresh,probs<p_thresh)
enriched_goterms = goterms[filt]
p_values = probs[filt]
fdr_q_probs = fdr_q_probs[filt]
probs_genes=probs_genes[filt]
# construct the Pandas DataFrame
gns = probs_genes
enriched_goterms = pd.DataFrame(data=fdr_q_probs,index=enriched_goterms,columns=['fdr_q_value'])
enriched_goterms['p_value'] = p_values
enriched_goterms['genes'] = gns
# sort in ascending order by the p value
enriched_goterms = enriched_goterms.sort_values('p_value')
return enriched_goterms
_KOG_TABLE = dict(A = "RNA processing and modification",
B = "Chromatin structure and dynamics",
C = "Energy production and conversion",
D = "Cell cycle control, cell division, chromosome partitioning",
E = "Amino acid transport and metabolism",
F = "Nucleotide transport and metabolism",
G = "Carbohydrate transport and metabolism",
H = "Coenzyme transport and metabolism",
I = "Lipid transport and metabolism",
J = "Translation, ribosomal structure and biogenesis",
K = "Transcription",
L = "Replication, recombination, and repair",
M = "Cell wall membrane/envelope biogenesis",
N = "Cell motility",
O = "Post-translational modification, protein turnover, chaperones",
P = "Inorganic ion transport and metabolism",
Q = "Secondary metabolites biosynthesis, transport and catabolism",
R = "General function prediction only",
S = "Function unknown",
T = "Signal transduction mechanisms",
U = "Intracellular trafficking, secretion, and vesicular transport",
V = "Defense mechanisms",
W = "Extracellular structures",
Y = "Nuclear structure",
Z = "Cytoskeleton")
import gc
from collections.abc import Iterable
class FunctionalEnrichment(object):
def __init__(self,sm, DFS, col_key, keys, delimiter = '', align_thr = 0.1, limit_reference = False, n_top = 0):
"""Performs functional enrichment analysis on gene pairs enriched
in mapped cell types using functional annotations output by Eggnog.
Parameters
----------
sm - SAMAP object.
DFS - dictionary of pandas.DataFrame functional annotations keyed by species present in the input `SAMAP` object.
col_key - str
The column name with functional annotations in the annotation DataFrames.
keys - dictionary of column keys from `.adata.obs` DataFrames keyed by species present in the input `SAMAP` object.
Cell type mappings will be computed between these annotation vectors.
delimiter - str, optional, default ''
Some transcripts may have multiple functional annotations (e.g. GO terms or KOG terms) separated by
a delimiter. For KOG terms, this is typically no delimiter (''). For GO terms, this is usually a comma
(',').
align_thr - float, optional, default 0.1
The alignment score below which to filter out cell type mappings
limit_reference - bool, optional, default False
If True, limits the background set of genes to include only those that are enriched in any cell type mappings
If False, the background set of genes will include all genes present in the input dataframes.
n_top: int, optional, default 0
If `n_top` is 0, average the alignment scores for all cells in a pair of clusters.
Otherwise, average the alignment scores of the top `n_top` cells in a pair of clusters.
Set this to non-zero if you suspect there to be subpopulations of your cell types mapping
to distinct cell types in the other species.
"""
# get dictionary of sam objects
SAMS=sm.sams
# link up SAM memories.
for sid in sm.ids:
sm.sams[sid] = SAMS[sid]
gc.collect()
for k in DFS.keys():
DFS[k].index = k+'_'+DFS[k].index
# concatenate DFS
A = pd.concat(list(DFS.values()),axis=0)
RES = pd.DataFrame(A[col_key])
RES.columns=['GO']
RES = RES[(q(RES.values.flatten())!='nan')]
# EXPAND RES
data = []
index = []
for i in range(RES.shape[0]):
if delimiter == '':
l = list(RES.values[i][0])
l = np.array([str(x) if str(x).isalpha() else '' for x in l])
l = l[l!= '']
l = list(l)
else:
l = RES.values[i][0].split(delimiter)
data.extend(l)
index.extend([RES.index[i]]*len(l))
RES = pd.DataFrame(index = index,data = data,columns = ['GO'])
genes = np.array(list(RES.index))
agt = np.array(list(RES['GO'].values))
idx = np.argsort(agt)
genes = genes[idx]
agt = agt[idx]
bounds = np.where(agt[:-1]!=agt[1:])[0]+1
bounds = np.append(np.append(0,bounds),agt.size)
bounds_left=bounds[:-1]
bounds_right=bounds[1:]
genes_lists = [genes[bounds_left[i]:bounds_right[i]] for i in range(bounds_left.size)]
GENE_SETS = dict(zip(np.unique(agt),genes_lists))
for cc in GENE_SETS.keys():
GENE_SETS[cc]=np.unique(GENE_SETS[cc])
G = []
print(f'Finding enriched gene pairs...')
gpf = GenePairFinder(sm,keys=keys)
gene_pairs = gpf.find_all(thr=align_thr,n_top=n_top)
self.DICT = {}
for c in gene_pairs.columns:
x = q(gene_pairs[c].values.flatten()).astype('str')
ff = x!='nan'
if ff.sum()>0:
self.DICT[c] = x[ff]
if limit_reference:
all_genes = np.unique(np.concatenate(substr(np.concatenate(list(self.DICT.values())),';')))
else:
all_genes = np.unique(np.array(list(A.index)))
for d in GENE_SETS.keys():
GENE_SETS[d] = GENE_SETS[d][np.in1d(GENE_SETS[d],all_genes)]
self.gene_pairs = gene_pairs
self.CAT_NAMES = np.unique(q(RES['GO']))
self.GENE_SETS = GENE_SETS
self.RES = RES
def calculate_enrichment(self,verbose=False):
""" Calculates the functional enrichment.
Parameters
----------
verbose - bool, optional, default False
If False, function does not log progress to output console.
Returns
-------
ENRICHMENT_SCORES - pandas.DataFrame (cell types x function categories)
Enrichment scores (-log10 p-value) for each function in each cell type.
NUM_ENRICHED_GENES - pandas.DataFrame (cell types x function categories)
Number of enriched genes for each function in each cell type.
ENRICHED_GENES - pandas.DataFrame (cell types x function categories)
The IDs of enriched genes for each function in each cell type.
"""
DICT = self.DICT
RES = self.RES
CAT_NAMES = self.CAT_NAMES
GENE_SETS = self.GENE_SETS
pairs = np.array(list(DICT.keys()))
all_nodes = np.unique(np.concatenate(substr(pairs,';')))
CCG={}
P=[]
for ik in range(len(all_nodes)):
genes=[]
nodes = all_nodes[ik]
for j in range(len(pairs)):
n1,n2 = pairs[j].split(';')
if n1 == nodes or n2 == nodes:
g1,g2 = substr(DICT[pairs[j]],';')
genes.append(np.append(g1,g2))
if len(genes) > 0:
genes = np.concatenate(genes)
genes = np.unique(genes)
else:
genes = np.array([])
CCG[all_nodes[ik]] = genes
HM = np.zeros((len(CAT_NAMES),len(all_nodes)))
HMe = np.zeros((len(CAT_NAMES),len(all_nodes)))
HMg = np.zeros((len(CAT_NAMES),len(all_nodes)),dtype='object')
for ii,cln in enumerate(all_nodes):
if verbose:
print(f'Calculating functional enrichment for cell type {cln}')
g = CCG[cln]
if g.size > 0:
gi = g[np.in1d(g,q(RES.index))]
ix = np.where(np.in1d(q(RES.index),gi))[0]
res = RES.iloc[ix]
goterms = np.unique(q(res['GO']))
goterms = goterms[goterms!='S']
result = GOEA(gi,GENE_SETS,goterms=goterms,fdr_thresh=100,p_thresh=100)
lens = np.array([len(np.unique(x.split(';'))) for x in result['genes'].values])
F = -np.log10(result['p_value'])
gt,vals = F.index,F.values
Z = pd.DataFrame(data=np.arange(CAT_NAMES.size)[None,:],columns=CAT_NAMES)
if gt.size>0:
HM[Z[gt].values.flatten(),ii] = vals
HMe[Z[gt].values.flatten(),ii] = lens
HMg[Z[gt].values.flatten(),ii] = [';'.join(np.unique(x.split(';'))) for x in result['genes'].values]
#CAT_NAMES = [_KOG_TABLE[x] for x in CAT_NAMES]
SC = pd.DataFrame(data = HM,index=CAT_NAMES,columns=all_nodes).T
SCe = pd.DataFrame(data = HMe,index=CAT_NAMES,columns=all_nodes).T
SCg = pd.DataFrame(data = HMg,index=CAT_NAMES,columns=all_nodes).T
SCg.values[SCg.values==0]=''
self.ENRICHMENT_SCORES = SC
self.NUM_ENRICHED_GENES = SCe
self.ENRICHED_GENES = SCg
return self.ENRICHMENT_SCORES,self.NUM_ENRICHED_GENES,self.ENRICHED_GENES
    def plot_enrichment(self,cell_types = [], pval_thr=2.0,msize = 50):
        """Create a plot summarizing the functional enrichment analysis.
        Parameters
        ----------
        cell_types - list, default []
            A list of cell types for which enrichment scores will be plotted. If empty (default),
            all cell types will be plotted.
        pval_thr - float, default 2.0
            -log10 p-values < 2.0 will be filtered from the plot.
        msize - float, default 50
            The marker size in pixels for the dot plot.
        Returns
        -------
        fig - matplotlib.pyplot.Figure
        ax - matplotlib.pyplot.Axes
        """
        # NOTE(review): `cell_types=[]` is a mutable default argument; it is only
        # read here (never mutated) so it is harmless, but a None sentinel would
        # be the safer idiom.
        import colorsys
        import seaborn as sns
        import matplotlib
        # Embed fonts as TrueType so exported PDF/PS figures remain editable.
        matplotlib.rcParams['pdf.fonttype'] = 42
        matplotlib.rcParams['ps.fonttype'] = 42
        from matplotlib.collections import PatchCollection
        from matplotlib.patches import Rectangle
        from matplotlib import cm,colors
        import matplotlib.pyplot as plt
        from scipy.cluster.hierarchy import linkage, dendrogram
        # Results deposited by the enrichment calculation (cell types x categories).
        SC = self.ENRICHMENT_SCORES
        SCe = self.NUM_ENRICHED_GENES
        SCg = self.ENRICHED_GENES
        if len(cell_types) > 0:
            # Restrict all three tables to the requested cell types (rows).
            SC = SC.T[cell_types].T
            SCe = SCe.T[cell_types].T
            SCg = SCg.T[cell_types].T
        CAT_NAMES = self.CAT_NAMES
        gc_names = np.array(CAT_NAMES)
        # Mask entries below the -log10 p-value threshold in all three tables.
        SC.values[SC.values<pval_thr]=0
        SCe.values[SC.values<pval_thr]=0
        SCg.values[SC.values<pval_thr]=''
        SCg=SCg.astype('str')
        SCg.values[SCg.values=='nan']=''
        # Hierarchically cluster rows and columns (Ward linkage) and reorder the
        # tables by the dendrogram leaf order so similar profiles sit together.
        ixrow = np.array(dendrogram(linkage(SC.values.T,method='ward',metric='euclidean'),no_plot=True)['ivl']).astype('int')
        ixcol = np.array(dendrogram(linkage(SC.values,method='ward',metric='euclidean'),no_plot=True)['ivl']).astype('int')
        SC = SC.iloc[ixcol].iloc[:,ixrow]
        SCe = SCe.iloc[ixcol].iloc[:,ixrow]
        SCg = SCg.iloc[ixcol].iloc[:,ixrow]
        SCgx = SCg.values.copy()
        # Keep only genes whose species prefix (text before '_') matches the
        # species prefix of the row's cell type label.
        for i in range(SCgx.shape[0]):
            idn = SCg.index[i].split('_')[0]
            for j in range(SCgx.shape[1]):
                genes = np.array(SCgx[i,j].split(';'))
                SCgx[i,j] = ';'.join(genes[np.array([x.split('_')[0] for x in genes]) == idn])
        # Build the dot-plot coordinates: color = enrichment score,
        # size = number of enriched genes (normalized to `msize`).
        x,y=np.tile(np.arange(SC.shape[0]),SC.shape[1]),np.repeat(np.arange(SC.shape[1]),SC.shape[0])
        co = SC.values[x,y].flatten()#**0.5
        ms = SCe.values[x,y].flatten()
        ms=ms/ms.max()
        x=x.max()-x #
        ms = ms*msize
        # Give nonzero dots a minimum size so they remain visible.
        ms[np.logical_and(ms<0.15,ms>0)]=0.15
        fig,ax = plt.subplots();
        fig.set_size_inches((7*SC.shape[0]/SC.shape[1],7))
        scat=ax.scatter(x,y,c=co,s=ms,cmap='seismic',edgecolor='k',linewidth=0.5,vmin=3)
        cax = fig.colorbar(scat,pad=0.02);
        ax.set_yticks(np.arange(SC.shape[1]))
        ax.set_yticklabels(SC.columns,ha='right',rotation=0)
        ax.set_xticks(np.arange(SC.shape[0]))
        ax.set_xticklabels(SC.index[::-1],ha='right',rotation=45)
        ax.invert_yaxis()
        ax.invert_xaxis()
        #ax.figure.tight_layout()
        return fig,ax
def sankey_plot(M,species_order=None,align_thr=0.1,**params):
    """Generate a sankey plot
    Parameters
    ----------
    M: pandas.DataFrame
        Mapping table output from `get_mapping_scores` (second output).
    align_thr: float, optional, default 0.1
        The alignment score threshold below which to remove cell type mappings.
    species_order: list, optional, default None
        Specify the order of species (left-to-right) in the sankey plot.
        For example, `species_order=['hu','le','ms']`.
    Keyword arguments
    -----------------
    Keyword arguments will be passed to `sankey.opts`.

    Returns
    -------
    holoviews.Sankey element.
    """
    # Species IDs are the prefixes (before '_') of the cell type labels; their
    # order fixes the left-to-right layout of the diagram.
    if species_order is not None:
        ids = np.array(species_order)
    else:
        ids = np.unique([x.split('_')[0] for x in M.index])
    if len(ids)>2:
        # Three or more species: keep only edges between adjacent species in
        # `ids` (first<->second and second<->third) so the sankey reads as a
        # chain, and orient each edge source->target by species depth.
        d = M.values.copy()
        d[d<align_thr]=0
        x,y = d.nonzero()
        # Deduplicate symmetric (i,j)/(j,i) edges.
        x,y = np.unique(np.sort(np.vstack((x,y)).T,axis=1),axis=0).T
        values = d[x,y]
        nodes = q(M.index)
        node_pairs = nodes[np.vstack((x,y)).T]
        sn1 = q([xi.split('_')[0] for xi in node_pairs[:,0]])
        sn2 = q([xi.split('_')[0] for xi in node_pairs[:,1]])
        filt = np.logical_or(
            np.logical_or(np.logical_and(sn1==ids[0],sn2==ids[1]),np.logical_and(sn1==ids[1],sn2==ids[0])),
            np.logical_or(np.logical_and(sn1==ids[1],sn2==ids[2]),np.logical_and(sn1==ids[2],sn2==ids[1]))
        )
        x,y,values=x[filt],y[filt],values[filt]
        # Depth of each node = position of its species in `ids`.
        d=dict(zip(ids,list(np.arange(len(ids)))))
        depth_map = dict(zip(nodes,[d[xi.split('_')[0]] for xi in nodes]))
        data = nodes[np.vstack((x,y))].T
        # Flip edges so the shallower species is always the source.
        for i in range(data.shape[0]):
            if d[data[i,0].split('_')[0]] > d[data[i,1].split('_')[0]]:
                data[i,:]=data[i,::-1]
        R = pd.DataFrame(data = data,columns=['source','target'])
        R['Value'] = values
    else:
        # Two species: keep all above-threshold edges; no depth map needed.
        d = M.values.copy()
        d[d<align_thr]=0
        x,y = d.nonzero()
        x,y = np.unique(np.sort(np.vstack((x,y)).T,axis=1),axis=0).T
        values = d[x,y]
        nodes = q(M.index)
        R = pd.DataFrame(data = nodes[np.vstack((x,y))].T,columns=['source','target'])
        R['Value'] = values
        depth_map=None
    try:
        from holoviews import dim
        #from bokeh.models import Label
        import holoviews as hv
        hv.extension('bokeh',logo=False)
        hv.output(size=100)
    except:
        raise ImportError('Please install holoviews-samap with `!pip install holoviews-samap`.')
    def f(plot,element):
        # Bokeh hook: widen the x range so node labels are not clipped.
        plot.handles['plot'].sizing_mode='scale_width'
        plot.handles['plot'].x_range.start = -600
        #plot.handles['plot'].add_layout(Label(x=plot.handles['plot'].x_range.end*0.78, y=plot.handles['plot'].y_range.end*0.96, text=id2))
        plot.handles['plot'].x_range.end = 1500
        #plot.handles['plot'].add_layout(Label(x=0, y=plot.handles['plot'].y_range.end*0.96, text=id1))
    sankey1 = hv.Sankey(R, kdims=["source", "target"])#, vdims=["Value"])
    # Styling options, all overridable through **params.
    cmap = params.get('cmap','Colorblind')
    label_position = params.get('label_position','outer')
    edge_line_width = params.get('edge_line_width',0)
    show_values = params.get('show_values',False)
    node_padding = params.get('node_padding',4)
    node_alpha = params.get('node_alpha',1.0)
    node_width = params.get('node_width',40)
    node_sort = params.get('node_sort',True)
    frame_height = params.get('frame_height',1000)
    frame_width = params.get('frame_width',800)
    bgcolor = params.get('bgcolor','snow')
    apply_ranges = params.get('apply_ranges',True)
    # NOTE(review): `depth_map` is not a stock holoviews option — it appears to
    # be specific to the holoviews-samap fork required above; verify against
    # that package.
    sankey1.opts(cmap=cmap,label_position=label_position, edge_line_width=edge_line_width, show_values=show_values,
                 node_padding=node_padding,depth_map=depth_map, node_alpha=node_alpha, node_width=node_width,
                 node_sort=node_sort,frame_height=frame_height,frame_width=frame_width,bgcolor=bgcolor,
                 apply_ranges=apply_ranges,hooks=[f])
    return sankey1
def chord_plot(A,align_thr=0.1):
    """Generate a chord plot

    Parameters
    ----------
    A: pandas.DataFrame
        Mapping table output from `get_mapping_scores` (second output).
    align_thr: float, optional, default 0.1
        The alignment score threshold below which to remove cell type mappings.

    Returns
    -------
    holoviews.Chord element of the thresholded cell type mapping graph.
    """
    try:
        from holoviews import dim, opts
        import holoviews as hv
        hv.extension('bokeh',logo=False)
        hv.output(size=300)
    except:
        raise ImportError('Please install holoviews-samap with `!pip install holoviews-samap`.')

    # Zero out sub-threshold alignment scores; the survivors become edges.
    scores = A.values.copy()
    scores[scores < align_thr] = 0
    row_ix, col_ix = scores.nonzero()
    edge_vals = scores[row_ix, col_ix]
    src, tgt = A.index[row_ix], A.columns[col_ix]

    # Build the edge table; stacking through np.array keeps an object dtype so
    # the 'value' column stays numeric per-element.
    links = pd.DataFrame(data=np.array([src, tgt, edge_vals]).T,
                         columns=['source', 'target', 'value'])
    # Color edges by the pair of species prefixes they connect.
    links['edge_grp'] = [s.split('_')[0] + t.split('_')[0]
                         for s, t in zip(links['source'], links['target'])]
    links['value'] *= 100

    # Rescale edge weights into (1, 100] so the weakest kept edge stays visible,
    # then round to integers as required by the chord renderer.
    raw = links['value'].values
    rescaled = ((raw - raw.min()) / (raw.max() - raw.min()) * 0.99 + 0.01) * 100
    links['value'] = rescaled
    links['value'] = np.round([v for v in links['value'].values]).astype('int')

    # Node table: every cell type that participates in at least one kept edge,
    # grouped by species prefix for coloring.
    node_ids = np.unique(A.index)
    node_ids = node_ids[np.in1d(node_ids, np.unique(np.array([src, tgt])))]
    links = hv.Dataset(links)
    nodes = hv.Dataset(
        pd.DataFrame(
            data=np.array([node_ids, node_ids,
                           np.array([c.split('_')[0] for c in node_ids])]).T,
            columns=['index', 'name', 'group'],
        ),
        'index',
    )
    chord = hv.Chord((links, nodes),kdims=["source", "target"], vdims=["value","edge_grp"])#.select(value=(5, None))
    chord.opts(
        opts.Chord(cmap='Category20', edge_cmap='Category20',edge_color=dim('edge_grp'),
                   labels='name', node_color=dim('group').str()))
    return chord
class GenePairFinder(object):
    """Find enriched gene pairs in cell type mappings produced by SAMAP.

    Precomputes per-species gene means/standard deviations and cluster-specific
    marker scores on construction, then exposes `find_all` / `find_genes` to
    score gene pairs linking mapped cell types.
    """
    def __init__(self, sm, keys=None):
        """Find enriched gene pairs in cell type mappings.
        sm: SAMAP object
        keys: dict of str, optional, default None
            Keys corresponding to the annotations vectors in the AnnData's keyed by species ID.
            By default, will use the leiden clusters, e.g. {'hu':'leiden_clusters','ms':'leiden_clusters'}.
        """
        if keys is None:
            keys={}
            for sid in sm.sams.keys():
                keys[sid] = 'leiden_clusters'
        self.sm = sm
        self.sams = sm.sams
        self.s3 = sm.samap
        # All gene names in the combined (stitched) manifold.
        self.gns = q(sm.samap.adata.var_names)
        # Reweighted cross-species homology graph (genes x genes, sparse).
        self.gnnm = sm.samap.adata.varp['homology_graph_reweighted']
        self.gns_dict = sm.gns_dict
        self.ids = sm.ids
        mus={}
        stds={}
        for sid in self.sams.keys():
            self.sams[sid].adata.obs[keys[sid]] = self.sams[sid].adata.obs[keys[sid]].astype('str')
            # Per-gene mean and standard deviation for later standardization;
            # zero-variance genes get std 1 to avoid division by zero.
            mu, var = sf.mean_variance_axis(self.sams[sid].adata[:, self.gns_dict[sid]].X, axis=0)
            var[var == 0] = 1
            var = var ** 0.5
            mus[sid]=pd.Series(data=mu,index=self.gns_dict[sid])
            stds[sid]=pd.Series(data=var,index=self.gns_dict[sid])
        self.mus = mus
        self.stds = stds
        self.keys = keys
        self.find_markers()
    def find_markers(self):
        """Compute cluster-specific marker scores/p-values for each species,
        caching the results in `adata.varm` so repeat calls are cheap."""
        for sid in self.sams.keys():
            print(
                "Finding cluster-specific markers in {}:{}.".format(
                    sid, self.keys[sid]
                )
            )
            import gc
            if self.keys[sid]+'_scores' not in self.sams[sid].adata.varm.keys():
                find_cluster_markers(self.sams[sid], self.keys[sid])
            gc.collect()
    def find_all(self,n=None,align_thr=0.1,n_top=0,**kwargs):
        """Find enriched gene pairs in all pairs of mapped cell types.
        Parameters
        ----------
        n: str, optional, default None
            If passed, find enriched gene pairs of all cell types connected to `n`.
        align_thr: float, optional, default 0.1
            Alignment score threshold above which to consider cell type pairs mapped.
        n_top: int, optional, default 0
            If `n_top` is 0, average the alignment scores for all cells in a pair of clusters.
            Otherwise, average the alignment scores of the top `n_top` cells in a pair of clusters.
            Set this to non-zero if you suspect there to be subpopulations of your cell types mapping
            to distinct cell types in the other species.
        Keyword arguments
        -----------------
        Keyword arguments to `find_genes` accepted here.
        Returns
        -------
        Table of enriched gene pairs for each cell type pair
        """
        _,M = get_mapping_scores(self.sm, self.keys, n_top = n_top)
        ax = q(M.index)
        data = M.values.copy()
        data[data<align_thr]=0
        x,y = data.nonzero()
        ct1,ct2 = ax[x],ax[y]
        if n is not None:
            # Restrict to mappings that involve the requested cell type.
            f1 = ct1==n
            f2 = ct2==n
            f = np.logical_or(f1,f2)
        else:
            f = np.array([True]*ct2.size)
        ct1=ct1[f]
        ct2=ct2[f]
        # Deduplicate symmetric pairs (a,b)/(b,a).
        ct1,ct2 = np.unique(np.sort(np.vstack((ct1,ct2)).T,axis=1),axis=0).T
        res={}
        for i in range(ct1.size):
            a = '_'.join(ct1[i].split('_')[1:])
            b = '_'.join(ct2[i].split('_')[1:])
            print('Calculating gene pairs for the mapping: {};{} to {};{}'.format(ct1[i].split('_')[0],a,ct2[i].split('_')[0],b))
            res['{};{}'.format(ct1[i],ct2[i])] = self.find_genes(ct1[i],ct2[i],**kwargs)
        res = pd.DataFrame([res[k][0] for k in res.keys()],index=res.keys()).fillna(np.nan).T
        return res
    def find_genes(
        self,
        n1,
        n2,
        w1t=0.2,
        w2t=0.2,
        n_genes=1000,
        thr=1e-2,
    ):
        """Find enriched gene pairs in a particular pair of cell types.
        n1: str, cell type ID from species 1
        n2: str, cell type ID from species 2
        w1t & w2t: float, optional, default 0.2
            SAM weight threshold for species 1 and 2. Genes with below this threshold will not be
            included in any enriched gene pairs.
        n_genes: int, optional, default 1000
            Takes the top 1000 ranked gene pairs before filtering based on differential expressivity and
            SAM weights.
        thr: float, optional, default 0.01
            Excludes genes with greater than 0.01 differential expression p-value.
        Returns
        -------
        G - Enriched gene pairs
        G1 - Genes from species 1 involved in enriched gene pairs
        G2 - Genes from species 2 involved in enriched gene pairs
        """
        n1 = str(n1)
        n2 = str(n2)
        # Cell type IDs are '<species>_<cluster>'.
        id1,id2 = n1.split('_')[0],n2.split('_')[0]
        sam1,sam2=self.sams[id1],self.sams[id2]
        n1,n2 = '_'.join(n1.split('_')[1:]),'_'.join(n2.split('_')[1:])
        assert n1 in q(self.sams[id1].adata.obs[self.keys[id1]])
        assert n2 in q(self.sams[id2].adata.obs[self.keys[id2]])
        m,gpairs = self._find_link_genes_avg(n1, n2, id1,id2, w1t=w1t, w2t=w2t, expr_thr=0.05)
        self.gene_pair_scores = pd.Series(index=gpairs, data=m)
        # Take the top-scoring pairs, then keep only those differentially
        # expressed (p < thr) in BOTH cell types.
        G = q(gpairs[np.argsort(-m)[:n_genes]])
        G1 = substr(G, ";", 0)
        G2 = substr(G, ";", 1)
        G = q(
            G[
                np.logical_and(
                    q(sam1.adata.varm[self.keys[id1] + "_pvals"][n1][G1] < thr),
                    q(sam2.adata.varm[self.keys[id2] + "_pvals"][n2][G2] < thr),
                )
            ]
        )
        G1 = substr(G, ";", 0)
        G2 = substr(G, ";", 1)
        # Deduplicate genes while preserving their rank order.
        _, ix1 = np.unique(G1, return_index=True)
        _, ix2 = np.unique(G2, return_index=True)
        G1 = G1[np.sort(ix1)]
        G2 = G2[np.sort(ix2)]
        return G, G1, G2
    def _find_link_genes_avg(self, c1, c2, id1, id2, w1t=0.35, w2t=0.35, expr_thr=0.05):
        """Score every cross-species homologous gene pair for the cell type pair
        (c1 in species id1, c2 in species id2).

        Returns (scores, pair_names) where the score combines the standardized
        co-expression of the two genes in the mutually-neighboring cells,
        gated by expression fraction (`expr_thr`) and SAM weights (`w1t`/`w2t`).
        """
        mus = self.mus
        stds = self.stds
        sams=self.sams
        keys=self.keys
        sam3=self.s3
        gnnm = self.gnnm
        gns = self.gns
        xs = []
        for sid in [id1,id2]:
            xs.append(sams[sid].get_labels(keys[sid]).astype('str').astype('object'))
        x1,x2 = xs
        # All homologous gene pairs, oriented so g1 is from id1 and g2 from id2.
        g1, g2 = gns[np.vstack(gnnm.nonzero())]
        gs1,gs2 = q([x.split('_')[0] for x in g1]),q([x.split('_')[0] for x in g2])
        filt = np.logical_and(gs1==id1,gs2==id2)
        g1=g1[filt]
        g2=g2[filt]
        sam1,sam2 = sams[id1],sams[id2]
        mu1,std1,mu2,std2 = mus[id1][g1].values,stds[id1][g1].values,mus[id2][g2].values,stds[id2][g2].values
        # Standardized expression of the paired genes within each cluster.
        X1 = _sparse_sub_standardize(sam1.adata[:, g1].X[x1 == c1, :], mu1, std1)
        X2 = _sparse_sub_standardize(sam2.adata[:, g2].X[x2 == c2, :], mu2, std2)
        # Cells from c1 and c2 that are cross-species neighbors in the combined
        # manifold (union of both directions of the kNN graph).
        a, b = sam3.adata.obsp["connectivities"][sam3.adata.obs['species']==id1,:][:,sam3.adata.obs['species']==id2][
            x1 == c1, :][:, x2 == c2].nonzero()
        c, d = sam3.adata.obsp["connectivities"][sam3.adata.obs['species']==id2,:][:,sam3.adata.obs['species']==id1][
            x2 == c2, :][:, x1 == c1].nonzero()
        pairs = np.unique(np.vstack((np.vstack((a, b)).T, np.vstack((d, c)).T)), axis=0)
        # Average standardized expression over the linked cells, clip negatives,
        # and combine the two species multiplicatively.
        av1 = X1[np.unique(pairs[:, 0]), :].mean(0).A.flatten()
        av2 = X2[np.unique(pairs[:, 1]), :].mean(0).A.flatten()
        sav1 = (av1 - av1.mean()) / av1.std()
        sav2 = (av2 - av2.mean()) / av2.std()
        sav1[sav1 < 0] = 0
        sav2[sav2 < 0] = 0
        val = sav1 * sav2 / sav1.size
        # Binarize to compute the fraction of cells expressing each gene.
        X1.data[:] = 1
        X2.data[:] = 1
        min_expr = (X1.mean(0).A.flatten() > expr_thr) * (
            X2.mean(0).A.flatten() > expr_thr
        )
        # Gate by SAM feature weights. Fix: honor the w1t/w2t parameters
        # instead of the previously hard-coded 0.2 threshold (find_genes
        # passes w1t=w2t=0.2 by default, so default behavior is unchanged).
        w1 = sam1.adata.var["weights"][g1].values.copy()
        w2 = sam2.adata.var["weights"][g2].values.copy()
        w1[w1 < w1t] = 0
        w2[w2 < w2t] = 0
        w1[w1 > 0] = 1
        w2[w2 > 0] = 1
        return val * w1 * w2 * min_expr, to_vn(np.array([g1,g2]).T)
def find_cluster_markers(sam, key, inplace=True):
    """ Finds differentially expressed genes for provided cell type labels.
    Parameters
    ----------
    sam - SAM object
    key - str
        Column in `sam.adata.obs` for which to identifying differentially expressed genes.
    inplace - bool, optional, default True
        If True, deposits enrichment scores in `sam.adata.varm[f'{key}_scores']`
        and p-values in `sam.adata.varm[f'{key}_pvals']`.
        Otherwise, returns three pandas.DataFrame objects (genes x clusters).
        NAMES - the gene names
        PVALS - the p-values
        SCORES - the enrichment scores
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Singleton clusters (one cell) break the Wilcoxon test; exclude them
        # from the ranking and assign them null results at the end.
        a,c = np.unique(q(sam.adata.obs[key]),return_counts=True)
        t = a[c==1]
        adata = sam.adata[np.in1d(q(sam.adata.obs[key]),a[c==1],invert=True)].copy()
        sc.tl.rank_genes_groups(
            adata,
            key,
            method="wilcoxon",
            n_genes=sam.adata.shape[1],
            use_raw=False,
            layer=None,
        )
        sam.adata.uns['rank_genes_groups'] = adata.uns['rank_genes_groups']
        NAMES = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["names"])
        PVALS = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["pvals"])
        SCORES = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["scores"])
        if not inplace:
            return NAMES, PVALS, SCORES
        # Reshape the per-cluster ranked lists into gene-indexed frames aligned
        # with `var_names`.
        dfs1 = []
        dfs2 = []
        for i in range(SCORES.shape[1]):
            names = NAMES.iloc[:, i]
            scores = SCORES.iloc[:, i]
            pvals = PVALS.iloc[:, i]
            # Only up-regulated genes count as markers: null the p-values of
            # negative-score genes first, then clip the scores themselves.
            pvals[scores < 0] = 1.0
            scores[scores < 0] = 0
            pvals = q(pvals)
            scores = q(scores)
            dfs1.append(pd.DataFrame(
                data=scores[None, :], index = [SCORES.columns[i]], columns=names
            )[sam.adata.var_names].T)
            dfs2.append(pd.DataFrame(
                data=pvals[None, :], index = [SCORES.columns[i]], columns=names
            )[sam.adata.var_names].T)
        df1 = pd.concat(dfs1,axis=1)
        df2 = pd.concat(dfs2,axis=1)
        try:
            sam.adata.varm[key+'_scores'] = df1
            sam.adata.varm[key+'_pvals'] = df2
        except Exception:
            # AnnData may reject varm frames whose index does not match its
            # registered dim names; re-register and retry once.
            sam.adata.varm.dim_names = sam.adata.var_names
            sam.adata.varm[key+'_scores'] = df1
            sam.adata.varm[key+'_pvals'] = df2
        # Excluded singleton clusters get null scores/p-values.
        for i in range(t.size):
            sam.adata.varm[key+'_scores'][t[i]]=0
            sam.adata.varm[key+'_pvals'][t[i]]=1
def ParalogSubstitutions(sm, ortholog_pairs, paralog_pairs=None, psub_thr = 0.3):
    """Identify paralog substitutions.
    For all genes in `ortholog_pairs` and `paralog_pairs`, this function expects the genes to
    be prepended with their corresponding species IDs.
    Parameters
    ----------
    sm - SAMAP object
    ortholog_pairs - n x 2 numpy array of ortholog pairs
    paralog_pairs - n x 2 numpy array of paralog pairs, optional, default None
        If None, assumes every pair in the homology graph that is not an ortholog is a paralog.
        Note that this would essentially result in the more generic 'homolog substitutions' rather
        than paralog substitutions.
        The paralogs can be either cross-species, within-species, or a mix of both.
    psub_thr - float, optional, default 0.3
        Threshold for correlation difference between paralog pairs and ortholog pairs.
        Paralog pairs that do not have greater than `psub_thr` correlation than their
        corresponding ortholog pairs are filtered out.
    Returns
    -------
    RES - pandas.DataFrame
        A table of paralog substitutions.
    """
    if paralog_pairs is not None:
        # Split the input into within-species and cross-species paralog pairs.
        ids1 = np.array([x.split('_')[0] for x in paralog_pairs[:,0]])
        ids2 = np.array([x.split('_')[0] for x in paralog_pairs[:,1]])
        ix = np.where(ids1==ids2)[0]
        ixnot = np.where(ids1!=ids2)[0]
        if ix.size > 0:
            # Convert within-species paralog pairs to cross-species pairs by
            # pairing each gene's within-species paralogs with its orthologs.
            pps = paralog_pairs[ix]
            # Build a symmetric gene -> within-species-paralogs lookup.
            ZZ1 = {}
            ZZ2 = {}
            for i in range(pps.shape[0]):
                L = ZZ1.get(pps[i,0],[])
                L.append(pps[i,1])
                ZZ1[pps[i,0]]=L
                L = ZZ2.get(pps[i,1],[])
                L.append(pps[i,0])
                ZZ2[pps[i,1]]=L
            keys = list(ZZ1.keys())
            for k in keys:
                L = ZZ2.get(k,[])
                L.extend(ZZ1[k])
                ZZ2[k] = list(np.unique(L))
            ZZ = ZZ2
            L1=[]
            L2=[]
            for i in range(ortholog_pairs.shape[0]):
                # Narrowed from a bare `except:` — only a missing key is
                # expected here (consistent with the KeyError handling below).
                try:
                    x = ZZ[ortholog_pairs[i,0]]
                except KeyError:
                    x = []
                L1.extend([ortholog_pairs[i,1]]*len(x))
                L2.extend(x)
                try:
                    x = ZZ[ortholog_pairs[i,1]]
                except KeyError:
                    x = []
                L1.extend([ortholog_pairs[i,0]]*len(x))
                L2.extend(x)
            L = np.vstack((L2,L1)).T
            pps = np.unique(np.sort(L,axis=1),axis=0)
            paralog_pairs = np.unique(np.sort(np.vstack((pps,paralog_pairs[ixnot])),axis=1),axis=0)
    smp = sm.samap
    gnnm = smp.adata.varp["homology_graph_reweighted"]
    gn = q(smp.adata.var_names)
    # Keep only pairs whose genes are both present in the stitched manifold.
    ortholog_pairs = np.sort(ortholog_pairs,axis=1)
    ortholog_pairs = ortholog_pairs[np.logical_and(np.in1d(ortholog_pairs[:,0],gn),np.in1d(ortholog_pairs[:,1],gn))]
    if paralog_pairs is None:
        paralog_pairs = gn[np.vstack(smp.adata.varp["homology_graph"].nonzero()).T]
    else:
        paralog_pairs = paralog_pairs[np.logical_and(np.in1d(paralog_pairs[:,0],gn),np.in1d(paralog_pairs[:,1],gn))]
    # A pair cannot be both a paralog pair and an ortholog pair.
    paralog_pairs = np.sort(paralog_pairs,axis=1)
    paralog_pairs = paralog_pairs[
        np.in1d(to_vn(paralog_pairs), np.append(to_vn(ortholog_pairs),to_vn(ortholog_pairs[:,::-1])), invert=True)
    ]
    # Map gene names to integer indices and symmetrize both pair sets.
    A = pd.DataFrame(data=np.arange(gn.size)[None, :], columns=gn)
    xp, yp = (
        A[paralog_pairs[:, 0]].values.flatten(),
        A[paralog_pairs[:, 1]].values.flatten(),
    )
    xp, yp = np.unique(
        np.vstack((np.vstack((xp, yp)).T, np.vstack((yp, xp)).T)), axis=0
    ).T
    xo, yo = (
        A[ortholog_pairs[:, 0]].values.flatten(),
        A[ortholog_pairs[:, 1]].values.flatten(),
    )
    xo, yo = np.unique(
        np.vstack((np.vstack((xo, yo)).T, np.vstack((yo, xo)).T)), axis=0
    ).T
    A = pd.DataFrame(data=np.vstack((xp, yp)).T, columns=["x", "y"])
    pairdict = df_to_dict(A, key_key="x", val_key="y")
    # For each ortholog pair (xo, yo), enumerate paralog partners of xo to form
    # candidate substitution pairs.
    Xp = []
    Yp = []
    Xo = []
    Yo = []
    for i in range(xo.size):
        try:
            y = pairdict[xo[i]]
        except KeyError:
            y = np.array([])
        Yp.extend(y)
        Xp.extend([xo[i]] * y.size)
        Xo.extend([xo[i]] * y.size)
        Yo.extend([yo[i]] * y.size)
    orths = to_vn(gn[np.vstack((np.array(Xo), np.array(Yo))).T])
    paras = to_vn(gn[np.vstack((np.array(Xp), np.array(Yp))).T])
    # Correlations come from the reweighted homology graph edge weights.
    orth_corrs = gnnm[Xo, Yo].A.flatten()
    par_corrs = gnnm[Xp, Yp].A.flatten()
    diff_corrs = par_corrs - orth_corrs
    RES = pd.DataFrame(
        data=np.vstack((orths, paras)).T, columns=["ortholog pairs", "paralog pairs"]
    )
    RES["ortholog corrs"] = orth_corrs
    RES["paralog corrs"] = par_corrs
    RES["corr diff"] = diff_corrs
    RES = RES.sort_values("corr diff", ascending=False)
    # Keep substitutions where the paralog outscores the ortholog by psub_thr.
    RES = RES[RES["corr diff"] > psub_thr]
    # Finally, require the ortholog pair and paralog pair to span the same
    # pair of species.
    orths = RES['ortholog pairs'].values.flatten()
    paras = RES['paralog pairs'].values.flatten()
    orthssp = np.vstack([np.array([x.split('_')[0] for x in xx]) for xx in to_vo(orths)])
    parassp = np.vstack([np.array([x.split('_')[0] for x in xx]) for xx in to_vo(paras)])
    filt=[]
    for i in range(orthssp.shape[0]):
        filt.append(np.in1d(orthssp[i],parassp[i]).mean()==1.0)
    filt=np.array(filt)
    return RES[filt]
def convert_eggnog_to_homologs(sm, EGGs, og_key = 'eggNOG_OGs', taxon=2759):
    """Gets an n x 2 array of homologs at some taxonomic level based on Eggnog results.
    Parameters
    ----------
    smp: SAMAP object
    EGGs: dict of pandas.DataFrame, Eggnog output tables keyed by species IDs
    og_key: str, optional, default 'eggNOG_OGs'
        The column name of the orthology group mapping results in the Eggnog output tables.
    taxon: int, optional, default 2759
        Taxonomic ID corresponding to the level at which genes with overlapping orthology groups
        will be considered homologs. Defaults to the Eukaryotic level.
    Returns
    -------
    homolog_pairs: n x 2 numpy array of homolog pairs.
    """
    smp = sm.samap
    taxon = str(taxon)
    EGGs = dict(zip(list(EGGs.keys()),list(EGGs.values()))) #copying
    for k in EGGs.keys():
        EGGs[k] = EGGs[k].copy()
    # Prefix each table's gene index with its species ID and concatenate, to
    # match the naming in the stitched manifold.
    Es=[]
    for k in EGGs.keys():
        A=EGGs[k]
        A.index=k+"_"+A.index
        Es.append(A)
    A = pd.concat(Es, axis=0)
    gn = q(smp.adata.var_names)
    A = A[np.in1d(q(A.index), gn)]
    orthology_groups = A[og_key]
    og = q(orthology_groups)
    # Enumerate all orthology group tokens to build a group -> column index map.
    x = np.unique(",".join(og).split(","))
    D = pd.DataFrame(data=np.arange(x.size)[None, :], columns=x)
    for i in range(og.size):
        # Each entry looks like 'OG@taxid|lineage,...'; extract the taxid of
        # each token and keep only groups at the requested taxonomic level.
        n = orthology_groups[i].split(",")
        taxa = substr(substr(n, "@", 1),'|',0)
        # NOTE(review): the '2759' here is hard-coded — a gene with multiple
        # Eukaryota-level OGs is discarded only when taxon == 2759; for other
        # taxa, multiple matches are concatenated by the join below. Confirm
        # this asymmetry is intentional.
        if (taxa == "2759").sum() > 1 and taxon == '2759':
            og[i] = ""
        else:
            og[i] = "".join(np.array(n)[taxa == taxon])
    A[og_key] = og
    # Re-align orthology groups to the manifold's gene order; genes missing
    # from the Eggnog tables become empty strings.
    og = q(A[og_key].reindex(gn))
    og[og == "nan"] = ""
    # Build a sparse gene x orthology-group membership matrix B.
    X = []
    Y = []
    for i in range(og.size):
        x = og[i]
        if x != "":
            X.extend(D[x].values.flatten())
            Y.extend([i])
    X = np.array(X)
    Y = np.array(Y)
    B = sp.sparse.lil_matrix((og.size, D.size))
    B[Y, X] = 1
    B = B.tocsr()
    # B @ B.T links genes sharing at least one orthology group.
    B = B.dot(B.T)
    B.data[:] = 1
    pairs = gn[np.vstack((B.nonzero())).T]
    # Drop within-species pairs and deduplicate symmetric pairs.
    pairssp = np.vstack([q([x.split('_')[0] for x in xx]) for xx in pairs])
    return np.unique(np.sort(pairs[pairssp[:,0]!=pairssp[:,1]],axis=1),axis=0)
def CellTypeTriangles(sm,keys, align_thr=0.1):
    """Outputs a table of cell type triangles.
    Parameters
    ----------
    sm: SAMAP object - assumed to contain at least three species.
    keys: dictionary of annotation keys (`.adata.obs[key]`) keyed by species.
    align_thr: float, optional, default, 0.1
        Only keep triangles with minimum `align_thr` alignment score.

    Returns
    -------
    pandas.DataFrame with one row per cell type triangle; columns are the
    species IDs plus the pairwise alignment scores (one `'<s1>;<s2>'` column
    per species pair).
    """
    D,A = get_mapping_scores(sm,keys=keys)
    x,y = A.values.nonzero()
    all_pairsf = np.array([A.index[x],A.columns[y]]).T.astype('str')
    alignmentf = A.values[x,y].flatten()
    alignment = alignmentf.copy()
    all_pairs = all_pairsf.copy()
    # Threshold and canonicalize the pairs (sorted so (a,b) == (b,a)).
    all_pairs = all_pairs[alignment > align_thr]
    alignment = alignment[alignment > align_thr]
    all_pairs = to_vn(np.sort(all_pairs, axis=1))
    # Build a symmetric cell-type adjacency matrix weighted by alignment score.
    x, y = substr(all_pairs, ";")
    ctu = np.unique(np.concatenate((x, y)))
    Z = pd.DataFrame(data=np.arange(ctu.size)[None, :], columns=ctu)
    nnm = sp.sparse.lil_matrix((ctu.size,) * 2)
    nnm[Z[x].values.flatten(), Z[y].values.flatten()] = alignment
    nnm[Z[y].values.flatten(), Z[x].values.flatten()] = alignment
    nnm = nnm.tocsr()
    import networkx as nx
    G = nx.Graph()
    gps=ctu[np.vstack(nnm.nonzero()).T]
    G.add_edges_from(gps)
    # nnm.data is in the same (row-major) order as nnm.nonzero(), so this
    # Series maps each 'a;b' edge name to its alignment score.
    alignment = pd.Series(index=to_vn(gps),data=nnm.data)
    # Triangles in the thresholded graph = 3-cliques.
    all_cliques = nx.enumerate_all_cliques(G)
    all_triangles = [x for x in all_cliques if len(x) == 3]
    Z = np.sort(np.vstack(all_triangles), axis=1)
    # Column labels are inferred from the species prefixes of the first triangle.
    DF = pd.DataFrame(data=Z, columns=[x.split("_")[0] for x in Z[0]])
    # Attach each pair's alignment score as an extra column per species pair.
    for i,sid1 in enumerate(sm.ids):
        for sid2 in sm.ids[i:]:
            if sid1!=sid2:
                DF[sid1+';'+sid2] = [alignment[x] for x in DF[sid1].values.astype('str').astype('object')+';'+DF[sid2].values.astype('str').astype('object')]
    DF = DF[sm.ids]
    return DF
def GeneTriangles(sm,orth,keys=None,compute_markers=True,corr_thr=0.3, psub_thr = 0.3, pval_thr=1e-10):
    """Outputs a table of gene triangles.
    Parameters
    ----------
    sm: SAMAP object which contains at least three species
    orths: (n x 2) ortholog pairs
    keys: dict of strings corresponding to each species annotation column keyed by species, optional, default None
        If you'd like to include information about where each gene is differentially expressed, you can specify the
        annotation column to compute differential expressivity from for each species.
    compute_markers: bool, optional, default True
        Set this to False if you already precomputed differential expression for the input keys.
    corr_thr: float, optional, default, 0.3
        Only keep triangles with minimum `corr_thr` correlation.
    pval_thr: float, optional, defaul, 1e-10
        Consider cell types as differentially expressed if their p-values are less than `pval_thr`.

    Returns
    -------
    pandas.DataFrame of gene triangles (one per species triplet, concatenated),
    with per-edge correlation, homology category, and substituted-gene columns.
    """
    FINALS = []
    orth = np.sort(orth,axis=1)
    orthsp = np.vstack([q([x.split('_')[0] for x in xx]) for xx in orth])
    # Paralog substitutions across all species pairs, computed once up front.
    RES = ParalogSubstitutions(sm, orth, psub_thr = psub_thr)
    op = to_vo(q(RES['ortholog pairs']))
    pp = to_vo(q(RES['paralog pairs']))
    ops = np.vstack([q([x.split('_')[0] for x in xx]) for xx in op])
    pps = np.vstack([q([x.split('_')[0] for x in xx]) for xx in pp])
    gnnm = sm.samap.adata.varp["homology_graph_reweighted"]
    gn = q(sm.samap.adata.var_names)
    gnsp = q([x.split('_')[0] for x in gn])
    import itertools
    # Process every 3-species combination independently; results are
    # concatenated at the end.
    combs = list(itertools.combinations(sm.ids,3))
    for comb in combs:
        A,B,C = comb
        smp1 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==A,sm.samap.adata.obs['species']==B)])
        smp2 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==A,sm.samap.adata.obs['species']==C)])
        smp3 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==B,sm.samap.adata.obs['species']==C)])
        sam1=sm.sams[A]
        sam2=sm.sams[B]
        sam3=sm.sams[C]
        # The three species pairs for this triplet: (A,B), (A,C), (B,C).
        A1,A2=A,B
        B1,B2=A,C
        C1,C2=B,C
        # Restrict the substitution table to rows whose ortholog AND paralog
        # pairs both span the given species pair.
        f1 = np.logical_and(((ops[:,0]==A1) * (ops[:,1]==A2) + (ops[:,0]==A2) * (ops[:,1]==A1)) > 0,
                            ((pps[:,0]==A1) * (pps[:,1]==A2) + (pps[:,0]==A2) * (pps[:,1]==A1)) > 0)
        f2 = np.logical_and(((ops[:,0]==B1) * (ops[:,1]==B2) + (ops[:,0]==B2) * (ops[:,1]==B1)) > 0,
                            ((pps[:,0]==B1) * (pps[:,1]==B2) + (pps[:,0]==B2) * (pps[:,1]==B1)) > 0)
        f3 = np.logical_and(((ops[:,0]==C1) * (ops[:,1]==C2) + (ops[:,0]==C2) * (ops[:,1]==C1)) > 0,
                            ((pps[:,0]==C1) * (pps[:,1]==C2) + (pps[:,0]==C2) * (pps[:,1]==C1)) > 0)
        RES1=RES[f1]
        RES2=RES[f2]
        RES3=RES[f3]
        # Orthologs restricted to each species pair.
        f1 = ((orthsp[:,0]==A1) * (orthsp[:,1]==A2) + (orthsp[:,0]==A2) * (orthsp[:,1]==A1)) > 0
        f2 = ((orthsp[:,0]==B1) * (orthsp[:,1]==B2) + (orthsp[:,0]==B2) * (orthsp[:,1]==B1)) > 0
        f3 = ((orthsp[:,0]==C1) * (orthsp[:,1]==C2) + (orthsp[:,0]==C2) * (orthsp[:,1]==C1)) > 0
        orth1 = orth[f1]
        orth2 = orth[f2]
        orth3 = orth[f3]
        op1 = to_vo(q(RES1["ortholog pairs"]))
        op2 = to_vo(q(RES2["ortholog pairs"]))
        op3 = to_vo(q(RES3["ortholog pairs"]))
        pp1 = to_vo(q(RES1["paralog pairs"]))
        pp2 = to_vo(q(RES2["paralog pairs"]))
        pp3 = to_vo(q(RES3["paralog pairs"]))
        # Extract the cross-species sub-block of the homology graph for each
        # species pair as a symmetric 2x2 block matrix.
        gnnm1 = sp.sparse.vstack((
            sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==A1).sum(),)*2),gnnm[gnsp==A1,:][:,gnsp==A2])),
            sp.sparse.hstack((gnnm[gnsp==A2,:][:,gnsp==A1],sp.sparse.csr_matrix(((gnsp==A2).sum(),)*2)))
        )).tocsr()
        gnnm2 = sp.sparse.vstack((
            sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==B1).sum(),)*2),gnnm[gnsp==B1,:][:,gnsp==B2])),
            sp.sparse.hstack((gnnm[gnsp==B2,:][:,gnsp==B1],sp.sparse.csr_matrix(((gnsp==B2).sum(),)*2)))
        )).tocsr()
        gnnm3 = sp.sparse.vstack((
            sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==C1).sum(),)*2),gnnm[gnsp==C1,:][:,gnsp==C2])),
            sp.sparse.hstack((gnnm[gnsp==C2,:][:,gnsp==C1],sp.sparse.csr_matrix(((gnsp==C2).sum(),)*2)))
        )).tocsr()
        gn1 = np.append(gn[gnsp==A1],gn[gnsp==A2])
        gn2 = np.append(gn[gnsp==B1],gn[gnsp==B2])
        gn3 = np.append(gn[gnsp==C1],gn[gnsp==C2])
        # suppress warning
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # NOTE(review): these self-assignments look like no-ops; presumably
            # they force the ortholog-pair entries into the sparse matrices'
            # explicit structure (hence the suppressed efficiency warnings) —
            # confirm before simplifying.
            T1 = pd.DataFrame(data=np.arange(gn1.size)[None, :], columns=gn1)
            x, y = T1[op1[:, 0]].values.flatten(), T1[op1[:, 1]].values.flatten()
            gnnm1[x, y] = gnnm1[x, y]
            gnnm1[y, x] = gnnm1[y, x]
            T1 = pd.DataFrame(data=np.arange(gn2.size)[None, :], columns=gn2)
            x, y = T1[op2[:, 0]].values.flatten(), T1[op2[:, 1]].values.flatten()
            gnnm2[x, y] = gnnm2[x, y]
            gnnm2[y, x] = gnnm2[y, x]
            T1 = pd.DataFrame(data=np.arange(gn3.size)[None, :], columns=gn3)
            x, y = T1[op3[:, 0]].values.flatten(), T1[op3[:, 1]].values.flatten()
            gnnm3[x, y] = gnnm3[x, y]
            gnnm3[y, x] = gnnm3[y, x]
        # Keep explicit zeros distinguishable from structural zeros.
        gnnm1.data[gnnm1.data==0]=1e-4
        gnnm2.data[gnnm2.data==0]=1e-4
        gnnm3.data[gnnm3.data==0]=1e-4
        # Edge names and correlations per species pair; data order matches
        # nonzero() order for CSR matrices.
        pairs1 = gn1[np.vstack(gnnm1.nonzero()).T]
        pairs2 = gn2[np.vstack(gnnm2.nonzero()).T]
        pairs3 = gn3[np.vstack(gnnm3.nonzero()).T]
        data = np.concatenate((gnnm1.data, gnnm2.data, gnnm3.data))
        CORR1 = pd.DataFrame(data=gnnm1.data[None, :], columns=to_vn(pairs1))
        CORR2 = pd.DataFrame(data=gnnm2.data[None, :], columns=to_vn(pairs2))
        CORR3 = pd.DataFrame(data=gnnm3.data[None, :], columns=to_vn(pairs3))
        # Union graph over all three species pairs; 3-cliques are the
        # candidate gene triangles.
        pairs = np.vstack((pairs1, pairs2, pairs3))
        all_genes = np.unique(pairs.flatten())
        Z = pd.DataFrame(data=np.arange(all_genes.size)[None, :], columns=all_genes)
        x, y = Z[pairs[:, 0]].values.flatten(), Z[pairs[:, 1]].values.flatten()
        GNNM = sp.sparse.lil_matrix((all_genes.size,) * 2)
        GNNM[x, y] = data
        import networkx as nx
        # NOTE(review): `from_scipy_sparse_matrix` was removed in networkx 3.0
        # (renamed `from_scipy_sparse_array`) — this pins networkx < 3.
        G = nx.from_scipy_sparse_matrix(GNNM, create_using=nx.Graph)
        all_cliques = nx.enumerate_all_cliques(G)
        all_triangles = [x for x in all_cliques if len(x) == 3]
        Z = all_genes[np.sort(np.vstack(all_triangles), axis=1)]
        # Column labels inferred from the species prefixes of the first triangle.
        DF = pd.DataFrame(data=Z, columns=[x.split("_")[0] for x in Z[0]])
        DF = DF[[A, B, C]]
        orth1DF = pd.DataFrame(data=orth1, columns=[x.split("_")[0] for x in orth1[0]])[
            [A, B]
        ]
        orth2DF = pd.DataFrame(data=orth2, columns=[x.split("_")[0] for x in orth2[0]])[
            [A, C]
        ]
        orth3DF = pd.DataFrame(data=orth3, columns=[x.split("_")[0] for x in orth3[0]])[
            [B, C]
        ]
        ps1DF = pd.DataFrame(
            data=np.sort(pp1, axis=1),
            columns=[x.split("_")[0] for x in np.sort(pp1, axis=1)[0]],
        )[[A, B]]
        ps2DF = pd.DataFrame(
            data=np.sort(pp2, axis=1),
            columns=[x.split("_")[0] for x in np.sort(pp2, axis=1)[0]],
        )[[A, C]]
        ps3DF = pd.DataFrame(
            data=np.sort(pp3, axis=1),
            columns=[x.split("_")[0] for x in np.sort(pp3, axis=1)[0]],
        )[[B, C]]
        # Lookup tables: paralog pair name -> its ortholog pair name.
        A_AB = pd.DataFrame(data=to_vn(op1)[None, :], columns=to_vn(ps1DF.values))
        A_AC = pd.DataFrame(data=to_vn(op2)[None, :], columns=to_vn(ps2DF.values))
        A_BC = pd.DataFrame(data=to_vn(op3)[None, :], columns=to_vn(ps3DF.values))
        AB = to_vn(DF[[A, B]].values)
        AC = to_vn(DF[[A, C]].values)
        BC = to_vn(DF[[B, C]].values)
        # Classify each triangle edge as ortholog / substitution / homolog and
        # collect correlations plus the substituted orthologous genes.
        AVs = []
        CATs = []
        CORRs = []
        for i, X, O, P, Z, R in zip(
            [0, 1, 2],
            [AB, AC, BC],
            [orth1DF, orth2DF, orth3DF],
            [ps1DF, ps2DF, ps3DF],
            [A_AB, A_AC, A_BC],
            [CORR1, CORR2, CORR3],
        ):
            cat = q(["homolog"] * X.size).astype("object")
            cat[np.in1d(X, to_vn(O.values))] = "ortholog"
            ff = np.in1d(X, to_vn(P.values))
            cat[ff] = "substitution"
            z = Z[X[ff]] #problem line here
            x = X[ff]
            av = np.zeros(x.size, dtype="object")
            for ai in range(x.size):
                v=pd.DataFrame(z[x[ai]]) #get ortholog pairs - paralog pairs dataframe
                vd=v.values.flatten() #get ortholog pairs
                vc=q(';'.join(v.columns).split(';')) # get paralogous genes
                temp = np.unique(q(';'.join(vd).split(';'))) #get orthologous genes
                av[ai] = ';'.join(temp[np.in1d(temp,vc,invert=True)]) #get orthologous genes not present in paralogous genes
            AV = np.zeros(X.size, dtype="object")
            AV[ff] = av
            corr = R[X].values.flatten()
            AVs.append(AV)
            CATs.append(cat)
            CORRs.append(corr)
        # Assemble the per-triplet output table.
        tri_pairs = np.vstack((AB, AC, BC)).T
        cat_pairs = np.vstack(CATs).T
        corr_pairs = np.vstack(CORRs).T
        homology_triangles = DF.values
        substituted_genes = np.vstack(AVs).T
        substituted_genes[substituted_genes == 0] = "N.S."
        data = np.hstack(
            (
                homology_triangles.astype("object"),
                substituted_genes.astype("object"),
                tri_pairs.astype("object"),
                corr_pairs.astype("object"),
                cat_pairs.astype("object"),
            )
        )
        FINAL = pd.DataFrame(data = data, columns = [f'{A} gene',f'{B} gene',f'{C} gene',
                                                     f'{A}/{B} subbed',f'{A}/{C} subbed',f'{B}/{C} subbed',
                                                     f'{A}/{B}',f'{A}/{C}',f'{B}/{C}',
                                                     f'{A}/{B} corr',f'{A}/{C} corr',f'{B}/{C} corr',
                                                     f'{A}/{B} type',f'{A}/{C} type',f'{B}/{C} type'])
        FINAL['#orthologs'] = (cat_pairs=='ortholog').sum(1)
        FINAL['#substitutions'] = (cat_pairs=='substitution').sum(1)
        # Keep only triangles whose three edges are all ortholog/substitution.
        FINAL = FINAL[(FINAL['#orthologs']+FINAL['#substitutions'])==3]
        x = FINAL[[f'{A}/{B} corr',f'{A}/{C} corr',f'{B}/{C} corr']].min(1)
        FINAL['min_corr'] = x
        FINAL = FINAL[x>corr_thr]
        if keys is not None:
            keys = [keys[A],keys[B],keys[C]]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            if keys is not None:
                # Annotate each gene with the cell types where it is
                # differentially expressed (p < pval_thr), always including
                # the top-scoring cell type.
                for i,sam,n in zip([0,1,2],[sam1,sam2,sam3],[A,B,C]):
                    if compute_markers:
                        find_cluster_markers(sam,keys[i])
                    a = sam.adata.varm[keys[i]+'_scores'].T[q(FINAL[n+' gene'])].T
                    p = sam.adata.varm[keys[i]+'_pvals'].T[q(FINAL[n+' gene'])].T.values
                    p[p>pval_thr]=1
                    p[p<1]=0
                    p=1-p
                    f = a.columns[a.values.argmax(1)]
                    res=[]
                    for i in range(p.shape[0]):
                        res.append(';'.join(np.unique(np.append(f[i],a.columns[p[i,:]==1]))))
                    FINAL[n+' cell type'] = res
        FINAL = FINAL.sort_values('min_corr',ascending=False)
        FINALS.append(FINAL)
    FINAL = pd.concat(FINALS,axis=0)
    return FINAL
def _compute_csim(sam3, key, X=None, prepend=True, n_top = 0):
    """Compute a cross-species cluster similarity (alignment score) matrix.

    Sums cross-species neighbor weights from the combined graph per
    (cell, cluster) pair, then averages within clusters to produce a
    symmetric cluster-by-cluster score matrix.

    Parameters
    ----------
    sam3 : combined SAM object with an 'species' obs column and a
        'connectivities' obsp graph.
    key : str, obs column holding the cluster labels.
    X : sparse matrix, optional — alternative graph to use instead of
        sam3's connectivities.
    prepend : bool — if True, prefix cluster labels with their species ID.
    n_top : int — if > 0, average only the top-n_top per-cell scores
        within each cluster instead of all cells.

    Returns
    -------
    (CSIMth, clu) : normalized score matrix and the cluster label array
        indexing its rows/columns.
    """
    splabels = q(sam3.adata.obs['species'])
    # Species IDs in order of first appearance in the obs table.
    skeys = splabels[np.sort(np.unique(splabels,return_index=True)[1])]
    cl = []
    clu = []
    for sid in skeys:
        if prepend:
            cl.append(sid+'_'+q(sam3.adata.obs[key])[sam3.adata.obs['species']==sid].astype('str').astype('object'))
        else:
            cl.append(q(sam3.adata.obs[key])[sam3.adata.obs['species']==sid])
        clu.append(np.unique(cl[-1]))
    clu = np.concatenate(clu)
    cl = np.concatenate(cl)
    CSIM = np.zeros((clu.size, clu.size))
    if X is None:
        X = sam3.adata.obsp["connectivities"].copy()
    xi,yi = X.nonzero()
    spxi = splabels[xi]
    spyi = splabels[yi]
    # Keep only edges that cross species boundaries.
    filt = spxi!=spyi
    di = X.data[filt]
    xi = xi[filt]
    yi = yi[filt]
    # Key each edge by "<cell index>;<neighbor's cluster label>".
    px,py = xi,cl[yi]
    p = px.astype('str').astype('object')+';'+py.astype('object')
    A = pd.DataFrame(data=np.vstack((p, di)).T, columns=["x", "y"])
    valdict = df_to_dict(A, key_key="x", val_key="y")
    # Total edge weight from each cell into each foreign cluster.
    cell_scores = [valdict[k].sum() for k in valdict.keys()]
    ixer = pd.Series(data=np.arange(clu.size),index=clu)
    xc,yc = substr(list(valdict.keys()),';')
    xc = xc.astype('int')
    yc=ixer[yc].values
    cell_cluster_scores = sp.sparse.coo_matrix((cell_scores,(xc,yc)),shape=(X.shape[0],clu.size)).A
    for i, c in enumerate(clu):
        if n_top > 0:
            # Average only the strongest n_top cells of cluster c.
            CSIM[i, :] = np.sort(cell_cluster_scores[cl==c],axis=0)[-n_top:].mean(0)
        else:
            CSIM[i, :] = cell_cluster_scores[cl==c].mean(0)
    # Symmetrize by taking the element-wise max of the two directions.
    CSIM = np.stack((CSIM,CSIM.T),axis=2).max(2)
    # Normalize by the per-cell neighbor count times (number of foreign species).
    CSIMth = CSIM / sam3.adata.obsp['knn'][0].data.size * (len(skeys)-1)
    return CSIMth,clu
def transfer_annotations(sm,reference_id=None, keys=[],num_iters=5, inplace = True):
    """ Transfer annotations across species using label propagation along the combined manifold.
    Parameters
    ----------
    sm - SAMAP object
    reference_id - str, optional, default None
        The species ID of the reference species from which the annotations will be transferred.
    keys - str or list, optional, default []
        The `obs` key or list of keys corresponding to the labels to be propagated.
        If passed an empty list, all keys in the reference species' `obs` dataframe
        will be propagated.
    num_iters - int, optional, default 5
        The number of steps to run the diffusion propagation.
    inplace - bool, optional, default True
        If True, deposit propagated labels in the target species (`sm.sams['hu']`) `obs`
        DataFrame. Otherwise, just return the soft-membership DataFrame.
    Returns
    -------
    A Pandas DataFrame with soft membership scores for each cluster in each cell.
    """
    stitched = sm.samap
    NNM = stitched.adata.obsp['connectivities'].copy()
    # Row-normalize so each propagation step is a weighted average of neighbors.
    NNM = NNM.multiply(1/NNM.sum(1).A).tocsr()
    if type(keys) is str:
        keys = [keys]
    elif len(keys) == 0:
        try:
            keys = list(sm.sams[reference_id].adata.obs.keys())
        except KeyError:
            raise ValueError(f'`reference` must be one of {sm.ids}.')
    for key in keys:
        samref = sm.sams[reference_id]
        ANN = stitched.adata.obs
        ANNr = samref.adata.obs
        cl = ANN[key].values.astype('object').astype('str')
        clr = reference_id+'_'+ANNr[key].values.astype('object')
        # Blank out labels not present in the reference; '' becomes the
        # "unlabeled" class and sorts first in np.unique below.
        cl[np.invert(np.in1d(cl,clr))]=''
        clu,clui = np.unique(cl,return_inverse=True)
        P = np.zeros((NNM.shape[0],clu.size))
        Pmask = np.ones((NNM.shape[0],clu.size))
        P[np.arange(clui.size),clui]=1.0
        # Reference cells keep their one-hot labels fixed (mask = 0).
        Pmask[stitched.adata.obs['species']==reference_id]=0
        # Drop column 0 (the '' unlabeled class).
        Pmask=Pmask[:,1:]
        P=P[:,1:]
        Pinit = P.copy()
        for j in range(num_iters):
            P_new = NNM.dot(P)
            # Converged when the largest per-entry change is small.
            if np.max(np.abs(P_new - P)) < 5e-3:
                P = P_new
                s=P.sum(1)[:,None]
                s[s==0]=1
                P = P/s
                break
            else:
                P = P_new
                s=P.sum(1)[:,None]
                s[s==0]=1
                P = P/s
            # Re-clamp reference cells to their initial one-hot labels.
            P = P * Pmask + Pinit
        uncertainty = 1-P.max(1)
        labels = clu[1:][np.argmax(P,axis=1)]
        # Cells that received no signal at all are marked 'NAN'.
        labels[uncertainty==1.0]='NAN'
        uncertainty[uncertainty>=uncertainty.max()*0.99] = 1
        if inplace:
            stitched.adata.obs[key+'_transfer'] = pd.Series(labels,index = stitched.adata.obs_names)
            stitched.adata.obs[key+'_uncertainty'] = pd.Series(uncertainty,index=stitched.adata.obs_names)
    # NOTE(review): only the LAST key's propagation result is returned.
    res = pd.DataFrame(data=P,index=stitched.adata.obs_names,columns=clu[1:])
    res['labels'] = labels
    return res
def get_mapping_scores(sm, keys, n_top = 0):
    """Calculate mapping scores
    Parameters
    ----------
    sm: SAMAP object
    keys: dict, annotation vector keys for at least two species with species identifiers as the keys
        e.g. {'pl':'tissue','sc':'tissue'}
    n_top: int, optional, default 0
        If `n_top` is 0, average the alignment scores for all cells in a pair of clusters.
        Otherwise, average the alignment scores of the top `n_top` cells in a pair of clusters.
        Set this to non-zero if you suspect there to be subpopulations of your cell types mapping
        to distinct cell types in the other species.
    Returns
    -------
    D - table of highest mapping scores for cell types
    A - pairwise table of mapping scores between cell types across species
    """
    # If fewer species were requested than are stitched together, subset
    # the combined object down to just the requested species.
    if len(list(keys.keys()))<len(list(sm.sams.keys())):
        samap = SAM(counts = sm.samap.adata[np.in1d(sm.samap.adata.obs['species'],list(keys.keys()))])
    else:
        samap=sm.samap
    clusters = []
    ix = np.unique(samap.adata.obs['species'],return_index=True)[1]
    # Species in order of first appearance.
    skeys = q(samap.adata.obs['species'])[np.sort(ix)]
    for sid in skeys:
        clusters.append(q([sid+'_'+str(x) for x in sm.sams[sid].adata.obs[keys[sid]]]))
    cl = np.concatenate(clusters)
    l = "{}_mapping_scores".format(';'.join([keys[sid] for sid in skeys]))
    samap.adata.obs[l] = pd.Categorical(cl)
    CSIMth, clu = _compute_csim(samap, l, n_top = n_top, prepend = False)
    A = pd.DataFrame(data=CSIMth, index=clu, columns=clu)
    # Order clusters by their best alignment score, descending.
    i = np.argsort(-A.values.max(0).flatten())
    H = []
    C = []
    for I in range(A.shape[1]):
        x = A.iloc[:, i[I]].sort_values(ascending=False)
        H.append(np.vstack((x.index, x.values)).T)
        # Appended twice on purpose: each cluster spans two columns
        # ("Cluster", "Alignment score") in the MultiIndex header below.
        C.append(A.columns[i[I]])
        C.append(A.columns[i[I]])
    H = np.hstack(H)
    D = pd.DataFrame(data=H, columns=[C, ["Cluster","Alignment score"]*(H.shape[1]//2)])
    return D, A
def _knndist(nnma, k):
x, y = nnma.nonzero()
data = nnma.data
xc, cc = np.unique(x, return_counts=True)
cc2 = np.zeros(nnma.shape[0], dtype="int")
cc2[xc] = cc
cc = cc2
newx = []
newdata = []
for i in range(nnma.shape[0]):
newx.extend([i] * k)
newdata.extend(list(data[x == i]) + [0] * (k - cc[i]))
data = np.array(newdata)
val = data.reshape((nnma.shape[0], k))
return val
def _sparse_sub_standardize(X, mu, var, rows=False):
x, y = X.nonzero()
if not rows:
Xs = X.copy()
Xs.data[:] = (X.data - mu[y]) / var[y]
else:
mu, var = sf.mean_variance_axis(X, axis=1)
var = var ** 0.5
var[var == 0] = 1
Xs = X.copy()
Xs.data[:] = (X.data - mu[x]) / var[x]
Xs.data[Xs.data < 0] = 0
Xs.eliminate_zeros()
return Xs
def _get_mu_std(sam3, sam1, sam2, knn=False):
    """Per-gene mean and standard deviation for the paired genes of two SAMs.

    Gene pairs are read from sam3.adata.uns['gene_pairs'] (fields joined
    by ';'). Statistics are computed over the raw expression matrices, or
    over the "X_knn_avg" layers when knn=True. Zero variances are replaced
    with 1 before taking the square root, so no std is ever zero.
    """
    pairs = sam3.adata.uns['gene_pairs']
    g1 = ut.extract_annotation(pairs, 0, ";")
    g2 = ut.extract_annotation(pairs, 1, ";")
    if knn:
        m1 = sam1.adata[:, g1].layers["X_knn_avg"]
        m2 = sam2.adata[:, g2].layers["X_knn_avg"]
    else:
        m1 = sam1.adata[:, g1].X
        m2 = sam2.adata[:, g2].X
    mu1, var1 = sf.mean_variance_axis(m1, axis=0)
    mu2, var2 = sf.mean_variance_axis(m2, axis=0)
    # Guard against zero variance before converting to std.
    var1[var1 == 0] = 1
    var2[var2 == 0] = 1
    return mu1, var1 ** 0.5, mu2, var2 ** 0.5
| 6,186 | 16,934 | 213 |
96b6e0621d791d164add30bfcebd7a97b026a24f | 2,329 | py | Python | teste.py | danielrodrigues97/Teste_Pratico_Publca | c4a48f2fd86ee371ae593ec4ffc440fe527d2a9b | [
"MIT"
] | null | null | null | teste.py | danielrodrigues97/Teste_Pratico_Publca | c4a48f2fd86ee371ae593ec4ffc440fe527d2a9b | [
"MIT"
] | null | null | null | teste.py | danielrodrigues97/Teste_Pratico_Publca | c4a48f2fd86ee371ae593ec4ffc440fe527d2a9b | [
"MIT"
] | null | null | null | # -*- coding:UTF-8 -*-
import sys
reqMax = []
reqMin = []
cont = 0
#print(limpar())
p = 0
while p != 4:
print('~'*30)
print('Para Inserir dados do jogo aperte [1]: ')
print('Para consultar dados dos jogos aperte [2]: ')
print('para limpar a tabela de jogos aperte [3]')
print('Para Sair do programa aperte [4]: ')
p = int(input('Opção: '))
print('~'*30)
if p == 1:
cont+=1
inserir()
elif p == 2:
consulta()
elif p ==3:
limpar()
elif p == 4:
print('Opção {}'.format(p), 'Saindo do programa!!!')
else:
print('Opção Invalida')
print('*'*30)
| 24.010309 | 72 | 0.509661 | # -*- coding:UTF-8 -*-
import sys
reqMax = []
reqMin = []
cont = 0
def limpar():
    """Reset the game state.

    Truncates the games table file and clears the in-memory record lists
    (reqMax, reqMin) and the game counter (cont).

    Fix over the original: `arquivo.close` was an attribute access with
    no call (a no-op); opening in 'w' mode already truncates and the
    context manager handles closing.
    """
    with open('tabela_jogos.txt', 'w'):
        pass
    global cont
    cont = 0
    reqMax.clear()
    reqMin.clear()
def consulta():
    """Print the saved games table (tabela_jogos.txt) to stdout.

    Fix over the original: the file handle was opened with `open()` and
    never closed (resource leak); it is now managed with `with`.
    """
    print('~'*30)
    with open('tabela_jogos.txt', 'r') as arquivo:
        print(arquivo.read())
def inserir():
    """Interactively read one game's data, flag records, and append a row
    to tabela_jogos.txt.

    Reads score (placar), season minimum and maximum from stdin; exits the
    whole program if the score is out of [0, 1000]. `aux`/`auxmin` encode
    whether the score beats the recorded maxima/minima.
    """
    aux = 0
    auxmin = 0
    placar = int (input('insira o placar do jogo: '))
    if placar > 1000 or placar < 0:
        # Out-of-range score aborts the entire program, not just this entry.
        print('\n\tPlacar não deve ser maior que 1000 ou menor que 0!',)
        print('\tPlacar pode ser igual a 0!\n')
        sys.exit()
    minimo = int (input('insira o mínimo da temporada: '))
    maximo = int (input('insira o máximo da temporada: '))
    reqMax.append(maximo)
    reqMin.append(minimo)
    if maximo == minimo and placar == minimo:
        aux= 0
        auxmin=0
    else:
        # NOTE(review): each iteration OVERWRITES aux/auxmin, so only the
        # LAST element of reqMax/reqMin decides the flag — if the intent
        # was "any previous record beats the score", this is a bug; confirm.
        for i in range(0,len(reqMax)):
            if reqMax[i] >= placar:
                aux = 1
            else:
                aux = 0
        for j in range(0,len(reqMin)):
            if reqMin[j] <= placar:
                auxmin = 1
            else:
                auxmin = 0
    with open('tabela_jogos.txt', 'a') as arquivo:
        frases = list()
        frases.append(f'\rJogo= {str(cont)} |')
        frases.append(f'placar= {str(placar)} |')
        frases.append(f'minimo= {str(minimo)} |')
        frases.append(f'maximo= {str(maximo)} |')
        frases.append(f'Recorde Maximo= {str(aux)} |')
        frases.append(f'Recorde Minimo= {str(auxmin)} |\r')
        frases.append('~'*83)
        arquivo.writelines(frases)
        # NOTE(review): missing parentheses — this is a no-op attribute
        # access; the `with` block closes the file anyway.
        arquivo.close
    print(reqMax)
#print(limpar())
p = 0
while p != 4:
print('~'*30)
print('Para Inserir dados do jogo aperte [1]: ')
print('Para consultar dados dos jogos aperte [2]: ')
print('para limpar a tabela de jogos aperte [3]')
print('Para Sair do programa aperte [4]: ')
p = int(input('Opção: '))
print('~'*30)
if p == 1:
cont+=1
inserir()
elif p == 2:
consulta()
elif p ==3:
limpar()
elif p == 4:
print('Opção {}'.format(p), 'Saindo do programa!!!')
else:
print('Opção Invalida')
print('*'*30)
| 1,590 | 0 | 73 |
416556fc1a0a4d093835838d768bf3c2f23c309f | 597 | py | Python | app/templatetags/messages.py | augustakingfoundation/queryjane_app | 2c7b27db9e16288c49520b94704246b25dd262b6 | [
"MIT"
] | 5 | 2018-08-07T07:01:04.000Z | 2021-03-19T00:16:59.000Z | app/templatetags/messages.py | augustakingfoundation/queryjane_app | 2c7b27db9e16288c49520b94704246b25dd262b6 | [
"MIT"
] | 1 | 2018-04-30T07:27:03.000Z | 2018-04-30T07:27:03.000Z | app/templatetags/messages.py | augustakingfoundation/queryjane_app | 2c7b27db9e16288c49520b94704246b25dd262b6 | [
"MIT"
] | 3 | 2018-08-08T11:57:01.000Z | 2020-10-02T05:42:13.000Z | from django import template
from account.models import UserMessage
from account.models import Conversation
register = template.Library()
@register.assignment_tag
@register.assignment_tag
@register.assignment_tag
| 20.586207 | 40 | 0.743719 | from django import template
from account.models import UserMessage
from account.models import Conversation
register = template.Library()
@register.assignment_tag
def get_user_messages_count(user):
return UserMessage.objects.filter(
user_to=user,
).count()
@register.assignment_tag
def get_new_user_messages_count(user):
return UserMessage.objects.filter(
unread=True,
user_to=user,
).count()
@register.assignment_tag
def get_recent_user_conversations(user):
return Conversation.objects.filter(
participating_users__in=[user],
)[:10]
| 311 | 0 | 66 |
1504c2faa52e511b14f1969c5baf3ba4565022ac | 3,368 | py | Python | run_scripts/plot_fetch_eval.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 56 | 2019-10-20T03:09:02.000Z | 2022-03-25T09:21:40.000Z | run_scripts/plot_fetch_eval.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 3 | 2020-10-01T07:33:51.000Z | 2021-05-12T03:40:57.000Z | run_scripts/plot_fetch_eval.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 10 | 2019-11-04T16:56:09.000Z | 2022-03-25T09:21:41.000Z | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os.path as osp
import joblib
MAIN_PATH = '/scratch/gobi2/kamyar/oorl_rlkit/output'
WHAT_TO_PLOT = 'faster_all_eval_stats.pkl'
# WHAT_TO_PLOT = 'faster_all_eval_stats.pkl'
# WHAT_TO_PLOT = 'faster_all_eval_stats.pkl'
data_dirs = {
'np_airl': {
0.2: 'correct-saving-np-airl-KL-0p2-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
0.15: 'correct-saving-np-airl-KL-0p15-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
0.1: 'correct-saving-np-airl-KL-0p1-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
0.05: 'correct-saving-np-airl-KL-0p05-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
0.0: 'correct-saving-np-airl-KL-0-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs'
},
'np_bc': {
0.2: 'np-bc-KL-0p2-FINAL-WITHOUT-TARGETS',
0.15: 'np-bc-KL-0p15-FINAL-WITHOUT-TARGETS',
0.1: 'np-bc-KL-0p1-FINAL-WITHOUT-TARGETS',
0.05: 'np-bc-KL-0p05-FINAL-WITHOUT-TARGETS',
0.0: 'np-bc-KL-0-FINAL-WITHOUT-TARGETS'
}
}
# fig, ax = plt.subplots(1, 5)
for i, beta in enumerate([0.0, 0.05, 0.1, 0.15, 0.2]):
fig, ax = plt.subplots(1)
ax.set_xlabel('$\\beta = %.2f$' % beta)
# np_airl
all_stats = joblib.load(osp.join(MAIN_PATH, data_dirs['np_airl'][beta], WHAT_TO_PLOT))['faster_all_eval_stats']
good_reaches_means = []
good_reaches_stds = []
solves_means = []
solves_stds = []
for c_size in range(1,7):
good_reaches = []
solves = []
for d in all_stats:
good_reaches.append(d[c_size]['Percent_Good_Reach'])
solves.append(d[c_size]['Percent_Solved'])
good_reaches_means.append(np.mean(good_reaches))
good_reaches_stds.append(np.std(good_reaches))
solves_means.append(np.mean(solves))
solves_stds.append(np.std(solves))
# ax.errorbar(list(range(1,7)), good_reaches_means, good_reaches_stds)
ax.errorbar(np.array(list(range(1,7))) + 0.1, solves_means, solves_stds,
elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label='Meta-AIRL'
)
# np_bc
all_stats = joblib.load(osp.join(MAIN_PATH, data_dirs['np_bc'][beta], WHAT_TO_PLOT))['faster_all_eval_stats']
good_reaches_means = []
good_reaches_stds = []
solves_means = []
solves_stds = []
for c_size in range(1,7):
good_reaches = []
solves = []
for d in all_stats:
good_reaches.append(d[c_size]['Percent_Good_Reach'])
solves.append(d[c_size]['Percent_Solved'])
good_reaches_means.append(np.mean(good_reaches))
good_reaches_stds.append(np.std(good_reaches))
solves_means.append(np.mean(solves))
solves_stds.append(np.std(solves))
# ax.errorbar(list(range(1,7)), good_reaches_means, good_reaches_stds)
ax.errorbar(np.array(list(range(1,7))) - 0.1, solves_means, solves_stds,
elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label='Meta-BC'
)
ax.set_ylim([0.3, 1.0])
lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.725, 0.1), shadow=False, ncol=3)
plt.savefig('plots/abc/faster_test_%d.png'%i, bbox_extra_artists=(lgd,), bbox_inches='tight')
# plt.savefig('plots/abc/test_%d.png'%i)
plt.close()
| 39.623529 | 115 | 0.655582 | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os.path as osp
import joblib
MAIN_PATH = '/scratch/gobi2/kamyar/oorl_rlkit/output'
WHAT_TO_PLOT = 'faster_all_eval_stats.pkl'
# WHAT_TO_PLOT = 'faster_all_eval_stats.pkl'
# WHAT_TO_PLOT = 'faster_all_eval_stats.pkl'
data_dirs = {
'np_airl': {
0.2: 'correct-saving-np-airl-KL-0p2-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
0.15: 'correct-saving-np-airl-KL-0p15-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
0.1: 'correct-saving-np-airl-KL-0p1-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
0.05: 'correct-saving-np-airl-KL-0p05-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
0.0: 'correct-saving-np-airl-KL-0-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs'
},
'np_bc': {
0.2: 'np-bc-KL-0p2-FINAL-WITHOUT-TARGETS',
0.15: 'np-bc-KL-0p15-FINAL-WITHOUT-TARGETS',
0.1: 'np-bc-KL-0p1-FINAL-WITHOUT-TARGETS',
0.05: 'np-bc-KL-0p05-FINAL-WITHOUT-TARGETS',
0.0: 'np-bc-KL-0-FINAL-WITHOUT-TARGETS'
}
}
# fig, ax = plt.subplots(1, 5)
for i, beta in enumerate([0.0, 0.05, 0.1, 0.15, 0.2]):
fig, ax = plt.subplots(1)
ax.set_xlabel('$\\beta = %.2f$' % beta)
# np_airl
all_stats = joblib.load(osp.join(MAIN_PATH, data_dirs['np_airl'][beta], WHAT_TO_PLOT))['faster_all_eval_stats']
good_reaches_means = []
good_reaches_stds = []
solves_means = []
solves_stds = []
for c_size in range(1,7):
good_reaches = []
solves = []
for d in all_stats:
good_reaches.append(d[c_size]['Percent_Good_Reach'])
solves.append(d[c_size]['Percent_Solved'])
good_reaches_means.append(np.mean(good_reaches))
good_reaches_stds.append(np.std(good_reaches))
solves_means.append(np.mean(solves))
solves_stds.append(np.std(solves))
# ax.errorbar(list(range(1,7)), good_reaches_means, good_reaches_stds)
ax.errorbar(np.array(list(range(1,7))) + 0.1, solves_means, solves_stds,
elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label='Meta-AIRL'
)
# np_bc
all_stats = joblib.load(osp.join(MAIN_PATH, data_dirs['np_bc'][beta], WHAT_TO_PLOT))['faster_all_eval_stats']
good_reaches_means = []
good_reaches_stds = []
solves_means = []
solves_stds = []
for c_size in range(1,7):
good_reaches = []
solves = []
for d in all_stats:
good_reaches.append(d[c_size]['Percent_Good_Reach'])
solves.append(d[c_size]['Percent_Solved'])
good_reaches_means.append(np.mean(good_reaches))
good_reaches_stds.append(np.std(good_reaches))
solves_means.append(np.mean(solves))
solves_stds.append(np.std(solves))
# ax.errorbar(list(range(1,7)), good_reaches_means, good_reaches_stds)
ax.errorbar(np.array(list(range(1,7))) - 0.1, solves_means, solves_stds,
elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label='Meta-BC'
)
ax.set_ylim([0.3, 1.0])
lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.725, 0.1), shadow=False, ncol=3)
plt.savefig('plots/abc/faster_test_%d.png'%i, bbox_extra_artists=(lgd,), bbox_inches='tight')
# plt.savefig('plots/abc/test_%d.png'%i)
plt.close()
| 0 | 0 | 0 |
6141339f3c082173b90146c68f9445d1b6345332 | 936 | py | Python | web_project/Report/views.py | nosy0411/Object_Oriented_Programming | e6713b5131c125ac50814d375057f06da43e958e | [
"MIT"
] | null | null | null | web_project/Report/views.py | nosy0411/Object_Oriented_Programming | e6713b5131c125ac50814d375057f06da43e958e | [
"MIT"
] | null | null | null | web_project/Report/views.py | nosy0411/Object_Oriented_Programming | e6713b5131c125ac50814d375057f06da43e958e | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404, redirect
from .models import RepPost
from .forms import RepForm
from django.utils import timezone
from django.contrib.auth.decorators import login_required
@login_required | 34.666667 | 102 | 0.642094 | from django.shortcuts import render, get_object_or_404, redirect
from .models import RepPost
from .forms import RepForm
from django.utils import timezone
from django.contrib.auth.decorators import login_required
@login_required
def rep_post(request):
user = request.user
talkable = True
if user.handle.skku:
if user.handle.line_t.all().filter(alive=True):
talkable = False
else:
if user.handle.line_s.all().filter(alive=True):
talkable = False
if request.method == "POST":
form = RepForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.rep_author = request.user.handle
post.rep_date = timezone.now()
post.save()
return redirect('br', pg=1)
else:
form = RepForm()
return render(request, 'Report/rep_edit.html', {'form': form, 'user': user, 'talkable': talkable}) | 686 | 0 | 22 |
a449f75f76ba154fd1a52ce57663d5dace977604 | 2,566 | py | Python | int_to_line.py | CUUATS/feature-class-sync | 05bf8e44f5721655e9bb71590849af460ec0256a | [
"BSD-3-Clause"
] | null | null | null | int_to_line.py | CUUATS/feature-class-sync | 05bf8e44f5721655e9bb71590849af460ec0256a | [
"BSD-3-Clause"
] | null | null | null | int_to_line.py | CUUATS/feature-class-sync | 05bf8e44f5721655e9bb71590849af460ec0256a | [
"BSD-3-Clause"
] | null | null | null | #int_to_line.py
#This script takes intersection and road segment and determine the direction of the road segment in contrast to the intersection.
import arcpy
from arcpy import env
from arcpy.sa import *
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
#input configuration
env.workspace = "C:/Users/kml42638/Desktop/testDB.gdb"
print("The name of the workspace is " + env.workspace)
streetCL = "GGISC_streetCL"
intersections = "Intersections_all"
main(intersections, streetCL)
| 26.729167 | 129 | 0.601715 | #int_to_line.py
#This script takes intersection and road segment and determine the direction of the road segment in contrast to the intersection.
import arcpy
from arcpy import env
from arcpy.sa import *
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
#input configuration
env.workspace = "C:/Users/kml42638/Desktop/testDB.gdb"
print("The name of the workspace is " + env.workspace)
streetCL = "GGISC_streetCL"
intersections = "Intersections_all"
def main(intersections, streetCL):
    """Run the intersection-direction pipeline: buffer intersections,
    intersect with street centerlines, compute near angles, classify the
    cardinal direction, and spatially join it back onto the centerlines.

    Fix over the original: the unused local `near_int` was removed —
    near_function is called for its side effects (it adds NEAR_* fields
    to int_point), and its return value was never used.
    """
    int_buffer = buffer_function(intersections)
    int_point = intersect_function(int_buffer, streetCL)
    near_function(int_point, intersections)
    add_direction(int_point)
    join_dir_function(int_point, streetCL)
def buffer_function(features):
    """Buffer the given intersection features by 30 (map units) into an
    in-memory feature class and return the Buffer_analysis result.

    Fixes over the original: the parameter was named `int` (shadowing the
    builtin) and was IGNORED — the function buffered the module-level
    `intersections` global instead of its argument. It now buffers what
    it is given (the only in-file caller passes `intersections`, so
    behavior is unchanged for existing code).
    """
    print("Finish buffer")
    return arcpy.Buffer_analysis(features, "in_memory" + "\\" + "int_buff", 30)
def intersect_function(int_buffer, streetCL):
    """Intersect the buffered intersections with the street centerlines,
    emitting point geometry ('int_point') where they overlap."""
    print("Finish intersect")
    return(arcpy.Intersect_analysis(
        in_features=[int_buffer, streetCL],
        out_feature_class="int_point",
        output_type="point"
    )
    )
def near_function(int_point, intersections):
    """Add NEAR_* fields (including NEAR_ANGLE) to int_point for the
    closest intersection within 31 units; angles are requested,
    locations are not. Modifies int_point in place (arcpy.Near_analysis
    side effect)."""
    print("Finish near feature")
    return(arcpy.Near_analysis(
        in_features=int_point,
        near_features=intersections,
        location=False,
        angle=True,
        search_radius=31
    )
    )
def add_direction(int_point):
    """Add a 3-char 'dir' field to int_point and fill it by classifying
    NEAR_ANGLE into a cardinal direction via the embedded code block.

    NOTE(review): in the embedded reclass(), the branch ranges overlap —
    [-45, 45] is claimed by both the first ('W') and third ('S')
    conditions; since the first match wins, 'S' effectively covers only
    (45, 135]. Also confirm the angle-to-direction mapping matches the
    intended convention (arcpy near angles are measured from east,
    counterclockwise, in the geodesic=False case — verify).
    """
    arcpy.AddField_management(
        in_table=int_point,
        field_name="dir",
        field_type="TEXT",
        field_length=3
    )
    arcpy.CalculateField_management(
        in_table=int_point,
        field="dir",
        expression_type="PYTHON_9.3",
        expression="reclass(!NEAR_ANGLE!)",
        code_block= """def reclass(angle):
        if (angle >= -45 and angle <=45):
            return ("W")
        elif (angle >= -135 and angle <=-45):
            return ("N")
        elif (angle >=-45 and angle <=135):
            return ("S")
        else:
            return ("E")"""
    )
def join_dir_function(int_point, streetCL):
    """Spatially join the direction-classified points back onto the
    street centerlines (within 1 unit), writing 'streetCL_join'."""
    arcpy.SpatialJoin_analysis(
        target_features=streetCL,
        join_features=int_point,
        out_feature_class="streetCL_join",
        match_option="WITHIN_A_DISTANCE",
        search_radius=1
    )
main(intersections, streetCL)
| 1,912 | 0 | 138 |
f9989169a6208962fd766e65aab7abac678b046b | 318 | py | Python | cactusco/celery.py | cactus-computing/product-recommendation | b5d9bb27205a4fb032fd19934ecab56a5a8c6d81 | [
"MIT"
] | null | null | null | cactusco/celery.py | cactus-computing/product-recommendation | b5d9bb27205a4fb032fd19934ecab56a5a8c6d81 | [
"MIT"
] | null | null | null | cactusco/celery.py | cactus-computing/product-recommendation | b5d9bb27205a4fb032fd19934ecab56a5a8c6d81 | [
"MIT"
] | null | null | null | import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cactusco.settings')
app = Celery('cactusco')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.task(bind=True)
| 26.5 | 69 | 0.742138 | import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cactusco.settings')
app = Celery('cactusco')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
# Standard Celery debug task: bind=True gives access to the task request
# context via `self`; prints the request for troubleshooting workers.
@app.task(bind=True)
def debug_task(self):
    print(f'Request: {self.request!r}')
9424aef899235c1a9aa2de958ac45e3889e4b3b5 | 3,306 | py | Python | tests/bak/_alignment_func.py | jumbrich/dtpattern | 38433c89d169a280b0439b9cd4f463d5d7604dd7 | [
"MIT"
] | null | null | null | tests/bak/_alignment_func.py | jumbrich/dtpattern | 38433c89d169a280b0439b9cd4f463d5d7604dd7 | [
"MIT"
] | 2 | 2018-04-25T22:13:34.000Z | 2018-04-26T17:52:43.000Z | tests/bak/_alignment_func.py | jumbrich/dtpattern | 38433c89d169a280b0439b9cd4f463d5d7604dd7 | [
"MIT"
] | null | null | null | from dtpattern import alignment
from dtpattern.alignment import needle, finalize, gap_penalty, match_award, mismatch_penalty, water
from dtpattern.utils import translate
from dtpattern.alignment import alignment as al
def align(s1,s2):
"""
input is a list of characters or character set symbols for each s1 and s2
return is
:param s1:
:param s2:
:return: tuple of align1, align2, symbol2, identity, score
"""
identity, score, align1, symbol2, align2 = needle(s1, s2)
print_alignment(align1, align2, symbol2, identity, score, altype="NEEDLE")
identity, score, align1, symbol2, align2 = water(s1, s2)
print_alignment(align1, align2, symbol2, identity, score, altype="WATER")
score_matrix = {
gap_penalty: -15,
match_award: 5,
mismatch_penalty: -4
}
identity, score, align1, symbol2, align2 = needle(s1, s2,score_matrix=score_matrix)
print_alignment(align1, align2, symbol2, identity, score, altype="VALUE")
identity, score, align1, symbol2, align2 = water(s1, s2,score_matrix=score_matrix)
print_alignment(align1, align2, symbol2, identity, score, altype="WATER")
identity, score, align1, symbol2, align2 = needle(_translate(s1), s2)
print_alignment(align1, align2, symbol2, identity, score, altype="TRANS")
identity, score, align1, symbol2, align2 = water(_translate(s1), s2)
print_alignment(align1, align2, symbol2, identity, score, altype="TRANS_WATER")
#for a in al.align.globalms("".join(s1), "".join(s2), 5, -4, -50, -.1):
# print(al.format_alignment(*a))
return align1, align2, symbol2, identity, score
data=[
['111',"1222","1113"]
]
for values in data:
s1 = values[0]
for s2 in values[1:]:
print("MERGE:\n\t{}\n\t{}".format(s1,s2))
if isinstance(s1,str):
s1= to_list(s1)
if isinstance(s2,str):
s2= to_list(s2)
align1, align2, symbol2, identity, score = align(s1,s2)
#print_alignment(align1, align2, symbol2, identity, score)
_s1,_s2=s1,s2
while not is_valid_alignment(align1, align2, symbol2):
break
s1 = merge_alignment(symbol2)
| 27.322314 | 112 | 0.61827 | from dtpattern import alignment
from dtpattern.alignment import needle, finalize, gap_penalty, match_award, mismatch_penalty, water
from dtpattern.utils import translate
from dtpattern.alignment import alignment as al
def to_list(alpha):
    """Return `alpha` as a list — a list of characters for a string, or
    the elements of any other iterable.

    Fix over the original: non-string input silently fell through and
    returned None; it now converts any iterable. This is backward
    compatible with all in-file callers, which only pass strings (they
    guard with isinstance before calling).
    """
    return list(alpha)
def _translate(s):
    """Map each string element of `s` to a one-element list holding its
    translated class symbol (via `translate`); list elements pass through
    unchanged; elements of any other type are silently dropped."""
    out = []
    for elem in s:
        if isinstance(elem, list):
            out.append(elem)
        elif isinstance(elem, str):
            out.append([translate(elem)])
    return out
def align(s1,s2):
"""
input is a list of characters or character set symbols for each s1 and s2
return is
:param s1:
:param s2:
:return: tuple of align1, align2, symbol2, identity, score
"""
identity, score, align1, symbol2, align2 = needle(s1, s2)
print_alignment(align1, align2, symbol2, identity, score, altype="NEEDLE")
identity, score, align1, symbol2, align2 = water(s1, s2)
print_alignment(align1, align2, symbol2, identity, score, altype="WATER")
score_matrix = {
gap_penalty: -15,
match_award: 5,
mismatch_penalty: -4
}
identity, score, align1, symbol2, align2 = needle(s1, s2,score_matrix=score_matrix)
print_alignment(align1, align2, symbol2, identity, score, altype="VALUE")
identity, score, align1, symbol2, align2 = water(s1, s2,score_matrix=score_matrix)
print_alignment(align1, align2, symbol2, identity, score, altype="WATER")
identity, score, align1, symbol2, align2 = needle(_translate(s1), s2)
print_alignment(align1, align2, symbol2, identity, score, altype="TRANS")
identity, score, align1, symbol2, align2 = water(_translate(s1), s2)
print_alignment(align1, align2, symbol2, identity, score, altype="TRANS_WATER")
#for a in al.align.globalms("".join(s1), "".join(s2), 5, -4, -50, -.1):
# print(al.format_alignment(*a))
return align1, align2, symbol2, identity, score
def print_alignment(align1, align2, symbol2, identity, score, altype="VALUE"):
    """Pretty-print one pairwise alignment: a centered header, both
    aligned sequences, the symbol row, and identity/score summary."""
    header = "ALIGNMENT " + altype
    template = (
        "{:-^40}\n"
        " a1: {}\n"
        " a2: {}\n"
        " s: {}\n"
        " identity: {:2.2f}% Score: {}"
    )
    print(template.format(header, align1, align2, str(symbol2), identity, score))
def is_valid_alignment(align1, align2, symbol):
    """Log the lengths of the two alignments and the symbol row.

    Currently a stub: always reports the alignment as valid.
    """
    lengths = (len(align1), len(align2), len(symbol))
    print("a1_len:{}, a2_len:{}, s_len:{}".format(*lengths))
    return True
def merge_alignment(symbol):
    """Collapse an alignment symbol row into a merged pattern: plain
    string symbols pass through; 2-element list entries (the two aligned
    characters) are joined, translated to class symbols, and deduplicated
    into a character-class list.
    """
    m=[]
    for s in symbol:
        if isinstance(s,str):
            m.append(s)
        elif isinstance(s, list):
            # NOTE(review): a1/a2 are unpacked but unused — they belong
            # to the commented-out branch below; confirm before removing.
            a1=s[0]
            a2=s[1]
            #if isinstance(a1,list) and isinstance(a2, str):
            ##a1 is already a merge or optional
            t=set(translate("".join(s)))
            m.append([c for c in t])
    return m
data=[
['111',"1222","1113"]
]
for values in data:
s1 = values[0]
for s2 in values[1:]:
print("MERGE:\n\t{}\n\t{}".format(s1,s2))
if isinstance(s1,str):
s1= to_list(s1)
if isinstance(s2,str):
s2= to_list(s2)
align1, align2, symbol2, identity, score = align(s1,s2)
#print_alignment(align1, align2, symbol2, identity, score)
_s1,_s2=s1,s2
while not is_valid_alignment(align1, align2, symbol2):
break
s1 = merge_alignment(symbol2)
| 993 | 0 | 115 |
2899f0c4f189edbe9eb0b4a1b531ba0952b7d769 | 785 | py | Python | mediasort/__init__.py | aroberts/mediasort | c70836b11d19bd9fad63e22c7aa5217ae4a4cef3 | [
"BSD-3-Clause"
] | 1 | 2020-01-04T09:14:23.000Z | 2020-01-04T09:14:23.000Z | mediasort/__init__.py | aroberts/mediasort | c70836b11d19bd9fad63e22c7aa5217ae4a4cef3 | [
"BSD-3-Clause"
] | null | null | null | mediasort/__init__.py | aroberts/mediasort | c70836b11d19bd9fad63e22c7aa5217ae4a4cef3 | [
"BSD-3-Clause"
] | null | null | null | import logging
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(message)s',
"%Y-%m-%d %H:%M:%S"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
import mimetypes
| 27.068966 | 70 | 0.700637 | import logging
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(message)s',
"%Y-%m-%d %H:%M:%S"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
import mimetypes
def setup_logging(config):
    """Apply optional 'log_path' and 'log_level' entries from `config`
    to the module-level logger. Missing keys are ignored."""
    if 'log_path' in config:
        # Mirror log output to a file, reusing the shared formatter.
        file_handler = logging.FileHandler(config['log_path'])
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    if 'log_level' in config:
        level_name = config['log_level'].upper()
        logger.setLevel(getattr(logging, level_name))
def setup_mime(config):
    """Initialise the mimetypes registry from optional config entries:
    'mimetypes_path' (a single map file) and/or 'mimetypes_paths' (a
    list of map files). If both are present, both init calls run."""
    for key, is_single in (('mimetypes_path', True), ('mimetypes_paths', False)):
        if key in config:
            value = config[key]
            mimetypes.init([value] if is_single else value)
| 443 | 0 | 46 |
3d45fe73ba920eaffc67a7fa644b7150bb3136b0 | 6,819 | py | Python | limix_ext/leap/core/calc_h2.py | glimix/limix-ext | 7cf7a3b2b02f6a73cbba90f1945a06b9295b7357 | [
"MIT"
] | null | null | null | limix_ext/leap/core/calc_h2.py | glimix/limix-ext | 7cf7a3b2b02f6a73cbba90f1945a06b9295b7357 | [
"MIT"
] | 2 | 2017-06-05T08:29:22.000Z | 2017-06-07T16:54:54.000Z | limix_ext/leap/core/calc_h2.py | glimix/limix-ext | 7cf7a3b2b02f6a73cbba90f1945a06b9295b7357 | [
"MIT"
] | null | null | null | import logging
import numpy as np
import scipy.stats as stats
from .eigd import eigenDecompose
| 33.757426 | 79 | 0.525884 | import logging
import numpy as np
import scipy.stats as stats
from .eigd import eigenDecompose
def calcLiabThreholds(U, S, keepArr, phe, numRemovePCs, prev):
    """Fit per-individual liability thresholds from the top PCs.

    Fits an (effectively unregularized, C=500000) logistic regression of
    case status on the last `numRemovePCs` principal components, converts
    the predicted case probabilities to population-scale probabilities
    using the disease prevalence `prev`, and inverts the standard normal
    survival function to get each individual's liability threshold.

    Returns (Pi, thresholds): sample-scale case probabilities and the
    per-individual liability thresholds (sentinel +/-999999999 where the
    converted probability saturates at 0 or 1).
    """
    #Run logistic regression
    G = U[:, -numRemovePCs:] * np.sqrt(S[-numRemovePCs:])
    import sklearn.linear_model
    Logreg = sklearn.linear_model.LogisticRegression(
        penalty='l2', C=500000, fit_intercept=True)
    Logreg.fit(G[keepArr, :numRemovePCs], phe[keepArr])
    #Compute individual thresholds
    Pi = Logreg.predict_proba(G)[:, 1]
    #Compute thresholds and save to files
    # P = case fraction in the sample; K = population prevalence.
    P = np.sum(phe == 1) / float(phe.shape[0])
    K = prev
    # Convert sample-ascertained probabilities Pi to population scale.
    Ki = K * (1 - P) / (P * (1 - K)) * Pi / (1 + K * (1 - P) /
                                             (P * (1 - K)) * Pi - Pi)
    thresholds = stats.norm(0, 1).isf(Ki)
    # Sentinels where the converted probability saturates.
    thresholds[Ki >= 1.] = -999999999
    thresholds[Ki <= 0.] = 999999999
    return Pi, thresholds
def calcH2Continuous_twotails(XXT, phe, keepArr, prev, h2coeff):
logger = logging.getLogger(__name__)
logger.debug('computing h2 for a two-tails ascertained study.')
XXT = XXT[np.ix_(keepArr, keepArr)]
phe = phe[keepArr]
t1 = stats.norm(0, 1).ppf(prev)
t2 = stats.norm(0, 1).isf(prev)
phit1 = stats.norm(0, 1).pdf(t1)
phit2 = stats.norm(0, 1).pdf(t2)
K1 = prev
K2 = prev
xCoeff = ((phit2 * t2 - phit1 * t1 + K1 + K2)**2 * (K1 + K2)**2 -
(phit2 - phit1)**4) / (K1 + K2)**4
intersect = ((phit2 - phit1) / (K1 + K2))**2
pheMean = 0
pheVar = 1
x = (xCoeff * h2coeff) * XXT
y = np.outer((phe - pheMean) / np.sqrt(pheVar),
(phe - pheMean) / np.sqrt(pheVar))
y -= intersect
y = y[np.triu_indices(y.shape[0], 1)]
x = x[np.triu_indices(x.shape[0], 1)]
slope, _, _, _, _ = stats.linregress(x, y)
return slope
def calcH2Continuous(XXT, phe, keepArr, prev, h2coeff):
    """Heritability estimate for a single-tail ascertained continuous
    phenotype.

    Same interface as calcH2Continuous_twotails; the ascertainment
    correction here uses only the upper tail at prevalence ``prev``.
    Returns the regression slope used as the h2 estimate.
    """
    cutoff = stats.norm(0, 1).isf(prev)
    dens = stats.norm(0, 1).pdf(cutoff)
    K1 = 1 - prev
    K2 = 1 - K1
    # Sample fraction below the cutoff -- deliberately computed on the FULL
    # sample, before keepArr filtering, exactly as in the original code.
    P = np.sum(phe < cutoff) / float(phe.shape[0])
    P2 = 1.0
    P1 = K2 * P2 * P / (K1 * (1 - P))
    R = P2 / P1

    kin = XXT[np.ix_(keepArr, keepArr)]
    z = phe[keepArr]  # phenotype assumed standardized (mean 0, variance 1)

    xCoeff = (((R - 1) * dens * cutoff + K1 + R * K2)**2 * (K1 + R * K2)**2 -
              ((R - 1) * dens)**4) / (K1 + R * K2)**4
    x = (xCoeff * h2coeff) * kin
    y = np.outer(z, z) - ((R - 1) * dens / (K1 + R * K2))**2

    # Slope of the pairwise-product regression over upper-triangular pairs.
    pairs = np.triu_indices(z.shape[0], 1)
    return stats.linregress(x[pairs], y[pairs])[0]
def calcH2Binary(XXT, phe, probs, thresholds, keepArr, prev, h2coeff):
    """Heritability estimate for a binary (case/control) phenotype.

    When ``thresholds`` is None a single population-wide liability
    threshold is used; otherwise the per-individual probabilities and
    thresholds (from calcLiabThreholds) feed the covariate-aware
    estimator.  Returns the regression slope used as the h2 estimate.
    """
    K = prev
    # Observed case fraction -- computed on the FULL sample, before the
    # keepArr filtering, exactly as in the original code.
    P = np.sum(phe > 0) / float(phe.shape[0])
    kin = XXT[np.ix_(keepArr, keepArr)]
    y_obs = phe[keepArr]

    if thresholds is None:
        # Single threshold: standardize the 0/1 phenotype, scale the kinship.
        t = stats.norm(0, 1).isf(K)
        phit = stats.norm(0, 1).pdf(t)
        xCoeff = P * (1 - P) / (K**2 * (1 - K)**2) * phit**2 * h2coeff
        y = np.outer((y_obs - P) / np.sqrt(P * (1 - P)),
                     (y_obs - P) / np.sqrt(P * (1 - P)))
        x = xCoeff * kin
    else:
        pr = probs[keepArr]
        th = thresholds[keepArr]
        Ki = K * (1 - P) / (P * (1 - K)) * pr / (1 + K * (1 - P) /
                                                 (P * (1 - K)) * pr - pr)
        phit = stats.norm(0, 1).pdf(th)
        probsInvOuter = np.outer(pr * (1 - pr), pr * (1 - pr))
        y = np.outer(y_obs - pr, y_obs - pr) / np.sqrt(probsInvOuter)
        sumProbs = np.tile(np.column_stack(pr).T,
                           (1, pr.shape[0])) + np.tile(
                               pr, (pr.shape[0], 1))
        Atag0 = np.outer(phit, phit) * (
            1 - (sumProbs) * (P - K) / (P * (1 - K)) + np.outer(pr, pr) *
            (((P - K) / (P * (1 - K)))**2)) / np.sqrt(probsInvOuter)
        B0 = np.outer(Ki + (1 - Ki) * (K * (1 - P)) / (P * (1 - K)),
                      Ki + (1 - Ki) * (K * (1 - P)) / (P * (1 - K)))
        x = (Atag0 / B0 * h2coeff) * kin

    pairs = np.triu_indices(y.shape[0], 1)
    return stats.linregress(x[pairs], y[pairs])[0]
def calc_h2(pheno, prev, eigen, keepArr, numRemovePCs, h2coeff, lowtail):
    """Estimate heritability (h2) from a phenotype and a precomputed kinship.

    Parameters
    ----------
    pheno : array or dict
        Phenotype vector, or a dict carrying the vector under 'vals'.
    prev : float
        Population prevalence (binary) / ascertainment fraction (continuous).
    eigen : dict
        Must contain the kinship matrix under 'XXT'; may also carry its
        eigendecomposition under 'arr_0' (vectors) and 'arr_1' (values).
    keepArr : bool array or None
        Mask of individuals to use; None keeps everyone.
    numRemovePCs : int
        Number of top principal components to remove from the kinship.
    h2coeff : float
        Scaling constant forwarded to the estimators.
    lowtail : bool
        For continuous phenotypes, False selects the single-tail estimator
        and True the two-tails estimator.

    Returns
    -------
    float
        The h2 estimate, floored at 0.01 when negative or NaN.
    """
    logger = logging.getLogger(__name__)
    # pheno = leapUtils._fixup_pheno(pheno)
    # Extract the phenotype vector.
    if isinstance(pheno, dict):
        phe = pheno['vals']
    else:
        phe = pheno
    if len(phe.shape) == 2:
        if phe.shape[1] == 1:
            phe = phe[:, 0]
        else:
            raise Exception('More than one phenotype found')
    if keepArr is None:
        # Fixed: np.bool was removed in NumPy 1.24; the builtin bool is the
        # equivalent dtype.
        keepArr = np.ones(phe.shape[0], dtype=bool)

    # Kinship matrix.
    XXT = eigen['XXT']

    # Remove the top PCs from the kinship matrix.
    if numRemovePCs > 0:
        if eigen is None:
            # Fixed: the original called leapUtils.eigenDecompose, but
            # leapUtils is never imported in this module -- that raised a
            # NameError.  (This branch is currently unreachable because
            # eigen['XXT'] was accessed above, but keep it correct in case
            # the control flow changes.)
            S, U = eigenDecompose(XXT)
        else:
            S, U = eigen['arr_1'], eigen['arr_0']
        logger.info('Removing the top %d PCs from the kinship matrix',
                    numRemovePCs)
        XXT -= (U[:, -numRemovePCs:] *
                S[-numRemovePCs:]).dot(U[:, -numRemovePCs:].T)

    # Determine if this is a case-control study.
    pheUnique = np.unique(phe)
    if pheUnique.shape[0] < 2:
        raise Exception('Less than two different phenotypes observed')
    isCaseControl = (pheUnique.shape[0] == 2)
    if isCaseControl:
        logger.debug('Computing h2 for a binary phenotype')
        # Binarize around the mean on a copy so the caller's array is not
        # mutated in place (the original overwrote the input array).
        phe = phe.copy()
        pheMean = phe.mean()
        phe[phe <= pheMean] = 0
        phe[phe > pheMean] = 1
        if numRemovePCs > 0:
            probs, thresholds = calcLiabThreholds(U, S, keepArr, phe,
                                                  numRemovePCs, prev)
            h2 = calcH2Binary(XXT, phe, probs, thresholds, keepArr, prev,
                              h2coeff)
        else:
            h2 = calcH2Binary(XXT, phe, None, None, keepArr, prev, h2coeff)
    else:
        logger.debug('Computing h2 for a continuous phenotype')
        if not lowtail:
            h2 = calcH2Continuous(XXT, phe, keepArr, prev, h2coeff)
        else:
            h2 = calcH2Continuous_twotails(XXT, phe, keepArr, prev, h2coeff)

    # Degenerate estimates are floored rather than raised, matching the
    # original (the raises remain commented out below).
    if h2 <= 0:
        h2 = 0.01
        print("Negative heritability found. Exitting...")
        # raise Exception("Negative heritability found. Exitting...")
    if np.isnan(h2):
        h2 = 0.01
        print("Invalid heritability estimate. " +
              "Please double-check your input for any errors.")
        # raise Exception("Invalid heritability estimate. "+
        #                 "Please double-check your input for any errors.")
    logger.debug('h2: %0.6f', h2)
    return h2
| 6,602 | 0 | 115 |
abb4ba64e345114c0b5be170656a0f297a42cd96 | 511 | py | Python | Vignette_filter.py | OhmVikrant/Vignette-Filter-using-OpenCV | 4ffe8ad956370721cea9b648765e22d6ae56cdcc | [
"MIT"
] | 2 | 2020-09-05T19:03:29.000Z | 2020-09-05T19:08:56.000Z | Vignette_filter.py | OhmVikrant/Vignette-Filter-using-OpenCV | 4ffe8ad956370721cea9b648765e22d6ae56cdcc | [
"MIT"
] | null | null | null | Vignette_filter.py | OhmVikrant/Vignette-Filter-using-OpenCV | 4ffe8ad956370721cea9b648765e22d6ae56cdcc | [
"MIT"
] | null | null | null | import numpy as np
import cv2
# Vignette filter: darken the image toward its edges with a 2-D Gaussian mask.
input_image = cv2.imread('input/strawberry.jpg')  # renamed: `input` shadowed the builtin
height, width = input_image.shape[:2]
# One 1-D Gaussian per axis; their outer product is the 2-D vignette kernel.
x_gauss = cv2.getGaussianKernel(width, 250)
y_gauss = cv2.getGaussianKernel(height, 200)
# (height, 1) x (1, width) -> (height, width).  Fixed: the original built
# x_gauss * y_gauss.T, a (width, height) kernel that does not match the
# image layout (only works for square images).
kernel = y_gauss * x_gauss.T
mask = kernel * 255 / np.linalg.norm(kernel)
# Fixed: `output` was indexed before ever being defined (NameError);
# start from a copy of the input and attenuate each channel.
output = np.copy(input_image)
output[:, :, 0] = input_image[:, :, 0] * mask
output[:, :, 1] = input_image[:, :, 1] * mask
output[:, :, 2] = input_image[:, :, 2] * mask
cv2.imshow('vignette', output)
cv2.waitKey(0)
cv2.destroyAllWindows() | 20.44 | 47 | 0.610568 | import numpy as np
import cv2
# Vignette filter: darken the image toward its edges with a 2-D Gaussian mask.
input_image = cv2.imread('input/strawberry.jpg')  # renamed: `input` shadowed the builtin
height, width = input_image.shape[:2]
# One 1-D Gaussian per axis; their outer product is the 2-D vignette kernel.
x_gauss = cv2.getGaussianKernel(width, 250)
y_gauss = cv2.getGaussianKernel(height, 200)
# (height, 1) x (1, width) -> (height, width).  Fixed: the original built
# x_gauss * y_gauss.T, a (width, height) kernel that does not match the
# image layout (only works for square images).
kernel = y_gauss * x_gauss.T
mask = kernel * 255 / np.linalg.norm(kernel)
# Fixed: `output` was indexed before ever being defined (NameError);
# start from a copy of the input and attenuate each channel.
output = np.copy(input_image)
output[:, :, 0] = input_image[:, :, 0] * mask
output[:, :, 1] = input_image[:, :, 1] * mask
output[:, :, 2] = input_image[:, :, 2] * mask
cv2.imshow('vignette', output)
cv2.waitKey(0)
cv2.destroyAllWindows() | 0 | 0 | 0 |
b54d847ae63ed6f54873fdf4e76f651ed8a2b61d | 4,248 | py | Python | python-src/graphoire/digraph.py | ccorbell/graphoire | 566b0a27a9d6b87c5952bcc6e257a6d90621ca06 | [
"Apache-2.0"
] | null | null | null | python-src/graphoire/digraph.py | ccorbell/graphoire | 566b0a27a9d6b87c5952bcc6e257a6d90621ca06 | [
"Apache-2.0"
] | null | null | null | python-src/graphoire/digraph.py | ccorbell/graphoire | 566b0a27a9d6b87c5952bcc6e257a6d90621ca06 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 11:40:58 2021
@author: Christopher Corbell
Things we can use here:
- construct Digraph from underlying Graph (default direction for edges)
- DigraphFactory to construct some interesting digraphs
"""
from graphoire.graph import Graph
class Digraph(Graph):
    """
    Digraph is a subclass of Graph that implements edge direction.
    This includes distinguishing between u,v and v,u edges (the
    base class resolves such edges to u,v). The class also can
    calculate in-degree and out-degree of vertices; note that the
    base class vertexDegree() and related methods consider out-degree only.
    """
    def getOutNeighbors(self, vertex):
        """
        Get a list of vertices that this vertex connects-outward to.
        Parameters
        ----------
        vertex : int
            The vertex index
        Returns list of adjacent head-vertex integer indices.
        """
        return [edge[1] for edge in self.edges if edge[0] == vertex]
    def getInNeighbors(self, vertex):
        """
        Get a list of vertices that connect inward to this vertex.
        Parameters
        ----------
        vertex : int
            The vertex index
        Returns list of adjacent tail-vertex integer indices.
        """
        return [edge[0] for edge in self.edges if edge[1] == vertex]
    def edgeDirection(self, tail, head):
        """
        Get the direction of the edge between tail and head.
        Parameters
        ----------
        tail : integer (vertex index)
            The vertex to interpret as tail
        head : integer (vertex index)
            The vertex to interpret as head
        Returns
        -------
        1 when a directed edge tail->head exists, -1 when the edge runs
        the other way, and 0 when the vertices are not adjacent.
        """
        if self.hasEdge(tail, head):
            return 1
        if self.hasEdge(head, tail):
            return -1
        return 0
| 28.702703 | 87 | 0.558851 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 11:40:58 2021
@author: Christopher Corbell
Things we can use here:
- construct Digraph from underlying Graph (default direction for edges)
- DigraphFactory to construct some interesting digraphs
"""
from graphoire.graph import Graph
class Digraph(Graph):
    """
    Digraph is a subclass of Graph that implements edge direction.
    This includes distinguishing between u,v and v,u edges (the
    base class resolves such edges to u,v). The class also can
    calculate in-degree and out-degree of vertices; note that the
    base class vertexDegree() and related methods consider out-degree only.
    Edges are stored as 2-element lists; throughout this class edge[0]
    is treated as the tail vertex and edge[1] as the head vertex.
    """
    def __init__(self, n: int):
        """Create a directed graph with n vertices and no edges."""
        Graph.__init__(self, n)
        self.directed = True
        # Cache of vertex -> in-degree.  Out-degrees reuse the base-class
        # degree_cache (assumed initialized by Graph.__init__ -- TODO confirm).
        self.indegree_cache = {}
    def addEdge(self, i, j, sortEdges=False):
        """Add the directed edge [i, j] if not already present; both degree
        caches are invalidated.

        NOTE(review): the original comment here read "i = head, j = tail",
        but the rest of the class (getOutNeighbors, edgeDirection) treats
        edge[0] as the tail -- that comment appears inverted.
        """
        edge = [i, j]
        if not edge in self.edges:
            self.edges.append(edge)
            if True == sortEdges:
                self.sortEdges()
        self.degree_cache.clear()
        self.indegree_cache.clear()
    def vertexDegree(self, n):
        """For a digraph, plain degree is interpreted as out-degree."""
        return self.vertexOutDegree(n)
    def vertexOutDegree(self, n):
        """Count edges whose tail is vertex n (result is cached)."""
        if n >= self.n:
            raise Exception(f"Vertex index {n} out of range for graph degree {self.n}")
        if n in self.degree_cache.keys():
            return self.degree_cache[n]
        degree = 0
        for edge in self.edges:
            if edge[0] == n:
                degree += 1
            # NOTE(review): this early exit assumes self.edges is kept sorted
            # by tail vertex -- verify, since addEdge only sorts on request.
            if edge[0] > n:
                break
        self.degree_cache[n] = degree
        return degree
    def vertexInDegree(self, n):
        """Count edges whose head is vertex n (cached; full edge scan)."""
        if n >= self.n:
            raise Exception(f"Vertex index {n} out of range for graph degree {self.n}")
        if n in self.indegree_cache.keys():
            return self.indegree_cache[n]
        degree = 0
        for edge in self.edges:
            if edge[1] == n:
                degree += 1
        self.indegree_cache[n] = degree
        return degree
    def getUnderlyingGraph(self):
        """Return an undirected Graph copy of this digraph."""
        underG = Graph(self.n)
        # Add our directed edges to the undirected copy,
        # which will automatically consolidate any
        # duplicates and discard direction information
        for edge in self.edges:
            underG.addEdge(edge[0], edge[1])
        # Copy vertex labels but not edge labels
        if self.hasVertexLabels():
            underG.vtx_labels = self.vtx_labels.copy()
        return underG
    def getOutNeighbors(self, vertex):
        """
        Get a list of vertices that this vertex connects-outward to.
        Parameters
        ----------
        vertex : int
            The vertex index
        Returns list of adjacent head-vertex integer indices.
        """
        neighbors = []
        for edge in self.edges:
            if edge[0] == vertex:
                neighbors.append(edge[1])
        return neighbors
    def getInNeighbors(self, vertex):
        """
        Get a list of vertices that connect inward to this vertex.
        Parameters
        ----------
        vertex : int
            The vertex index
        Returns list of adjacent tail-vertex integer indices.
        """
        neighbors = []
        for edge in self.edges:
            if edge[1] == vertex:
                neighbors.append(edge[0])
        return neighbors
    def edgeDirection(self, tail, head):
        """
        Get the direction of edge between tail and head.
        Parameters
        ----------
        tail : integer (vertex index)
            The vertex to interpret as tail
        head : integer (vertex index)
            The vertex to interpret as head
        Returns
        -------
        An integer value 1 if this is a directed edge from
        tail to head, -1 if the edge is the other direction,
        and 0 if there is no edge.
        """
        if self.hasEdge(tail, head):
            return 1
        elif self.hasEdge(head, tail):
            return -1
        else:
            return 0
    def clearCaches(self):
        """Clear the in-degree cache, then delegate to the base class."""
        self.indegree_cache.clear()
        Graph.clearCaches(self)
| 1,771 | 0 | 229 |
0c131e9958b01000cfd500c7e63ab05467f30879 | 6,362 | py | Python | bgp/rlkit/torch/tdm/envs/reacher_7dof_env.py | aypan17/value_learning | 240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe | [
"MIT"
] | null | null | null | bgp/rlkit/torch/tdm/envs/reacher_7dof_env.py | aypan17/value_learning | 240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe | [
"MIT"
] | null | null | null | bgp/rlkit/torch/tdm/envs/reacher_7dof_env.py | aypan17/value_learning | 240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe | [
"MIT"
] | null | null | null | from collections import OrderedDict
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym.spaces import Box
from bgp.rlkit.core import logger as default_logger
from bgp.rlkit.core.eval_util import create_stats_ordered_dict
from bgp.rlkit.core.serializable import Serializable
from bgp.rlkit.envs.mujoco_env import get_asset_xml
from bgp.rlkit.samplers.util import get_stat_in_paths
from bgp.rlkit.torch.tdm.envs.multitask_env import MultitaskEnv
| 33.661376 | 86 | 0.596511 | from collections import OrderedDict
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym.spaces import Box
from bgp.rlkit.core import logger as default_logger
from bgp.rlkit.core.eval_util import create_stats_ordered_dict
from bgp.rlkit.core.serializable import Serializable
from bgp.rlkit.envs.mujoco_env import get_asset_xml
from bgp.rlkit.samplers.util import get_stat_in_paths
from bgp.rlkit.torch.tdm.envs.multitask_env import MultitaskEnv
class Reacher7DofMultitaskEnv(
        MultitaskEnv, mujoco_env.MujocoEnv, Serializable
):
    """Multitask 7-DoF reacher: move the end effector ("tips_arm") to a
    desired xyz position.

    Observation layout (17 values): 7 joint angles, 7 joint velocities,
    3-D end-effector position.
    """
    def __init__(self, distance_metric_order=None, goal_dim_weights=None):
        # Target end-effector position; updated via _set_goal_xyz().
        self._desired_xyz = np.zeros(3)
        Serializable.quick_init(self, locals())
        MultitaskEnv.__init__(
            self,
            distance_metric_order=distance_metric_order,
            goal_dim_weights=goal_dim_weights,
        )
        mujoco_env.MujocoEnv.__init__(
            self,
            get_asset_xml('reacher_7dof.xml'),
            5,
        )
        # Bounds for the 17-D observation described in the class docstring.
        self.observation_space = Box(
            np.array([
                -2.28, -0.52, -1.4, -2.32, -1.5, -1.094, -1.5, # joint
                -3, -3, -3, -3, -3, -3, -3, # velocity
                -0.75, -1.25, -0.2, # EE xyz
            ]),
            np.array([
                1.71, 1.39, 1.7, 0, 1.5, 0, 1.5, # joints
                3, 3, 3, 3, 3, 3, 3, # velocity
                0.75, 0.25, 0.6, # EE xyz
            ])
        )
    def viewer_setup(self):
        # Free camera (no tracked body), zoomed out to show the whole arm.
        self.viewer.cam.trackbodyid = -1
        self.viewer.cam.distance = 4.0
    def reset_model(self):
        """Reset to the initial pose with small random joint velocities."""
        qpos = self.init_qpos
        qvel = self.init_qvel + self.np_random.uniform(low=-0.005,
                high=0.005, size=self.model.nv)
        # Zero the last 7 velocity entries (assumed to belong to the goal
        # marker, mirroring _set_goal_xyz -- TODO confirm against the XML).
        qvel[-7:] = 0
        self.set_state(qpos, qvel)
        self._set_goal_xyz(self._desired_xyz)
        return self._get_obs()
    def _get_obs(self):
        # NOTE(review): self.model.data is the old mujoco-py (<1.50) API.
        return np.concatenate([
            self.model.data.qpos.flat[:7],
            self.model.data.qvel.flat[:7],
            self.get_body_com("tips_arm"),
        ])
    def _step(self, a):
        """Advance one step; reward is the negative end-effector distance
        to the desired xyz (computed before the simulation step)."""
        distance = np.linalg.norm(
            self.get_body_com("tips_arm") - self._desired_xyz
        )
        reward = - distance
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        done = False
        return ob, reward, done, dict(
            distance=distance,
            multitask_goal=self.multitask_goal,
            desired_xyz=self._desired_xyz,
            goal=self.multitask_goal,
        )
    def _set_goal_xyz(self, xyz_pos):
        """Record xyz_pos as the desired target and write it into the sim
        state (qpos slots -7:-4, presumably the goal marker -- TODO confirm)."""
        current_qpos = self.model.data.qpos.flat
        current_qvel = self.model.data.qvel.flat.copy()
        new_qpos = current_qpos.copy()
        new_qpos[-7:-4] = xyz_pos
        self._desired_xyz = xyz_pos
        self.set_state(new_qpos, current_qvel)
    def log_diagnostics(self, paths, logger=default_logger):
        """Record per-step and final distance-to-goal statistics."""
        super().log_diagnostics(paths)
        statistics = OrderedDict()
        euclidean_distances = get_stat_in_paths(
            paths, 'env_infos', 'distance'
        )
        statistics.update(create_stats_ordered_dict(
            'Euclidean distance to goal', euclidean_distances
        ))
        statistics.update(create_stats_ordered_dict(
            'Final Euclidean distance to goal',
            [d[-1] for d in euclidean_distances],
            always_show_all_stats=True,
        ))
        for key, value in statistics.items():
            logger.record_tabular(key, value)
    def joints_to_full_state(self, joints):
        """Return the full 17-D observation for the given 7 joint angles.

        Temporarily sets the simulator to `joints`, reads the observation,
        then restores the previous state.
        """
        current_qpos = self.model.data.qpos.flat.copy()
        current_qvel = self.model.data.qvel.flat.copy()
        new_qpos = current_qpos.copy()
        new_qpos[:7] = joints
        self.set_state(new_qpos, current_qvel)
        full_state = self._get_obs().copy()
        self.set_state(current_qpos, current_qvel)
        return full_state
class Reacher7DofFullGoal(Reacher7DofMultitaskEnv):
    """Reacher variant whose goal space is the full 17-D state
    (7 joint angles, 7 joint velocities, end-effector xyz)."""
    @property
    def goal_dim(self) -> int:
        # 7 joints + 7 velocities + 3 end-effector coordinates.
        return 17
    def sample_goals(self, batch_size):
        """Goals are simply sampled states."""
        return self.sample_states(batch_size)
    def convert_obs_to_goals(self, obs):
        # Observations already live in goal space (identity mapping).
        return obs
    def set_goal(self, goal):
        """Set the multitask goal and sync the goal marker's xyz to it."""
        super().set_goal(goal)
        self._set_goal_xyz_automatically(goal)
    def modify_goal_for_rollout(self, goal):
        # Zero the velocity components (indices 7:14): target a still pose.
        goal[7:14] = 0
        return goal
    def _set_goal_xyz_automatically(self, goal):
        """Derive the goal end-effector xyz from the goal joint angles.

        Temporarily sets the sim to the goal joints, reads "tips_arm",
        restores the state, then writes the xyz into the goal marker and
        into multitask_goal[14:17].
        """
        current_qpos = self.model.data.qpos.flat.copy()
        current_qvel = self.model.data.qvel.flat.copy()
        new_qpos = current_qpos.copy()
        new_qpos[:7] = goal[:7]
        self.set_state(new_qpos, current_qvel)
        goal_xyz = self.get_body_com("tips_arm").copy()
        self.set_state(current_qpos, current_qvel)
        self._set_goal_xyz(goal_xyz)
        self.multitask_goal[14:17] = goal_xyz
    def sample_states(self, batch_size):
        """Sample (batch_size, 17) states uniformly within the
        observation-space bounds.

        NOTE(review): uses the global np.random stream rather than the
        env's seeded self.np_random.
        """
        random_pos = np.random.uniform(
            [-2.28, -0.52, -1.4, -2.32, -1.5, -1.094, -1.5],
            [1.71, 1.39, 1.7, 0, 1.5, 0, 1.5, ],
            (batch_size, 7)
        )
        random_vel = np.random.uniform(-3, 3, (batch_size, 7))
        random_xyz = np.random.uniform(
            np.array([-0.75, -1.25, -0.2]),
            np.array([0.75, 0.25, 0.6]),
            (batch_size, 3)
        )
        return np.hstack((
            random_pos,
            random_vel,
            random_xyz,
        ))
    def cost_fn(self, states, actions, next_states):
        """
        This is added for model-based code. This is COST not reward.
        So lower is better. Only the 7 joint angles are penalized
        (squared distance to the goal joint angles).
        :param states: (BATCH_SIZE x state_dim) numpy array
        :param actions: (BATCH_SIZE x action_dim) numpy array
        :param next_states: (BATCH_SIZE x state_dim) numpy array
        :return: (BATCH_SIZE, ) numpy array
        """
        if len(next_states.shape) == 1:
            next_states = np.expand_dims(next_states, 0)
        # xyz_pos = next_states[:, 14:17]
        # desired_xyz_pos = self.multitask_goal[14:17] * np.ones_like(xyz_pos)
        # diff = xyz_pos - desired_xyz_pos
        next_joint_angles = next_states[:, :7]
        desired_joint_angles = (
            self.multitask_goal[:7] * np.ones_like(next_joint_angles)
        )
        diff = next_joint_angles - desired_joint_angles
        # NOTE(review): keepdims=True yields shape (BATCH_SIZE, 1), not
        # (BATCH_SIZE,) as the docstring above claims -- confirm callers.
        return (diff**2).sum(1, keepdims=True)
| 4,407 | 1,231 | 261 |
066cb5c7846b0bc11e82f86423780ee8635d8724 | 1,295 | py | Python | simpleformat.py | Kronuz/sublime-rst-completion | ed265f303ff2b3e1c4e8d92d2c8f23ebb8ba425c | [
"BSD-3-Clause"
] | 173 | 2015-01-05T06:26:06.000Z | 2022-03-26T08:18:58.000Z | simpleformat.py | Kronuz/sublime-rst-completion | ed265f303ff2b3e1c4e8d92d2c8f23ebb8ba425c | [
"BSD-3-Clause"
] | 29 | 2015-02-17T09:16:40.000Z | 2022-02-07T11:25:26.000Z | simpleformat.py | Kronuz/sublime-rst-completion | ed265f303ff2b3e1c4e8d92d2c8f23ebb8ba425c | [
"BSD-3-Clause"
] | 44 | 2015-03-08T20:49:23.000Z | 2022-03-09T23:52:53.000Z | import sublime
import sublime_plugin
class SurroundCommand(sublime_plugin.TextCommand):
"""
Base class to surround the selection with text.
"""
surround = ''
| 30.116279 | 100 | 0.613127 | import sublime
import sublime_plugin
class SurroundCommand(sublime_plugin.TextCommand):
"""
Base class to surround the selection with text.
"""
surround = ''
def run(self, edit):
for sel in self.view.sel():
len_surround = len(self.surround)
sel_str = self.view.substr(sel)
rsel = sublime.Region(sel.begin() - len_surround, sel.end() + len_surround)
rsel_str = self.view.substr(rsel)
if sel_str[:len_surround] == self.surround and sel_str[-len_surround:] == self.surround:
replacement = sel_str[len_surround:-len_surround]
else:
replacement = "%s%s%s" % (self.surround, sel_str, self.surround)
if rsel_str == replacement:
self.view.sel().subtract(sel)
self.view.replace(edit, rsel, sel_str)
self.view.sel().add(sublime.Region(rsel.begin(), rsel.begin() + len(sel_str)))
else:
self.view.replace(edit, sel, replacement)
class StrongemphasisCommand(SurroundCommand):
surround = "**"
class EmphasisCommand(SurroundCommand):
surround = "*"
class LiteralCommand(SurroundCommand):
surround = "``"
class SubstitutionCommand(SurroundCommand):
surround = "|"
| 837 | 159 | 119 |
5db314ef9db7f8c30d914a66c1929ddcb62a2832 | 535 | py | Python | server/generator.py | cryptSky/hlsa_task8 | ed0d8d9d69b5e8f3bdfa5964c66ce6dcf27f07c1 | [
"MIT"
] | null | null | null | server/generator.py | cryptSky/hlsa_task8 | ed0d8d9d69b5e8f3bdfa5964c66ce6dcf27f07c1 | [
"MIT"
] | null | null | null | server/generator.py | cryptSky/hlsa_task8 | ed0d8d9d69b5e8f3bdfa5964c66ce6dcf27f07c1 | [
"MIT"
] | null | null | null | import requests
from faker import Faker
from faker.providers import date_time
import json
fake = Faker()
fake.add_provider(date_time)
for i in range(40000000):
user = {
'name': fake.name(),
'email': fake.email(),
'birthdate': fake.date()
}
response = requests.post('http://localhost:8000/users', json=json.dumps(user))
if response.ok:
if i % 100000 == 0:
user_id = response.json()['id']
print("User {0} added".format(user_id))
else:
print("Error") | 23.26087 | 82 | 0.6 | import requests
from faker import Faker
from faker.providers import date_time
import json
fake = Faker()
fake.add_provider(date_time)
for i in range(40000000):
user = {
'name': fake.name(),
'email': fake.email(),
'birthdate': fake.date()
}
response = requests.post('http://localhost:8000/users', json=json.dumps(user))
if response.ok:
if i % 100000 == 0:
user_id = response.json()['id']
print("User {0} added".format(user_id))
else:
print("Error") | 0 | 0 | 0 |
6d8fbfaae089b733b5e1d89796d42c25b15b2835 | 1,212 | py | Python | python/korean-breaks.py | ye-kyaw-thu/tools- | 805e0759cb1b700cb99ce96364e9d8056143df64 | [
"MIT"
] | 11 | 2018-10-01T11:00:12.000Z | 2021-11-20T18:18:17.000Z | python/korean-breaks.py | ye-kyaw-thu/tools- | 805e0759cb1b700cb99ce96364e9d8056143df64 | [
"MIT"
] | null | null | null | python/korean-breaks.py | ye-kyaw-thu/tools- | 805e0759cb1b700cb99ce96364e9d8056143df64 | [
"MIT"
] | 4 | 2020-06-12T09:42:18.000Z | 2021-12-12T07:04:28.000Z | import sys
from hangul_utils import *
# for word segmentation and pos tagging of Korean text
# Note: You need to install "hangul-utils" in advanced
# Ref link: https://github.com/kaniblu/hangul-utils
# written by Ye Kyaw Thu, Visiting Professor, LST, NECTEC, Thailand
#
# How to run: python ./korean-breaks.py <input-filename> <word|morph|pos>
# eg 1: python ./korean-breaks.py ./tst.ko -pos
# eg 2: python ./korean-breaks.py ./tst.ko -morph
# e.g 3: python ./korean-breaks.py ./tst.ko -word
if len(sys.argv) < 3:
print ("You must set two arguments!")
print ("How to run:")
print ("python ./korean-breaks.py <raw-korean-text-filename> <-word|-morph|-pos>")
sys.exit()
else:
f1 = sys.argv[1]
arg = sys.argv[2]
fp1=open(f1,"r")
for line1 in fp1:
if arg.lower() == '-word':
# Word tokenization (mainly using space):
print (" ".join(list(word_tokenize(line1.strip()))))
elif arg.lower() == '-morph':
# Morpheme tokenization
print (" ".join(list(morph_tokenize(line1.strip()))))
elif arg.lower() == '-pos':
# Morpheme tokenization with POS
print (list(morph_tokenize(line1.strip(), pos=True)))
fp1.close()
| 33.666667 | 85 | 0.633663 | import sys
from hangul_utils import *
# for word segmentation and pos tagging of Korean text
# Note: You need to install "hangul-utils" in advanced
# Ref link: https://github.com/kaniblu/hangul-utils
# written by Ye Kyaw Thu, Visiting Professor, LST, NECTEC, Thailand
#
# How to run: python ./korean-breaks.py <input-filename> <word|morph|pos>
# eg 1: python ./korean-breaks.py ./tst.ko -pos
# eg 2: python ./korean-breaks.py ./tst.ko -morph
# e.g 3: python ./korean-breaks.py ./tst.ko -word
if len(sys.argv) < 3:
print ("You must set two arguments!")
print ("How to run:")
print ("python ./korean-breaks.py <raw-korean-text-filename> <-word|-morph|-pos>")
sys.exit()
else:
f1 = sys.argv[1]
arg = sys.argv[2]
fp1=open(f1,"r")
for line1 in fp1:
if arg.lower() == '-word':
# Word tokenization (mainly using space):
print (" ".join(list(word_tokenize(line1.strip()))))
elif arg.lower() == '-morph':
# Morpheme tokenization
print (" ".join(list(morph_tokenize(line1.strip()))))
elif arg.lower() == '-pos':
# Morpheme tokenization with POS
print (list(morph_tokenize(line1.strip(), pos=True)))
fp1.close()
| 0 | 0 | 0 |
8aa0dc07029827c92adf1033bab41cb860f33c8b | 127 | py | Python | DataStructure_And_Algorithm/Week5/edit_distance/edit_distance.py | sngvahmed/Algorithm-Coursera | 6b789b32ddee0bad6f754f3466cfb1a237ce6d0e | [
"Apache-2.0"
] | null | null | null | DataStructure_And_Algorithm/Week5/edit_distance/edit_distance.py | sngvahmed/Algorithm-Coursera | 6b789b32ddee0bad6f754f3466cfb1a237ce6d0e | [
"Apache-2.0"
] | null | null | null | DataStructure_And_Algorithm/Week5/edit_distance/edit_distance.py | sngvahmed/Algorithm-Coursera | 6b789b32ddee0bad6f754f3466cfb1a237ce6d0e | [
"Apache-2.0"
] | 1 | 2018-07-09T09:49:01.000Z | 2018-07-09T09:49:01.000Z | # Uses python3
if __name__ == "__main__":
print(edit_distance(input(), input()))
| 12.7 | 42 | 0.645669 | # Uses python3
def edit_distance(s, t):
return 0
if __name__ == "__main__":
print(edit_distance(input(), input()))
| 19 | 0 | 22 |
cb33b840af0eb1bd12c538c2fcda80451df7fc05 | 3,729 | py | Python | robot_teleop/nodes/auto_move_special.py | caffreyu/icra_ros_love_cannot_speak | 75719fdb2c69ac229ef22146076593af8e70905f | [
"Apache-2.0"
] | null | null | null | robot_teleop/nodes/auto_move_special.py | caffreyu/icra_ros_love_cannot_speak | 75719fdb2c69ac229ef22146076593af8e70905f | [
"Apache-2.0"
] | 1 | 2020-01-02T20:55:07.000Z | 2020-01-02T20:55:07.000Z | robot_teleop/nodes/auto_move_special.py | caffreyu/icra_ros_love_cannot_speak | 75719fdb2c69ac229ef22146076593af8e70905f | [
"Apache-2.0"
] | 1 | 2019-12-27T02:51:08.000Z | 2019-12-27T02:51:08.000Z | #!/usr/bin/env python
# encoding: utf-8
import rospy
import tf
from std_msgs.msg import Float64, Int32, Int8
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist, Vector3
from PID import PID
from math import sin, cos, pi, atan2, sqrt
# Single shared controller instance; its callbacks use the module-level
# `pub` publisher created below (only when run as a script -- the methods
# would raise NameError on `pub` if this module were merely imported).
autoMove = AUTO_MOVE()
"""LinearPub = rospy.Publisher("/command/linear", self.twist, queue_size=5)
AngularPub = rospy.Publisher("/command/angular", self.twist, queue_size=5)"""
# pub = rospy.Publisher('cmd_vel', self.twist, queue_size=10)
if __name__ == '__main__':
    rospy.init_node('robot_teleop')
    # Velocity command publisher read by AUTO_MOVE's callbacks.
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
    # Set subscribers
    rospy.Subscriber("/odom", Odometry, autoMove.getState)
    rospy.Subscriber("/command/pos", Vector3, autoMove.moveCommand)
    # Server(AlignmentControllerConfig, dynamicReconfigureCb)
    rospy.spin()
| 28.25 | 92 | 0.565567 | #!/usr/bin/env python
# encoding: utf-8
import rospy
import tf
from std_msgs.msg import Float64, Int32, Int8
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist, Vector3
from PID import PID
from math import sin, cos, pi, atan2, sqrt
class AUTO_MOVE():
    """Drive the robot toward a commanded position using odometry feedback
    and a PID controller.

    Publishes geometry_msgs Twist messages on the module-level `pub`
    publisher created in __main__ (NOTE(review): the methods raise
    NameError if called before that publisher exists).
    """
    # all self properties (class-level defaults, shared until shadowed
    # per instance)
    linear_vel = 0.02
    angular_vel = 0.5
    current_pos_x = 0
    current_pos_y = 0
    cmd_pos_x = 0
    cmd_pos_y = 0
    diff_x = 0
    diff_y = 0
    angular_cal = 0
    step_size = 0.1
    pid_controller = PID(p=2, i=0.1, d=0, i_max=10, output_max=100)
    twist = Twist()  # NOTE(review): class-level, so shared by all instances
    def shutdown(self):
        """Publish an all-zero Twist to stop the robot."""
        self.twist.linear.x = 0
        self.twist.linear.y = 0
        self.twist.linear.z = 0
        self.twist.angular.x = 0
        self.twist.angular.y = 0
        self.twist.angular.z = 0
        pub.publish(self.twist)
        rospy.loginfo('Shut down')
    def getState(self, msg):
        """Odometry callback: store position and roll/pitch/yaw."""
        odom = msg
        position = odom.pose.pose.position
        self.position_x = position.x
        self.position_y = position.y
        self.position_z = position.z
        quaternion = odom.pose.pose.orientation
        q = [quaternion.x,
             quaternion.y,
             quaternion.z,
             quaternion.w]
        (self.roll, self.pitch, self.yaw) = tf.transformations.euler_from_quaternion(q)
    def moveCommand(self, msg):
        """Position-command callback: when the magic command (-0.1, -0.1)
        arrives, drive toward the hard-coded target; otherwise stop."""
        self.cmd_pos_x = msg.x
        self.cmd_pos_y = msg.y
        # current orientation (yaw)
        current_yaw = self.yaw
        # current position
        current_x=self.position_x
        current_y=self.position_y
        if (self.cmd_pos_x == -0.1 and self.cmd_pos_y == -0.1):
            # target position
            target_x=-0.1
            target_y=-0.0
            # linear error
            x_error=target_x-current_x
            y_error=target_y-current_y
            distance_error=sqrt(x_error**2+y_error**2)
            # # change of angle
            # change_angle=-current_yaw
            # target absolute orientation
            target_yaw = atan2(y_error,x_error)
            # error
            yaw_error = target_yaw-current_yaw
            # clamp the error into [-pi, pi] to get the smallest angle difference
            if yaw_error >= pi:
                yaw_error -= 2*pi
            elif yaw_error <= -pi:
                yaw_error += 2*pi
            # PID control of the z-axis angular velocity (currently disabled)
            # if abs(yaw_error) > 0.005:
            #     self.twist.angular.z = self.pid_controller.calculate_pid(yaw_error)
            #     pub.publish(self.twist)
            #     print('yaw_error: {:g}, current_yaw: {:g}'.format(yaw_error, current_yaw))
            # # else:
            # #     self.shutdown()
            # #     rospy.loginfo('reached')
            # else:
            # PID control of the linear velocity
            if abs(x_error) > 0.005 :
                twist2=Twist()
                twist2.linear.x=self.pid_controller.calculate_pid(x_error)
                twist2.linear.y=0
                twist2.linear.z=0
                pub.publish(twist2)
                print('x_error: {:g}, current_x: {:g} '.format(x_error, current_x))
            else:
                self.shutdown()
                rospy.loginfo('reached')
        else:
            self.shutdown()
# Single shared controller instance; its callbacks use the module-level
# `pub` publisher created below (only when run as a script -- the methods
# would raise NameError on `pub` if this module were merely imported).
autoMove = AUTO_MOVE()
"""LinearPub = rospy.Publisher("/command/linear", self.twist, queue_size=5)
AngularPub = rospy.Publisher("/command/angular", self.twist, queue_size=5)"""
# pub = rospy.Publisher('cmd_vel', self.twist, queue_size=10)
if __name__ == '__main__':
    rospy.init_node('robot_teleop')
    # Velocity command publisher read by AUTO_MOVE's callbacks.
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
    # Set subscribers
    rospy.Subscriber("/odom", Odometry, autoMove.getState)
    rospy.Subscriber("/command/pos", Vector3, autoMove.moveCommand)
    # Server(AlignmentControllerConfig, dynamicReconfigureCb)
    rospy.spin()
| 2,538 | 402 | 23 |
9e9a250fcdb96c00671c5336a19a02e3051aab22 | 777 | py | Python | rcs_back/users_app/views.py | e-kondr01/rcs_back | f0f224d01f7051cce9d5feef692216d48cba6f31 | [
"MIT"
] | null | null | null | rcs_back/users_app/views.py | e-kondr01/rcs_back | f0f224d01f7051cce9d5feef692216d48cba6f31 | [
"MIT"
] | null | null | null | rcs_back/users_app/views.py | e-kondr01/rcs_back | f0f224d01f7051cce9d5feef692216d48cba6f31 | [
"MIT"
] | 1 | 2021-09-25T19:18:55.000Z | 2021-09-25T19:18:55.000Z | from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework.response import Response
from rest_framework.views import APIView
User = get_user_model()
class RetrieveCurrentUserView(APIView):
"""Возвращает информацию о текущем пользователе"""
| 28.777778 | 63 | 0.664093 | from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework.response import Response
from rest_framework.views import APIView
User = get_user_model()
class RetrieveCurrentUserView(APIView):
    """Return information about the currently authenticated user."""

    def get(self, request, *args, **kwargs):
        """Serialize the requesting user's id, email, eco-group membership
        flag and building primary key (or None)."""
        user = request.user
        payload = {
            "id": user.pk,
            "email": user.email,
            "has_eco_group": bool(user.groups.filter(name=settings.ECO_GROUP)),
            "building": user.building.pk if user.building else None,
        }
        return Response(payload)
| 462 | 0 | 27 |
1e2e746d8cf1983c40a783689474f3881ce5bf4c | 624 | py | Python | SCRIPTS/radiotelescopes/plot.py | sarrvesh/academicpages.github.io | 909d8e700ed62c00d48472cf8d8564b0bf4da369 | [
"MIT"
] | null | null | null | SCRIPTS/radiotelescopes/plot.py | sarrvesh/academicpages.github.io | 909d8e700ed62c00d48472cf8d8564b0bf4da369 | [
"MIT"
] | null | null | null | SCRIPTS/radiotelescopes/plot.py | sarrvesh/academicpages.github.io | 909d8e700ed62c00d48472cf8d8564b0bf4da369 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# Create a world map in the Hammer projection
# (the previous comment said "Miller", but projection='hammer' is used).
world_map = Basemap(projection='hammer', lon_0=20, resolution='l')  # renamed: `map` shadowed the builtin
# Plot coastlines and shade the continents
world_map.drawcoastlines(linewidth=0.)
world_map.fillcontinents(alpha=0.85)
# Parse telescopes.txt and plot one red dot per telescope.
# Lines starting with '#' are comments; field 1 carries the latitude with a
# single trailing character stripped (presumably a comma -- TODO confirm
# the file format) and field 2 the longitude.
with open('telescopes.txt', 'r') as telescope_file:  # close the file when done
    for line in telescope_file:
        if line[0] == '#':
            continue
        fields = line.split()  # split once instead of twice per line
        lat = float(fields[1][:-1])
        lon = float(fields[2])
        xpt, ypt = world_map(lon, lat)  # project (lon, lat) to map coordinates
        world_map.plot([xpt], [ypt], 'ro', markersize=0.75)
plt.savefig('radiotelescopes.png', dpi=500, bbox_inches='tight')
| 28.363636 | 64 | 0.690705 | #!/usr/bin/env python
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# Create a world map in the Hammer projection
# (the previous comment said "Miller", but projection='hammer' is used).
world_map = Basemap(projection='hammer', lon_0=20, resolution='l')  # renamed: `map` shadowed the builtin
# Plot coastlines and shade the continents
world_map.drawcoastlines(linewidth=0.)
world_map.fillcontinents(alpha=0.85)
# Parse telescopes.txt and plot one red dot per telescope.
# Lines starting with '#' are comments; field 1 carries the latitude with a
# single trailing character stripped (presumably a comma -- TODO confirm
# the file format) and field 2 the longitude.
with open('telescopes.txt', 'r') as telescope_file:  # close the file when done
    for line in telescope_file:
        if line[0] == '#':
            continue
        fields = line.split()  # split once instead of twice per line
        lat = float(fields[1][:-1])
        lon = float(fields[2])
        xpt, ypt = world_map(lon, lat)  # project (lon, lat) to map coordinates
        world_map.plot([xpt], [ypt], 'ro', markersize=0.75)
plt.savefig('radiotelescopes.png', dpi=500, bbox_inches='tight')
| 0 | 0 | 0 |
c8ca8a03f3df1b90e7b2cb76b7672aa11b991729 | 56 | py | Python | PI/ButtonCodes/__init__.py | HotShot0901/PI | 7e6fd0f68b4222e09ea825f27709ec5b1e51e928 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null | PI/ButtonCodes/__init__.py | HotShot0901/PI | 7e6fd0f68b4222e09ea825f27709ec5b1e51e928 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null | PI/ButtonCodes/__init__.py | HotShot0901/PI | 7e6fd0f68b4222e09ea825f27709ec5b1e51e928 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null | from .KeyCodes import *
from .MouseButtonCodes import *
| 18.666667 | 31 | 0.785714 | from .KeyCodes import *
from .MouseButtonCodes import *
| 0 | 0 | 0 |
679f5da77d443219a419e0989ed1854a3c205526 | 12,787 | py | Python | train_models/evaluation.py | WangStephen/DL-limited-angle-CT-reconstruction | f43c3fe806a2eee316dcbb26bddeb51c4f4a9f92 | [
"MIT"
] | 7 | 2019-11-07T11:33:28.000Z | 2021-04-01T07:43:15.000Z | train_models/evaluation.py | WangStephen/DL-limited-angle-CT-reconstruction | f43c3fe806a2eee316dcbb26bddeb51c4f4a9f92 | [
"MIT"
] | 1 | 2021-03-14T03:19:33.000Z | 2022-01-12T21:47:32.000Z | train_models/evaluation.py | WangStephen/DL-limited-angle-CT-reconstruction | f43c3fe806a2eee316dcbb26bddeb51c4f4a9f92 | [
"MIT"
] | 2 | 2020-04-03T05:58:16.000Z | 2021-01-06T10:24:55.000Z | """Evaluation
This script consists of evaluation functions needed
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import datetime
import tensorflow as tf
from tensorflow.python.tools import inspect_checkpoint as chkp
import load_data
from geometry_parameters import TEST_INDEX, RECONSTRUCT_PARA
def show_reconstruction(model, phantom_index):
    """
    show reconstructed CT as an animation scrolling through its slices

    Parameters
    ----------
    model : str
        which model's results to use (directory containing eval_recon/)
    phantom_index : int
        which CT to display
    """
    recon_dir = model + '/eval_recon/recon_' + str(phantom_index) + '.npy'
    recon = np.load(recon_dir)
    fig = plt.figure()
    imgs = []
    # one animation frame per slice along the first axis
    for i in range(recon.shape[0]):
        img = plt.imshow(recon[i, :, :], animated=True, cmap=plt.get_cmap('gist_gray'))
        imgs.append([img])
    # Keep a reference to the animation object: matplotlib only holds a weak
    # reference, so an unassigned ArtistAnimation can be garbage-collected
    # while the window is shown, which freezes the animation.
    anim = animation.ArtistAnimation(fig, imgs, interval=50, blit=True, repeat_delay=1000)
    plt.show()
def compare_reconstruction(model_one, model_two, phantom_index, slice_index):
    """
    Show the same CT slice reconstructed by two different models side by side.

    Parameters
    ----------
    model_one : str
        the first model's result to use
    model_two : str
        the second model's result to use
    phantom_index : int
        which CT to display
    slice_index : int
        which slice in the CT to display (1-based)
    """
    fig = plt.figure(figsize=plt.figaspect(0.5))
    # Load the requested slice for each model and draw it into its own panel.
    for position, model in enumerate((model_one, model_two), start=1):
        volume = np.load(model + '/eval_recon/recon_' + str(phantom_index) + '.npy')
        ct_slice = volume[slice_index - 1, :, :]
        axis = fig.add_subplot(1, 2, position)
        axis.imshow(ct_slice, cmap=plt.get_cmap('gist_gray'))
        axis.set_title('model: ' + model)
    plt.show()
def single_ct_normalize(input):
    """
    Min-max normalize one CT sample to [0, 1].

    Parameters
    ----------
    input : ndarray
        The input CT to normalize

    Returns
    -------
    ndarray
        a new array rescaled to [0, 1]; a constant input yields an all-zero
        array instead of the original code's division by zero
    """
    # 'hi'/'lo' instead of 'max'/'min' so the builtins are not shadowed.
    hi = np.max(input)
    lo = np.min(input)
    if hi == lo:
        # Degenerate (constant) volume: avoid 0/0 -> NaN.
        return np.zeros_like(input, dtype=float)
    return (input - lo) / (hi - lo)
def compare_reconstruction_with_fdk(model, phantom_index, slice_index):
    """
    Show one CT slice from the plain FDK reconstruction, from the given
    model, and from the ground truth next to each other.

    Parameters
    ----------
    model : str
        which model's results to use
    phantom_index : int
        which CT to display
    slice_index : int
        which slice in the CT to display (1-based)
    """
    row = slice_index - 1
    # FDK baseline and ground truth are stored un-normalized on disk.
    fdk = single_ct_normalize(
        np.load('../data_preprocessing/recon_145/recon_' + str(phantom_index) + '.npy'))
    prediction = np.load(model + '/eval_recon/recon_' + str(phantom_index) + '.npy')
    truth = single_ct_normalize(
        np.load('../data_preprocessing/recon_360/recon_' + str(phantom_index) + '.npy'))
    panels = [(fdk[row, :, :], 'pure_fdk'),
              (prediction[row, :, :], 'model: ' + model),
              (truth[row, :, :], 'ground truth')]
    fig = plt.figure(figsize=plt.figaspect(0.3))
    for position, (image, title) in enumerate(panels, start=1):
        axis = fig.add_subplot(1, 3, position)
        axis.imshow(image, cmap=plt.get_cmap('gist_gray'))
        axis.set_title(title)
    plt.show()
def calculate_ssim(predictions, gt_labels, max_val):
    """
    Mean SSIM between reconstructions and ground truth (TF1 graph mode).

    Parameters
    ----------
    predictions : ndarray
        the reconstructed results
    gt_labels : ndarray
        the ground truth
    max_val : float
        the value range

    Returns
    -------
    float
        the SSIM averaged over all samples
    """
    pred_in = tf.placeholder(tf.float32, shape=predictions.shape)
    label_in = tf.placeholder(tf.float32, shape=gt_labels.shape)
    # tf.image.ssim expects a trailing channel axis, hence the expand_dims.
    ssim_op = tf.image.ssim(tf.expand_dims(pred_in, 4),
                            tf.expand_dims(label_in, 4), max_val)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        per_sample = sess.run(ssim_op, feed_dict={pred_in: predictions,
                                                  label_in: gt_labels})
    return np.mean(per_sample)
def calculate_ms_ssim(predictions, gt_labels, max_val):
    """
    Mean multi-scale SSIM between reconstructions and ground truth
    (TF1 graph mode).

    Parameters
    ----------
    predictions : ndarray
        the reconstructed results
    gt_labels : ndarray
        the ground truth
    max_val : float
        the value range

    Returns
    -------
    float
        the MS-SSIM averaged over all samples
    """
    pred_in = tf.placeholder(tf.float32, shape=predictions.shape)
    label_in = tf.placeholder(tf.float32, shape=gt_labels.shape)
    # tf.image.ssim_multiscale expects a trailing channel axis.
    ms_ssim_op = tf.image.ssim_multiscale(tf.expand_dims(pred_in, 4),
                                          tf.expand_dims(label_in, 4), max_val)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        per_sample = sess.run(ms_ssim_op, feed_dict={pred_in: predictions,
                                                     label_in: gt_labels})
    return np.mean(per_sample)
def calculate_psnr(predictions, gt_labels, max_val):
    """
    Mean peak signal-to-noise ratio between reconstructions and ground truth
    (TF1 graph mode).

    Parameters
    ----------
    predictions : ndarray
        the reconstructed results
    gt_labels : ndarray
        the ground truth
    max_val : float
        the value range

    Returns
    -------
    float
        the PSNR averaged over all samples
    """
    pred_in = tf.placeholder(tf.float32, shape=predictions.shape)
    label_in = tf.placeholder(tf.float32, shape=gt_labels.shape)
    # tf.image.psnr expects a trailing channel axis, hence the expand_dims.
    psnr_op = tf.image.psnr(tf.expand_dims(pred_in, 4),
                            tf.expand_dims(label_in, 4), max_val)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        per_sample = sess.run(psnr_op, feed_dict={pred_in: predictions,
                                                  label_in: gt_labels})
    return np.mean(per_sample)
def normalize(input):
    """
    Min-max normalize every CT sample in a batch to [0, 1], in place.

    Parameters
    ----------
    input : ndarray
        batch of CT samples; the first axis indexes the samples

    Returns
    -------
    ndarray
        the same array object, each sample rescaled to [0, 1]
    """
    for idx in range(input.shape[0]):
        sample = input[idx, ::]
        lo = np.min(sample)
        hi = np.max(sample)
        input[idx, ::] = (sample - lo) / (hi - lo)
    return input
# ms-ssim, psnr, mse
def evaluate_on_metrics(model):
    """
    Evaluate a model's reconstructions on MSE, SSIM, MS-SSIM and PSNR,
    print the scores and append them to the model's metrics_result.txt.

    Parameters
    ----------
    model : str
        The model for evaluation
    """
    # ground-truth volumes, rescaled to [0, 1]
    _, labels = load_data.load_test_data()
    labels = normalize(labels)
    # collect the model's reconstruction for every test phantom
    recon_phantoms = np.empty(labels.shape)
    for i in range(recon_phantoms.shape[0]):
        recon_phantoms[i, :, :, :] = np.load(
            model + '/eval_recon/recon_' + str(TEST_INDEX[i]) + '.npy')
    max_val = 1.0
    mse = np.mean(np.square(recon_phantoms - labels))
    ssim = calculate_ssim(recon_phantoms, labels, max_val)
    ms_ssim = calculate_ms_ssim(recon_phantoms, labels, max_val)
    psnr = calculate_psnr(recon_phantoms, labels, max_val)
    # print the results
    for metric, value in (('mse', mse), ('ssim', ssim),
                          ('ms-ssim', ms_ssim), ('psnr', psnr)):
        print(metric + ' value: ', str(value))
    # append the metrics to the model's report file
    with open(model + '/eval_result/metrics_result.txt', 'a+') as report:
        report.write("Model: {0}, Date: {1:%Y-%m-%d_%H:%M:%S} \nMSE: {2:3.8f} \nSSIM: {3:3.8f} \nMS-SSIM: {4:3.8f} \nPSNR: {5:3.8f}\n\n".format(
            model, datetime.datetime.now(), mse, ssim, ms_ssim, psnr))
def check_stored_sess_var(sess_file, var_name):
    """
    Print variables stored in a TensorFlow checkpoint.

    Parameters
    ----------
    sess_file : str
        the stored session file (.ckpt)
    var_name : str
        name of the variable to print; an empty string prints every tensor
    """
    # An empty name means "dump every tensor in the checkpoint"; otherwise
    # only the requested tensor is printed.
    dump_all = (var_name == '')
    chkp.print_tensors_in_checkpoint_file(sess_file, tensor_name=var_name,
                                          all_tensors=dump_all)
def eval_pure_fdk():
    """
    Evaluate the plain FDK reconstructions on MSE, SSIM, MS-SSIM and PSNR,
    print the scores and append them to pure_fdk_model's metrics_result.txt.
    """
    # ground-truth volumes, rescaled to [0, 1]
    _, labels = load_data.load_test_data()
    labels = normalize(labels)
    # collect the FDK reconstruction for every test phantom
    recon_phantoms = np.empty(labels.shape)
    for i in range(recon_phantoms.shape[0]):
        recon_phantoms[i, :, :, :] = np.load(
            '../data_preprocessing/recon_145/recon_' + str(TEST_INDEX[i]) + '.npy')
    # unlike the network outputs, the raw FDK volumes still need rescaling
    recon_phantoms = normalize(recon_phantoms)
    max_val = 1.0
    mse = np.mean(np.square(recon_phantoms - labels))
    ssim = calculate_ssim(recon_phantoms, labels, max_val)
    ms_ssim = calculate_ms_ssim(recon_phantoms, labels, max_val)
    psnr = calculate_psnr(recon_phantoms, labels, max_val)
    # print the results
    for metric, value in (('mse', mse), ('ssim', ssim),
                          ('ms-ssim', ms_ssim), ('psnr', psnr)):
        print(metric + ' value: ', str(value))
    # append the metrics to the report file
    with open('pure_fdk_model/eval_result/metrics_result.txt', 'a+') as report:
        report.write(
            "Model: {0}, Date: {1:%Y-%m-%d_%H:%M:%S} \nMSE: {2:3.8f} \nSSIM: {3:3.8f} \nMS-SSIM: {4:3.8f} \nPSNR: {5:3.8f}\n\n".format(
                'pure_fdk_model', datetime.datetime.now(), mse, ssim, ms_ssim, psnr))
def convert_to_raw_bin(model):
    """
    Convert the model's reconstructed .npy volumes to raw float32 binaries.

    Parameters
    ----------
    model : str
        The model for which results to convert
    """
    out_dir = model + '/eval_recon/'
    shape = RECONSTRUCT_PARA['volume_shape']
    # filename suffix encodes dtype and width x height x depth of the volume
    suffix = ('_float32_' + str(shape[1]) + 'x' + str(shape[2]) + 'x' +
              str(shape[0]) + '_bin')
    for index in TEST_INDEX:
        volume = np.load(out_dir + 'recon_' + str(index) + '.npy')
        volume.astype('float32').tofile(out_dir + 'recon_' + str(index) + suffix)
if __name__ == "__main__":
    # Ad-hoc driver: uncomment the calls for the models/plots/metrics needed.
    ###########################################
    # show reconstructed result CT
    show_reconstruction('fdk_nn_model', TEST_INDEX[1])
    # show_reconstruction('cnn_projection_model', TEST_INDEX[1])
    # show_reconstruction('cnn_reconstruction_model', TEST_INDEX[1])
    # show_reconstruction('dense_cnn_reconstruction_model', TEST_INDEX[1])
    # show_reconstruction('unet_projection_model', TEST_INDEX[1])
    # show_reconstruction('unet_reconstruction_model', TEST_INDEX[1])
    # show_reconstruction('unet_proposed_reconstruction_model', TEST_INDEX[1])
    # show_reconstruction('combined_projection_reconstruction_model', TEST_INDEX[1])
    ###########################################
    # Evaluation on each model
    # evaluate_on_metrics('fdk_nn_model')
    # evaluate_on_metrics('cnn_projection_model')
    # evaluate_on_metrics('cnn_reconstruction_model')
    # evaluate_on_metrics('dense_cnn_reconstruction_model')
    # evaluate_on_metrics('unet_projection_model')
    # evaluate_on_metrics('unet_reconstruction_model')
    # evaluate_on_metrics('unet_proposed_reconstruction_model')
    # evaluate_on_metrics('combined_projection_reconstruction_model')
    # eval_pure_fdk()
    ###########################################
    # compare_reconstruction results
    # compare_reconstruction('cnn_projection_model', 'unet_projection_model', TEST_INDEX[1], 75)
    # compare_reconstruction_with_fdk('combined_projection_reconstruction_model', TEST_INDEX[1], 75)
    ###########################################
    # generate raw binary reconstruction files
# convert_to_raw_bin('combined_projection_reconstruction_model') | 28.929864 | 135 | 0.635098 | """Evaluation
This script consists of evaluation functions needed
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import datetime
import tensorflow as tf
from tensorflow.python.tools import inspect_checkpoint as chkp
import load_data
from geometry_parameters import TEST_INDEX, RECONSTRUCT_PARA
def show_reconstruction(model, phantom_index):
"""
show reconstructed CT
Parameters
----------
model : str
which model's results to use
phantom_index : int
which CT to display
"""
recon_dir = model + '/eval_recon/recon_' + str(phantom_index) + '.npy'
recon = np.load(recon_dir)
fig = plt.figure()
imgs = []
for i in range(recon.shape[0]):
img = plt.imshow(recon[i, :, :], animated=True, cmap=plt.get_cmap('gist_gray'))
imgs.append([img])
animation.ArtistAnimation(fig, imgs, interval=50, blit=True, repeat_delay=1000)
plt.show()
def compare_reconstruction(model_one, model_two, phantom_index, slice_index):
"""
compared reconstructed CT results from different two models
Parameters
----------
model_one : str
the first model's result to use
model_two : str
the second model's result to use
phantom_index : int
which CT to display
slice_index : int
which slice in the CT to display
"""
recon_one = model_one + '/eval_recon/recon_' + str(phantom_index) + '.npy'
recon_one = np.load(recon_one)
recon_one = recon_one[slice_index-1,:,:]
recon_two = model_two + '/eval_recon/recon_' + str(phantom_index) + '.npy'
recon_two = np.load(recon_two)
recon_two = recon_two[slice_index-1,:,:]
fig = plt.figure(figsize=plt.figaspect(0.5))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(recon_one, cmap=plt.get_cmap('gist_gray'))
ax.set_title('model: ' + model_one)
ax = fig.add_subplot(1, 2, 2)
ax.imshow(recon_two, cmap=plt.get_cmap('gist_gray'))
ax.set_title('model: ' + model_two)
plt.show()
def single_ct_normalize(input):
"""
normalize one CT sample to [0, 1]
Parameters
----------
input : ndarray
The input CT to normalize
Returns
-------
ndarray
the normalized CT
"""
max = np.max(input)
min = np.min(input)
input = (input - min) / (max - min)
return input
def compare_reconstruction_with_fdk(model, phantom_index, slice_index):
"""
compare reconstructed CT results with the conventional FDK and the ground truth
Parameters
----------
model : str
which model's results to use
phantom_index : int
which CT to display
slice_index : int
which slice in the CT to display
"""
recon_one = '../data_preprocessing/recon_145/recon_' + str(phantom_index) + '.npy'
recon_one = single_ct_normalize(np.load(recon_one))
recon_one = recon_one[slice_index - 1, :, :]
recon_two = model + '/eval_recon/recon_' + str(phantom_index) + '.npy'
recon_two = np.load(recon_two)
recon_two = recon_two[slice_index - 1, :, :]
recon_three = '../data_preprocessing/recon_360/recon_' + str(phantom_index) + '.npy'
recon_three = single_ct_normalize(np.load(recon_three))
recon_three = recon_three[slice_index - 1, :, :]
fig = plt.figure(figsize=plt.figaspect(0.3))
ax = fig.add_subplot(1, 3, 1)
ax.imshow(recon_one, cmap=plt.get_cmap('gist_gray'))
ax.set_title('pure_fdk')
ax = fig.add_subplot(1, 3, 2)
ax.imshow(recon_two, cmap=plt.get_cmap('gist_gray'))
ax.set_title('model: ' + model)
ax = fig.add_subplot(1, 3, 3)
ax.imshow(recon_three, cmap=plt.get_cmap('gist_gray'))
ax.set_title('ground truth')
plt.show()
def calculate_ssim(predictions, gt_labels, max_val):
"""
ssim calculation
Parameters
----------
predictions : ndarray
the reconstructed results
gt_labels : ndarray
the ground truth
max_val : float
the value range
"""
tf_predictions = tf.placeholder(tf.float32, shape=predictions.shape)
tf_gt_labels = tf.placeholder(tf.float32, shape=gt_labels.shape)
tf_ssim_value = tf.image.ssim(tf.expand_dims(tf_predictions, 4),
tf.expand_dims(tf_gt_labels, 4), max_val)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
ssim = sess.run(tf_ssim_value, feed_dict={tf_predictions: predictions,
tf_gt_labels: gt_labels})
return np.mean(ssim)
def calculate_ms_ssim(predictions, gt_labels, max_val):
"""
ms-ssim calculation
Parameters
----------
predictions : ndarray
the reconstructed results
gt_labels : ndarray
the ground truth
max_val : float
the value range
"""
tf_predictions = tf.placeholder(tf.float32, shape=predictions.shape)
tf_gt_labels = tf.placeholder(tf.float32, shape=gt_labels.shape)
tf_ms_ssim_value = tf.image.ssim_multiscale(tf.expand_dims(tf_predictions, 4),
tf.expand_dims(tf_gt_labels, 4), max_val)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
ms_ssim = sess.run(tf_ms_ssim_value, feed_dict={tf_predictions: predictions,
tf_gt_labels: gt_labels})
return np.mean(ms_ssim)
def calculate_psnr(predictions, gt_labels, max_val):
"""
psnr calculation
Parameters
----------
predictions : ndarray
the reconstructed results
gt_labels : ndarray
the ground truth
max_val : float
the value range
"""
tf_predictions = tf.placeholder(tf.float32, shape=predictions.shape)
tf_gt_labels = tf.placeholder(tf.float32, shape=gt_labels.shape)
tf_psnr_value = tf.image.psnr(tf.expand_dims(tf_predictions, 4),
tf.expand_dims(tf_gt_labels, 4), max_val)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
psnr = sess.run(tf_psnr_value, feed_dict={tf_predictions: predictions,
tf_gt_labels: gt_labels})
return np.mean(psnr)
def normalize(input):
"""
normalize more than one CT sample to [0, 1]
Parameters
----------
input : ndarray
The input CT samples to normalize
Returns
-------
ndarray
the normalized CT results
"""
for i in range(input.shape[0]):
min_bound = np.min(input[i,::])
max_bound = np.max(input[i,::])
input[i,::] = (input[i,::] - min_bound) / (max_bound - min_bound)
return input
# ms-ssim, psnr, mse
def evaluate_on_metrics(model):
"""
do evaluation on mse, ssim, ms-ssim and psnr
Parameters
----------
model : str
The model for evaluation
"""
# get the labels
_, labels = load_data.load_test_data()
labels = normalize(labels)
# load the recons on the model
recon_phantoms = np.empty(labels.shape)
for i in range(recon_phantoms.shape[0]):
recon_file = model + '/eval_recon/recon_' + str(TEST_INDEX[i]) + '.npy'
recon_phantoms[i,:,:,:] = np.load(recon_file)
# MSE
mse = np.mean(np.square(recon_phantoms - labels))
#
max_val = 1.0
# SSIM
ssim = calculate_ssim(recon_phantoms, labels, max_val)
# MS-SSIM
ms_ssim = calculate_ms_ssim(recon_phantoms, labels, max_val)
# Peak Signal-to-Noise Ratio
psnr = calculate_psnr(recon_phantoms, labels, max_val)
# print the results
print('mse value: ', str(mse))
print('ssim value: ', str(ssim))
print('ms-ssim value: ', str(ms_ssim))
print('psnr value: ', str(psnr))
# save the metrics results
f = open(model + '/eval_result/metrics_result.txt', 'a+')
f.write("Model: {0}, Date: {1:%Y-%m-%d_%H:%M:%S} \nMSE: {2:3.8f} \nSSIM: {3:3.8f} \nMS-SSIM: {4:3.8f} \nPSNR: {5:3.8f}\n\n".format(
model, datetime.datetime.now(), mse, ssim, ms_ssim, psnr))
f.close()
def check_stored_sess_var(sess_file, var_name):
"""
display variable results for trained models in the stored session
Parameters
----------
sess_file : str
the stored session file
var_name : str
the variable to see
"""
if var_name == '':
# print all tensors in checkpoint file (.ckpt)
chkp.print_tensors_in_checkpoint_file(sess_file, tensor_name='', all_tensors=True)
else:
chkp.print_tensors_in_checkpoint_file(sess_file, tensor_name=var_name, all_tensors=False)
def eval_pure_fdk():
"""
do evaluation on mse, ssim, ms-ssim and psnr for the conventional FDK algorithm
"""
# get the labels
_, labels = load_data.load_test_data()
labels = normalize(labels)
# load the recons
recon_phantoms = np.empty(labels.shape)
for i in range(recon_phantoms.shape[0]):
recon_file = '../data_preprocessing/recon_145/recon_' + str(TEST_INDEX[i]) + '.npy'
recon_phantoms[i, :, :, :] = np.load(recon_file)
recon_phantoms = normalize(recon_phantoms)
# MSE
mse = np.mean(np.square(recon_phantoms - labels))
#
max_val = 1.0
# SSIM
ssim = calculate_ssim(recon_phantoms, labels, max_val)
# MS-SSIM
ms_ssim = calculate_ms_ssim(recon_phantoms, labels, max_val)
# Peak Signal-to-Noise Ratio
psnr = calculate_psnr(recon_phantoms, labels, max_val)
# print the results
print('mse value: ', str(mse))
print('ssim value: ', str(ssim))
print('ms-ssim value: ', str(ms_ssim))
print('psnr value: ', str(psnr))
# save the metrics results
f = open('pure_fdk_model/eval_result/metrics_result.txt', 'a+')
f.write(
"Model: {0}, Date: {1:%Y-%m-%d_%H:%M:%S} \nMSE: {2:3.8f} \nSSIM: {3:3.8f} \nMS-SSIM: {4:3.8f} \nPSNR: {5:3.8f}\n\n".format(
'pure_fdk_model', datetime.datetime.now(), mse, ssim, ms_ssim, psnr))
f.close()
def convert_to_raw_bin(model):
"""
convert the reconstructed results of the model to raw data file
Parameters
----------
model : str
The model for which results to convert
"""
dir = model + '/eval_recon/'
for i in range(len(TEST_INDEX)):
recon_file = dir + 'recon_' + str(TEST_INDEX[i]) + '.npy'
recon = np.load(recon_file)
recon.astype('float32').tofile(dir + 'recon_' + str(TEST_INDEX[i]) + '_float32_' +
str(RECONSTRUCT_PARA['volume_shape'][1]) + 'x' +
str(RECONSTRUCT_PARA['volume_shape'][2]) + 'x' +
str(RECONSTRUCT_PARA['volume_shape'][0]) + '_bin')
if __name__ == "__main__":
###########################################
# show reconstructed result CT
show_reconstruction('fdk_nn_model', TEST_INDEX[1])
# show_reconstruction('cnn_projection_model', TEST_INDEX[1])
# show_reconstruction('cnn_reconstruction_model', TEST_INDEX[1])
# show_reconstruction('dense_cnn_reconstruction_model', TEST_INDEX[1])
# show_reconstruction('unet_projection_model', TEST_INDEX[1])
# show_reconstruction('unet_reconstruction_model', TEST_INDEX[1])
# show_reconstruction('unet_proposed_reconstruction_model', TEST_INDEX[1])
# show_reconstruction('combined_projection_reconstruction_model', TEST_INDEX[1])
###########################################
# Evaluation on each model
# evaluate_on_metrics('fdk_nn_model')
# evaluate_on_metrics('cnn_projection_model')
# evaluate_on_metrics('cnn_reconstruction_model')
# evaluate_on_metrics('dense_cnn_reconstruction_model')
# evaluate_on_metrics('unet_projection_model')
# evaluate_on_metrics('unet_reconstruction_model')
# evaluate_on_metrics('unet_proposed_reconstruction_model')
# evaluate_on_metrics('combined_projection_reconstruction_model')
# eval_pure_fdk()
###########################################
# compare_reconstruction results
# compare_reconstruction('cnn_projection_model', 'unet_projection_model', TEST_INDEX[1], 75)
# compare_reconstruction_with_fdk('combined_projection_reconstruction_model', TEST_INDEX[1], 75)
###########################################
# generate raw binary reconstruction files
# convert_to_raw_bin('combined_projection_reconstruction_model') | 0 | 0 | 0 |
4943f5346adc95d886e6def13a429e87d873fbf5 | 407 | py | Python | object_oriented_programming/exercise_online_shopping/main.py | jepster/python_advanced_techniques | f4b0e0dda7b66be55f650f9f902e735d3f5a9f64 | [
"MIT"
] | null | null | null | object_oriented_programming/exercise_online_shopping/main.py | jepster/python_advanced_techniques | f4b0e0dda7b66be55f650f9f902e735d3f5a9f64 | [
"MIT"
] | null | null | null | object_oriented_programming/exercise_online_shopping/main.py | jepster/python_advanced_techniques | f4b0e0dda7b66be55f650f9f902e735d3f5a9f64 | [
"MIT"
] | null | null | null | from user import User
brianna = User(1, 'Brianna')
mary = User(2, 'Mary')
# Brianna lists a product; a freshly listed product is available.
keyboard = brianna.sell_product('Keyboard', 'A nice mechanical keyboard', 100)
print(keyboard.availability) # => True
# Mary buys it, which flips the availability off.
mary.buy_product(keyboard)
print(keyboard.availability) # => False
review = mary.write_review('This is the best keyboard ever!', keyboard)
# NOTE(review): this bare expression is evaluated and discarded — it verifies
# nothing at runtime; an assert or print was probably intended.
review in mary.reviews # => True
review in keyboard.reviews # => True | 31.307692 | 78 | 0.732187 | from user import User
brianna = User(1, 'Brianna')
mary = User(2, 'Mary')
keyboard = brianna.sell_product('Keyboard', 'A nice mechanical keyboard', 100)
print(keyboard.availability) # => True
mary.buy_product(keyboard)
print(keyboard.availability) # => False
review = mary.write_review('This is the best keyboard ever!', keyboard)
review in mary.reviews # => True
review in keyboard.reviews # => True | 0 | 0 | 0 |
f955de0f4e7b4a1551cb812c5d39fa4c25a310b3 | 3,125 | py | Python | wins/factories.py | uktrade/export-wins-data | 46caa444812e89abe504bec8c15aa7f7ba1a247e | [
"MIT"
] | 5 | 2016-09-12T12:52:45.000Z | 2020-03-24T14:43:13.000Z | wins/factories.py | uktrade/export-wins-data | 46caa444812e89abe504bec8c15aa7f7ba1a247e | [
"MIT"
] | 435 | 2016-10-18T12:51:39.000Z | 2021-06-09T17:22:08.000Z | wins/factories.py | uktrade/export-wins-data | 46caa444812e89abe504bec8c15aa7f7ba1a247e | [
"MIT"
] | 2 | 2016-12-06T10:37:21.000Z | 2017-02-22T17:27:43.000Z | import datetime
import factory
from factory.fuzzy import FuzzyChoice
from wins.models import (
Advisor,
Breakdown,
CustomerResponse,
HVC,
Notification,
Win,
)
from wins.constants import BUSINESS_POTENTIAL, SECTORS, WIN_TYPES
from users.factories import UserFactory
# Inverse lookup of WIN_TYPES: maps a win-type label to its numeric code
# (assumes WIN_TYPES holds (code, label) pairs — matches the
# WIN_TYPES_DICT['Export'] usage elsewhere in this module).
WIN_TYPES_DICT = {y: x for x, y in WIN_TYPES}
| 23.496241 | 83 | 0.71008 | import datetime
import factory
from factory.fuzzy import FuzzyChoice
from wins.models import (
Advisor,
Breakdown,
CustomerResponse,
HVC,
Notification,
Win,
)
from wins.constants import BUSINESS_POTENTIAL, SECTORS, WIN_TYPES
from users.factories import UserFactory
class WinFactory(factory.DjangoModelFactory):
    """Factory for Win model instances with fixed, test-friendly defaults."""
    class Meta(object):
        model = Win
    user = factory.SubFactory(UserFactory)
    company_name = "company name"
    cdms_reference = "cdms reference"
    customer_name = "customer name"
    customer_job_title = "customer job title"
    customer_email_address = "customer@email.address"
    customer_location = 1
    description = "description"
    type = 1
    date = datetime.datetime(2016, 5, 25)
    country = "CA"
    business_potential = BUSINESS_POTENTIAL.MEDIUM_OR_HIGH.value
    total_expected_export_value = 100000
    goods_vs_services = 1
    total_expected_non_export_value = 2300
    total_expected_odi_value = 6400
    # sector is drawn at random from this fixed subset of sector ids
    sector = factory.Faker('random_element', elements=[2, 14, 15, 33, 35, 36, 115])
    is_prosperity_fund_related = True
    hvo_programme = "AER-01"
    has_hvo_specialist_involvement = True
    is_e_exported = True
    type_of_support_1 = 1
    is_personally_confirmed = True
    is_line_manager_confirmed = True
    lead_officer_name = "lead officer name"
    line_manager_name = "line manager name"
    team_type = "team"
    hq_team = "team:1"
    complete = False
# Inverse lookup of WIN_TYPES: label -> numeric code, used below by
# BreakdownFactory (e.g. WIN_TYPES_DICT['Export']).
WIN_TYPES_DICT = {y: x for x, y in WIN_TYPES}
class HVCFactory(factory.DjangoModelFactory):
    """Factory for HVC campaigns with sequential ids like 'E001', 'E002', ..."""
    class Meta:
        model = HVC
    campaign_id = factory.Sequence(lambda n: 'E%03d' % (n + 1))
    # the name is derived from whatever campaign_id was generated
    name = factory.LazyAttribute(lambda o: 'HVC: {}'.format(o.campaign_id))
    financial_year = FuzzyChoice([16, 17])
class BreakdownFactory(factory.DjangoModelFactory):
    """Factory for yearly Breakdown records, defaulting to an Export type."""
    class Meta:
        model = Breakdown
    type = WIN_TYPES_DICT['Export']
    year = 2016
    value = 182818284
class AdvisorFactory(factory.DjangoModelFactory):
    """Factory for Advisor records with a fixed name and team."""
    class Meta:
        model = Advisor
    name = 'Billy Bragg'
    team_type = 'dso'
    hq_team = 'team:1'
class CustomerResponseFactory(factory.DjangoModelFactory):
    """Factory for CustomerResponse survey answers with fixed ratings."""
    class Meta:
        model = CustomerResponse
    our_support = 1
    access_to_contacts = 2
    access_to_information = 3
    improved_profile = 4
    gained_confidence = 5
    developed_relationships = 1
    overcame_problem = 2
    involved_state_enterprise = True
    interventions_were_prerequisite = False
    support_improved_speed = True
    expected_portion_without_help = 6
    last_export = 2
    company_was_at_risk_of_not_exporting = False
    has_explicit_export_plans = True
    has_enabled_expansion_into_new_market = False
    has_increased_exports_as_percent_of_turnover = True
    has_enabled_expansion_into_existing_market = False
    # the only randomized field: agreement with the win is a coin flip
    agree_with_win = FuzzyChoice([True, False])
    case_study_willing = False
    name = 'Cakes'
    comments = 'Good work'
    marketing_source = 1
class NotificationFactory(factory.DjangoModelFactory):
    """Factory for Notification records with a fixed recipient and type."""
    class Meta:
        model = Notification
    recipient = 'a@b.com'
    type = 'c'
| 0 | 2,642 | 138 |
75d8ba5a4947dfff19c704082544896b55e0089a | 75 | py | Python | Common/Python/Data-Structures/Trees/__init__.py | MattiKemp/Data-Structures-And-Algorithms | 37a4eb4f092f5a058643ef5ac302fe16d97f84dc | [
"Unlicense"
] | null | null | null | Common/Python/Data-Structures/Trees/__init__.py | MattiKemp/Data-Structures-And-Algorithms | 37a4eb4f092f5a058643ef5ac302fe16d97f84dc | [
"Unlicense"
] | null | null | null | Common/Python/Data-Structures/Trees/__init__.py | MattiKemp/Data-Structures-And-Algorithms | 37a4eb4f092f5a058643ef5ac302fe16d97f84dc | [
"Unlicense"
] | null | null | null | from . import BinarySearchTree
# Expose the tree submodules at package level.
from . import BinaryTree
from . import Tree
| 18.75 | 30 | 0.8 | from . import BinarySearchTree
from . import BinaryTree
from . import Tree
| 0 | 0 | 0 |
925e82a012459a533ccdda2bba698f54b3c8fa68 | 205 | py | Python | flyingpigeon/tests/test_cdo.py | Zeitsperre/flyingpigeon | 678370bf428af7ffe11ee79be3b8a89c73215e5e | [
"Apache-2.0"
] | 1 | 2016-12-04T18:01:49.000Z | 2016-12-04T18:01:49.000Z | flyingpigeon/tests/test_cdo.py | Zeitsperre/flyingpigeon | 678370bf428af7ffe11ee79be3b8a89c73215e5e | [
"Apache-2.0"
] | 13 | 2017-03-16T15:44:21.000Z | 2019-08-19T16:56:04.000Z | flyingpigeon/tests/test_cdo.py | Zeitsperre/flyingpigeon | 678370bf428af7ffe11ee79be3b8a89c73215e5e | [
"Apache-2.0"
] | null | null | null | import pytest
from .common import TESTDATA
from flyingpigeon.utils import local_path
from cdo import Cdo
cdo = Cdo()
| 17.083333 | 65 | 0.77561 | import pytest
from .common import TESTDATA
from flyingpigeon.utils import local_path
from cdo import Cdo
# module-level CDO wrapper shared by the tests below
cdo = Cdo()
def test_sinfo():
    """Smoke test: cdo sinfo must run on the CMIP5 tasmax sample file."""
    cdo.sinfo(input=local_path(TESTDATA['cmip5_tasmax_2006_nc']))
| 62 | 0 | 23 |
481cea70c60a9294257dfd00b7d7c5217cf84b4b | 4,758 | py | Python | tests/test_TransactionBook.py | LukHad/AccountBook | 8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8 | [
"MIT"
] | null | null | null | tests/test_TransactionBook.py | LukHad/AccountBook | 8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8 | [
"MIT"
] | null | null | null | tests/test_TransactionBook.py | LukHad/AccountBook | 8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8 | [
"MIT"
] | null | null | null | from datetime import datetime
import os
import nose
import nose.tools
from TransactionBook.model.TransactionBook import *
def save_load(tb):
    """
    Helper function which does save and load the data: the book is written
    to a temporary CSV, read into a fresh TransactionBook and the file is
    removed again.
    :param tb: Transaction Book
    :return tb2: Transaction Book after save load operation
    """
    filename = "dummy_database.csv"
    tb.save_as(filename)
    tb2 = TransactionBook()
    tb2.load_from(filename)
    os.remove(filename)
    return tb2
if __name__ == '__main__':
    # Plain script entry point: run every test once without a test runner.
    test_populate_list_from_data()
    test_filter_date()
    test_account_balance()
    test_save_load()
    test_pivot_category_pie()
    test_years()
    test_total_balance()
    test_pivot_monthly_trend()
    test_delete_transaction()
| 34.729927 | 100 | 0.678857 | from datetime import datetime
import os
import nose
import nose.tools
from TransactionBook.model.TransactionBook import *
def dummy_transactions():
    # Fixture: a TransactionBook with five transactions across 2017/2018.
    # The literal dates and amounts are load-bearing — the balance, filter
    # and pivot tests assert against exactly these values.
    tb = TransactionBook()
    tb.new_transaction("01.07.2017", "Account 1", "My first transaction", 1000, "Income")
    tb.new_transaction("11.08.2017", "Account 1", "Cinema", -17, "Entertainment")
    tb.new_transaction("24.12.2017", "Account 2", "Bread and Milk", -5.0, "Food")
    tb.new_transaction("03.02.2018", "Account 1", "Fuel", -30, "Mobility")
    tb.new_transaction("01.12.2018", "Account 1", "Netflix", -11.95, "Entertainment")
    return tb
def dummy_transactions_2():
    # Fixture: a larger TransactionBook spanning 2018/2019 with several Food
    # entries; the year-scoped pivot tests assert against these exact values.
    tb = TransactionBook()
    tb.new_transaction("01.07.2018", "Account 1", "My first transaction", 1000, "Income")
    tb.new_transaction("11.08.2018", "Account 1", "Cinema", -17, "Entertainment")
    tb.new_transaction("24.12.2019", "Account 2", "Bread and Milk", -5.0, "Food")
    tb.new_transaction("03.02.2019", "Account 1", "Fuel", -30, "Mobility")
    tb.new_transaction("01.12.2019", "Account 1", "Netflix", -11.95, "Entertainment")
    tb.new_transaction("06.01.2019", "Account 2", "Sugar", -0.99, "Food")
    tb.new_transaction("13.05.2019", "Account 2", "Strawberries", -6.49, "Food")
    tb.new_transaction("17.09.2019", "Account 2", "Cheese", -5.0, "Food")
    return tb
def save_load(tb):
    """
    Helper function which does save and load the data: the book is written
    to a temporary CSV, read into a fresh TransactionBook and the file is
    removed again.
    :param tb: Transaction Book
    :return tb2: Transaction Book after save load operation
    """
    filename = "dummy_database.csv"
    tb.save_as(filename)
    tb2 = TransactionBook()
    tb2.load_from(filename)
    os.remove(filename)
    return tb2
def test_account_balance(save_load_test=False):
    """Account 1 sums to 1000 - 17 - 30 - 11.95 = 941.05; Account 2 to -5.

    :param save_load_test: when True, round-trip the book through CSV first.
    """
    tb = dummy_transactions()
    if save_load_test:
        tb = save_load(tb)
        err_message = "Method account_balance failed after save and load"
    else:
        err_message = "Method account_balance failed"
    nose.tools.ok_(tb.account_balance("Account 1", tb.get_data()) == 941.05, err_message)
    nose.tools.ok_(tb.account_balance("Account 2", tb.get_data()) == -5, err_message)
def test_filter_date(save_load_test=False):
    """Filtering [01.09.2017, 01.03.2018] must keep exactly the two
    transactions dated 24.12.2017 and 03.02.2018, in that order.

    :param save_load_test: when True, round-trip the book through CSV first.
    """
    tb = dummy_transactions()
    if save_load_test:
        tb = save_load(tb)
        err_message = "Method test_filter_date failed after save and load"
    else:
        err_message = "Method test_filter_date failed"
    from_date = datetime.strptime("01.09.2017", tb.DATE_TIME_FORMAT)
    to_date = datetime.strptime("01.03.2018", tb.DATE_TIME_FORMAT)
    df_filtered = tb.filter_date(from_date, to_date)
    # reset_index so the surviving rows can be addressed as positions 0 and 1
    df_filtered = df_filtered.reset_index()
    ass_cond = (df_filtered[tb.DATE][0] == datetime.strptime("24.12.2017", tb.DATE_TIME_FORMAT) and
                df_filtered[tb.DATE][1] == datetime.strptime("03.02.2018", tb.DATE_TIME_FORMAT))
    nose.tools.ok_(ass_cond, err_message)
def test_save_load():
    """Re-run the date-filter and balance checks after a CSV round trip."""
    test_filter_date(save_load_test=True)
    test_account_balance(save_load_test=True)
def test_populate_list_from_data():
    """Categories/accounts lists must contain exactly the fixture's values."""
    tb = dummy_transactions()
    tb.populate_lists_from_data()
    exp_cat = ['Income', 'Entertainment', 'Food', 'Mobility']
    exp_acc = ['Account 1', 'Account 2']
    # membership + equal length == set equality without assuming an order
    ass_cond_cat = all(x in tb.categories for x in exp_cat) and (len(tb.categories) == len(exp_cat))
    ass_cond_acc = all(x in tb.accounts for x in exp_acc) and (len(tb.accounts) == len(exp_acc))
    nose.tools.ok_(ass_cond_cat and ass_cond_acc, "populate_lists_from_data failed")
def test_pivot_monthly_trend():
    """Per-month totals: -30 in February and -11.95 in December.

    NOTE(review): the expected values match the fixture's 2018 rows (Fuel,
    Netflix); presumably the pivot covers the latest year -- TODO confirm.
    """
    tb = dummy_transactions()
    _, result = tb.pivot_monthly_trend(tb.get_data())
    nose.tools.ok_(result == [0, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, -11.95])
def test_pivot_category_pie():
    """Category pie for 2019 rows: Mobility -30, Food -17.48, Entertainment -11.95.

    Fix: removed the unused local ``year = 2019`` (the year was already
    hard-coded in the two date bounds below).
    """
    tb = dummy_transactions_2()
    df = tb.get_data()
    # restrict to calendar year 2019
    df = df.loc[df[tb.DATE] >= datetime(2019, 1, 1)]
    df = df.loc[df[tb.DATE] <= datetime(2019, 12, 31)]
    cat, result = tb.pivot_category_pie(df)
    nose.tools.ok_(result == [-30, -17.48, -11.95] and cat == ['Mobility', 'Food', 'Entertainment'])
def test_years():
    """The fixture spans exactly the years 2017 and 2018."""
    tb = dummy_transactions()
    nose.tools.ok_(tb.years() == [2017, 2018])
def test_total_balance():
    """Total of all five fixture rows: 1000 - 17 - 5 - 30 - 11.95 = 936.05."""
    tb = dummy_transactions()
    nose.tools.ok_(tb.total_balance(tb.get_data()) == 936.05)
def test_delete_transaction():
    """Deleting rows must add their (negative) amounts back to the total."""
    tb = dummy_transactions()
    nose.tools.ok_(tb.total_balance(tb.get_data()) == 936.05)
    # Row 2 is "Bread and Milk" (-5.0): total rises to 941.05.
    tb.delete_transaction(2)
    nose.tools.ok_(tb.total_balance(tb.get_data()) == 941.05)
    # Row 1 is "Cinema" (-17): total rises to 958.05.
    tb.delete_transaction(1)
    nose.tools.ok_(tb.total_balance(tb.get_data()) == 958.05)
if __name__ == '__main__':
    # Ad-hoc runner: execute every test directly, no pytest/nose runner needed.
    test_populate_list_from_data()
    test_filter_date()
    test_account_balance()
    test_save_load()
    test_pivot_category_pie()
    test_years()
    test_total_balance()
    test_pivot_monthly_trend()
    test_delete_transaction()
| 3,765 | 0 | 253 |
36f627215083cec554625dd2e5e80318d8b62864 | 782 | py | Python | tests/builders.py | Spairet/nip | c37beede2709fee68663eee76e7a63a36aae03da | [
"MIT"
] | 13 | 2021-06-17T10:50:13.000Z | 2022-03-26T14:54:26.000Z | tests/builders.py | Spairet/nip | c37beede2709fee68663eee76e7a63a36aae03da | [
"MIT"
] | 2 | 2021-07-09T08:59:54.000Z | 2021-07-21T12:22:59.000Z | tests/builders.py | Spairet/nip | c37beede2709fee68663eee76e7a63a36aae03da | [
"MIT"
] | 1 | 2021-07-26T17:31:38.000Z | 2021-07-26T17:31:38.000Z | from nip import nip, dumps
@nip
@nip("myfunc")
@nip
| 17 | 58 | 0.589514 | from nip import nip, dumps
@nip
class SimpleClass:
    """Minimal nip-registered class holding a single name."""
    def __init__(self, name: str):
        self.name = name
    @nip("print_method")
    def print(self):  # shadows the builtin `print`; registered as "print_method"
        """Print the stored name and return a fixed sentinel value."""
        print(self.name)
        return 312983
@nip("myfunc")
def MySecretFunction(a: int, b: int = 0, c: int = 0):
    """Weighted sum a + 2*b + 3*c, registered in nip as "myfunc"."""
    total = a
    total += 2 * b
    total += 3 * c
    return total
@nip
class MyClass:
    """nip-registered wrapper pairing a name with a function result."""

    def __init__(self, name: str, f: object):
        self.name = name
        self.f = f

    def __str__(self):
        return "name: {}, func result: {}".format(self.name, self.f)
class NotNipClass:
    """Plain class deliberately left out of the nip registry."""

    def __init__(self, name):
        self.name = name
def NoNipFunc(name):
    """Print a tagged message; intentionally not registered with nip."""
    print(f"NoYapFunc: {name}")
def show(*args, **kwargs):
    """Echo the positional and keyword arguments, one line each."""
    print(f"args: {args}")
    print(f"kwargs: {kwargs}")
def main(param, config):
    """Entry point used by nip configs: print the param, then the dumped config."""
    print(param)
    print(dumps(config))
205f841d52dcdd4a86affc009e851addf2fcf525 | 1,814 | py | Python | 141 Linked List Cycle.py | scorpionpd/LeetCode-all | 0d65494f37d093d650b83b93409e874c041f3abe | [
"MIT"
] | null | null | null | 141 Linked List Cycle.py | scorpionpd/LeetCode-all | 0d65494f37d093d650b83b93409e874c041f3abe | [
"MIT"
] | null | null | null | 141 Linked List Cycle.py | scorpionpd/LeetCode-all | 0d65494f37d093d650b83b93409e874c041f3abe | [
"MIT"
] | null | null | null | """
Given a linked list, determine if it has a cycle in it.
Follow up:
Can you solve it without using extra space?
"""
__author__ = 'Danyang'
# Definition for singly-linked list.
| 36.28 | 76 | 0.312018 | """
Given a linked list, determine if it has a cycle in it.
Follow up:
Can you solve it without using extra space?
"""
__author__ = 'Danyang'
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def hasCycle(self, head):
"""
if extra space available, use hash table
if not, use the model of Hare and Tortoise
Algorithm:
Hare & Tortoise
Physics, relative velocity.
___-------___
_-~~ ~~-_
_-~ /~-_
/^\__/^\ /~ \ / \
/| O|| O| / \_______________/ \
| |___||__| / / \ \
| \ / / \ \
| (_______) /______/ \_________ \
| / / \ / \
\ \^\\ \ / \ /
\ || \______________/ _-_ //\__//
\ ||------_-~~-_ ------------- \ --/~ ~\ || __/
~-----||====/~ |==================| |/~~~~~
(_(__/ ./ / \_\ \.
(_(___/ \_____)_)
:param head: ListNode
:return: boolean
"""
hare = head
tortoise = head
while hare and hare.next and tortoise:
hare = hare.next.next
tortoise = tortoise.next
if hare==tortoise:
return True
return False
| 47 | 1,516 | 71 |
20647d88459b1d700e8e2bbf54730b3fedf4e894 | 5,226 | py | Python | huaweicloud-sdk-cloudrtc/huaweicloudsdkcloudrtc/v2/model/record_rule_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-cloudrtc/huaweicloudsdkcloudrtc/v2/model/record_rule_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-cloudrtc/huaweicloudsdkcloudrtc/v2/model/record_rule_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecordRuleReq:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE(review): this reads as an auto-generated Huawei Cloud SDK model
    # (getter/setter boilerplate); keep edits consistent with the generator.

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    # python attribute name -> declared OpenAPI type
    openapi_types = {
        'obs_addr': 'RecordObsFileAddr',
        'record_formats': 'list[str]',
        'hls_config': 'HLSRecordConfig',
        'mp4_config': 'MP4RecordConfig'
    }

    # python attribute name -> JSON key used on the wire
    attribute_map = {
        'obs_addr': 'obs_addr',
        'record_formats': 'record_formats',
        'hls_config': 'hls_config',
        'mp4_config': 'mp4_config'
    }

    def __init__(self, obs_addr=None, record_formats=None, hls_config=None, mp4_config=None):
        """RecordRuleReq - a model defined in huaweicloud sdk"""
        self._obs_addr = None
        self._record_formats = None
        self._hls_config = None
        self._mp4_config = None
        self.discriminator = None

        # obs_addr and record_formats are always assigned (even when None);
        # the two per-format configs stay unset unless provided.
        self.obs_addr = obs_addr
        self.record_formats = record_formats
        if hls_config is not None:
            self.hls_config = hls_config
        if mp4_config is not None:
            self.mp4_config = mp4_config

    @property
    def obs_addr(self):
        """Gets the obs_addr of this RecordRuleReq.

        :return: The obs_addr of this RecordRuleReq.
        :rtype: RecordObsFileAddr
        """
        return self._obs_addr

    @obs_addr.setter
    def obs_addr(self, obs_addr):
        """Sets the obs_addr of this RecordRuleReq.

        :param obs_addr: The obs_addr of this RecordRuleReq.
        :type: RecordObsFileAddr
        """
        self._obs_addr = obs_addr

    @property
    def record_formats(self):
        """Gets the record_formats of this RecordRuleReq.

        录制格式:支持HLS格式和MP4格式(HLS和MP4为大写)。 - 若配置HLS则必须携带HLSRecordConfig参数 - 若配置MP4则需要携带MP4RecordConfig

        :return: The record_formats of this RecordRuleReq.
        :rtype: list[str]
        """
        return self._record_formats

    @record_formats.setter
    def record_formats(self, record_formats):
        """Sets the record_formats of this RecordRuleReq.

        录制格式:支持HLS格式和MP4格式(HLS和MP4为大写)。 - 若配置HLS则必须携带HLSRecordConfig参数 - 若配置MP4则需要携带MP4RecordConfig

        :param record_formats: The record_formats of this RecordRuleReq.
        :type: list[str]
        """
        self._record_formats = record_formats

    @property
    def hls_config(self):
        """Gets the hls_config of this RecordRuleReq.

        :return: The hls_config of this RecordRuleReq.
        :rtype: HLSRecordConfig
        """
        return self._hls_config

    @hls_config.setter
    def hls_config(self, hls_config):
        """Sets the hls_config of this RecordRuleReq.

        :param hls_config: The hls_config of this RecordRuleReq.
        :type: HLSRecordConfig
        """
        self._hls_config = hls_config

    @property
    def mp4_config(self):
        """Gets the mp4_config of this RecordRuleReq.

        :return: The mp4_config of this RecordRuleReq.
        :rtype: MP4RecordConfig
        """
        return self._mp4_config

    @mp4_config.setter
    def mp4_config(self, mp4_config):
        """Sets the mp4_config of this RecordRuleReq.

        :param mp4_config: The mp4_config of this RecordRuleReq.
        :type: MP4RecordConfig
        """
        self._mp4_config = mp4_config

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # recursively serialize list elements that are models
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # recursively serialize dict values that are models
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # mask sensitive fields instead of leaking their values
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 legacy shim: force utf-8 so non-ASCII fields serialize
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RecordRuleReq):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.797872 | 104 | 0.590126 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecordRuleReq:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE(review): this reads as an auto-generated Huawei Cloud SDK model
    # (getter/setter boilerplate); keep edits consistent with the generator.

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    # python attribute name -> declared OpenAPI type
    openapi_types = {
        'obs_addr': 'RecordObsFileAddr',
        'record_formats': 'list[str]',
        'hls_config': 'HLSRecordConfig',
        'mp4_config': 'MP4RecordConfig'
    }

    # python attribute name -> JSON key used on the wire
    attribute_map = {
        'obs_addr': 'obs_addr',
        'record_formats': 'record_formats',
        'hls_config': 'hls_config',
        'mp4_config': 'mp4_config'
    }

    def __init__(self, obs_addr=None, record_formats=None, hls_config=None, mp4_config=None):
        """RecordRuleReq - a model defined in huaweicloud sdk"""
        self._obs_addr = None
        self._record_formats = None
        self._hls_config = None
        self._mp4_config = None
        self.discriminator = None

        # obs_addr and record_formats are always assigned (even when None);
        # the two per-format configs stay unset unless provided.
        self.obs_addr = obs_addr
        self.record_formats = record_formats
        if hls_config is not None:
            self.hls_config = hls_config
        if mp4_config is not None:
            self.mp4_config = mp4_config

    @property
    def obs_addr(self):
        """Gets the obs_addr of this RecordRuleReq.

        :return: The obs_addr of this RecordRuleReq.
        :rtype: RecordObsFileAddr
        """
        return self._obs_addr

    @obs_addr.setter
    def obs_addr(self, obs_addr):
        """Sets the obs_addr of this RecordRuleReq.

        :param obs_addr: The obs_addr of this RecordRuleReq.
        :type: RecordObsFileAddr
        """
        self._obs_addr = obs_addr

    @property
    def record_formats(self):
        """Gets the record_formats of this RecordRuleReq.

        录制格式:支持HLS格式和MP4格式(HLS和MP4为大写)。 - 若配置HLS则必须携带HLSRecordConfig参数 - 若配置MP4则需要携带MP4RecordConfig

        :return: The record_formats of this RecordRuleReq.
        :rtype: list[str]
        """
        return self._record_formats

    @record_formats.setter
    def record_formats(self, record_formats):
        """Sets the record_formats of this RecordRuleReq.

        录制格式:支持HLS格式和MP4格式(HLS和MP4为大写)。 - 若配置HLS则必须携带HLSRecordConfig参数 - 若配置MP4则需要携带MP4RecordConfig

        :param record_formats: The record_formats of this RecordRuleReq.
        :type: list[str]
        """
        self._record_formats = record_formats

    @property
    def hls_config(self):
        """Gets the hls_config of this RecordRuleReq.

        :return: The hls_config of this RecordRuleReq.
        :rtype: HLSRecordConfig
        """
        return self._hls_config

    @hls_config.setter
    def hls_config(self, hls_config):
        """Sets the hls_config of this RecordRuleReq.

        :param hls_config: The hls_config of this RecordRuleReq.
        :type: HLSRecordConfig
        """
        self._hls_config = hls_config

    @property
    def mp4_config(self):
        """Gets the mp4_config of this RecordRuleReq.

        :return: The mp4_config of this RecordRuleReq.
        :rtype: MP4RecordConfig
        """
        return self._mp4_config

    @mp4_config.setter
    def mp4_config(self, mp4_config):
        """Sets the mp4_config of this RecordRuleReq.

        :param mp4_config: The mp4_config of this RecordRuleReq.
        :type: MP4RecordConfig
        """
        self._mp4_config = mp4_config

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # recursively serialize list elements that are models
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # recursively serialize dict values that are models
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # mask sensitive fields instead of leaking their values
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 legacy shim: force utf-8 so non-ASCII fields serialize
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RecordRuleReq):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 0 | 0 | 0 |
74458867b1ca3d7d9fb26951a2f67859b9b082c0 | 542 | py | Python | contest/impls/render_markdown.py | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | [
"MIT"
] | 6 | 2020-09-03T13:10:49.000Z | 2021-03-10T01:13:49.000Z | contest/impls/render_markdown.py | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | [
"MIT"
] | 11 | 2020-05-22T09:43:29.000Z | 2021-03-24T10:55:49.000Z | contest/impls/render_markdown.py | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | [
"MIT"
] | null | null | null | import argparse
import markdown
# Markdown extensions enabled for rendering: fenced code blocks and tables.
_EXTENSIONS = (
    'markdown.extensions.fenced_code',
    'markdown.extensions.tables',
)
if __name__ == '__main__':
    # CLI entry point: python render_markdown.py --input <md> --output <html>
    main()
| 19.357143 | 58 | 0.656827 | import argparse
import markdown
# Markdown extensions enabled for rendering: fenced code blocks and tables.
_EXTENSIONS = (
    'markdown.extensions.fenced_code',
    'markdown.extensions.tables',
)
def main():
    """Render a Markdown file to an HTML fragment.

    Command line: --input <markdown file> --output <html file>.
    Fix: both files are now read/written as UTF-8 explicitly; previously the
    platform default encoding was used, which breaks non-ASCII content on
    platforms where the locale encoding is not UTF-8 (e.g. Windows cp1252).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', required=True)
    parser.add_argument('--input', required=True)
    options = parser.parse_args()

    with open(options.input, 'r', encoding='utf-8') as f:
        text = f.read()
    html = markdown.markdown(text, extensions=_EXTENSIONS)
    with open(options.output, 'w', encoding='utf-8') as f:
        f.write(html)
if __name__ == '__main__':
    # CLI entry point: python render_markdown.py --input <md> --output <html>
    main()
| 353 | 0 | 23 |
c26d1e2e9e627e7d37923e241ab3efd749499c2a | 2,813 | py | Python | html_parser.py | WuTao1530663/web_spider_in_python | 1a3ba3471942bc5e38b1d5cac37568341ff0dd6f | [
"Apache-2.0"
] | null | null | null | html_parser.py | WuTao1530663/web_spider_in_python | 1a3ba3471942bc5e38b1d5cac37568341ff0dd6f | [
"Apache-2.0"
] | null | null | null | html_parser.py | WuTao1530663/web_spider_in_python | 1a3ba3471942bc5e38b1d5cac37568341ff0dd6f | [
"Apache-2.0"
] | null | null | null | from bs4 import BeautifulSoup
import re
import urllib.parse
import requests
if __name__ == '__main__':
    # Ad-hoc manual test scaffolding (dead code): `info` is assigned a sample
    # HTML snippet, immediately overwritten below, and never used by parse().
    info = u"""<div id="info" class="">\
<span>\
    <span class="pl"> 作者</span>\
        <a class="" href="/search/%E5%8D%A1%E5%8B%92%E5%BE%B7%C2%B7%E8%83%A1%E8%B5%9B%E5%B0%BC">[美] 卡勒德·胡赛尼</a>\
</span><br>\
<span class="pl">出版社:</span> 上海人民出版社<br>\
<span class="pl">原作名:</span> The Kite Runner<br>\
<span>\
    <span class="pl"> 译者</span>:\
        <a class="" href="/search/%E6%9D%8E%E7%BB%A7%E5%AE%8F">李继宏</a>
</span><br>\
<span class="pl">出版年:</span> 2006-5<br>\
<span class="pl">页数:</span> 362<br>\
<span class="pl">定价:</span> 29.00元<br>\
<span class="pl">装帧:</span> 平装<br>\
<span class="pl">丛书:</span> <a href="https://book.douban.com/series/19760">卡勒德·胡赛尼作品</a><br>\
<span class="pl">ISBN:</span> 9787208061644<br>\
</div>"""
    info = "clearfix"  # overwrites the snippet above; also unused afterwards
    # Live smoke test: fetches a real Douban page (network access required).
    HtmlParser().parse("https://book.douban.com/subject/1082154/",requests.get("https://book.douban.com/subject/1082154/").content)
| 43.276923 | 131 | 0.603271 | from bs4 import BeautifulSoup
import re
import urllib.parse
import requests
class HtmlParser(object):
    """Parses a Douban book page into a dict keyed by Chinese field names."""
    def parse(self,url, html_content):
        """Extract book fields from `html_content` fetched at `url`.

        Returns None for books rated <= 7.8 or without an ISBN; otherwise a
        dict with the book id, title, author, publisher, rating, recommended
        book ids, etc.
        NOTE(review): assumes `url` ends with "<7-digit id>/"; url[-8:-1]
        drops the trailing slash to recover the subject id -- TODO confirm
        for ids of other lengths.
        """
        book_data = {}
        soup = BeautifulSoup(html_content, 'html.parser',from_encoding='utf-8')
        book_data['书籍ID'] = url[-8:-1]
        book_data['书名'] = soup.find('span',property="v:itemreviewed").text
        info = soup.find('div', id='info')
        book_data['评分'] = soup.find("strong", class_="ll rating_num ").text
        book_data['ISBN'] = info.find("span", text=re.compile(u'ISBN')).next_sibling
        # Skip low-rated or ISBN-less books entirely.
        if float(book_data['评分'])<=7.8 or book_data['ISBN'] is None:
            return None
        book_data['作者'] = info.find("span",text=re.compile(u'作者')).find_next_sibling().text
        book_data['出版社'] = info.find("span",text=re.compile(u'出版社')).next_sibling
        book_data['出版年'] = info.find("span",text=re.compile(u'出版年')).next_sibling
        book_data['页数'] = info.find("span",text=re.compile(u'页数')).next_sibling
        book_data['定价'] = info.find("span",text=re.compile(u'定价')).next_sibling
        #<strong class="ll rating_num " property="v:average"> 9.1 </strong>
        #<span property="v:votes">122319</span>
        book_data['评价人数'] = soup.find("span", property="v:votes").text
        book_data['推荐书籍ID'] = []
        #<div class="intro">
        book_data['简介'] = soup.find('div',class_='intro').text
        # Recommendation panel: collect the subject ids of recommended books.
        #<div id="db-rec-section" class="block5 subject_show knnlike">
        recommand_book_urls = soup.find('div',id="db-rec-section").find("div",class_="content clearfix")
        for book in recommand_book_urls.find_all("dl",class_=""):
            book_data['推荐书籍ID'].append(book.dd.a['href'][-8:-1])
        # for key,value in zip(book_data.keys(),book_data.values()):
        #     print ("%s : %s"%(key,value))
        return book_data
if __name__ == '__main__':
    # Ad-hoc manual test scaffolding (dead code): `info` is assigned a sample
    # HTML snippet, immediately overwritten below, and never used by parse().
    info = u"""<div id="info" class="">\
<span>\
    <span class="pl"> 作者</span>\
        <a class="" href="/search/%E5%8D%A1%E5%8B%92%E5%BE%B7%C2%B7%E8%83%A1%E8%B5%9B%E5%B0%BC">[美] 卡勒德·胡赛尼</a>\
</span><br>\
<span class="pl">出版社:</span> 上海人民出版社<br>\
<span class="pl">原作名:</span> The Kite Runner<br>\
<span>\
    <span class="pl"> 译者</span>:\
        <a class="" href="/search/%E6%9D%8E%E7%BB%A7%E5%AE%8F">李继宏</a>
</span><br>\
<span class="pl">出版年:</span> 2006-5<br>\
<span class="pl">页数:</span> 362<br>\
<span class="pl">定价:</span> 29.00元<br>\
<span class="pl">装帧:</span> 平装<br>\
<span class="pl">丛书:</span> <a href="https://book.douban.com/series/19760">卡勒德·胡赛尼作品</a><br>\
<span class="pl">ISBN:</span> 9787208061644<br>\
</div>"""
    info = "clearfix"  # overwrites the snippet above; also unused afterwards
    # Live smoke test: fetches a real Douban page (network access required).
    HtmlParser().parse("https://book.douban.com/subject/1082154/",requests.get("https://book.douban.com/subject/1082154/").content)
| 1,804 | 4 | 48 |
0443796cd2d92bcfaddb64cdd45f4bd50ae576c8 | 4,012 | py | Python | pybycus/authtab.py | lutetiensis/pybycus | 20ed6f2d7aeeee397bc27593fe085d981a1cc2a0 | [
"BSD-3-Clause"
] | 1 | 2020-10-17T17:23:58.000Z | 2020-10-17T17:23:58.000Z | pybycus/authtab.py | lutetiensis/pybycus | 20ed6f2d7aeeee397bc27593fe085d981a1cc2a0 | [
"BSD-3-Clause"
] | null | null | null | pybycus/authtab.py | lutetiensis/pybycus | 20ed6f2d7aeeee397bc27593fe085d981a1cc2a0 | [
"BSD-3-Clause"
] | null | null | null | """ AUTHTAB.DIR file parser. """
from pybycus.file import File
class AuthTab(File):
    """ The Author List (with the filename AUTHTAB.DIR) contains
    descriptive information for each text file on the disc. The
    purpose of the Author Table is to allow the user to ask for
    the author Plato, for example, without having to know that
    the actual file name is TLG0059. Each entry contains the
    author name, the corresponding file name, synonyms, remarks,
    and language. The entries are arranged by category. """
    # NOTE(review): no methods are visible in this copy -- the class body
    # appears reduced to its docstring; behavior presumably comes from the
    # File base class. TODO confirm against the full source.
def content(path):
    """ Return the content of an AUTHTAB.DIR file. """
    # Thin convenience wrapper: parse the file at `path`, return parsed data.
    return AuthTab(path).content()
if __name__ == "__main__":
    # CLI usage: python authtab.py <path-to-AUTHTAB.DIR>
    import sys
    import pprint
    pprint.pprint(content(sys.argv[1]))
| 47.761905 | 80 | 0.55683 | """ AUTHTAB.DIR file parser. """
from pybycus.file import File
class AuthTab(File):
    """ The Author List (with the filename AUTHTAB.DIR) contains
    descriptive information for each text file on the disc. The
    purpose of the Author Table is to allow the user to ask for
    the author Plato, for example, without having to know that
    the actual file name is TLG0059. Each entry contains the
    author name, the corresponding file name, synonyms, remarks,
    and language. The entries are arranged by category. """

    def __init__(self, path):
        """Parse the AUTHTAB.DIR file at *path*; libraries (each a dict with
        "name", "title" and "entries") are appended to self._content."""
        super().__init__(path)
        # State machine over the byte stream: `library` is the library most
        # recently opened by a "*xxx" record, `entry` the author most recently
        # appended to it.  A well-formed file opens a library before any
        # author record, hence the E0601 suppression below.
        while True:
            # An (optional) synonym for the author name is introduced by a
            # byte of hex 80 and is terminated by the first byte value above
            # hex 7f. Up to five synonyms are allowed for each author name.
            # pylint: disable=E0601
            if self.peek_ubyte() == 0x80:
                _ = self.read_ubyte()
                synonym = self.read_string()
                entry["aliases"].append(synonym)
                assert len(entry["aliases"]) <= 5
            # The (optional) remarks field is introduced by a byte of hex 81
            # and is terminated by the first byte value above hex 7f.
            elif self.peek_ubyte() == 0x81:
                assert False  # remarks field: not implemented, fail loudly
            # The optional file size field is introduced by a byte of hex 82
            # and is terminated by the first byte value above hex 7f.
            elif self.peek_ubyte() == 0x82:
                assert False  # file size field: not implemented, fail loudly
            # The optional language code field is introduced by a byte of hex 83
            # and is terminated by the first byte value above hex 7f.
            elif self.peek_ubyte() == 0x83:
                _ = self.read_ubyte()
                language_code = self.read_string()
                entry["language_code"] = language_code
            # The entry is terminated by at least one hex ff (decimal 255). A
            # second ff is used when needed to pad the entry to an even byte
            # boundary.
            elif self.peek_ubyte() == 0xff:
                _ = self.read_ubyte()
            # Each entry begins with a file name (without any file name
            # extension) on an even byte boundary. The name is padded with
            # blanks if necessary to reach the fixed length of 8 bytes.
            else:
                # If the file name starts with an asterisk, it is a library
                # name (four characters including the asterisk). In this case
                # the second four bytes are the binary length of the library
                # (including the 8 bytes for the asterisk, name and length).
                if chr(self.peek_ubyte()) == '*':
                    name = self.read_nstring(4)
                    # If the file name starts *END it marks the end of the
                    # list. The second four bytes are binary zeroes.
                    if name == "*END":
                        padding = self.read_uint()
                        assert len(name) == 4 and padding == 0x0000
                        break
                    listlen = self.read_uint()  # read to advance the stream; value unused
                    title = self.read_string()
                    library = {"name": name, "title": title, "entries": []}
                    self._content.append(library)
                # The full author name (of any reasonable length) starts after
                # the filename and is terminated by the first byte value above
                # 7f (decimal 127).
                else:
                    filename = self.read_string()
                    entry = {"id": filename[:7],
                             "name": filename[8:],
                             "aliases": []}
                    library["entries"].append(entry)
def content(path):
    """ Return the content of an AUTHTAB.DIR file. """
    # Thin convenience wrapper: parse the file at `path`, return parsed data.
    return AuthTab(path).content()
if __name__ == "__main__":
    # CLI usage: python authtab.py <path-to-AUTHTAB.DIR>
    import sys
    import pprint
    pprint.pprint(content(sys.argv[1]))
| 3,246 | 0 | 27 |
a18b85ca07be6bc3638261609d5cbff00fdb06a4 | 724 | py | Python | rmf_building_map_tools/building_map/vertex.py | morty-clobot/rmf_traffic_editor | 2d9f32cef709482914c20b4b3c4fef938f87bdf3 | [
"Apache-2.0"
] | 40 | 2019-12-03T09:02:16.000Z | 2021-03-16T00:25:38.000Z | rmf_building_map_tools/building_map/vertex.py | morty-clobot/rmf_traffic_editor | 2d9f32cef709482914c20b4b3c4fef938f87bdf3 | [
"Apache-2.0"
] | 149 | 2019-11-28T14:47:39.000Z | 2021-03-24T14:05:58.000Z | rmf_building_map_tools/building_map/vertex.py | CLOBOT-Co-Ltd/rmf_traffic_editor | 2d9f32cef709482914c20b4b3c4fef938f87bdf3 | [
"Apache-2.0"
] | 30 | 2019-11-28T14:49:47.000Z | 2021-03-14T18:28:17.000Z | from .param_value import ParamValue
| 30.166667 | 64 | 0.584254 | from .param_value import ParamValue
class Vertex:
    """A named map vertex parsed from a traffic-editor YAML list entry.

    The y coordinate is negated on load and negated again on save,
    converting between the file's and the internal axis conventions.
    """

    def __init__(self, yaml_node):
        self.x = float(yaml_node[0])
        self.y = float(-yaml_node[1])  # flip the y axis coming from the file
        self.z = float(yaml_node[2])  # currently always 0
        self.name = yaml_node[3]
        self.params = {}
        if len(yaml_node) > 4 and len(yaml_node[4]) > 0:
            self.params = {
                key: ParamValue(raw)
                for key, raw in yaml_node[4].items()
            }

    def xy(self):
        """Return the planar coordinates as an (x, y) tuple."""
        return self.x, self.y

    def to_yaml(self):
        """Serialize back to the YAML list layout (y axis flipped again)."""
        serialized_params = {
            key: value.to_yaml() for key, value in self.params.items()
        }
        return [self.x, -self.y, self.z, self.name, serialized_params]
| 592 | -8 | 103 |
320e0a142f301f2fe27e02d1482f04be409d2b92 | 2,980 | py | Python | tests/data_collection_tests/test_observational_data_collector.py | CITCOM-project/CausalTestingFramework | ca83012dbaf7b1f95c118939570fc8b2c49bca68 | [
"MIT"
] | 1 | 2021-12-15T14:54:32.000Z | 2021-12-15T14:54:32.000Z | tests/data_collection_tests/test_observational_data_collector.py | AndrewC19/CausalTestingFramework | ca83012dbaf7b1f95c118939570fc8b2c49bca68 | [
"MIT"
] | 42 | 2021-11-25T11:11:07.000Z | 2022-03-21T09:47:02.000Z | tests/data_collection_tests/test_observational_data_collector.py | AndrewC19/CausalTestingFramework | ca83012dbaf7b1f95c118939570fc8b2c49bca68 | [
"MIT"
] | null | null | null | import unittest
import os
import pandas as pd
from causal_testing.data_collection.data_collector import ObservationalDataCollector
from causal_testing.specification.causal_specification import Scenario
from causal_testing.specification.variable import Input, Output, Meta
from scipy.stats import uniform, rv_discrete
from tests.test_helpers import create_temp_dir_if_non_existent, remove_temp_dir_if_existent
if __name__ == "__main__":
    # Allow running this test module directly via unittest's CLI runner.
    unittest.main()
| 48.852459 | 110 | 0.696309 | import unittest
import os
import pandas as pd
from causal_testing.data_collection.data_collector import ObservationalDataCollector
from causal_testing.specification.causal_specification import Scenario
from causal_testing.specification.variable import Input, Output, Meta
from scipy.stats import uniform, rv_discrete
from tests.test_helpers import create_temp_dir_if_non_existent, remove_temp_dir_if_existent
class TestObservationalDataCollector(unittest.TestCase):
    """Tests for ObservationalDataCollector against a small synthetic
    dataset where Y = 3*X1 + X2*X3 + 10."""

    def setUp(self) -> None:
        # Write the synthetic observational data to a temp CSV so the
        # collector reads it the same way as a real study file.
        temp_dir_path = create_temp_dir_if_non_existent()
        self.dag_dot_path = os.path.join(temp_dir_path, "dag.dot")
        self.observational_df_path = os.path.join(temp_dir_path, "observational_data.csv")
        # Y = 3*X1 + X2*X3 + 10
        self.observational_df = pd.DataFrame({"X1": [1, 2, 3, 4], "X2": [5, 6, 7, 8], "X3": [10, 20, 30, 40]})
        self.observational_df["Y"] = self.observational_df.apply(
            lambda row: (3 * row.X1) + (row.X2 * row.X3) + 10, axis=1)
        self.observational_df.to_csv(self.observational_df_path)
        # Scenario variables; X4 deliberately has no column in the CSV.
        self.X1 = Input("X1", int, uniform(1, 4))
        self.X2 = Input("X2", int, rv_discrete(values=([7], [1])))
        self.X3 = Input("X3", int, uniform(10, 40))
        self.X4 = Input("X4", int, rv_discrete(values=([10], [1])))
        self.Y = Output("Y", int)

    def test_not_all_variables_in_data(self):
        """Requesting a variable (X4) absent from the data must raise."""
        scenario = Scenario({self.X1, self.X2, self.X3, self.X4})
        observational_data_collector = ObservationalDataCollector(scenario, self.observational_df_path)
        self.assertRaises(IndexError, observational_data_collector.collect_data)

    def test_all_variables_in_data(self):
        """When every scenario variable exists, the data round-trips unchanged."""
        scenario = Scenario({self.X1, self.X2, self.X3, self.Y})
        observational_data_collector = ObservationalDataCollector(scenario, self.observational_df_path)
        df = observational_data_collector.collect_data(index_col=0)
        assert df.equals(self.observational_df), f"{df}\nwas not equal to\n{self.observational_df}"

    def test_data_constraints(self):
        """A scenario constraint (X1 > 2) must keep only the matching rows."""
        # NOTE(review): X1.z3 presumably exposes a z3 solver variable -- confirm.
        scenario = Scenario({self.X1, self.X2, self.X3, self.Y}, {self.X1.z3 > 2})
        observational_data_collector = ObservationalDataCollector(scenario, self.observational_df_path)
        df = observational_data_collector.collect_data(index_col=0)
        expected = self.observational_df.loc[[2, 3]]
        assert df.equals(expected), f"{df}\nwas not equal to\n{expected}"

    def test_meta_population(self):
        """Meta variables must be populated from the data by their callback."""
        def populate_m(data):
            data['M'] = data['X1'] * 2
        meta = Meta("M", int, populate_m)
        scenario = Scenario({self.X1, meta})
        observational_data_collector = ObservationalDataCollector(scenario, self.observational_df_path)
        data = observational_data_collector.collect_data()
        assert all((m == 2*x1 for x1, m in zip(data['X1'], data['M'])))

    def tearDown(self) -> None:
        # Remove the temp dir created in setUp.
        remove_temp_dir_if_existent()
if __name__ == "__main__":
    # Allow running this test module directly via unittest's CLI runner.
    unittest.main()
| 2,301 | 35 | 185 |
9a5d1f0b059d53e886a9801f5cbd41c0b55883cd | 2,953 | py | Python | pymoo/model/individual.py | gabicavalcante/pymoo | 1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846 | [
"Apache-2.0"
] | 3 | 2020-09-18T19:33:31.000Z | 2020-09-18T19:33:33.000Z | pymoo/model/individual.py | gabicavalcante/pymoo | 1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846 | [
"Apache-2.0"
] | null | null | null | pymoo/model/individual.py | gabicavalcante/pymoo | 1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846 | [
"Apache-2.0"
] | 1 | 2022-03-31T08:19:13.000Z | 2022-03-31T08:19:13.000Z | import copy
# @property
# def F(self):
# attr = "F"
# if attr in self.__dict__:
# return self.__dict__[attr]
# else:
# return None
# Gets called when the item is not found via __getattribute__
# def __getattr__(self, item):
# return super(Individual, self).__setattr__(item, 'orphan')
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __getitem__(self, key):
# return self.__dict__.get(key)
# def __getattr__(self, attr):
#
# if attr == "F":
# if attr in self.__dict__:
# return self.__dict__[attr]
# else:
# return None
#
# if attr in self.__dict__:
# return self.__dict__[attr]
#
#
#
| 24.815126 | 89 | 0.525906 | import copy
class Individual:
    """One candidate solution: fixed slots (X, F, CV, G, feasible) plus a
    free-form ``data`` dict for any extra per-individual values.

    ``get``/``set`` look in ``data`` first and fall back to the fixed
    attributes recorded in ``attr``.
    """

    def __init__(self, X=None, F=None, CV=None, G=None, feasible=None, **kwargs) -> None:
        self.X = X
        self.F = F
        self.CV = CV
        self.G = G
        self.feasible = feasible
        self.data = kwargs
        # snapshot of the fixed attribute names (taken before `attr` exists)
        self.attr = set(self.__dict__.keys())

    def has(self, key):
        """True if *key* is a fixed attribute or stored in ``data``."""
        return (key in self.attr) or (key in self.data)

    def set(self, key, value):
        """Store *value*, routing to the fixed slot or ``data``; returns self."""
        target = self.__dict__ if key in self.attr else self.data
        target[key] = value
        return self

    def copy(self, deep=False):
        """Clone this individual; ``data`` is (deep-)copied so clones are
        independent at the dictionary level."""
        clone = copy.copy(self)
        duplicate = copy.deepcopy if deep else copy.copy
        clone.data = duplicate(self.data)
        return clone

    def get(self, *keys):
        """Look up one or more names (``data`` first, then fixed slots).

        Returns a single value for one key, otherwise a tuple; unknown
        names yield None.
        """
        def _lookup(name):
            if name in self.data:
                return self.data[name]
            if name in self.attr:
                return self.__dict__[name]
            return None

        values = [_lookup(name) for name in keys]
        return values[0] if len(values) == 1 else tuple(values)
class eIndividual:
    """Experimental attribute-bag variant of Individual.

    Every value (X, F, CV, G, feasible plus arbitrary extras) is stored
    directly in ``__dict__``; reading a missing name returns ``None`` via
    ``__getattr__`` instead of raising ``AttributeError``.

    Fixes applied:
    - ``__init__`` previously merged ``{**kwargs, **defaults}``, so the
      defaults silently overwrote caller-supplied X/F/CV/G/feasible; the
      merge order is now defaults-first.
    - ``copy`` contained unreachable code after its ``return`` and stashed
      the copied attributes into a stray ``data`` attribute; it also relied
      on ``copy.copy(self)``, which misbehaves with ``__getattr__``
      returning ``None`` for the pickle-protocol probes
      (``__getstate__``/``__setstate__``) on older interpreters.
    """

    def __init__(self, **kwargs) -> None:
        # defaults first so caller-supplied values win
        merged = {**dict(X=None, F=None, CV=None, G=None, feasible=None), **kwargs}
        for key, value in merged.items():
            self.__dict__[key] = value

    def has(self, key):
        """True if *key* is currently stored on this individual."""
        return key in self.__dict__

    def set(self, key, val):
        """Store *val* under *key*."""
        self.__dict__[key] = val

    def get(self, *keys):
        """Return the value for one key, or a tuple of values for several;
        missing names yield None."""
        if len(keys) == 1:
            return self.__dict__.get(keys[0])
        return tuple(self.__dict__.get(key) for key in keys)

    def copy(self, deep=False):
        """Return an independent clone; with deep=True the stored values are
        deep-copied as well."""
        # Bypass __init__ (and the copy/pickle protocol, which trips over
        # __getattr__): build a bare instance and fill its __dict__.
        clone = object.__new__(type(self))
        source = copy.deepcopy(self.__dict__) if deep else dict(self.__dict__)
        clone.__dict__.update(source)
        return clone

    def __getattr__(self, val):
        # Only invoked when normal lookup fails -> missing names read as None.
        return self.__dict__.get(val)

    def __setattr__(self, key, value):
        # All assignments go straight into the instance dict.
        self.__dict__[key] = value
e0cfec01c7bd874d0cf8518ed6414b1765f009fe | 2,171 | py | Python | tests/test_controllers.py | kaaass/BGmi | 564ea664efed10a18dfcedb39552e688a66966c0 | [
"MIT"
] | 483 | 2017-09-15T16:35:11.000Z | 2022-03-29T16:34:56.000Z | tests/test_controllers.py | kaaass/BGmi | 564ea664efed10a18dfcedb39552e688a66966c0 | [
"MIT"
] | 205 | 2017-09-14T01:24:25.000Z | 2022-03-17T09:59:28.000Z | tests/test_controllers.py | kaaass/BGmi | 564ea664efed10a18dfcedb39552e688a66966c0 | [
"MIT"
] | 48 | 2017-09-19T16:09:55.000Z | 2022-02-04T10:08:25.000Z | import unittest
from bgmi.lib.constants import BANGUMI_UPDATE_TIME
from bgmi.lib.controllers import (
add,
cal,
delete,
mark,
recreate_source_relatively_table,
search,
)
from bgmi.main import setup
| 31.014286 | 74 | 0.58176 | import unittest
from bgmi.lib.constants import BANGUMI_UPDATE_TIME
from bgmi.lib.controllers import (
add,
cal,
delete,
mark,
recreate_source_relatively_table,
search,
)
from bgmi.main import setup
class ControllersTest(unittest.TestCase):
    """Integration tests for bgmi.lib.controllers.

    NOTE(review): the tests are alphabetically ordered (test_a_.. test_e_..)
    and appear to rely on that execution order, since each one mutates the
    shared subscription database set up once in setUpClass.
    """

    def setUp(self):
        # Runtime fixture names (must stay as-is; they are looked up remotely).
        self.bangumi_name_1 = "名侦探柯南"
        self.bangumi_name_2 = "海贼王"

    def test_a_cal(self):
        """cal() must return a weekday -> list-of-bangumi mapping."""
        r = cal()
        assert isinstance(r, dict)
        for day in r.keys():
            # weekday keys are matched case-insensitively against the constant
            assert day.lower() in (x.lower() for x in BANGUMI_UPDATE_TIME)
            assert isinstance(r[day], list)
            for bangumi in r[day]:
                # every entry carries the full display payload
                assert "status" in bangumi
                assert "subtitle_group" in bangumi
                assert "name" in bangumi
                assert "update_time" in bangumi
                assert "cover" in bangumi

    def test_b_add(self):
        """Adding twice warns; deleting right after add only warns too."""
        r = add(self.bangumi_name_1, 0)
        assert r["status"] == "success", r["message"]
        r = add(self.bangumi_name_1, 0)
        assert r["status"] == "warning", r["message"]
        r = delete(self.bangumi_name_1)
        assert r["status"] == "warning", r["message"]

    def test_c_mark(self):
        """mark() succeeds for a followed show, errors for an unknown one."""
        add(self.bangumi_name_1, 0)
        r = mark(self.bangumi_name_1, 1)
        assert r["status"] == "success", r["message"]
        # passing no episode returns the current mark as an info message
        r = mark(self.bangumi_name_1, None)
        assert r["status"] == "info", r["message"]
        r = mark(self.bangumi_name_2, 0)
        assert r["status"] == "error", r["message"]

    def test_d_delete(self):
        """delete() warns for no-args/known names, errors for unknown names."""
        r = delete()
        assert r["status"] == "warning", r["message"]
        r = delete(self.bangumi_name_1)
        assert r["status"] == "warning", r["message"]
        r = delete(self.bangumi_name_1)
        assert r["status"] == "warning", r["message"]
        r = delete(self.bangumi_name_2)
        assert r["status"] == "error", r["message"]
        r = delete(clear_all=True, batch=True)
        assert r["status"] == "warning", r["message"]

    def test_e_search(self):
        # smoke test only — the result is not asserted
        search(self.bangumi_name_1, dupe=False)

    @staticmethod
    def setUpClass():
        # one-time environment setup and a clean source/bangumi table
        setup()
        recreate_source_relatively_table()
| 1,714 | 226 | 23 |
9f86c14a0806b55f9050c006457010a10a435371 | 1,262 | py | Python | data formatting/format.py | ManindraDeMel/Deep-Deblurring | 0332591d3aaa3940542f34b6603fd3dd154416bf | [
"MIT"
] | null | null | null | data formatting/format.py | ManindraDeMel/Deep-Deblurring | 0332591d3aaa3940542f34b6603fd3dd154416bf | [
"MIT"
] | null | null | null | data formatting/format.py | ManindraDeMel/Deep-Deblurring | 0332591d3aaa3940542f34b6603fd3dd154416bf | [
"MIT"
] | null | null | null | from PIL import Image, ImageFilter
import random
# This library only words with the assumption that the dataset has been formatted as 0.jpg, 1.jpg ... or 0.png, 1.png ... accordingly | 45.071429 | 171 | 0.681458 | from PIL import Image, ImageFilter
import random
# This library only words with the assumption that the dataset has been formatted as 0.jpg, 1.jpg ... or 0.png, 1.png ... accordingly
def blurImage(path, blur_num):
    """Blur the image at *path* in place with strength *blur_num*, randomly
    picking a box blur (~1/3 of calls) or a gaussian blur (~2/3) so a dataset
    of only-sharp images gets varied degradations."""
    source = Image.open(path)
    if random.randint(0, 2) > 1:  # roll 0..2 — only 2 selects the box blur
        blurred = source.filter(ImageFilter.BoxBlur(blur_num))
    else:
        blurred = source.filter(ImageFilter.GaussianBlur(blur_num))
    blurred.save(path)  # overwrites the original file
def blurImages(path, dataset_size, imgFormat = "jpg"):
    """Blur every image 0..dataset_size-1 under *path* in place, each with a
    random strength. Files must be named 0.jpg, 1.jpg, ... (or the given
    format)."""
    for index in range(dataset_size):
        strength = random.randint(5, 20)  # tweak this range for different blur amounts
        blurImage("{}/{}.{}".format(path, index, imgFormat), strength)
def resizeImage(x, y, path):
    """Resize the image at *path* in place to (x, y) pixels with antialiasing."""
    picture = Image.open(path)
    resized = picture.resize((x, y), Image.ANTIALIAS)
    resized.save(path)
def resizeImages(x, y, dataset_size, path, imgFormat = "jpg"):
    """Resize every image of the dataset to (x, y) in place. Assumes files are
    named in ascending order: 0.jpg, 1.jpg, 2.jpg, ..."""
    for index in range(dataset_size):
        resizeImage(x, y, "{}/{}.{}".format(path, index, imgFormat))
ae93b350a737e64fb7552c84ff50d4dbc14ab372 | 1,610 | py | Python | get_bitcoin_price.py | ZanW/newsScraper_new | a1995c18d256856c66387bcf290d02e82ae24869 | [
"MIT"
] | null | null | null | get_bitcoin_price.py | ZanW/newsScraper_new | a1995c18d256856c66387bcf290d02e82ae24869 | [
"MIT"
] | null | null | null | get_bitcoin_price.py | ZanW/newsScraper_new | a1995c18d256856c66387bcf290d02e82ae24869 | [
"MIT"
] | null | null | null | import os
import time
import pandas as pd
FETCH_URL = "https://poloniex.com/public?command=returnChartData¤cyPair=%s&start=%d&end=%d&period=300"
#PAIR_LIST = ["BTC_ETH"]
DATA_DIR = "data"
COLUMNS = ["date","high","low","open","close","volume","quoteVolume","weightedAverage"]
if __name__ == '__main__':
main()
| 26.393443 | 108 | 0.625466 | import os
import time
import pandas as pd
FETCH_URL = "https://poloniex.com/public?command=returnChartData¤cyPair=%s&start=%d&end=%d&period=300"
#PAIR_LIST = ["BTC_ETH"]
DATA_DIR = "data"
COLUMNS = ["date","high","low","open","close","volume","quoteVolume","weightedAverage"]
def get_data(pair):
    """Fetch 5-minute candles for *pair* from Poloniex and append them to
    data/<pair>.csv, persisting the last fetched timestamp in data/<pair>.

    Resumes one second after the stored timestamp when the csv already
    exists; otherwise starts from the hard-coded epoch below.
    """
    datafile = os.path.join(DATA_DIR, pair + ".csv")
    timefile = os.path.join(DATA_DIR, pair)
    newfile = not os.path.exists(datafile)
    if newfile:
        # NOTE(review): the original comment said 2014.01.01, but 1486166400
        # is actually 2017-02-04 UTC — confirm the intended start date.
        start_time = 1486166400
    else:
        with open(timefile) as ft:
            start_time = int(ft.readline()) + 1
    end_time = start_time + 86400 * 30  # fetch a ~30-day window
    url = FETCH_URL % (pair, start_time, end_time)
    print("Get %s from %d to %d" % (pair, start_time, end_time))
    df = pd.read_json(url, convert_dates=False)
    if df["date"].iloc[-1] == 0:
        # Poloniex answers with a single zero-dated row when there is no data
        print("No data.")
        return
    end_time = df["date"].iloc[-1]
    # Persist the last fetched timestamp so the next run resumes after it.
    # Context managers replace the original bare open()/close() pairs, which
    # leaked the handles on any exception.
    with open(timefile, "w") as ft:
        ft.write("%d\n" % end_time)
    with open(datafile, "a") as outf:
        # write the header row only when the csv is brand new
        df.to_csv(outf, index=False, index_label=COLUMNS, header=newfile)
    print("Finish.")
    time.sleep(30)  # rate-limit between heavy fetches
def main():
    """Fetch candle history for every Poloniex market quoted in BTC."""
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    # the 24h-volume endpoint lists every market as a column name
    volume = pd.read_json("https://poloniex.com/public?command=return24hVolume")
    btc_pairs = [name for name in volume.columns if name.startswith('BTC')]
    print(btc_pairs)
    for name in btc_pairs:
        get_data(name)
        time.sleep(2)  # small pause between pairs to stay polite to the API
time.sleep(2)
if __name__ == '__main__':
main()
| 1,241 | 0 | 46 |
8c40bdc8938659a4ff899b242ef3d828e3dcba06 | 1,551 | py | Python | hearthbreaker/cards/weapons/hunter.py | anuragpapineni/Hearthbreaker-evolved-agent | d519d42babd93e3567000c33a381e93db065301c | [
"MIT"
] | null | null | null | hearthbreaker/cards/weapons/hunter.py | anuragpapineni/Hearthbreaker-evolved-agent | d519d42babd93e3567000c33a381e93db065301c | [
"MIT"
] | null | null | null | hearthbreaker/cards/weapons/hunter.py | anuragpapineni/Hearthbreaker-evolved-agent | d519d42babd93e3567000c33a381e93db065301c | [
"MIT"
] | null | null | null | from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
from hearthbreaker.game_objects import WeaponCard, Weapon
| 33 | 94 | 0.601547 | from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
from hearthbreaker.game_objects import WeaponCard, Weapon
class EaglehornBow(WeaponCard):
    """Eaglehorn Bow: 3-mana 3/2 hunter weapon that gains +1 durability each
    time a friendly secret is revealed."""

    def __init__(self):
        super().__init__("Eaglehorn Bow", 3, CHARACTER_CLASS.HUNTER,
                         CARD_RARITY.RARE)

    def create_weapon(self, player):
        def attach(wpn, owner):
            # +1 durability whenever one of the owner's secrets triggers
            def gain_durability(secret):
                wpn.durability += 1
            owner.bind("secret_revealed", gain_durability)
            # stop listening once this weapon leaves play
            wpn.bind_once("destroyed", lambda: owner.unbind("secret_revealed", gain_durability))
            # copies of the weapon re-attach the same effect
            wpn.bind("copied", attach)
        bow = Weapon(3, 2)
        attach(bow, player)
        return bow
class GladiatorsLongbow(WeaponCard):
    """Gladiator's Longbow: 7-mana 5/2 hunter weapon; the hero is immune for
    the duration of each of its attacks."""

    def __init__(self):
        super().__init__("Gladiator's Longbow", 7, CHARACTER_CLASS.HUNTER,
                         CARD_RARITY.EPIC)

    def create_weapon(self, player):
        def attach(wpn, owner):
            # immunity toggles around every hero attack
            def begin_immunity(ignored_target):
                owner.hero.immune = True
            def finish_immunity():
                owner.hero.immune = False
            def detach():
                # tear the listeners down when the weapon is destroyed
                owner.hero.unbind("attack", begin_immunity)
                owner.hero.unbind("attack_completed", finish_immunity)
            owner.hero.bind("attack", begin_immunity)
            owner.hero.bind("attack_completed", finish_immunity)
            wpn.bind_once("destroyed", detach)
            # copies of the weapon re-attach the same effect
            wpn.bind("copied", attach)
        longbow = Weapon(5, 2)
        attach(longbow, player)
        return longbow
| 1,249 | 25 | 152 |
069bc2c04b617aebb7c7459106d35ec20698985f | 2,659 | py | Python | sdk/python/touca/cli/_merge.py | trytouca/trytouca | eae38a96407d1ecac543c5a5fb05cbbe632ddfca | [
"Apache-2.0"
] | 6 | 2022-03-19T02:57:11.000Z | 2022-03-31T16:34:34.000Z | sdk/python/touca/cli/_merge.py | trytouca/trytouca | eae38a96407d1ecac543c5a5fb05cbbe632ddfca | [
"Apache-2.0"
] | null | null | null | sdk/python/touca/cli/_merge.py | trytouca/trytouca | eae38a96407d1ecac543c5a5fb05cbbe632ddfca | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Touca, Inc. Subject to Apache-2.0 License.
from sys import stderr, stdout
from pathlib import Path
from argparse import ArgumentParser
from loguru import logger
from touca.cli._common import Operation
| 33.2375 | 81 | 0.599097 | # Copyright 2022 Touca, Inc. Subject to Apache-2.0 License.
from sys import stderr, stdout
from pathlib import Path
from argparse import ArgumentParser
from loguru import logger
from touca.cli._common import Operation
def _merge(touca_cli: Path, dir_src: Path, dir_dst: Path):
    """Run the external ``touca_cli merge`` executable on one archive directory.

    :param touca_cli: path to the "touca_cli" C++ executable
    :param dir_src: directory holding the original archives (must exist)
    :param dir_dst: output directory (created if missing)
    :return: True when the subprocess exits with status 0, else False
    """
    from subprocess import Popen
    if not dir_src.exists():
        logger.error(f"expected directory {dir_src} to exist")
        return False
    dir_dst.mkdir(parents=True, exist_ok=True)
    logger.info(f"merging result directory {dir_src} into {dir_dst}")
    cmd = [touca_cli, "merge", f"--src={dir_src}", f"--out={dir_dst}"]
    # child inherits our stdout/stderr so the CLI's own output is visible
    proc = Popen(cmd, universal_newlines=True, stdout=stdout, stderr=stderr)
    exit_status = proc.wait()
    if 0 != exit_status:
        logger.warning(f"failed to merge {dir_src}")
        # NOTE(review): Popen.wait() always returns an int, so this None
        # check looks unreachable — probably leftover from a poll() variant.
        if exit_status is not None:
            logger.warning(f"touca_cli returned code {exit_status}")
        return False
    return True
class Merge(Operation):
    """CLI operation that merges Touca binary archive directories by invoking
    the external C++ ``touca_cli`` executable on each one."""

    name = "merge"
    help = "Merge binary archive files"

    def __init__(self, options: dict):
        # parsed command-line options: expects keys "src", "out" and "cli"
        self.__options = options

    @classmethod
    def parser(cls, parser: ArgumentParser):
        """Register the command-line arguments of the merge operation.

        (The first parameter was misleadingly named ``self`` although the
        method is a classmethod and receives the class object.)
        """
        parser.add_argument(
            "--src",
            help="path to directory with original Touca archives directories",
            required=True,
        )
        parser.add_argument(
            "--out",
            help="path to directory where the merged archives should be created",
            required=True,
        )
        parser.add_argument(
            "--cli",
            help='path to "touca_cli" C++ executable',
            required=True,
        )

    def run(self):
        """Merge every archive directory under --src into a sibling
        "<name>-merged" directory under --out.

        Already-merged inputs ("*-merged") and existing outputs are skipped.
        Returns True when all directories merge successfully, False otherwise.
        """
        src = Path(self.__options.get("src")).expanduser().resolve()
        out = Path(self.__options.get("out")).expanduser().resolve()
        cli = Path(self.__options.get("cli")).expanduser().resolve()
        if not src.exists():
            logger.error(f"directory {src} does not exist")
            return False
        if not out.exists():
            out.mkdir(parents=True, exist_ok=True)
        for dir_src in src.glob("*"):
            if not dir_src.is_dir():
                continue
            if dir_src.name.endswith("-merged"):
                # never merge an output directory a second time
                continue
            dir_dst = out.joinpath(dir_src.name + "-merged")
            if dir_dst.exists():
                # output already produced by a previous run
                continue
            logger.info(f"merging {dir_src}")
            if not _merge(cli, dir_src, dir_dst):
                logger.error(f"failed to merge {dir_src}")
                return False
            logger.info(f"merged {dir_src}")
        logger.info("merged all result directories")
        return True
| 2,233 | 159 | 46 |
9cb3935c9a570950e26ec0e2f7fcb068ee01b448 | 4,939 | py | Python | chess/game.py | rdebek/chess | 0f72894ded5b464994ae03993c5224f705dc8eb7 | [
"MIT"
] | null | null | null | chess/game.py | rdebek/chess | 0f72894ded5b464994ae03993c5224f705dc8eb7 | [
"MIT"
] | null | null | null | chess/game.py | rdebek/chess | 0f72894ded5b464994ae03993c5224f705dc8eb7 | [
"MIT"
] | null | null | null | import pygame
import moves
from typing import List
from pieces.king import King
import copy
SIZE = (1000, 800)
SQUARE_WIDTH = int(0.8 * SIZE[0] // 8)
SQUARE_HEIGHT = SIZE[1] // 8
IMAGES = {}
pygame.init()
screen = pygame.display.set_mode(SIZE)
move_feed = []
running = True
board_array = [
['Br', 'Bn', 'Bb', 'Bq', 'Bk', 'Bb', 'Bn', 'Br'],
['Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp'],
['--', '--', '--', '--', '--', '--', '--', '--'],
['--', '--', '--', '--', '--', '--', '--', '--'],
['--', '--', '--', '--', '--', '--', '--', '--'],
['--', '--', '--', '--', '--', '--', '--', '--'],
['Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp'],
['Wr', 'Wn', 'Wb', 'Wq', 'Wk', 'Wb', 'Wn', 'Wr']
]
count = 0
load_images()
draw_board()
draw_pieces()
draw_sidebar()
pygame.display.update()
last_color_moved = 'B'
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if count == 0:
initial_pos = event.pos
if (last_color_moved == 'B' and get_piece_color(initial_pos) == 'W') or (
last_color_moved == 'W' and get_piece_color(initial_pos) == 'B'):
count += 1
draw_board()
highlight_square(initial_pos)
draw_pieces()
elif count == 1:
ending_pos = event.pos
count = 0
if color := handle_move(initial_pos, ending_pos):
last_color_moved = color
draw_board()
draw_pieces()
pygame.display.update()
pygame.quit()
| 33.828767 | 117 | 0.561652 | import pygame
import moves
from typing import List
from pieces.king import King
import copy
# Window geometry: the board occupies the left 80% of the window, the
# remaining 20% is the sidebar drawn by draw_sidebar().
SIZE = (1000, 800)
SQUARE_WIDTH = int(0.8 * SIZE[0] // 8)
SQUARE_HEIGHT = SIZE[1] // 8
# piece-code -> scaled pygame.Surface cache, filled by load_images()
IMAGES = {}
pygame.init()
screen = pygame.display.set_mode(SIZE)
# list of ((from_row, from_col), (to_row, to_col)) tuples, appended by handle_move
move_feed = []
running = True
# 8x8 board; each cell is "<color><piece>" (e.g. 'Wk' = white king), '--' = empty
board_array = [
    ['Br', 'Bn', 'Bb', 'Bq', 'Bk', 'Bb', 'Bn', 'Br'],
    ['Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp'],
    ['--', '--', '--', '--', '--', '--', '--', '--'],
    ['--', '--', '--', '--', '--', '--', '--', '--'],
    ['--', '--', '--', '--', '--', '--', '--', '--'],
    ['--', '--', '--', '--', '--', '--', '--', '--'],
    ['Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp'],
    ['Wr', 'Wn', 'Wb', 'Wq', 'Wk', 'Wb', 'Wn', 'Wr']
]
def load_images():
    """Load every piece sprite from ../resources and cache it, scaled to one
    board square, in the global IMAGES dict."""
    for piece in ('Br', 'Bn', 'Bb', 'Bq', 'Bk', 'Bp', 'Wp', 'Wr', 'Wn', 'Wb', 'Wq', 'Wk'):
        sprite = pygame.image.load(f'../resources/{piece}.svg')
        IMAGES[piece] = pygame.transform.scale(sprite, (SQUARE_WIDTH, SQUARE_HEIGHT))
def draw_pieces():
    """Blit every piece of board_array onto its square ('--' means empty)."""
    for row, rank in enumerate(board_array):
        for col, piece in enumerate(rank):
            if piece == '--':
                continue
            target = pygame.Rect(SQUARE_WIDTH * col, SQUARE_HEIGHT * row,
                                 SQUARE_WIDTH, SQUARE_HEIGHT)
            screen.blit(IMAGES[piece], target)
def draw_board():
    """Paint the 8x8 checkerboard.

    A square is white when row+col parity is odd and orange otherwise — this
    collapses the original four-branch even/odd test into a single parity
    check with identical results.
    """
    for i in range(8):
        for j in range(8):
            square = pygame.Rect(SQUARE_WIDTH * j, SQUARE_HEIGHT * i,
                                 SQUARE_WIDTH, SQUARE_HEIGHT)
            color = (255, 255, 255) if (i + j) % 2 else (255, 125, 0)
            pygame.draw.rect(screen, color, square)
def handle_move(initial_position, ending_position):
    """Try to execute the move between two pixel positions.

    Handles castling first (king moves validated by moves.validate_castles),
    then regular moves; a regular move is only committed when the mover's own
    king is not left in check. Returns the mover's color ('W'/'B') on success
    and implicitly None when the move is rejected.
    """
    # convert pixel coordinates to board columns/rows
    init_x, init_y = initial_position[0] // SQUARE_WIDTH, initial_position[1] // SQUARE_HEIGHT
    end_x, end_y = ending_position[0] // SQUARE_WIDTH, ending_position[1] // SQUARE_HEIGHT
    piece = board_array[init_y][init_x][1]
    color = board_array[init_y][init_x][0]
    if piece == 'k' and (
            king_and_rook_cords := moves.validate_castles(board_array, (init_y, init_x), (end_y, end_x), move_feed)):
        perform_castles(king_and_rook_cords, (init_y, init_x))
        return color
    elif moves.basic_move_validation(board_array, (init_y, init_x), (end_y, end_x)):
        # simulate the move on a deep copy so the check test cannot corrupt
        # the live board
        temp_board = copy.deepcopy(board_array)
        temp_board[end_y][end_x] = temp_board[init_y][init_x]
        temp_board[init_y][init_x] = '--'
        if not King.is_in_check(temp_board, color):
            # legal: commit to the real board and record it in the move feed
            board_array[end_y][end_x] = board_array[init_y][init_x]
            board_array[init_y][init_x] = '--'
            move_feed.append(((init_y, init_x), (end_y, end_x)))
            return color
def get_piece_color(initial_position):
    """Return the color code ('W', 'B', or '-' for an empty square) of the
    board square under pixel position *initial_position*."""
    col = initial_position[0] // SQUARE_WIDTH
    row = initial_position[1] // SQUARE_HEIGHT
    return board_array[row][col][0]
def perform_castles(king_and_rook_cords, init_king_cords):
    """Execute a castling move on the live board.

    :param king_and_rook_cords: (king_dest, rook_dest, side) where side is
        'O-O' (kingside) or 'O-O-O' (queenside), as produced by
        moves.validate_castles
    :param init_king_cords: (row, col) the king started from
    """
    init_y, init_x = init_king_cords
    king_cords, rook_cords, side = king_and_rook_cords
    # place king and rook on their destination squares
    board_array[king_cords[0]][king_cords[1]] = board_array[init_y][init_x]
    board_array[rook_cords[0]][rook_cords[1]] = board_array[rook_cords[0]][7]
    board_array[init_y][init_x] = '--'
    # clear the rook's origin square: file h for kingside, file a for queenside
    if side == 'O-O':
        board_array[rook_cords[0]][7] = '--'
    elif side == 'O-O-O':
        board_array[rook_cords[0]][0] = '--'
def highlight_square(cords):
    """Fill the board square under pixel position *cords* with the selection
    highlight colour."""
    col = cords[0] // SQUARE_WIDTH
    row = cords[1] // SQUARE_HEIGHT
    area = pygame.Rect(col * SQUARE_WIDTH, row * SQUARE_HEIGHT,
                       SQUARE_WIDTH, SQUARE_HEIGHT)
    pygame.draw.rect(screen, (0, 255, 100), area)
def draw_sidebar():
    """Paint the right-hand sidebar (the 20% of the window not covered by the
    board)."""
    panel = pygame.Rect(0.8 * SIZE[0], 0, 0.2 * SIZE[0], SIZE[1])
    pygame.draw.rect(screen, (0, 255, 100), panel)
# --- main event loop -------------------------------------------------------
# count: 0 = waiting for a piece to be selected, 1 = waiting for a destination
count = 0
load_images()
draw_board()
draw_pieces()
draw_sidebar()
pygame.display.update()
# white moves first, so the "last color moved" starts as black
last_color_moved = 'B'
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            if count == 0:
                # first click: only accept a piece of the side to move
                initial_pos = event.pos
                if (last_color_moved == 'B' and get_piece_color(initial_pos) == 'W') or (
                        last_color_moved == 'W' and get_piece_color(initial_pos) == 'B'):
                    count += 1
                    draw_board()
                    highlight_square(initial_pos)
                    draw_pieces()
            elif count == 1:
                # second click: attempt the move; handle_move returns the
                # mover's color on success, falsy when rejected
                ending_pos = event.pos
                count = 0
                if color := handle_move(initial_pos, ending_pos):
                    last_color_moved = color
                draw_board()
                draw_pieces()
    pygame.display.update()
pygame.quit()
00893092dc39939fef6d823715dc13387f457e50 | 1,135 | py | Python | pyiArduinoI2Cbumper/examples/changeLineType.py | tremaru/pyiArduinoI2Cbumper | 94cb0ee7c38cb375cf1df97cacc7b3db3374e594 | [
"MIT"
] | null | null | null | pyiArduinoI2Cbumper/examples/changeLineType.py | tremaru/pyiArduinoI2Cbumper | 94cb0ee7c38cb375cf1df97cacc7b3db3374e594 | [
"MIT"
] | null | null | null | pyiArduinoI2Cbumper/examples/changeLineType.py | tremaru/pyiArduinoI2Cbumper | 94cb0ee7c38cb375cf1df97cacc7b3db3374e594 | [
"MIT"
] | null | null | null | # ПРИМЕР ПОЛУЧЕНИЯ И УКАЗАНИЯ ТИПА ЛИНИИ ТРАССЫ:
# Тип линии, равно как и калибровочные значения,
# хранятся в энергонезависимой памяти модуля.
from time import sleep
# Подключаем библиотеку для работы с бампером I2C-flash.
from pyiArduinoI2Cbumper import *
# Объявляем объект bum для работы с функциями и методами
# библиотеки pyiArduinoI2Cbumper, указывая адрес модуля на шине I2C.
# Если объявить объект без указания адреса bum = pyiArduinoI2Cbumper(),
# то адрес будет найден автоматически.
bum = pyiArduinoI2Cbumper(0x09)
while True:
# ОПРЕДЕЛЯЕМ ИСПОЛЬЗУЕМЫЙ ТИП ЛИНИИ:
if bum.getLineType() == BUM_LINE_BLACK:
first = "тёмной"
second = "светлой"
elif bum.getLineType() == BUM_LINE_WHITE:
first = "светлой"
second = "тёмной"
t = "Модуль использовал трассу с {} линией"\
", а теперь использует трассу"\
"с {} линией".format(first, second)
print(t)
# УКАЗЫВАЕМ НОВЫЙ ТИП ЛИНИИ:
# Тип линии задаётся как BUM_LINE_BLACK - тёмная
# BUM_LINE_WHITE - светлая
# BUM_LINE_CHANGE - сменить тип линии.
bum.setLineType(BUM_LINE_CHANGE)
sleep(2)
| 28.375 | 73 | 0.710132 | # ПРИМЕР ПОЛУЧЕНИЯ И УКАЗАНИЯ ТИПА ЛИНИИ ТРАССЫ:
# Тип линии, равно как и калибровочные значения,
# хранятся в энергонезависимой памяти модуля.
from time import sleep
# Подключаем библиотеку для работы с бампером I2C-flash.
from pyiArduinoI2Cbumper import *
# Объявляем объект bum для работы с функциями и методами
# библиотеки pyiArduinoI2Cbumper, указывая адрес модуля на шине I2C.
# Если объявить объект без указания адреса bum = pyiArduinoI2Cbumper(),
# то адрес будет найден автоматически.
bum = pyiArduinoI2Cbumper(0x09)
while True:
# ОПРЕДЕЛЯЕМ ИСПОЛЬЗУЕМЫЙ ТИП ЛИНИИ:
if bum.getLineType() == BUM_LINE_BLACK:
first = "тёмной"
second = "светлой"
elif bum.getLineType() == BUM_LINE_WHITE:
first = "светлой"
second = "тёмной"
t = "Модуль использовал трассу с {} линией"\
", а теперь использует трассу"\
"с {} линией".format(first, second)
print(t)
# УКАЗЫВАЕМ НОВЫЙ ТИП ЛИНИИ:
# Тип линии задаётся как BUM_LINE_BLACK - тёмная
# BUM_LINE_WHITE - светлая
# BUM_LINE_CHANGE - сменить тип линии.
bum.setLineType(BUM_LINE_CHANGE)
sleep(2)
| 0 | 0 | 0 |
8a9aae2331f5c2c081342a50ca86c1dea6863527 | 470 | py | Python | FMsystem/dashboard/migrations/0002_auto_20191112_1718.py | emetowinner/FMS | 85fd1791ab9835c20cf6473703e6adf72416719a | [
"Apache-2.0"
] | null | null | null | FMsystem/dashboard/migrations/0002_auto_20191112_1718.py | emetowinner/FMS | 85fd1791ab9835c20cf6473703e6adf72416719a | [
"Apache-2.0"
] | null | null | null | FMsystem/dashboard/migrations/0002_auto_20191112_1718.py | emetowinner/FMS | 85fd1791ab9835c20cf6473703e6adf72416719a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.6 on 2019-11-12 17:18
from django.db import migrations
| 19.583333 | 47 | 0.548936 | # Generated by Django 2.2.6 on 2019-11-12 17:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='userprofile',
name='user',
),
migrations.DeleteModel(
name='FuelTrack',
),
migrations.DeleteModel(
name='UserProfile',
),
]
| 0 | 364 | 23 |
c91d0475a83d6bf7dbbdd3cfc8f67e8f444ecc8a | 9,594 | py | Python | demo_doc2sim_education.py | interxuxing/qa_education | 1ae8247bd05b1870b14c3af6c1eceea2c5c9dd14 | [
"MIT"
] | 1 | 2018-07-05T06:20:55.000Z | 2018-07-05T06:20:55.000Z | demo_doc2sim_education.py | interxuxing/qa_education | 1ae8247bd05b1870b14c3af6c1eceea2c5c9dd14 | [
"MIT"
] | null | null | null | demo_doc2sim_education.py | interxuxing/qa_education | 1ae8247bd05b1870b14c3af6c1eceea2c5c9dd14 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
This script is used to build a qa data for usage.
Typically, each enty contains three elements: a question, an answer, a url
"""
import sys
import re
import os
import jieba
import gensim
try:
import cPickle as pickle
except:
import pickle
reload(sys)
sys.setdefaultencoding('utf-8')
def filtering_line(line_content, stopwords_list):
'''
this function spams the noisy symbols, then cut the line to words and remove the stopwords in each line
:param line_content:
:return:
'''
multi_version = re.compile(ur'-\{.*?(zh-hans|zh-cn):([^;]*?)(;.*?)?\}-')
# punctuation = re.compile(u"[-~!@#$%^&*()_+`=\[\]\\\{\}\\t\\r\"|;':,./<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]")
punctuation = re.compile(u"[\[\]\\\{\}\\t\\r\"|;',<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]")
line_content = multi_version.sub(ur'\2', line_content)
line_content = punctuation.sub('', line_content.decode('utf8'))
# cut the line content to words
line_content_cut = [w for w in jieba.cut(line_content)]
if stopwords_list is not None:
new_line = []
for word in line_content_cut:
if word not in stopwords_list:
new_line.append(word)
return new_line
else:
return line_content_cut
def load_qa_education(data_dir, education_file):
    '''
    Load the education file and return one string per line, with the trailing
    newline stripped.
    '''
    with open(os.path.join(data_dir, education_file)) as fid:
        return [line.strip('\n') for line in fid]
def load_qa_education_with_answer(data_dir, education_file):
'''
load the eudcation file, return a list, with each element is a string in each line
'''
education_content = []
answer_content = []
idx = 0
with open(os.path.join(data_dir, education_file)) as fid:
for item in fid:
if idx % 2 == 0: # questions
education_content.append(item.strip('\n'))
elif idx % 2 == 1: # answer
answer_content.append(item.strip('\n'))
idx = idx + 1
# if idx % 1000 == 0:
# print 'loading %d-th questions done!' % idx
print 'loading %d questions done!' % int(idx/2)
return education_content, answer_content
def load_stopwords_file(data_dir, stopwords_file):
'''
load the stopwords file, return a list, with each element is a string in each line
'''
stopwords_list = []
idx = 0
with open(os.path.join(data_dir, stopwords_file)) as fid:
for item in fid:
stopwords_list.append(item.strip('\n'))
idx = idx + 1
print 'loading %d stopwords done!' % idx
return stopwords_list
def calculate_education_data(data_dir, education_content, stopwords_list):
    '''
    Build (or load from cache) the gensim dictionary and similarity index for
    the education question corpus. All three artifacts — the jieba-cut corpus
    pickle, the dictionary and the similarity index — are cached under
    *data_dir* and reused on later runs.

    :param data_dir: the root dir that saves/loads the cached artifacts
    :param education_content: a list where each element is an education question
    :param stopwords_list: stopwords list for the education corpus
    :return: (gensim Dictionary, gensim Similarity index)
    '''
    corpora_documents_name = 'qa_education_corpora.pickle'
    if not os.path.exists(os.path.join(data_dir, corpora_documents_name)):
        # first run: cut every question into filtered word lists
        corpora_documents = []
        idx = 0
        for item_text in education_content:
            item_str = filtering_line(item_text, stopwords_list)
            corpora_documents.append(item_str)
            idx = idx + 1
            if idx % 1000 == 0:
                print 'jieba cutting for %d-th sentence' % idx
        # dump the cut corpus so later runs skip the jieba pass
        fid_corpora = open(os.path.join(data_dir, corpora_documents_name), 'wb')
        pickle.dump(corpora_documents, fid_corpora)
        fid_corpora.close()
        print 'save %s finished' % corpora_documents_name
    else:
        # NOTE(review): pickle.load on the cache file — safe only as long as
        # the cache directory is trusted/locally produced.
        fid_corpora = open(os.path.join(data_dir, corpora_documents_name), 'rb')
        corpora_documents = pickle.load(fid_corpora)
        fid_corpora.close()
        print 'load %s finished' % corpora_documents_name
    dict_name = 'dict_education'
    # build (or load) the dictionary and the bag-of-words corpus
    if not os.path.exists(os.path.join(data_dir, dict_name)):
        print 'calculating dictionary education !'
        dictionary = gensim.corpora.Dictionary(corpora_documents)
        dictionary.save(os.path.join(data_dir, dict_name))
    else:
        print 'dictionary for education already exists, load it!'
        dictionary = gensim.corpora.Dictionary.load(os.path.join(data_dir, dict_name))
    corpus = [dictionary.doc2bow(text) for text in corpora_documents]
    numSen = len(corpus)
    # calculate the similarity for pairwise training samples
    num_features = len(dictionary.keys())
    print '%d words in dictionary' % num_features
    # build (or load) the shard-backed similarity index
    sim_name = 'sim_education'
    if not os.path.exists(os.path.join(data_dir, sim_name)):
        print 'calculating sim_education !'
        similarity = gensim.similarities.Similarity(os.path.join(data_dir, sim_name), corpus, num_features)
        similarity.save(os.path.join(data_dir, sim_name))
    else:
        print 'sim_eudcation already exists, load it!'
        similarity = gensim.similarities.Similarity.load(os.path.join(data_dir, sim_name))
    return dictionary, similarity
def calculate_education_data_w2v(data_dir, education_content, w2v_model, stopwords_list):
    '''
    Build (or load from cache) a Word-Mover's-Distance similarity index for the
    education question corpus, backed by a pre-trained word2vec model.

    :param data_dir: the root dir that saves/loads the cached index
    :param education_content: a list where each element is an education question
    :param w2v_model: a trained gensim Word2Vec model used by WmdSimilarity
    :param stopwords_list: stopwords list for the education corpus
    :return: a gensim WmdSimilarity index (top-3 matches per query)
    '''
    # unlike calculate_education_data, the jieba-cut corpus is NOT cached here
    # and is recomputed on every call
    corpora_documents = []
    idx = 0
    for item_text in education_content:
        item_str = filtering_line(item_text, stopwords_list)
        corpora_documents.append(item_str)
        idx = idx + 1
        if idx % 1000 == 10:
            print 'jieba cutting for %d-th sentence' % idx
    # corpus = [text for text in corpora_documents]
    corpus = corpora_documents
    numSen = len(corpus)
    # calculate the similarity for pairwise training samples
    # build (or load) the WMD similarity index
    sim_name = 'sim_education_w2v'
    if not os.path.exists(os.path.join(data_dir, sim_name)):
        print 'calculating sim_education !'
        similarity = gensim.similarities.WmdSimilarity(corpus, w2v_model, num_best=3)
        similarity.save(os.path.join(data_dir, sim_name))
    else:
        print 'sim_eudcation already exists, load it!'
        similarity = gensim.similarities.WmdSimilarity.load(os.path.join(data_dir, sim_name))
    return similarity
'''
测试的问题:
北京小升初的政策?
成都比较好的小学推荐
小孩子谈恋爱怎么办?
怎么提高小孩子英语学习?
北京好的幼儿园推荐
中考前饮食应该注意什么?
我家小孩上课注意力不集中,贪玩,怎么办?
小孩子在学校打架,怎么办?
成都龙江路小学划片么?
小孩子厌学怎么办?
孩子上课注意力不集中,贪玩怎么办?
武汉比较好的中学有哪些?
幼儿园学前教育有必要吗?
'''
if __name__ == '__main__':
    # load the education question/answer data
    data_dir = './qa_dataset'
    qa_education_file = 'qa_education.txt'
    # education_content = load_qa_education(data_dir, qa_education_file)
    education_content, answer_content = load_qa_education_with_answer(data_dir, qa_education_file)
    # stopwords used when jieba-cutting each sentence
    stopwords_file = 'stopwords_gaokao.txt'
    stopwords_dir = './stopwords_cn'
    stopwords_list = load_stopwords_file(stopwords_dir, stopwords_file)
    # build/load the dictionary and the bag-of-words similarity index
    dictionary, similarity = calculate_education_data(data_dir, education_content, stopwords_list)
    print 'obtained the dictionary and similarity of the %s corpus!' % qa_education_file
    # return the top-3 most similar questions per query
    similarity.num_best = 3
    # interactive loop: read a question, show the closest known Q&A entries
    # (the printed prompts below are Chinese runtime strings)
    while(True):
        print '欢迎来到小题博士-教育问答 @_@'
        print '你可以咨询与中小学教育相关的问题,比如:'
        print ' 北京好的幼儿园推荐? \n 中考前饮食应该注意什么?\n 我家小孩上课注意力不集中,贪玩,怎么办? \n 小孩子在学校打架,怎么办?'
        print '################################'
        print ''
        input_query = raw_input(u'请输入你要问的问题:')
        input_query_cut = filtering_line(input_query, stopwords_list)
        # parse the input query, get its doc vector
        doc_input_query = dictionary.doc2bow(input_query_cut)
        res = similarity[doc_input_query]
        print '这是你要问的问题吗?'
        for idx, content in res:
            print '%d, %s' % (idx, education_content[idx])
            print '%s' % answer_content[idx]
        print '################################'
        print '请问下一个问题 @_@'
    # alternative word2vec/WMD variant, kept disabled as a string block
    '''
    # caluclate the dictionary and the similarity using walking-earth similarity measure of the given corpus
    # load wiki model
    wiki_model_file = './tempfile/out_w2v_qa_incremental.model'
    wiki_model = gensim.models.Word2Vec.load(wiki_model_file)
    similarity = calculate_education_data_w2v(data_dir, education_content, wiki_model, stopwords_list)
    print 'obtained the dictionary and similarity of the %s corpus!' % qa_education_file
    num_best = 3
    while (True):
        print '欢迎来到小题博士-教育问答 @_@'
        input_query = raw_input(u'请输入你要问的问题:')
        input_query_cut = filtering_line(input_query, stopwords_list)
        res = similarity[input_query_cut]
        print '这是你要问的问题吗?'
        for idx, content in res:
            print '%d, %s' % (idx, education_content[idx])
        print '################################'
        print '请问下一个问题 @_@'
    '''
| 35.014599 | 114 | 0.654472 | # -*- coding:utf-8 -*-
"""
This script is used to build a qa data for usage.
Typically, each enty contains three elements: a question, an answer, a url
"""
import sys
import re
import os
import jieba
import gensim
try:
import cPickle as pickle
except:
import pickle
reload(sys)
sys.setdefaultencoding('utf-8')
def filtering_line(line_content, stopwords_list):
'''
this function spams the noisy symbols, then cut the line to words and remove the stopwords in each line
:param line_content:
:return:
'''
multi_version = re.compile(ur'-\{.*?(zh-hans|zh-cn):([^;]*?)(;.*?)?\}-')
# punctuation = re.compile(u"[-~!@#$%^&*()_+`=\[\]\\\{\}\\t\\r\"|;':,./<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]")
punctuation = re.compile(u"[\[\]\\\{\}\\t\\r\"|;',<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]")
line_content = multi_version.sub(ur'\2', line_content)
line_content = punctuation.sub('', line_content.decode('utf8'))
# cut the line content to words
line_content_cut = [w for w in jieba.cut(line_content)]
if stopwords_list is not None:
new_line = []
for word in line_content_cut:
if word not in stopwords_list:
new_line.append(word)
return new_line
else:
return line_content_cut
def load_qa_education(data_dir, education_file):
'''
load the eudcation file, return a list, with each element is a string in each line
'''
education_content = []
idx = 0
with open(os.path.join(data_dir, education_file)) as fid:
for item in fid:
education_content.append(item.strip('\n'))
idx = idx + 1
# if idx % 1000 == 0:
# print 'loading %d-th questions done!' % idx
return education_content
def load_qa_education_with_answer(data_dir, education_file):
    '''
    Load a QA corpus whose lines alternate question/answer and return the
    questions and the answers as two parallel lists (newlines stripped).
    '''
    questions = []
    answers = []
    total_lines = 0
    with open(os.path.join(data_dir, education_file)) as handle:
        for line in handle:
            stripped = line.strip('\n')
            # Even line numbers hold questions, odd ones the answers.
            if total_lines % 2 == 0:
                questions.append(stripped)
            else:
                answers.append(stripped)
            total_lines = total_lines + 1
    print('loading %d questions done!' % int(total_lines / 2))
    return questions, answers
def load_stopwords_file(data_dir, stopwords_file):
    '''
    Load the stopwords file and return a list with one stopword per line
    (newlines stripped).
    '''
    with open(os.path.join(data_dir, stopwords_file)) as handle:
        stopwords = [line.strip('\n') for line in handle]
    print('loading %d stopwords done!' % len(stopwords))
    return stopwords
def calculate_education_data(data_dir, education_content, stopwords_list):
    '''
    Build (or load from cache) the gensim dictionary and similarity index
    for the education question corpus.

    Three artifacts are cached under ``data_dir`` and reused when present:
    the jieba-segmented corpus pickle, the gensim dictionary, and the
    bag-of-words similarity index.

    :param data_dir: the root dir that holds the cached artifacts
    :param education_content: list in which each element is one education question
    :param stopwords_list: stopwords list for the education corpus
    :return: a gensim dictionary and a gensim similarity index
    '''
    corpora_documents_name = 'qa_education_corpora.pickle'
    if not os.path.exists(os.path.join(data_dir, corpora_documents_name)):
        corpora_documents = []
        idx = 0
        for item_text in education_content:
            item_str = filtering_line(item_text, stopwords_list)
            corpora_documents.append(item_str)
            idx = idx + 1
            if idx % 1000 == 0:
                print 'jieba cutting for %d-th sentence' % idx
        # dump pickfile
        fid_corpora = open(os.path.join(data_dir, corpora_documents_name), 'wb')
        pickle.dump(corpora_documents, fid_corpora)
        fid_corpora.close()
        print 'save %s finished' % corpora_documents_name
    else:
        # load pickfile
        fid_corpora = open(os.path.join(data_dir, corpora_documents_name), 'rb')
        corpora_documents = pickle.load(fid_corpora)
        fid_corpora.close()
        print 'load %s finished' % corpora_documents_name
    dict_name = 'dict_education'
    # Generate the dictionary and the vectorized corpus.
    if not os.path.exists(os.path.join(data_dir, dict_name)):
        print 'calculating dictionary education !'
        dictionary = gensim.corpora.Dictionary(corpora_documents)
        dictionary.save(os.path.join(data_dir, dict_name))
    else:
        print 'dictionary for education already exists, load it!'
        dictionary = gensim.corpora.Dictionary.load(os.path.join(data_dir, dict_name))
    corpus = [dictionary.doc2bow(text) for text in corpora_documents]
    numSen = len(corpus)
    # calculate the similarity for pairwise training samples
    num_features = len(dictionary.keys())
    print '%d words in dictionary' % num_features
    # # save object
    sim_name = 'sim_education'
    if not os.path.exists(os.path.join(data_dir, sim_name)):
        print 'calculating sim_education !'
        similarity = gensim.similarities.Similarity(os.path.join(data_dir, sim_name), corpus, num_features)
        similarity.save(os.path.join(data_dir, sim_name))
    else:
        print 'sim_eudcation already exists, load it!'
        similarity = gensim.similarities.Similarity.load(os.path.join(data_dir, sim_name))
    return dictionary, similarity
def calculate_education_data_w2v(data_dir, education_content, w2v_model, stopwords_list):
    '''
    Build (or load from cache) a Word Mover's Distance similarity index
    over the education question corpus, using the given word2vec model.

    :param data_dir: the root dir that holds the cached index
    :param education_content: list in which each element is one education question
    :param w2v_model: a trained gensim word2vec model
    :param stopwords_list: stopwords list for the education corpus
    :return: a gensim WmdSimilarity index
    '''
    corpora_documents = []
    idx = 0
    for item_text in education_content:
        item_str = filtering_line(item_text, stopwords_list)
        corpora_documents.append(item_str)
        idx = idx + 1
        # NOTE(review): this fires at idx 10, 1010, 2010, ... — possibly
        # intended to be ``idx % 1000 == 0``; confirm before changing.
        if idx % 1000 == 10:
            print 'jieba cutting for %d-th sentence' % idx
    # corpus = [text for text in corpora_documents]
    corpus = corpora_documents
    numSen = len(corpus)
    # calculate the similarity for pairwise training samples
    # # save object
    sim_name = 'sim_education_w2v'
    if not os.path.exists(os.path.join(data_dir, sim_name)):
        print 'calculating sim_education !'
        similarity = gensim.similarities.WmdSimilarity(corpus, w2v_model, num_best=3)
        similarity.save(os.path.join(data_dir, sim_name))
    else:
        print 'sim_eudcation already exists, load it!'
        similarity = gensim.similarities.WmdSimilarity.load(os.path.join(data_dir, sim_name))
    return similarity
'''
测试的问题:
北京小升初的政策?
成都比较好的小学推荐
小孩子谈恋爱怎么办?
怎么提高小孩子英语学习?
北京好的幼儿园推荐
中考前饮食应该注意什么?
我家小孩上课注意力不集中,贪玩,怎么办?
小孩子在学校打架,怎么办?
成都龙江路小学划片么?
小孩子厌学怎么办?
孩子上课注意力不集中,贪玩怎么办?
武汉比较好的中学有哪些?
幼儿园学前教育有必要吗?
'''
if __name__ == '__main__':
    # Load the education QA data (alternating question/answer lines).
    data_dir = './qa_dataset'
    qa_education_file = 'qa_education.txt'
    # education_content = load_qa_education(data_dir, qa_education_file)
    education_content, answer_content = load_qa_education_with_answer(data_dir, qa_education_file)
    # use jieba to cut the sentence in each line with stopwords
    stopwords_file = 'stopwords_gaokao.txt'
    stopwords_dir = './stopwords_cn'
    stopwords_list = load_stopwords_file(stopwords_dir, stopwords_file)
    # Calculate the dictionary and the similarity index of the given corpus.
    dictionary, similarity = calculate_education_data(data_dir, education_content, stopwords_list)
    print 'obtained the dictionary and similarity of the %s corpus!' % qa_education_file
    # Only report the top-3 most similar questions per query.
    similarity.num_best = 3
    # Interactive console loop: read a question, segment it, look up the
    # most similar corpus questions and print them with their answers.
    while(True):
        print '欢迎来到小题博士-教育问答 @_@'
        print '你可以咨询与中小学教育相关的问题,比如:'
        print '    北京好的幼儿园推荐? \n 中考前饮食应该注意什么?\n 我家小孩上课注意力不集中,贪玩,怎么办? \n 小孩子在学校打架,怎么办?'
        print '################################'
        print ''
        input_query = raw_input(u'请输入你要问的问题:')
        input_query_cut = filtering_line(input_query, stopwords_list)
        # parse the input query, get its doc vector
        doc_input_query = dictionary.doc2bow(input_query_cut)
        res = similarity[doc_input_query]
        print '这是你要问的问题吗?'
        for idx, content in res:
            print '%d, %s' % (idx, education_content[idx])
            print '%s' % answer_content[idx]
        print '################################'
        print '请问下一个问题 @_@'
    # The block below is an alternative (disabled) flow that uses a
    # word2vec-based Word Mover's Distance similarity instead.
    '''
    # caluclate the dictionary and the similarity using walking-earth similarity measure of the given corpus
    # load wiki model
    wiki_model_file = './tempfile/out_w2v_qa_incremental.model'
    wiki_model = gensim.models.Word2Vec.load(wiki_model_file)
    similarity = calculate_education_data_w2v(data_dir, education_content, wiki_model, stopwords_list)
    print 'obtained the dictionary and similarity of the %s corpus!' % qa_education_file
    num_best = 3
    while (True):
        print '欢迎来到小题博士-教育问答 @_@'
        input_query = raw_input(u'请输入你要问的问题:')
        input_query_cut = filtering_line(input_query, stopwords_list)
        res = similarity[input_query_cut]
        print '这是你要问的问题吗?'
        for idx, content in res:
            print '%d, %s' % (idx, education_content[idx])
        print '################################'
        print '请问下一个问题 @_@'
    '''
| 0 | 0 | 0 |
cac677165073ebf21ff71868eeada85dd2f640ab | 3,661 | py | Python | satchmo/apps/l10n/south_migrations/0001_initial.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | 1 | 2019-10-08T16:19:59.000Z | 2019-10-08T16:19:59.000Z | satchmo/apps/l10n/south_migrations/0001_initial.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | null | null | null | satchmo/apps/l10n/south_migrations/0001_initial.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
| 57.203125 | 123 | 0.575253 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South schema migration: initial schema for the ``l10n`` app,
    creating the ``l10n_country`` and ``l10n_adminarea`` tables."""
    def forwards(self, orm):
        """Create the Country and AdminArea tables."""
        # Adding model 'Country'
        db.create_table('l10n_country', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('iso2_code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=2)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('printable_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('iso3_code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=3)),
            ('numcode', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('continent', self.gf('django.db.models.fields.CharField')(max_length=2)),
            ('admin_area', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
        ))
        db.send_create_signal('l10n', ['Country'])
        # Adding model 'AdminArea'
        db.create_table('l10n_adminarea', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['l10n.Country'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('abbrev', self.gf('django.db.models.fields.CharField')(max_length=3, null=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal('l10n', ['AdminArea'])
    def backwards(self, orm):
        """Drop the Country and AdminArea tables (reverse of forwards)."""
        # Deleting model 'Country'
        db.delete_table('l10n_country')
        # Deleting model 'AdminArea'
        db.delete_table('l10n_adminarea')
    # Frozen ORM description of the models as of this migration
    # (auto-generated by South; do not edit by hand).
    models = {
        'l10n.adminarea': {
            'Meta': {'ordering': "('name',)", 'object_name': 'AdminArea'},
            'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['l10n.Country']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
        },
        'l10n.country': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Country'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'admin_area': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'continent': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iso2_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
            'iso3_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'numcode': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        }
    }
    complete_apps = ['l10n']
| 1,793 | 1,753 | 25 |
c8da72bb34454c737c7eea75fbed5dc53854c72b | 217 | py | Python | sources/__init__.py | LXG-Shadow/BilibiliGetFavorite | a3912eb983be5f420c3729d705eefbf06d240309 | [
"Apache-2.0"
] | 60 | 2018-08-27T07:10:58.000Z | 2021-07-14T11:13:53.000Z | sources/__init__.py | LXG-Shadow/BilibiliGetFavorite | a3912eb983be5f420c3729d705eefbf06d240309 | [
"Apache-2.0"
] | 6 | 2019-09-09T02:50:23.000Z | 2021-06-08T21:46:16.000Z | sources/__init__.py | LXG-Shadow/BilibiliGetFavorite | a3912eb983be5f420c3729d705eefbf06d240309 | [
"Apache-2.0"
] | 17 | 2019-01-20T08:46:01.000Z | 2021-06-30T10:44:01.000Z | from .bilibili.biliAudio import *
from .bilibili.biliVideo import *
from .bilibili.biliLive import *
from .wenku8.Wenku8TXT import *
from .video.imomoe import *
from .video.kakadm import *
from .audio.netease import * | 31 | 33 | 0.778802 | from .bilibili.biliAudio import *
from .bilibili.biliVideo import *
from .bilibili.biliLive import *
from .wenku8.Wenku8TXT import *
from .video.imomoe import *
from .video.kakadm import *
from .audio.netease import * | 0 | 0 | 0 |
03b4111150c9056bb5e0216fdc83c869ac11a37e | 19,080 | py | Python | plotting_scripts/pylot_utils.py | erdos-project/erdos-experiments | 56eea1d52991ada5cc3c4a2e26ddc1da31f1ac2e | [
"Apache-2.0"
] | 1 | 2022-03-04T11:41:35.000Z | 2022-03-04T11:41:35.000Z | plotting_scripts/pylot_utils.py | erdos-project/erdos-experiments | 56eea1d52991ada5cc3c4a2e26ddc1da31f1ac2e | [
"Apache-2.0"
] | null | null | null | plotting_scripts/pylot_utils.py | erdos-project/erdos-experiments | 56eea1d52991ada5cc3c4a2e26ddc1da31f1ac2e | [
"Apache-2.0"
] | null | null | null | import ast
import csv
import json
from absl import flags
import numpy as np
import pandas as pd
FLAGS = flags.FLAGS
def get_timestamps_with_obstacles(filename, obstacle_distance_threshold=10):
    """Finds timestamps when we detected obstacles.

    Scans a CSV log containing ``pose`` and ``obstacle`` rows (matched by
    their ``ms`` column) and returns the ``ms`` values — offset by the
    smallest ``ms`` in the log — at which some obstacle lies within
    ``obstacle_distance_threshold`` of the ego pose.

    :param filename: CSV log with columns timestamp, ms, log_label,
        label_info, label_value; ``label_value`` holds space-separated
        coordinates such as ``[1 2 3]``.
    :param obstacle_distance_threshold: maximum obstacle distance for a
        timestamp to qualify (same units as the logged coordinates).
    :return: list of qualifying timestamps relative to the log start.
    """
    print(filename)
    df = pd.read_csv(
        filename,
        names=["timestamp", "ms", "log_label", "label_info", "label_value"])
    df = df.dropna()
    # "[1 2 3]" -> "[1, 2, 3]" -> Python list via converter/literal_eval.
    df['label_value'] = df['label_value'].str.replace(" ", ", ")
    df['label_value'] = df['label_value'].apply(converter)
    obstacles = df[df['log_label'] == 'obstacle']
    obstacles = obstacles.set_index('ms')
    pose = df[df['log_label'] == 'pose']
    timestamps = []
    first_timestamp = df["ms"].min()
    for t, p in pose[["ms", "label_value"]].values:
        if t not in obstacles.index:
            continue
        obs = obstacles.loc[t]['label_value']
        # A single obstacle row at ms=t yields the list itself; several
        # rows yield a Series of lists.
        if isinstance(obs, list):
            obs = [obs]
        else:
            obs = obs.values
        for o in obs:
            dist = np.linalg.norm(np.array(p) - np.array(o))
            # Distance 0 is skipped — presumably the ego vehicle's own
            # entry; confirm against the logger.
            if 0 < dist <= obstacle_distance_threshold:
                timestamps.append(t - first_timestamp)
    print("Selected {} timestamps".format(len(timestamps)))
    return timestamps
| 39.503106 | 101 | 0.574266 | import ast
import csv
import json
from absl import flags
import numpy as np
import pandas as pd
FLAGS = flags.FLAGS
class ProfileEvent(object):
    """One Chrome-trace profile entry parsed from its JSON dict.

    Attributes:
        name: the event's name string.
        event_time: event start time in milliseconds.
        runtime: event duration in microseconds.
        sim_time: first coordinate of the ``args.timestamp`` field,
            e.g. ``"[100, 0]"`` -> ``100``.
    """

    def __init__(self, json_dict):
        self.name = json_dict['name']
        # 'ts' is divided by 1000 so event_time ends up in milliseconds.
        self.event_time = float(json_dict['ts']) / 1000.0
        # 'dur' is kept as-is, in microseconds.
        self.runtime = float(json_dict['dur'])
        # args['timestamp'] looks like "[100, 0]"; keep the first number.
        first_coordinate = json_dict['args']['timestamp'].strip('][').split(', ')[0]
        self.sim_time = int(first_coordinate)
class ProfileEvents(object):
    """All :class:`ProfileEvent` entries of a JSON profile file, with each
    event's ``sim_time`` rebased/filtered against
    ``FLAGS.ignore_first_sim_time_ms``."""
    def __init__(self, profile_file, no_offset=False):
        """Load ``profile_file`` (a JSON list of trace entries) and keep
        only the events whose adjusted ``sim_time`` is non-negative.

        :param profile_file: path of the JSON profile file.
        :param no_offset: when True the raw sim time is shifted up by the
            ignore window instead of being rebased on the first event's
            sim time.
        """
        data = None
        first_sim_time = None
        with open(profile_file) as prof_file:
            data = json.load(prof_file)
        self.events = []
        for entry in data:
            event = ProfileEvent(entry)
            if first_sim_time is None:
                first_sim_time = event.sim_time
            if no_offset:
                event.sim_time += FLAGS.ignore_first_sim_time_ms
            else:
                event.sim_time -= first_sim_time + FLAGS.ignore_first_sim_time_ms
            # Events inside the ignored warm-up window come out negative
            # and are dropped.
            if event.sim_time >= 0:
                self.events.append(event)
    def check_if_timestamps_overlapped(self):
        """Checks if a component got delayed because its run for the previous
        timestamp didn't yet complete.

        Walks the events in order, tracking the latest completion time of
        each pipeline stage, and prints a message whenever a downstream
        stage finished for a newer timestamp before its upstream stage
        finished for an older one.
        """
        # Latest observed end time and sim timestamp per stage.
        planning_end = 0
        planning_t = 0
        prediction_end = 0
        prediction_t = 0
        loc_end = 0
        loc_t = 0
        tracker_end = 0
        tracker_t = 0
        detection_end = 0
        detection_t = 0
        for event in self.events:
            # event_time is in ms, runtime in us -> end time in ms.
            end_time = event.event_time + event.runtime / 1000
            if event.name == 'planning_operator.on_watermark':
                if prediction_end < planning_end and prediction_t != planning_t:
                    print('Prediction from {} finished at {} before planning'
                          ' from {} finished at {}'.format(
                              prediction_t, prediction_end, event.sim_time,
                              planning_end))
                if end_time > planning_end:
                    planning_end = end_time
                    planning_t = event.sim_time
            elif (event.name == 'linear_prediction_operator.on_watermark'
                  or event.name ==
                  'linear_prediction_operator.generate_predicted_trajectories'
                  ):
                if loc_end < prediction_end and loc_t != prediction_t:
                    print(
                        'Loc find from {} finished at {} before prediction from'
                        ' {} finished at {}'.format(loc_t, loc_end,
                                                    event.sim_time,
                                                    prediction_end))
                if end_time > prediction_end:
                    prediction_end = end_time
                    prediction_t = event.sim_time
            elif (event.name ==
                  'center_camera_location_finder_history_operator.on_watermark'
                  ):
                if tracker_end < loc_end and tracker_t != loc_t:
                    print('Tracker from {} finished at {} before loc find from'
                          ' {} finished at {}'.format(tracker_t, tracker_end,
                                                      loc_t, loc_end))
                if end_time > loc_end:
                    loc_end = end_time
                    loc_t = event.sim_time
            elif event.name == 'tracker_sort.on_watermark':
                if detection_end < tracker_end and detection_t != tracker_t:
                    print('Detection from {} finished at {} before tracker '
                          'from {} finished at {}'.format(
                              detection_t, detection_end, tracker_t,
                              tracker_end))
                if end_time > tracker_end:
                    tracker_end = end_time
                    tracker_t = event.sim_time
            elif event.name == 'efficientdet_operator.on_watermark':
                if end_time > detection_end:
                    detection_end = end_time
                    detection_t = event.sim_time
    def get_runtimes(self,
                     event_name,
                     unit='ms',
                     timestamps_with_obstacles=None):
        """Return the runtimes of all events named ``event_name``.

        :param unit: 'ms' or 'us'; raises ValueError otherwise.
        :param timestamps_with_obstacles: when given, only events whose
            sim_time is in this collection are included.
        """
        runtimes = []
        for event in self.events:
            if (event.name == event_name
                    and (timestamps_with_obstacles is None
                         or event.sim_time in timestamps_with_obstacles)):
                if unit == 'ms':
                    runtimes.append(event.runtime / 1000)
                elif unit == 'us':
                    runtimes.append(event.runtime)
                else:
                    raise ValueError('Unexpected unit {}'.format(unit))
        return runtimes
    def get_filtered_runtimes(self,
                              event_name,
                              unit='ms',
                              timestamps_to_ban=None):
        """Like :meth:`get_runtimes`, but keeps one entry per matching
        event, substituting ``-1`` for events whose sim_time is in
        ``timestamps_to_ban``."""
        runtimes = []
        for event in self.events:
            if event.name == event_name:
                if (timestamps_to_ban is not None
                        and event.sim_time in timestamps_to_ban):
                    runtimes.append(-1)
                else:
                    if unit == 'ms':
                        runtimes.append(event.runtime / 1000)
                    elif unit == 'us':
                        runtimes.append(event.runtime)
                    else:
                        raise ValueError('Unexpected unit {}'.format(unit))
        return runtimes
    def get_inter_exec(self, event_name):
        """Return wall-clock gaps (ms) between consecutive events named
        ``event_name``, in file order."""
        inter_exec = []
        last_event = None
        for event in self.events:
            if event.name == event_name:
                if last_event:
                    inter_exec.append(event.event_time - last_event.event_time)
                last_event = event
        return inter_exec
    def get_timeline(self, event_name, unit='ms'):
        """Return parallel lists ``(sim_times, runtimes)`` for all events
        named ``event_name`` (``unit``: 'ms' or 'us')."""
        timestamps = []
        runtimes = []
        for event in self.events:
            if event.name == event_name:
                timestamps.append(event.sim_time)
                if unit == 'ms':
                    runtimes.append(event.runtime / 1000)
                elif unit == 'us':
                    runtimes.append(event.runtime)
                else:
                    raise ValueError('Unexpected unit {}'.format(unit))
        return timestamps, runtimes
def read_end_to_end_runtimes(csv_file_path,
                             unit='ms',
                             timestamps_with_obstacles=None):
    """Read the end-to-end runtimes from a challenge CSV log.

    Sim times are rebased on the first row's sim time plus
    ``FLAGS.ignore_first_sim_time_ms``; rows from the ignored warm-up
    window (negative rebased time) are skipped.

    :param csv_file_path: path of the CSV log; rows are
        ``<wallclock>, <sim_time>, <event_name>, <value>``.
    :param unit: 'ms' or 'us'; raises ValueError otherwise.
    :param timestamps_with_obstacles: when given, only rebased sim times
        in this collection are kept.
    :return: tuple ``(sim_times, runtimes)``.
    """
    first_sim_time = None
    sim_times = []
    runtimes = []
    # Close the file when done (the original leaked the handle).
    with open(csv_file_path) as csv_file:
        for row in csv.reader(csv_file):
            sim_time = int(row[1])
            # BUG FIX: the original used ``if not first_sim_time``, which
            # re-triggered on later rows whenever the first sim time was 0.
            if first_sim_time is None:
                first_sim_time = sim_time
            sim_time -= first_sim_time + FLAGS.ignore_first_sim_time_ms
            if (row[2] == 'end-to-end-runtime' and sim_time >= 0
                    and (timestamps_with_obstacles is None
                         or sim_time in timestamps_with_obstacles)):
                sim_times.append(sim_time)
                if unit == 'ms':
                    runtimes.append(float(row[3]))
                elif unit == 'us':
                    runtimes.append(float(row[3]) * 1000)
                else:
                    raise ValueError('Unexpected unit {}'.format(unit))
    return (sim_times, runtimes)
def converter(x):
    """Parse a string holding a Python literal (e.g. ``"[1, 2, 3]"``)
    into the corresponding Python object."""
    return ast.literal_eval(x)
def get_timestamps_with_obstacles(filename, obstacle_distance_threshold=10):
    """Find simulator timestamps (relative to the log start) at which a
    logged obstacle was within ``obstacle_distance_threshold`` of the
    ego pose.

    :param filename: CSV log with columns timestamp, ms, log_label,
        label_info, label_value; ``label_value`` holds space-separated
        coordinates such as ``[1 2 3]``.
    :param obstacle_distance_threshold: maximum obstacle distance for a
        timestamp to qualify (same units as the logged coordinates).
    :return: list of qualifying ``ms`` values, offset by the smallest
        ``ms`` in the log.
    """
    print(filename)
    frame = pd.read_csv(
        filename,
        names=["timestamp", "ms", "log_label", "label_info", "label_value"])
    frame = frame.dropna()
    # "[1 2 3]" -> "[1, 2, 3]" -> Python list via converter/literal_eval.
    frame['label_value'] = frame['label_value'].str.replace(" ", ", ")
    frame['label_value'] = frame['label_value'].apply(converter)
    obstacles = frame[frame['log_label'] == 'obstacle'].set_index('ms')
    poses = frame[frame['log_label'] == 'pose']
    first_timestamp = frame["ms"].min()
    timestamps = []
    for pose_time, pose_position in poses[["ms", "label_value"]].values:
        if pose_time not in obstacles.index:
            continue
        logged = obstacles.loc[pose_time]['label_value']
        # A single obstacle row yields the list itself; several rows at
        # the same ms yield a Series of lists.
        if isinstance(logged, list):
            obstacle_positions = [logged]
        else:
            obstacle_positions = logged.values
        for obstacle_position in obstacle_positions:
            distance = np.linalg.norm(
                np.array(pose_position) - np.array(obstacle_position))
            if 0 < distance <= obstacle_distance_threshold:
                timestamps.append(pose_time - first_timestamp)
    print("Selected {} timestamps".format(len(timestamps)))
    return timestamps
def fix_pylot_profile(file_path):
    """Wrap a raw Pylot trace file in a JSON list, in place.

    The raw file holds ``{...},`` entry lines without the enclosing
    ``[`` / ``]``, so it is not valid JSON.  This rewrites it as
    ``[\\n<entries>\\n]`` by stripping the trailing ``,\\n`` from the last
    entry.  Files that already start with ``[`` are left untouched, so
    the call is idempotent; empty files are skipped (the original code
    crashed on ``contents[0]`` for them).

    :param file_path: path of the profile file to fix in place.
    """
    with open(file_path, 'r') as f:
        contents = f.read()
    # Guard against an empty file: nothing to wrap.
    if not contents:
        return
    if contents[0] == "[":
        return
    print("Fixing Pylot {} json file".format(file_path))
    with open(file_path, 'w') as f:
        f.write("[\n")
        # Drop the trailing ",\n" left after the last trace entry.
        f.write(contents[:-2])
        f.write("\n]")
def read_challenge_runtimes(csv_file_path):
    """Parse per-timestamp runtimes from a challenge CSV log.

    Rows look like ``<wallclock>, <sim_time>, <event_name>, <value>``.
    A ``sensor_send_runtime`` row records how long sending the sensor
    data took for a sim timestamp; a later ``e2e_runtime`` row for the
    same timestamp records the end-to-end runtime including that send.

    :param csv_file_path: path of the challenge CSV log.
    :return: tuple ``(sim_times, e2e_runtimes, e2e_runtimes_w_sensor,
        sensor_send_runtimes)`` where ``e2e_runtimes`` has the sensor
        send time subtracted out.
    :raises KeyError: if an ``e2e_runtime`` row appears before the
        ``sensor_send_runtime`` row of the same timestamp.
    """
    sensor_send_runtime = {}
    sim_times = []
    e2e_runtimes = []
    e2e_runtimes_w_sensor = []
    sensor_send_runtimes = []
    # Close the file when done (the original leaked the handle) and drop
    # the unused ``sensor_times`` local.
    with open(csv_file_path) as csv_file:
        for row in csv.reader(csv_file):
            sim_time = int(row[1])
            event_name = row[2]
            if event_name == 'e2e_runtime':
                e2e_runtime = float(row[3])
                e2e_runtimes_w_sensor.append(e2e_runtime)
                e2e_runtimes.append(e2e_runtime -
                                    sensor_send_runtime[sim_time])
                sim_times.append(sim_time)
            elif event_name == 'sensor_send_runtime':
                sensor_send_runtime[sim_time] = float(row[3])
                sensor_send_runtimes.append(float(row[3]))
    return sim_times, e2e_runtimes, e2e_runtimes_w_sensor, sensor_send_runtimes
def read_challenge_collision_times(csv_file_path):
    """Extract de-duplicated collision timestamps from a challenge CSV log.

    A ``collision`` row is attributed the simulator time of the most
    recent non-collision row.  Collisions within 300 simulator ms of the
    previously reported one are treated as repeats and dropped.

    :param csv_file_path: path of the challenge CSV log.
    :return: list of simulator times at which distinct collisions were
        logged.
    """
    collisions_times = []
    prev_sim_time = 0
    prev_col_time = 0
    # Close the file when done (the original leaked the handle).
    with open(csv_file_path) as csv_file:
        for row in csv.reader(csv_file):
            sim_time = int(row[1])
            event_name = row[2]
            if event_name == 'collision':
                # TODO(ionel): Differentiate between the types of collisions.
                if prev_sim_time - prev_col_time > 300:
                    # Ignore the repeated collisions.
                    collisions_times.append(prev_sim_time)
                    prev_col_time = prev_sim_time
            else:
                prev_sim_time = sim_time
    return collisions_times
def print_collisions_with_outlier_runtimes(csv_file,
                                           sim_times,
                                           run_e2e,
                                           runtime_threshold=220):
    """Print, for every logged collision, the end-to-end runtimes among
    the collision sample and the 20 preceding ones that exceed the
    threshold.

    :param csv_file: path of the challenge CSV log to scan for collisions.
    :param sim_times: simulator timestamps aligned with ``run_e2e``.
    :param run_e2e: end-to-end runtimes aligned with ``sim_times``.
    :param runtime_threshold: runtimes above this value are printed.
    """
    for collision_time in read_challenge_collision_times(csv_file):
        index = sim_times.index(collision_time)
        print("Collision at {}".format(sim_times[index]))
        # Look back over the collision sample and the 20 before it.
        for offset in range(21):
            sample = index - offset
            if run_e2e[sample] > runtime_threshold:
                print("Runtime {} at {}".format(run_e2e[sample],
                                                sim_times[sample]))
def read_challenge_stats(results_path, filter_carla_cola=False):
    """Read aggregate scenario statistics from a challenge results JSON.

    :param results_path: path of the ``results.json`` written by the
        challenge evaluator.
    :param filter_carla_cola: when True, vehicle collisions whose
        description mentions ``carlacola`` are not counted.
    :return: tuple ``(score, collision_ped, collision_veh, collision_lay,
        route_length, num_col_ped, num_col_vec)``; the ``collision_*``
        entries are collisions / km and ``route_length`` is in meters.
    """
    with open(results_path) as f:
        data = json.load(f)
    score = float(
        data["_checkpoint"]["global_record"]["scores"]["score_composed"])
    # The original computed len(collisions_vehicle) here and then
    # unconditionally recomputed num_col_vec below; the dead store is gone.
    infractions = data["_checkpoint"]["records"][0]["infractions"]
    cols_vec = infractions["collisions_vehicle"]
    if filter_carla_cola:
        # Skip collisions with the CARLA cola truck.
        num_col_vec = sum(1 for col in cols_vec if 'carlacola' not in col)
    else:
        num_col_vec = len(cols_vec)
    num_col_ped = len(infractions["collisions_pedestrian"])
    # Collisions / km
    collision_ped = float(data["values"][3])
    collision_veh = float(data["values"][4])
    collision_lay = float(data["values"][5])
    # In meters.
    route_length = float(
        data["_checkpoint"]["records"][0]["meta"]["route_length"])
    return (score, collision_ped, collision_veh, collision_lay, route_length,
            num_col_ped, num_col_vec)
def read_challenge_deadline_misses(log_file):
    """Collect, per pipeline stage, the simulator timestamps whose run
    missed its deadline according to the challenge log.

    Each qualifying line carries the operator name as its second
    whitespace-separated token and the timestamp embedded in the fourth
    (two framing characters on each side — format assumed from the
    logger; verify).  Note the operator-to-set mapping is offset by one
    stage (e.g. a ``tracker_sort`` miss lands in the detection set),
    mirroring the original code.

    :param log_file: path of the challenge log file.
    :return: tuple of sets ``(detection_miss, tracker_miss,
        loc_finder_miss, prediction_miss, planning_miss)``.
    :raises ValueError: on a deadline-miss line with an unknown operator.
    """
    detection_miss = set()
    tracker_miss = set()
    loc_finder_miss = set()
    prediction_miss = set()
    planning_miss = set()
    misses_by_operator = {
        'tracker_sort': detection_miss,
        'center_camera_location_finder_history_operator': tracker_miss,
        'linear_prediction_operator': loc_finder_miss,
        'planning_operator': prediction_miss,
        'pid_control_operator': planning_miss,
    }
    with open(log_file) as f:
        for line in f:
            if 'deadline miss' not in line:
                continue
            tokens = line.split(' ')
            op_name = tokens[1]
            sim_time = int(tokens[3][2:-2])
            if op_name in misses_by_operator:
                misses_by_operator[op_name].add(sim_time)
            else:
                raise ValueError(
                    'Unexpected type of deadline miss: {}'.format(op_name))
    return (detection_miss, tracker_miss, loc_finder_miss, prediction_miss,
            planning_miss)
def read_challenge_results(log_dir_base,
                           town,
                           route,
                           detector,
                           num_reps,
                           segmentation_name,
                           segmentation_value,
                           filter_carla_cola=False):
    """Aggregate runtimes and scores across ``num_reps`` challenge runs.

    For each run directory ``{log_dir_base}_run_{i}/`` this reads the
    profile JSON, the challenge CSV, the challenge log and results.json,
    and accumulates per-stage runtimes plus scoring statistics.

    :param log_dir_base: prefix of the per-run log directories.
    :param town: town label copied into the output frames.
    :param route: route label copied into the output frames.
    :param detector: detector label copied into the output frames.
    :param num_reps: number of repetitions (run directories) to read.
    :param segmentation_name: name of the experiment segmentation column;
        when it equals 'deadline' (and the value is truthy), deadline
        misses are read from the log and those timestamps get -1 runtimes.
    :param segmentation_value: value stored in the segmentation column.
    :param filter_carla_cola: forwarded to :func:`read_challenge_stats`.
    :return: tuple ``(runtimes_df, score_df, route_len)`` with the
        per-timestamp runtimes frame, the per-run score frame, and the
        total route length in km.
    """
    scores = []
    route_len = 0
    collisions_ped = []
    collisions_veh = []
    collisions_lay = []
    num_vec_collisions = []
    num_ped_collisions = []
    e2e_runtimes = []
    e2e_runtimes_w_sensor = []
    detector_runtimes = []
    loc_finder_runtimes = []
    tracker_runtimes = []
    prediction_runtimes = []
    planning_runtimes = []
    for run in range(1, num_reps + 1):
        log_dir = '{}_run_{}/'.format(log_dir_base, run)
        result_file = log_dir + 'results.json'
        csv_file = log_dir + 'challenge.csv'
        profile_file = log_dir + 'challenge.json'
        log_file = log_dir + 'challenge.log'
        # Get the runtimes
        fix_pylot_profile(profile_file)
        profile_events = ProfileEvents(profile_file, no_offset=True)
        # profile_events.check_if_timestamps_overlapped()
        # Get the end-to-end runtimes.
        (sim_times, run_e2e, run_e2e_w_sensor,
         _) = read_challenge_runtimes(csv_file)
        # print_collisions_with_outlier_runtimes(csv_file, sim_times, run_e2e)
        e2e_runtimes = e2e_runtimes + run_e2e
        e2e_runtimes_w_sensor = e2e_runtimes_w_sensor + run_e2e_w_sensor
        detection_miss = tracker_miss = loc_finder_miss = prediction_miss = planning_miss = None
        if segmentation_name == 'deadline' and segmentation_value:
            (detection_miss, tracker_miss, loc_finder_miss, prediction_miss,
             planning_miss) = read_challenge_deadline_misses(log_file)
            # num_times = len(run_e2e)
            # print('Percentage detection deadline misses {:0.2f}'.format(
            #     len(detection_miss) / num_times))
            # print('Percentage tracker deadline misses {:0.2f}'.format(
            #     len(tracker_miss) / num_times))
            # print('Percentage loc_finder deadline misses {:0.2f}'.format(
            #     len(loc_finder_miss) / num_times))
            # print('Percentage prediction deadline misses {:0.2f}'.format(
            #     len(prediction_miss) / num_times))
            # print('Percentage planning deadline misses {:0.2f}'.format(
            #     len(planning_miss) / num_times))
        # Per-stage runtimes; timestamps that missed their deadline are
        # banned (reported as -1 by get_filtered_runtimes).
        run_detector_runtimes = profile_events.get_filtered_runtimes(
            'efficientdet_operator.on_watermark',
            timestamps_to_ban=detection_miss)
        run_loc_finder_runtimes = profile_events.get_filtered_runtimes(
            'center_camera_location_finder_history_operator.on_watermark',
            timestamps_to_ban=loc_finder_miss)
        run_tracker_runtimes = profile_events.get_filtered_runtimes(
            'tracker_sort.on_watermark', timestamps_to_ban=tracker_miss)
        run_prediction_runtimes = profile_events.get_filtered_runtimes(
            'linear_prediction_operator.generate_predicted_trajectories',
            timestamps_to_ban=prediction_miss)
        # Older profiles use the on_watermark event name for prediction.
        if len(run_prediction_runtimes) == 0:
            run_prediction_runtimes = profile_events.get_filtered_runtimes(
                'linear_prediction_operator.on_watermark',
                timestamps_to_ban=prediction_miss)
        run_planning_runtimes = profile_events.get_filtered_runtimes(
            'planning_operator.on_watermark', timestamps_to_ban=planning_miss)
        detector_runtimes = detector_runtimes + run_detector_runtimes
        loc_finder_runtimes = loc_finder_runtimes + run_loc_finder_runtimes
        tracker_runtimes = tracker_runtimes + run_tracker_runtimes
        prediction_runtimes = prediction_runtimes + run_prediction_runtimes
        planning_runtimes = planning_runtimes + run_planning_runtimes
        # Get the scores.
        score, cp, cv, cl, m_driven, num_ped_col, num_vec_col = \
            read_challenge_stats(result_file, filter_carla_cola)
        scores.append(score)
        collisions_ped.append(cp)
        collisions_veh.append(cv)
        collisions_lay.append(cl)
        num_vec_collisions.append(num_vec_col)
        num_ped_collisions.append(num_ped_col)
        route_len += m_driven
    # Transform to km.
    route_len /= 1000
    entries = len(e2e_runtimes)
    # One row per logged timestamp across all runs.
    runtimes_df = pd.DataFrame({
        'town': [town] * entries,
        'route': [route] * entries,
        'detector': [detector] * entries,
        segmentation_name: [segmentation_value] * entries,
        'e2e_runtime': e2e_runtimes,
        'e2e_runtime_w_sensor': e2e_runtimes_w_sensor,
        'detector_runtime': detector_runtimes,
        'tracker_runtime': tracker_runtimes,
        'loc_finder_runtime': loc_finder_runtimes,
        'prediction_runtime': prediction_runtimes,
        'planning_runtime': planning_runtimes,
    })
    # One row per run.
    score_df = pd.DataFrame({
        'town': [town] * len(scores),
        'route': [route] * len(scores),
        segmentation_name: [segmentation_value] * len(scores),
        'detector': [detector] * len(scores),
        'score':
        scores,
        'collisions_ped':
        collisions_ped,
        'collisions_veh':
        collisions_veh,
        'collisions_lay':
        collisions_lay,
        'num_vec_collisions':
        num_vec_collisions,
        'num_ped_collisions':
        num_ped_collisions
    })
    return runtimes_df, score_df, route_len
| 14,336 | 3,194 | 279 |
553261ede6908d15a260c9d80a6cc7e5e9bc751f | 1,256 | py | Python | solutions/longest-substring-without-repeating-characters.py | oopsno/leetcode.py | fe454137aef32b4950a1fdb398f90d5212a90fb8 | [
"WTFPL"
] | 1 | 2017-11-30T12:23:59.000Z | 2017-11-30T12:23:59.000Z | solutions/longest-substring-without-repeating-characters.py | oopsno/leetcode.py | fe454137aef32b4950a1fdb398f90d5212a90fb8 | [
"WTFPL"
] | null | null | null | solutions/longest-substring-without-repeating-characters.py | oopsno/leetcode.py | fe454137aef32b4950a1fdb398f90d5212a90fb8 | [
"WTFPL"
] | null | null | null | # encoding: UTF-8
from leetcode import *
from typing import Generator, Tuple
@Problem(3, 'Longest Substring Without Repeating Characters', Difficulty.Medium, Tags.HashTable, Tags.String, Tags.TwoPointers)
@Solution.test.lengthOfLongestSubstring
@Solution.test.lengthOfLongestSubstring
@Solution.test.lengthOfLongestSubstring
| 26.166667 | 127 | 0.585191 | # encoding: UTF-8
from leetcode import *
from typing import Generator, Tuple
@Problem(3, 'Longest Substring Without Repeating Characters', Difficulty.Medium, Tags.HashTable, Tags.String, Tags.TwoPointers)
class Solution:
    @staticmethod
    def iterate(s: str) -> Generator[Tuple[int, int], None, None]:
        """Yield every maximal duplicate-free window of ``s`` as a
        half-open index pair ``[begin, end)``."""
        last_seen = {}
        begin = 0
        for end, char in enumerate(s):
            previous = last_seen.get(char, -1)
            if previous >= begin:
                # ``char`` repeats inside the window: close it here and
                # restart just past the earlier occurrence.
                yield begin, end
                begin = previous + 1
            last_seen[char] = end
        # The trailing window always closes at the end of the string.
        yield begin, len(s)

    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of ``s`` without
        repeating characters."""
        best = 0
        for left, right in self.iterate(s):
            best = max(best, right - left)
        return best
@Solution.test.lengthOfLongestSubstring
def example(fn):
    """LeetCode sample cases for lengthOfLongestSubstring."""
    require(fn('abcabcbb') == len('abc'))
    require(fn('bbbbb') == len('b'))
    require(fn('pwwkew') == len('wke'))
@Solution.test.lengthOfLongestSubstring
def coverage(fn):
    """Edge cases: empty string, single char, repeats, and 'abba'
    (the window must restart past an earlier duplicate)."""
    require(fn('') == 0)
    require(fn('a') == 1)
    require(fn('aa') == 1)
    require(fn('ab') == 2)
    require(fn('abba') == len('ab'))
@Solution.test.lengthOfLongestSubstring
def profile(fn):
    """Large (90k-char) input to exercise performance on long strings."""
    require(fn('abc' * 30000) == len('abc'))
| 292 | 612 | 88 |
691c6dad4265dcd18d18b8b3764b927ce6ce8e3c | 381 | py | Python | wikidata-to-gedcom/mywikidata/WikidataKeys.py | lmallez/wikidata-to-gedcom | fac73e0cb6589e25bcb3c8c388ff7afc86273586 | [
"MIT"
] | 5 | 2019-11-12T20:45:46.000Z | 2020-04-30T05:57:01.000Z | wikidata-to-gedcom/mywikidata/WikidataKeys.py | lmallez/wikidata-to-gedcom | fac73e0cb6589e25bcb3c8c388ff7afc86273586 | [
"MIT"
] | null | null | null | wikidata-to-gedcom/mywikidata/WikidataKeys.py | lmallez/wikidata-to-gedcom | fac73e0cb6589e25bcb3c8c388ff7afc86273586 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
| 19.05 | 29 | 0.574803 | #!/usr/bin/env python3
class WikidataKey:
    """Wikidata identifier constants: entity ids (Q...) and property ids (P...)."""

    # Entity identifiers (Q...)
    HUMAN = 'Q5'
    MALE = 'Q6581097'
    FEMALE = 'Q6581072'
    ADOPTED = 'Q20746725'

    # Property identifiers (P...)
    SEX = 'P21'
    FATHER = 'P22'
    MOTHER = 'P25'
    INSTANCE_OF = 'P31'
    CHILD = 'P40'
    FAMILY = 'P53'
    GIVEN_NAME = 'P735'
    FAMILY_NAME = 'P734'
    DATE_OF_BIRTH = 'P569'
    DATE_OF_DEATH = 'P570'
    TYPE_OF_KINSHIP = 'P1039'
| 0 | 334 | 23 |
e4d46792cc68d8be20c27212cb496b7f1e2c6189 | 101,385 | py | Python | emoji_list.py | williln/emojihaiku | 87a5558c2de397726d4fc360cf1e11da2152a9f3 | [
"MIT"
] | 20 | 2015-07-24T09:36:41.000Z | 2020-05-09T16:22:02.000Z | emoji_list.py | williln/emojihaiku | 87a5558c2de397726d4fc360cf1e11da2152a9f3 | [
"MIT"
] | 9 | 2015-07-24T04:54:52.000Z | 2017-03-20T20:14:50.000Z | emoji_list.py | williln/emojihaiku | 87a5558c2de397726d4fc360cf1e11da2152a9f3 | [
"MIT"
] | 7 | 2015-08-18T06:18:35.000Z | 2021-04-10T17:06:24.000Z | EMOJI_LIST = [
':1st_place_medal:',
':2nd_place_medal:',
':3rd_place_medal:',
':AB_button_(blood_type):',
':ATM_sign:',
':A_button_(blood_type):',
':Afghanistan:',
':Albania:',
':Algeria:',
':American_Samoa:',
':Andorra:',
':Angola:',
':Anguilla:',
':Antarctica:',
':Antigua_&_Barbuda:',
':Aquarius:',
':Argentina:',
':Aries:',
':Armenia:',
':Aruba:',
':Ascension_Island:',
':Australia:',
':Austria:',
':Azerbaijan:',
':BACK_arrow:',
':B_button_(blood_type):',
':Bahamas:',
':Bahrain:',
':Bangladesh:',
':Barbados:',
':Belarus:',
':Belgium:',
':Belize:',
':Benin:',
':Bermuda:',
':Bhutan:',
':Bolivia:',
':Bosnia_&_Herzegovina:',
':Botswana:',
':Bouvet_Island:',
':Brazil:',
':British_Indian_Ocean_Territory:',
':British_Virgin_Islands:',
':Brunei:',
':Bulgaria:',
':Burkina_Faso:',
':Burundi:',
':CL_button:',
':COOL_button:',
':Cambodia:',
':Cameroon:',
':Canada:',
':Canary_Islands:',
':Cancer:',
':Cape_Verde:',
':Capricorn:',
':Caribbean_Netherlands:',
':Cayman_Islands:',
':Central_African_Republic:',
':Ceuta_&_Melilla:',
':Chad:',
':Chile:',
':China:',
':Christmas_Island:',
':Christmas_tree:',
':Clipperton_Island:',
':Cocos_(Keeling)_Islands:',
':Colombia:',
':Comoros:',
':Congo_-_Brazzaville:',
':Congo_-_Kinshasa:',
':Cook_Islands:',
':Costa_Rica:',
':Croatia:',
':Cuba:',
':Curaçao:',
':Cyprus:',
':Czech_Republic:',
':Côte_d’Ivoire:',
':Denmark:',
':Diego_Garcia:',
':Djibouti:',
':Dominica:',
':Dominican_Republic:',
':END_arrow:',
':Ecuador:',
':Egypt:',
':El_Salvador:',
':Equatorial_Guinea:',
':Eritrea:',
':Estonia:',
':Ethiopia:',
':European_Union:',
':FREE_button:',
':Falkland_Islands:',
':Faroe_Islands:',
':Fiji:',
':Finland:',
':France:',
':French_Guiana:',
':French_Polynesia:',
':French_Southern_Territories:',
':Gabon:',
':Gambia:',
':Gemini:',
':Georgia:',
':Germany:',
':Ghana:',
':Gibraltar:',
':Greece:',
':Greenland:',
':Grenada:',
':Guadeloupe:',
':Guam:',
':Guatemala:',
':Guernsey:',
':Guinea:',
':Guinea-Bissau:',
':Guyana:',
':Haiti:',
':Heard_&_McDonald_Islands:',
':Honduras:',
':Hong_Kong_SAR_China:',
':Hungary:',
':ID_button:',
':Iceland:',
':India:',
':Indonesia:',
':Iran:',
':Iraq:',
':Ireland:',
':Isle_of_Man:',
':Israel:',
':Italy:',
':Jamaica:',
':Japan:',
':Japanese_acceptable_button:',
':Japanese_application_button:',
':Japanese_bargain_button:',
':Japanese_castle:',
':Japanese_congratulations_button:',
':Japanese_discount_button:',
':Japanese_dolls:',
':Japanese_free_of_charge_button:',
':Japanese_here_button:',
':Japanese_monthly_amount_button:',
':Japanese_no_vacancy_button:',
':Japanese_not_free_of_charge_button:',
':Japanese_open_for_business_button:',
':Japanese_passing_grade_button:',
':Japanese_post_office:',
':Japanese_prohibited_button:',
':Japanese_reserved_button:',
':Japanese_secret_button:',
':Japanese_service_charge_button:',
':Japanese_symbol_for_beginner:',
':Japanese_vacancy_button:',
':Jersey:',
':Jordan:',
':Kazakhstan:',
':Kenya:',
':Kiribati:',
':Kosovo:',
':Kuwait:',
':Kyrgyzstan:',
':Laos:',
':Latvia:',
':Lebanon:',
':Leo:',
':Lesotho:',
':Liberia:',
':Libra:',
':Libya:',
':Liechtenstein:',
':Lithuania:',
':Luxembourg:',
':Macau_SAR_China:',
':Macedonia:',
':Madagascar:',
':Malawi:',
':Malaysia:',
':Maldives:',
':Mali:',
':Malta:',
':Marshall_Islands:',
':Martinique:',
':Mauritania:',
':Mauritius:',
':Mayotte:',
':Mexico:',
':Micronesia:',
':Moldova:',
':Monaco:',
':Mongolia:',
':Montenegro:',
':Montserrat:',
':Morocco:',
':Mozambique:',
':Mrs._Claus:',
':Mrs._Claus_dark_skin_tone:',
':Mrs._Claus_light_skin_tone:',
':Mrs._Claus_medium-dark_skin_tone:',
':Mrs._Claus_medium-light_skin_tone:',
':Mrs._Claus_medium_skin_tone:',
':Myanmar_(Burma):',
':NEW_button:',
':NG_button:',
':Namibia:',
':Nauru:',
':Nepal:',
':Netherlands:',
':New_Caledonia:',
':New_Zealand:',
':Nicaragua:',
':Niger:',
':Nigeria:',
':Niue:',
':Norfolk_Island:',
':North_Korea:',
':Northern_Mariana_Islands:',
':Norway:',
':OK_button:',
':OK_hand:',
':OK_hand_dark_skin_tone:',
':OK_hand_light_skin_tone:',
':OK_hand_medium-dark_skin_tone:',
':OK_hand_medium-light_skin_tone:',
':OK_hand_medium_skin_tone:',
':ON!_arrow:',
':O_button_(blood_type):',
':Oman:',
':Ophiuchus:',
':P_button:',
':Pakistan:',
':Palau:',
':Palestinian_Territories:',
':Panama:',
':Papua_New_Guinea:',
':Paraguay:',
':Peru:',
':Philippines:',
':Pisces:',
':Pitcairn_Islands:',
':Poland:',
':Portugal:',
':Puerto_Rico:',
':Qatar:',
':Romania:',
':Russia:',
':Rwanda:',
':Réunion:',
':SOON_arrow:',
':SOS_button:',
':Sagittarius:',
':Samoa:',
':San_Marino:',
':Santa_Claus:',
':Santa_Claus_dark_skin_tone:',
':Santa_Claus_light_skin_tone:',
':Santa_Claus_medium-dark_skin_tone:',
':Santa_Claus_medium-light_skin_tone:',
':Santa_Claus_medium_skin_tone:',
':Saudi_Arabia:',
':Scorpius:',
':Senegal:',
':Serbia:',
':Seychelles:',
':Sierra_Leone:',
':Singapore:',
':Sint_Maarten:',
':Slovakia:',
':Slovenia:',
':Solomon_Islands:',
':Somalia:',
':South_Africa:',
':South_Georgia_&_South_Sandwich_Islands:',
':South_Korea:',
':South_Sudan:',
':Spain:',
':Sri_Lanka:',
':St._Barthélemy:',
':St._Helena:',
':St._Kitts_&_Nevis:',
':St._Lucia:',
':St._Martin:',
':St._Pierre_&_Miquelon:',
':St._Vincent_&_Grenadines:',
':Statue_of_Liberty:',
':Sudan:',
':Suriname:',
':Svalbard_&_Jan_Mayen:',
':Swaziland:',
':Sweden:',
':Switzerland:',
':Syria:',
':São_Tomé_&_Príncipe:',
':TOP_arrow:',
':Taiwan:',
':Tajikistan:',
':Tanzania:',
':Taurus:',
':Thailand:',
':Timor-Leste:',
':Togo:',
':Tokelau:',
':Tokyo_tower:',
':Tonga:',
':Trinidad_&_Tobago:',
':Tristan_da_Cunha:',
':Tunisia:',
':Turkey:',
':Turkmenistan:',
':Turks_&_Caicos_Islands:',
':Tuvalu:',
':U.S._Outlying_Islands:',
':U.S._Virgin_Islands:',
':UP!_button:',
':Uganda:',
':Ukraine:',
':United_Arab_Emirates:',
':United_Kingdom:',
':United_Nations:',
':United_States:',
':Uruguay:',
':Uzbekistan:',
':VS_button:',
':Vanuatu:',
':Vatican_City:',
':Venezuela:',
':Vietnam:',
':Virgo:',
':Wallis_&_Futuna:',
':Western_Sahara:',
':Yemen:',
':Zambia:',
':Zimbabwe:',
':admission_tickets:',
':aerial_tramway:',
':airplane:',
':airplane_arrival:',
':airplane_departure:',
':alarm_clock:',
':alembic:',
':alien:',
':alien_monster:',
':ambulance:',
':american_football:',
':amphora:',
':anchor:',
':anger_symbol:',
':angry_face:',
':angry_face_with_horns:',
':anguished_face:',
':ant:',
':antenna_bars:',
':anticlockwise_arrows_button:',
':articulated_lorry:',
':artist_palette:',
':astonished_face:',
':atom_symbol:',
':automobile:',
':avocado:',
':baby:',
':baby_angel:',
':baby_angel_dark_skin_tone:',
':baby_angel_light_skin_tone:',
':baby_angel_medium-dark_skin_tone:',
':baby_angel_medium-light_skin_tone:',
':baby_angel_medium_skin_tone:',
':baby_bottle:',
':baby_chick:',
':baby_dark_skin_tone:',
':baby_light_skin_tone:',
':baby_medium-dark_skin_tone:',
':baby_medium-light_skin_tone:',
':baby_medium_skin_tone:',
':baby_symbol:',
':backhand_index_pointing_down:',
':backhand_index_pointing_down_dark_skin_tone:',
':backhand_index_pointing_down_light_skin_tone:',
':backhand_index_pointing_down_medium-dark_skin_tone:',
':backhand_index_pointing_down_medium-light_skin_tone:',
':backhand_index_pointing_down_medium_skin_tone:',
':backhand_index_pointing_left:',
':backhand_index_pointing_left_dark_skin_tone:',
':backhand_index_pointing_left_light_skin_tone:',
':backhand_index_pointing_left_medium-dark_skin_tone:',
':backhand_index_pointing_left_medium-light_skin_tone:',
':backhand_index_pointing_left_medium_skin_tone:',
':backhand_index_pointing_right:',
':backhand_index_pointing_right_dark_skin_tone:',
':backhand_index_pointing_right_light_skin_tone:',
':backhand_index_pointing_right_medium-dark_skin_tone:',
':backhand_index_pointing_right_medium-light_skin_tone:',
':backhand_index_pointing_right_medium_skin_tone:',
':backhand_index_pointing_up:',
':backhand_index_pointing_up_dark_skin_tone:',
':backhand_index_pointing_up_light_skin_tone:',
':backhand_index_pointing_up_medium-dark_skin_tone:',
':backhand_index_pointing_up_medium-light_skin_tone:',
':backhand_index_pointing_up_medium_skin_tone:',
':bacon:',
':badminton:',
':baggage_claim:',
':baguette_bread:',
':balance_scale:',
':balloon:',
':ballot_box_with_ballot:',
':ballot_box_with_check:',
':banana:',
':bank:',
':bar_chart:',
':barber_pole:',
':baseball:',
':basketball:',
':bat:',
':bathtub:',
':battery:',
':beach_with_umbrella:',
':bear_face:',
':beating_heart:',
':bed:',
':beer_mug:',
':bell:',
':bell_with_slash:',
':bellhop_bell:',
':bento_box:',
':bicycle:',
':bikini:',
':biohazard:',
':bird:',
':birthday_cake:',
':black_circle:',
':black_flag:',
':black_heart:',
':black_large_square:',
':black_medium-small_square:',
':black_medium_square:',
':black_nib:',
':black_small_square:',
':black_square_button:',
':blond-haired_man:',
':blond-haired_man_dark_skin_tone:',
':blond-haired_man_light_skin_tone:',
':blond-haired_man_medium-dark_skin_tone:',
':blond-haired_man_medium-light_skin_tone:',
':blond-haired_man_medium_skin_tone:',
':blond-haired_person:',
':blond-haired_person_dark_skin_tone:',
':blond-haired_person_light_skin_tone:',
':blond-haired_person_medium-dark_skin_tone:',
':blond-haired_person_medium-light_skin_tone:',
':blond-haired_person_medium_skin_tone:',
':blond-haired_woman:',
':blond-haired_woman_dark_skin_tone:',
':blond-haired_woman_light_skin_tone:',
':blond-haired_woman_medium-dark_skin_tone:',
':blond-haired_woman_medium-light_skin_tone:',
':blond-haired_woman_medium_skin_tone:',
':blossom:',
':blowfish:',
':blue_book:',
':blue_circle:',
':blue_heart:',
':boar:',
':bomb:',
':bookmark:',
':bookmark_tabs:',
':books:',
':bottle_with_popping_cork:',
':bouquet:',
':bow_and_arrow:',
':bowling:',
':boxing_glove:',
':boy:',
':boy_dark_skin_tone:',
':boy_light_skin_tone:',
':boy_medium-dark_skin_tone:',
':boy_medium-light_skin_tone:',
':boy_medium_skin_tone:',
':bread:',
':bride_with_veil:',
':bride_with_veil_dark_skin_tone:',
':bride_with_veil_light_skin_tone:',
':bride_with_veil_medium-dark_skin_tone:',
':bride_with_veil_medium-light_skin_tone:',
':bride_with_veil_medium_skin_tone:',
':bridge_at_night:',
':briefcase:',
':bright_button:',
':broken_heart:',
':bug:',
':building_construction:',
':burrito:',
':bus:',
':bus_stop:',
':bust_in_silhouette:',
':busts_in_silhouette:',
':butterfly:',
':cactus:',
':calendar:',
':call_me_hand:',
':call_me_hand_dark_skin_tone:',
':call_me_hand_light_skin_tone:',
':call_me_hand_medium-dark_skin_tone:',
':call_me_hand_medium-light_skin_tone:',
':call_me_hand_medium_skin_tone:',
':camel:',
':camera:',
':camera_with_flash:',
':camping:',
':candle:',
':candy:',
':canoe:',
':card_file_box:',
':card_index:',
':card_index_dividers:',
':carousel_horse:',
':carp_streamer:',
':carrot:',
':castle:',
':cat:',
':cat_face:',
':cat_face_with_tears_of_joy:',
':cat_face_with_wry_smile:',
':chains:',
':chart_decreasing:',
':chart_increasing:',
':chart_increasing_with_yen:',
':cheese_wedge:',
':chequered_flag:',
':cherries:',
':cherry_blossom:',
':chestnut:',
':chicken:',
':children_crossing:',
':chipmunk:',
':chocolate_bar:',
':church:',
':cigarette:',
':cinema:',
':circled_M:',
':circus_tent:',
':cityscape:',
':cityscape_at_dusk:',
':clamp:',
':clapper_board:',
':clapping_hands:',
':clapping_hands_dark_skin_tone:',
':clapping_hands_light_skin_tone:',
':clapping_hands_medium-dark_skin_tone:',
':clapping_hands_medium-light_skin_tone:',
':clapping_hands_medium_skin_tone:',
':classical_building:',
':clinking_beer_mugs:',
':clinking_glasses:',
':clipboard:',
':clockwise_vertical_arrows:',
':closed_book:',
':closed_mailbox_with_lowered_flag:',
':closed_mailbox_with_raised_flag:',
':closed_umbrella:',
':cloud:',
':cloud_with_lightning:',
':cloud_with_lightning_and_rain:',
':cloud_with_rain:',
':cloud_with_snow:',
':clown_face:',
':club_suit:',
':clutch_bag:',
':cocktail_glass:',
':coffin:',
':collision:',
':comet:',
':computer_disk:',
':computer_mouse:',
':confetti_ball:',
':confounded_face:',
':confused_face:',
':construction:',
':construction_worker:',
':construction_worker_dark_skin_tone:',
':construction_worker_light_skin_tone:',
':construction_worker_medium-dark_skin_tone:',
':construction_worker_medium-light_skin_tone:',
':construction_worker_medium_skin_tone:',
':control_knobs:',
':convenience_store:',
':cooked_rice:',
':cookie:',
':cooking:',
':copyright:',
':couch_and_lamp:',
':couple_with_heart:',
':couple_with_heart_man_man:',
':couple_with_heart_woman_man:',
':couple_with_heart_woman_woman:',
':cow:',
':cow_face:',
':cowboy_hat_face:',
':crab:',
':crayon:',
':credit_card:',
':crescent_moon:',
':cricket:',
':crocodile:',
':croissant:',
':cross_mark:',
':cross_mark_button:',
':crossed_fingers:',
':crossed_fingers_dark_skin_tone:',
':crossed_fingers_light_skin_tone:',
':crossed_fingers_medium-dark_skin_tone:',
':crossed_fingers_medium-light_skin_tone:',
':crossed_fingers_medium_skin_tone:',
':crossed_flags:',
':crossed_swords:',
':crown:',
':crying_cat_face:',
':crying_face:',
':crystal_ball:',
':cucumber:',
':curly_loop:',
':currency_exchange:',
':curry_rice:',
':custard:',
':customs:',
':cyclone:',
':dagger:',
':dango:',
':dark_skin_tone:',
':dashing_away:',
':deciduous_tree:',
':deer:',
':delivery_truck:',
':department_store:',
':derelict_house:',
':desert:',
':desert_island:',
':desktop_computer:',
':detective:',
':detective_dark_skin_tone:',
':detective_light_skin_tone:',
':detective_medium-dark_skin_tone:',
':detective_medium-light_skin_tone:',
':detective_medium_skin_tone:',
':diamond_suit:',
':diamond_with_a_dot:',
':dim_button:',
':direct_hit:',
':disappointed_but_relieved_face:',
':disappointed_face:',
':dizzy:',
':dizzy_face:',
':dog:',
':dog_face:',
':dollar_banknote:',
':dolphin:',
':door:',
':dotted_six-pointed_star:',
':double_curly_loop:',
':double_exclamation_mark:',
':doughnut:',
':dove:',
':down-left_arrow:',
':down-right_arrow:',
':down_arrow:',
':down_button:',
':dragon:',
':dragon_face:',
':dress:',
':drooling_face:',
':droplet:',
':drum:',
':duck:',
':dvd:',
':e-mail:',
':eagle:',
':ear:',
':ear_dark_skin_tone:',
':ear_light_skin_tone:',
':ear_medium-dark_skin_tone:',
':ear_medium-light_skin_tone:',
':ear_medium_skin_tone:',
':ear_of_corn:',
':egg:',
':eggplant:',
':eight-pointed_star:',
':eight-spoked_asterisk:',
':eight-thirty:',
':eight_o’clock:',
':eject_button:',
':electric_plug:',
':elephant:',
':eleven-thirty:',
':eleven_o’clock:',
':envelope:',
':envelope_with_arrow:',
':euro_banknote:',
':evergreen_tree:',
':exclamation_mark:',
':exclamation_question_mark:',
':expressionless_face:',
':eye:',
':eye_in_speech_bubble:',
':eyes:',
':face_blowing_a_kiss:',
':face_savouring_delicious_food:',
':face_screaming_in_fear:',
':face_with_cold_sweat:',
':face_with_head-bandage:',
':face_with_medical_mask:',
':face_with_open_mouth:',
':face_with_open_mouth_&_cold_sweat:',
':face_with_rolling_eyes:',
':face_with_steam_from_nose:',
':face_with_stuck-out_tongue:',
':face_with_stuck-out_tongue_&_closed_eyes:',
':face_with_stuck-out_tongue_&_winking_eye:',
':face_with_tears_of_joy:',
':face_with_thermometer:',
':face_without_mouth:',
':factory:',
':fallen_leaf:',
':family:',
':family_man_boy:',
':family_man_boy_boy:',
':family_man_girl:',
':family_man_girl_boy:',
':family_man_girl_girl:',
':family_man_man_boy:',
':family_man_man_boy_boy:',
':family_man_man_girl:',
':family_man_man_girl_boy:',
':family_man_man_girl_girl:',
':family_man_woman_boy:',
':family_man_woman_boy_boy:',
':family_man_woman_girl:',
':family_man_woman_girl_boy:',
':family_man_woman_girl_girl:',
':family_woman_boy:',
':family_woman_boy_boy:',
':family_woman_girl:',
':family_woman_girl_boy:',
':family_woman_girl_girl:',
':family_woman_woman_boy:',
':family_woman_woman_boy_boy:',
':family_woman_woman_girl:',
':family_woman_woman_girl_boy:',
':family_woman_woman_girl_girl:',
':fast-forward_button:',
':fast_down_button:',
':fast_reverse_button:',
':fast_up_button:',
':fax_machine:',
':fearful_face:',
':female_sign:',
':ferris_wheel:',
':ferry:',
':field_hockey:',
':file_cabinet:',
':file_folder:',
':film_frames:',
':film_projector:',
':fire:',
':fire_engine:',
':fireworks:',
':first_quarter_moon:',
':first_quarter_moon_with_face:',
':fish:',
':fish_cake_with_swirl:',
':fishing_pole:',
':five-thirty:',
':five_o’clock:',
':flag_in_hole:',
':flashlight:',
':fleur-de-lis:',
':flexed_biceps:',
':flexed_biceps_dark_skin_tone:',
':flexed_biceps_light_skin_tone:',
':flexed_biceps_medium-dark_skin_tone:',
':flexed_biceps_medium-light_skin_tone:',
':flexed_biceps_medium_skin_tone:',
':floppy_disk:',
':flower_playing_cards:',
':flushed_face:',
':fog:',
':foggy:',
':folded_hands:',
':folded_hands_dark_skin_tone:',
':folded_hands_light_skin_tone:',
':folded_hands_medium-dark_skin_tone:',
':folded_hands_medium-light_skin_tone:',
':folded_hands_medium_skin_tone:',
':footprints:',
':fork_and_knife:',
':fork_and_knife_with_plate:',
':fountain:',
':fountain_pen:',
':four-thirty:',
':four_leaf_clover:',
':four_o’clock:',
':fox_face:',
':framed_picture:',
':french_fries:',
':fried_shrimp:',
':frog_face:',
':front-facing_baby_chick:',
':frowning_face:',
':frowning_face_with_open_mouth:',
':fuel_pump:',
':full_moon:',
':full_moon_with_face:',
':funeral_urn:',
':game_die:',
':gear:',
':gem_stone:',
':ghost:',
':girl:',
':girl_dark_skin_tone:',
':girl_light_skin_tone:',
':girl_medium-dark_skin_tone:',
':girl_medium-light_skin_tone:',
':girl_medium_skin_tone:',
':glass_of_milk:',
':glasses:',
':globe_showing_Americas:',
':globe_showing_Asia-Australia:',
':globe_showing_Europe-Africa:',
':globe_with_meridians:',
':glowing_star:',
':goal_net:',
':goat:',
':goblin:',
':gorilla:',
':graduation_cap:',
':grapes:',
':green_apple:',
':green_book:',
':green_heart:',
':green_salad:',
':grimacing_face:',
':grinning_cat_face_with_smiling_eyes:',
':grinning_face:',
':grinning_face_with_smiling_eyes:',
':growing_heart:',
':guard:',
':guard_dark_skin_tone:',
':guard_light_skin_tone:',
':guard_medium-dark_skin_tone:',
':guard_medium-light_skin_tone:',
':guard_medium_skin_tone:',
':guitar:',
':hamburger:',
':hammer:',
':hammer_and_pick:',
':hammer_and_wrench:',
':hamster_face:',
':handbag:',
':handshake:',
':hatching_chick:',
':headphone:',
':hear-no-evil_monkey:',
':heart_decoration:',
':heart_suit:',
':heart_with_arrow:',
':heart_with_ribbon:',
':heavy_check_mark:',
':heavy_division_sign:',
':heavy_dollar_sign:',
':heavy_heart_exclamation:',
':heavy_large_circle:',
':heavy_minus_sign:',
':heavy_multiplication_x:',
':heavy_plus_sign:',
':helicopter:',
':herb:',
':hibiscus:',
':high-heeled_shoe:',
':high-speed_train:',
':high-speed_train_with_bullet_nose:',
':high_voltage:',
':hole:',
':honey_pot:',
':honeybee:',
':horizontal_traffic_light:',
':horse:',
':horse_face:',
':horse_racing:',
':horse_racing_dark_skin_tone:',
':horse_racing_light_skin_tone:',
':horse_racing_medium-dark_skin_tone:',
':horse_racing_medium-light_skin_tone:',
':horse_racing_medium_skin_tone:',
':hospital:',
':hot_beverage:',
':hot_dog:',
':hot_pepper:',
':hot_springs:',
':hotel:',
':hourglass:',
':hourglass_with_flowing_sand:',
':house:',
':house_with_garden:',
':hugging_face:',
':hundred_points:',
':hushed_face:',
':ice_cream:',
':ice_hockey:',
':ice_skate:',
':inbox_tray:',
':incoming_envelope:',
':index_pointing_up:',
':index_pointing_up_dark_skin_tone:',
':index_pointing_up_light_skin_tone:',
':index_pointing_up_medium-dark_skin_tone:',
':index_pointing_up_medium-light_skin_tone:',
':index_pointing_up_medium_skin_tone:',
':information:',
':input_latin_letters:',
':input_latin_lowercase:',
':input_latin_uppercase:',
':input_numbers:',
':input_symbols:',
':jack-o-lantern:',
':jeans:',
':joker:',
':joystick:',
':kaaba:',
':key:',
':keyboard:',
':keycap_#:',
':keycap_*:',
':keycap_0:',
':keycap_1:',
':keycap_10:',
':keycap_2:',
':keycap_3:',
':keycap_4:',
':keycap_5:',
':keycap_6:',
':keycap_7:',
':keycap_8:',
':keycap_9:',
':kick_scooter:',
':kimono:',
':kiss:',
':kiss_man_man:',
':kiss_mark:',
':kiss_woman_man:',
':kiss_woman_woman:',
':kissing_cat_face_with_closed_eyes:',
':kissing_face:',
':kissing_face_with_closed_eyes:',
':kissing_face_with_smiling_eyes:',
':kitchen_knife:',
':kiwi_fruit:',
':koala:',
':label:',
':lady_beetle:',
':laptop_computer:',
':large_blue_diamond:',
':large_orange_diamond:',
':last_quarter_moon:',
':last_quarter_moon_with_face:',
':last_track_button:',
':latin_cross:',
':leaf_fluttering_in_wind:',
':ledger:',
':left-facing_fist:',
':left-facing_fist_dark_skin_tone:',
':left-facing_fist_light_skin_tone:',
':left-facing_fist_medium-dark_skin_tone:',
':left-facing_fist_medium-light_skin_tone:',
':left-facing_fist_medium_skin_tone:',
':left-pointing_magnifying_glass:',
':left-right_arrow:',
':left_arrow:',
':left_arrow_curving_right:',
':left_luggage:',
':left_speech_bubble:',
':lemon:',
':leopard:',
':level_slider:',
':light_bulb:',
':light_rail:',
':light_skin_tone:',
':link:',
':linked_paperclips:',
':lion_face:',
':lipstick:',
':litter_in_bin_sign:',
':lizard:',
':locked:',
':locked_with_key:',
':locked_with_pen:',
':locomotive:',
':lollipop:',
':loudly_crying_face:',
':loudspeaker:',
':love_hotel:',
':love_letter:',
':lying_face:',
':mahjong_red_dragon:',
':male_sign:',
':man:',
':man_and_woman_holding_hands:',
':man_artist:',
':man_artist_dark_skin_tone:',
':man_artist_light_skin_tone:',
':man_artist_medium-dark_skin_tone:',
':man_artist_medium-light_skin_tone:',
':man_artist_medium_skin_tone:',
':man_astronaut:',
':man_astronaut_dark_skin_tone:',
':man_astronaut_light_skin_tone:',
':man_astronaut_medium-dark_skin_tone:',
':man_astronaut_medium-light_skin_tone:',
':man_astronaut_medium_skin_tone:',
':man_biking:',
':man_biking_dark_skin_tone:',
':man_biking_light_skin_tone:',
':man_biking_medium-dark_skin_tone:',
':man_biking_medium-light_skin_tone:',
':man_biking_medium_skin_tone:',
':man_bouncing_ball:',
':man_bouncing_ball_dark_skin_tone:',
':man_bouncing_ball_light_skin_tone:',
':man_bouncing_ball_medium-dark_skin_tone:',
':man_bouncing_ball_medium-light_skin_tone:',
':man_bouncing_ball_medium_skin_tone:',
':man_bowing:',
':man_bowing_dark_skin_tone:',
':man_bowing_light_skin_tone:',
':man_bowing_medium-dark_skin_tone:',
':man_bowing_medium-light_skin_tone:',
':man_bowing_medium_skin_tone:',
':man_cartwheeling:',
':man_cartwheeling_dark_skin_tone:',
':man_cartwheeling_light_skin_tone:',
':man_cartwheeling_medium-dark_skin_tone:',
':man_cartwheeling_medium-light_skin_tone:',
':man_cartwheeling_medium_skin_tone:',
':man_construction_worker:',
':man_construction_worker_dark_skin_tone:',
':man_construction_worker_light_skin_tone:',
':man_construction_worker_medium-dark_skin_tone:',
':man_construction_worker_medium-light_skin_tone:',
':man_construction_worker_medium_skin_tone:',
':man_cook:',
':man_cook_dark_skin_tone:',
':man_cook_light_skin_tone:',
':man_cook_medium-dark_skin_tone:',
':man_cook_medium-light_skin_tone:',
':man_cook_medium_skin_tone:',
':man_dancing:',
':man_dancing_dark_skin_tone:',
':man_dancing_light_skin_tone:',
':man_dancing_medium-dark_skin_tone:',
':man_dancing_medium-light_skin_tone:',
':man_dancing_medium_skin_tone:',
':man_dark_skin_tone:',
':man_detective:',
':man_detective_dark_skin_tone:',
':man_detective_light_skin_tone:',
':man_detective_medium-dark_skin_tone:',
':man_detective_medium-light_skin_tone:',
':man_detective_medium_skin_tone:',
':man_facepalming:',
':man_facepalming_dark_skin_tone:',
':man_facepalming_light_skin_tone:',
':man_facepalming_medium-dark_skin_tone:',
':man_facepalming_medium-light_skin_tone:',
':man_facepalming_medium_skin_tone:',
':man_factory_worker:',
':man_factory_worker_dark_skin_tone:',
':man_factory_worker_light_skin_tone:',
':man_factory_worker_medium-dark_skin_tone:',
':man_factory_worker_medium-light_skin_tone:',
':man_factory_worker_medium_skin_tone:',
':man_farmer:',
':man_farmer_dark_skin_tone:',
':man_farmer_light_skin_tone:',
':man_farmer_medium-dark_skin_tone:',
':man_farmer_medium-light_skin_tone:',
':man_farmer_medium_skin_tone:',
':man_firefighter:',
':man_firefighter_dark_skin_tone:',
':man_firefighter_light_skin_tone:',
':man_firefighter_medium-dark_skin_tone:',
':man_firefighter_medium-light_skin_tone:',
':man_firefighter_medium_skin_tone:',
':man_frowning:',
':man_frowning_dark_skin_tone:',
':man_frowning_light_skin_tone:',
':man_frowning_medium-dark_skin_tone:',
':man_frowning_medium-light_skin_tone:',
':man_frowning_medium_skin_tone:',
':man_gesturing_NO:',
':man_gesturing_NO_dark_skin_tone:',
':man_gesturing_NO_light_skin_tone:',
':man_gesturing_NO_medium-dark_skin_tone:',
':man_gesturing_NO_medium-light_skin_tone:',
':man_gesturing_NO_medium_skin_tone:',
':man_gesturing_OK:',
':man_gesturing_OK_dark_skin_tone:',
':man_gesturing_OK_light_skin_tone:',
':man_gesturing_OK_medium-dark_skin_tone:',
':man_gesturing_OK_medium-light_skin_tone:',
':man_gesturing_OK_medium_skin_tone:',
':man_getting_haircut:',
':man_getting_haircut_dark_skin_tone:',
':man_getting_haircut_light_skin_tone:',
':man_getting_haircut_medium-dark_skin_tone:',
':man_getting_haircut_medium-light_skin_tone:',
':man_getting_haircut_medium_skin_tone:',
':man_getting_massage:',
':man_getting_massage_dark_skin_tone:',
':man_getting_massage_light_skin_tone:',
':man_getting_massage_medium-dark_skin_tone:',
':man_getting_massage_medium-light_skin_tone:',
':man_getting_massage_medium_skin_tone:',
':man_golfing:',
':man_golfing_dark_skin_tone:',
':man_golfing_light_skin_tone:',
':man_golfing_medium-dark_skin_tone:',
':man_golfing_medium-light_skin_tone:',
':man_golfing_medium_skin_tone:',
':man_guard:',
':man_guard_dark_skin_tone:',
':man_guard_light_skin_tone:',
':man_guard_medium-dark_skin_tone:',
':man_guard_medium-light_skin_tone:',
':man_guard_medium_skin_tone:',
':man_health_worker:',
':man_health_worker_dark_skin_tone:',
':man_health_worker_light_skin_tone:',
':man_health_worker_medium-dark_skin_tone:',
':man_health_worker_medium-light_skin_tone:',
':man_health_worker_medium_skin_tone:',
':man_in_business_suit_levitating:',
':man_in_business_suit_levitating_dark_skin_tone:',
':man_in_business_suit_levitating_light_skin_tone:',
':man_in_business_suit_levitating_medium-dark_skin_tone:',
':man_in_business_suit_levitating_medium-light_skin_tone:',
':man_in_business_suit_levitating_medium_skin_tone:',
':man_in_tuxedo:',
':man_in_tuxedo_dark_skin_tone:',
':man_in_tuxedo_light_skin_tone:',
':man_in_tuxedo_medium-dark_skin_tone:',
':man_in_tuxedo_medium-light_skin_tone:',
':man_in_tuxedo_medium_skin_tone:',
':man_judge:',
':man_judge_dark_skin_tone:',
':man_judge_light_skin_tone:',
':man_judge_medium-dark_skin_tone:',
':man_judge_medium-light_skin_tone:',
':man_judge_medium_skin_tone:',
':man_juggling:',
':man_juggling_dark_skin_tone:',
':man_juggling_light_skin_tone:',
':man_juggling_medium-dark_skin_tone:',
':man_juggling_medium-light_skin_tone:',
':man_juggling_medium_skin_tone:',
':man_lifting_weights:',
':man_lifting_weights_dark_skin_tone:',
':man_lifting_weights_light_skin_tone:',
':man_lifting_weights_medium-dark_skin_tone:',
':man_lifting_weights_medium-light_skin_tone:',
':man_lifting_weights_medium_skin_tone:',
':man_light_skin_tone:',
':man_mechanic:',
':man_mechanic_dark_skin_tone:',
':man_mechanic_light_skin_tone:',
':man_mechanic_medium-dark_skin_tone:',
':man_mechanic_medium-light_skin_tone:',
':man_mechanic_medium_skin_tone:',
':man_medium-dark_skin_tone:',
':man_medium-light_skin_tone:',
':man_medium_skin_tone:',
':man_mountain_biking:',
':man_mountain_biking_dark_skin_tone:',
':man_mountain_biking_light_skin_tone:',
':man_mountain_biking_medium-dark_skin_tone:',
':man_mountain_biking_medium-light_skin_tone:',
':man_mountain_biking_medium_skin_tone:',
':man_office_worker:',
':man_office_worker_dark_skin_tone:',
':man_office_worker_light_skin_tone:',
':man_office_worker_medium-dark_skin_tone:',
':man_office_worker_medium-light_skin_tone:',
':man_office_worker_medium_skin_tone:',
':man_pilot:',
':man_pilot_dark_skin_tone:',
':man_pilot_light_skin_tone:',
':man_pilot_medium-dark_skin_tone:',
':man_pilot_medium-light_skin_tone:',
':man_pilot_medium_skin_tone:',
':man_playing_handball:',
':man_playing_handball_dark_skin_tone:',
':man_playing_handball_light_skin_tone:',
':man_playing_handball_medium-dark_skin_tone:',
':man_playing_handball_medium-light_skin_tone:',
':man_playing_handball_medium_skin_tone:',
':man_playing_water_polo:',
':man_playing_water_polo_dark_skin_tone:',
':man_playing_water_polo_light_skin_tone:',
':man_playing_water_polo_medium-dark_skin_tone:',
':man_playing_water_polo_medium-light_skin_tone:',
':man_playing_water_polo_medium_skin_tone:',
':man_police_officer:',
':man_police_officer_dark_skin_tone:',
':man_police_officer_light_skin_tone:',
':man_police_officer_medium-dark_skin_tone:',
':man_police_officer_medium-light_skin_tone:',
':man_police_officer_medium_skin_tone:',
':man_pouting:',
':man_pouting_dark_skin_tone:',
':man_pouting_light_skin_tone:',
':man_pouting_medium-dark_skin_tone:',
':man_pouting_medium-light_skin_tone:',
':man_pouting_medium_skin_tone:',
':man_raising_hand:',
':man_raising_hand_dark_skin_tone:',
':man_raising_hand_light_skin_tone:',
':man_raising_hand_medium-dark_skin_tone:',
':man_raising_hand_medium-light_skin_tone:',
':man_raising_hand_medium_skin_tone:',
':man_rowing_boat:',
':man_rowing_boat_dark_skin_tone:',
':man_rowing_boat_light_skin_tone:',
':man_rowing_boat_medium-dark_skin_tone:',
':man_rowing_boat_medium-light_skin_tone:',
':man_rowing_boat_medium_skin_tone:',
':man_running:',
':man_running_dark_skin_tone:',
':man_running_light_skin_tone:',
':man_running_medium-dark_skin_tone:',
':man_running_medium-light_skin_tone:',
':man_running_medium_skin_tone:',
':man_scientist:',
':man_scientist_dark_skin_tone:',
':man_scientist_light_skin_tone:',
':man_scientist_medium-dark_skin_tone:',
':man_scientist_medium-light_skin_tone:',
':man_scientist_medium_skin_tone:',
':man_shrugging:',
':man_shrugging_dark_skin_tone:',
':man_shrugging_light_skin_tone:',
':man_shrugging_medium-dark_skin_tone:',
':man_shrugging_medium-light_skin_tone:',
':man_shrugging_medium_skin_tone:',
':man_singer:',
':man_singer_dark_skin_tone:',
':man_singer_light_skin_tone:',
':man_singer_medium-dark_skin_tone:',
':man_singer_medium-light_skin_tone:',
':man_singer_medium_skin_tone:',
':man_student:',
':man_student_dark_skin_tone:',
':man_student_light_skin_tone:',
':man_student_medium-dark_skin_tone:',
':man_student_medium-light_skin_tone:',
':man_student_medium_skin_tone:',
':man_surfing:',
':man_surfing_dark_skin_tone:',
':man_surfing_light_skin_tone:',
':man_surfing_medium-dark_skin_tone:',
':man_surfing_medium-light_skin_tone:',
':man_surfing_medium_skin_tone:',
':man_swimming:',
':man_swimming_dark_skin_tone:',
':man_swimming_light_skin_tone:',
':man_swimming_medium-dark_skin_tone:',
':man_swimming_medium-light_skin_tone:',
':man_swimming_medium_skin_tone:',
':man_teacher:',
':man_teacher_dark_skin_tone:',
':man_teacher_light_skin_tone:',
':man_teacher_medium-dark_skin_tone:',
':man_teacher_medium-light_skin_tone:',
':man_teacher_medium_skin_tone:',
':man_technologist:',
':man_technologist_dark_skin_tone:',
':man_technologist_light_skin_tone:',
':man_technologist_medium-dark_skin_tone:',
':man_technologist_medium-light_skin_tone:',
':man_technologist_medium_skin_tone:',
':man_tipping_hand:',
':man_tipping_hand_dark_skin_tone:',
':man_tipping_hand_light_skin_tone:',
':man_tipping_hand_medium-dark_skin_tone:',
':man_tipping_hand_medium-light_skin_tone:',
':man_tipping_hand_medium_skin_tone:',
':man_walking:',
':man_walking_dark_skin_tone:',
':man_walking_light_skin_tone:',
':man_walking_medium-dark_skin_tone:',
':man_walking_medium-light_skin_tone:',
':man_walking_medium_skin_tone:',
':man_wearing_turban:',
':man_wearing_turban_dark_skin_tone:',
':man_wearing_turban_light_skin_tone:',
':man_wearing_turban_medium-dark_skin_tone:',
':man_wearing_turban_medium-light_skin_tone:',
':man_wearing_turban_medium_skin_tone:',
':man_with_Chinese_cap:',
':man_with_Chinese_cap_dark_skin_tone:',
':man_with_Chinese_cap_light_skin_tone:',
':man_with_Chinese_cap_medium-dark_skin_tone:',
':man_with_Chinese_cap_medium-light_skin_tone:',
':man_with_Chinese_cap_medium_skin_tone:',
':mantelpiece_clock:',
':man’s_shoe:',
':map_of_Japan:',
':maple_leaf:',
':martial_arts_uniform:',
':meat_on_bone:',
':medical_symbol:',
':medium-dark_skin_tone:',
':medium-light_skin_tone:',
':medium_skin_tone:',
':megaphone:',
':melon:',
':memo:',
':men_with_bunny_ears_partying:',
':men_wrestling:',
':menorah:',
':men’s_room:',
':metro:',
':microphone:',
':microscope:',
':middle_finger:',
':middle_finger_dark_skin_tone:',
':middle_finger_light_skin_tone:',
':middle_finger_medium-dark_skin_tone:',
':middle_finger_medium-light_skin_tone:',
':middle_finger_medium_skin_tone:',
':military_medal:',
':milky_way:',
':minibus:',
':moai:',
':mobile_phone:',
':mobile_phone_off:',
':mobile_phone_with_arrow:',
':money-mouth_face:',
':money_bag:',
':money_with_wings:',
':monkey:',
':monkey_face:',
':monorail:',
':moon_viewing_ceremony:',
':mosque:',
':motor_boat:',
':motor_scooter:',
':motorcycle:',
':motorway:',
':mount_fuji:',
':mountain:',
':mountain_cableway:',
':mountain_railway:',
':mouse:',
':mouse_face:',
':mouth:',
':movie_camera:',
':mushroom:',
':musical_keyboard:',
':musical_note:',
':musical_notes:',
':musical_score:',
':muted_speaker:',
':nail_polish:',
':nail_polish_dark_skin_tone:',
':nail_polish_light_skin_tone:',
':nail_polish_medium-dark_skin_tone:',
':nail_polish_medium-light_skin_tone:',
':nail_polish_medium_skin_tone:',
':name_badge:',
':national_park:',
':nauseated_face:',
':necktie:',
':nerd_face:',
':neutral_face:',
':new_moon:',
':new_moon_face:',
':newspaper:',
':next_track_button:',
':night_with_stars:',
':nine-thirty:',
':nine_o’clock:',
':no_bicycles:',
':no_entry:',
':no_littering:',
':no_mobile_phones:',
':no_one_under_eighteen:',
':no_pedestrians:',
':no_smoking:',
':non-potable_water:',
':nose:',
':nose_dark_skin_tone:',
':nose_light_skin_tone:',
':nose_medium-dark_skin_tone:',
':nose_medium-light_skin_tone:',
':nose_medium_skin_tone:',
':notebook:',
':notebook_with_decorative_cover:',
':nut_and_bolt:',
':octopus:',
':oden:',
':office_building:',
':ogre:',
':oil_drum:',
':old_key:',
':old_man:',
':old_man_dark_skin_tone:',
':old_man_light_skin_tone:',
':old_man_medium-dark_skin_tone:',
':old_man_medium-light_skin_tone:',
':old_man_medium_skin_tone:',
':old_woman:',
':old_woman_dark_skin_tone:',
':old_woman_light_skin_tone:',
':old_woman_medium-dark_skin_tone:',
':old_woman_medium-light_skin_tone:',
':old_woman_medium_skin_tone:',
':om:',
':oncoming_automobile:',
':oncoming_bus:',
':oncoming_fist:',
':oncoming_fist_dark_skin_tone:',
':oncoming_fist_light_skin_tone:',
':oncoming_fist_medium-dark_skin_tone:',
':oncoming_fist_medium-light_skin_tone:',
':oncoming_fist_medium_skin_tone:',
':oncoming_police_car:',
':oncoming_taxi:',
':one-thirty:',
':one_o’clock:',
':open_book:',
':open_file_folder:',
':open_hands:',
':open_hands_dark_skin_tone:',
':open_hands_light_skin_tone:',
':open_hands_medium-dark_skin_tone:',
':open_hands_medium-light_skin_tone:',
':open_hands_medium_skin_tone:',
':open_mailbox_with_lowered_flag:',
':open_mailbox_with_raised_flag:',
':optical_disk:',
':orange_book:',
':orthodox_cross:',
':outbox_tray:',
':owl:',
':ox:',
':package:',
':page_facing_up:',
':page_with_curl:',
':pager:',
':paintbrush:',
':palm_tree:',
':pancakes:',
':panda_face:',
':paperclip:',
':part_alternation_mark:',
':party_popper:',
':passenger_ship:',
':passport_control:',
':pause_button:',
':paw_prints:',
':peace_symbol:',
':peach:',
':peanuts:',
':pear:',
':pen:',
':pencil:',
':penguin:',
':pensive_face:',
':people_with_bunny_ears_partying:',
':people_wrestling:',
':performing_arts:',
':persevering_face:',
':person_biking:',
':person_biking_dark_skin_tone:',
':person_biking_light_skin_tone:',
':person_biking_medium-dark_skin_tone:',
':person_biking_medium-light_skin_tone:',
':person_biking_medium_skin_tone:',
':person_bouncing_ball:',
':person_bouncing_ball_dark_skin_tone:',
':person_bouncing_ball_light_skin_tone:',
':person_bouncing_ball_medium-dark_skin_tone:',
':person_bouncing_ball_medium-light_skin_tone:',
':person_bouncing_ball_medium_skin_tone:',
':person_bowing:',
':person_bowing_dark_skin_tone:',
':person_bowing_light_skin_tone:',
':person_bowing_medium-dark_skin_tone:',
':person_bowing_medium-light_skin_tone:',
':person_bowing_medium_skin_tone:',
':person_cartwheeling:',
':person_cartwheeling_dark_skin_tone:',
':person_cartwheeling_light_skin_tone:',
':person_cartwheeling_medium-dark_skin_tone:',
':person_cartwheeling_medium-light_skin_tone:',
':person_cartwheeling_medium_skin_tone:',
':person_facepalming:',
':person_facepalming_dark_skin_tone:',
':person_facepalming_light_skin_tone:',
':person_facepalming_medium-dark_skin_tone:',
':person_facepalming_medium-light_skin_tone:',
':person_facepalming_medium_skin_tone:',
':person_fencing:',
':person_frowning:',
':person_frowning_dark_skin_tone:',
':person_frowning_light_skin_tone:',
':person_frowning_medium-dark_skin_tone:',
':person_frowning_medium-light_skin_tone:',
':person_frowning_medium_skin_tone:',
':person_gesturing_NO:',
':person_gesturing_NO_dark_skin_tone:',
':person_gesturing_NO_light_skin_tone:',
':person_gesturing_NO_medium-dark_skin_tone:',
':person_gesturing_NO_medium-light_skin_tone:',
':person_gesturing_NO_medium_skin_tone:',
':person_gesturing_OK:',
':person_gesturing_OK_dark_skin_tone:',
':person_gesturing_OK_light_skin_tone:',
':person_gesturing_OK_medium-dark_skin_tone:',
':person_gesturing_OK_medium-light_skin_tone:',
':person_gesturing_OK_medium_skin_tone:',
':person_getting_haircut:',
':person_getting_haircut_dark_skin_tone:',
':person_getting_haircut_light_skin_tone:',
':person_getting_haircut_medium-dark_skin_tone:',
':person_getting_haircut_medium-light_skin_tone:',
':person_getting_haircut_medium_skin_tone:',
':person_getting_massage:',
':person_getting_massage_dark_skin_tone:',
':person_getting_massage_light_skin_tone:',
':person_getting_massage_medium-dark_skin_tone:',
':person_getting_massage_medium-light_skin_tone:',
':person_getting_massage_medium_skin_tone:',
':person_golfing:',
':person_golfing_dark_skin_tone:',
':person_golfing_light_skin_tone:',
':person_golfing_medium-dark_skin_tone:',
':person_golfing_medium-light_skin_tone:',
':person_golfing_medium_skin_tone:',
':person_in_bed:',
':person_in_bed_dark_skin_tone:',
':person_in_bed_light_skin_tone:',
':person_in_bed_medium-dark_skin_tone:',
':person_in_bed_medium-light_skin_tone:',
':person_in_bed_medium_skin_tone:',
':person_juggling:',
':person_juggling_dark_skin_tone:',
':person_juggling_light_skin_tone:',
':person_juggling_medium-dark_skin_tone:',
':person_juggling_medium-light_skin_tone:',
':person_juggling_medium_skin_tone:',
':person_lifting_weights:',
':person_lifting_weights_dark_skin_tone:',
':person_lifting_weights_light_skin_tone:',
':person_lifting_weights_medium-dark_skin_tone:',
':person_lifting_weights_medium-light_skin_tone:',
':person_lifting_weights_medium_skin_tone:',
':person_mountain_biking:',
':person_mountain_biking_dark_skin_tone:',
':person_mountain_biking_light_skin_tone:',
':person_mountain_biking_medium-dark_skin_tone:',
':person_mountain_biking_medium-light_skin_tone:',
':person_mountain_biking_medium_skin_tone:',
':person_playing_handball:',
':person_playing_handball_dark_skin_tone:',
':person_playing_handball_light_skin_tone:',
':person_playing_handball_medium-dark_skin_tone:',
':person_playing_handball_medium-light_skin_tone:',
':person_playing_handball_medium_skin_tone:',
':person_playing_water_polo:',
':person_playing_water_polo_dark_skin_tone:',
':person_playing_water_polo_light_skin_tone:',
':person_playing_water_polo_medium-dark_skin_tone:',
':person_playing_water_polo_medium-light_skin_tone:',
':person_playing_water_polo_medium_skin_tone:',
':person_pouting:',
':person_pouting_dark_skin_tone:',
':person_pouting_light_skin_tone:',
':person_pouting_medium-dark_skin_tone:',
':person_pouting_medium-light_skin_tone:',
':person_pouting_medium_skin_tone:',
':person_raising_hand:',
':person_raising_hand_dark_skin_tone:',
':person_raising_hand_light_skin_tone:',
':person_raising_hand_medium-dark_skin_tone:',
':person_raising_hand_medium-light_skin_tone:',
':person_raising_hand_medium_skin_tone:',
':person_rowing_boat:',
':person_rowing_boat_dark_skin_tone:',
':person_rowing_boat_light_skin_tone:',
':person_rowing_boat_medium-dark_skin_tone:',
':person_rowing_boat_medium-light_skin_tone:',
':person_rowing_boat_medium_skin_tone:',
':person_running:',
':person_running_dark_skin_tone:',
':person_running_light_skin_tone:',
':person_running_medium-dark_skin_tone:',
':person_running_medium-light_skin_tone:',
':person_running_medium_skin_tone:',
':person_shrugging:',
':person_shrugging_dark_skin_tone:',
':person_shrugging_light_skin_tone:',
':person_shrugging_medium-dark_skin_tone:',
':person_shrugging_medium-light_skin_tone:',
':person_shrugging_medium_skin_tone:',
':person_surfing:',
':person_surfing_dark_skin_tone:',
':person_surfing_light_skin_tone:',
':person_surfing_medium-dark_skin_tone:',
':person_surfing_medium-light_skin_tone:',
':person_surfing_medium_skin_tone:',
':person_swimming:',
':person_swimming_dark_skin_tone:',
':person_swimming_light_skin_tone:',
':person_swimming_medium-dark_skin_tone:',
':person_swimming_medium-light_skin_tone:',
':person_swimming_medium_skin_tone:',
':person_taking_bath:',
':person_taking_bath_dark_skin_tone:',
':person_taking_bath_light_skin_tone:',
':person_taking_bath_medium-dark_skin_tone:',
':person_taking_bath_medium-light_skin_tone:',
':person_taking_bath_medium_skin_tone:',
':person_tipping_hand:',
':person_tipping_hand_dark_skin_tone:',
':person_tipping_hand_light_skin_tone:',
':person_tipping_hand_medium-dark_skin_tone:',
':person_tipping_hand_medium-light_skin_tone:',
':person_tipping_hand_medium_skin_tone:',
':person_walking:',
':person_walking_dark_skin_tone:',
':person_walking_light_skin_tone:',
':person_walking_medium-dark_skin_tone:',
':person_walking_medium-light_skin_tone:',
':person_walking_medium_skin_tone:',
':person_wearing_turban:',
':person_wearing_turban_dark_skin_tone:',
':person_wearing_turban_light_skin_tone:',
':person_wearing_turban_medium-dark_skin_tone:',
':person_wearing_turban_medium-light_skin_tone:',
':person_wearing_turban_medium_skin_tone:',
':pick:',
':pig:',
':pig_face:',
':pig_nose:',
':pile_of_poo:',
':pill:',
':pine_decoration:',
':pineapple:',
':ping_pong:',
':pistol:',
':pizza:',
':place_of_worship:',
':play_button:',
':play_or_pause_button:',
':police_car:',
':police_car_light:',
':police_officer:',
':police_officer_dark_skin_tone:',
':police_officer_light_skin_tone:',
':police_officer_medium-dark_skin_tone:',
':police_officer_medium-light_skin_tone:',
':police_officer_medium_skin_tone:',
':poodle:',
':pool_8_ball:',
':popcorn:',
':post_office:',
':postal_horn:',
':postbox:',
':pot_of_food:',
':potable_water:',
':potato:',
':poultry_leg:',
':pound_banknote:',
':pouting_cat_face:',
':pouting_face:',
':prayer_beads:',
':pregnant_woman:',
':pregnant_woman_dark_skin_tone:',
':pregnant_woman_light_skin_tone:',
':pregnant_woman_medium-dark_skin_tone:',
':pregnant_woman_medium-light_skin_tone:',
':pregnant_woman_medium_skin_tone:',
':prince:',
':prince_dark_skin_tone:',
':prince_light_skin_tone:',
':prince_medium-dark_skin_tone:',
':prince_medium-light_skin_tone:',
':prince_medium_skin_tone:',
':princess:',
':princess_dark_skin_tone:',
':princess_light_skin_tone:',
':princess_medium-dark_skin_tone:',
':princess_medium-light_skin_tone:',
':princess_medium_skin_tone:',
':printer:',
':prohibited:',
':purple_heart:',
':purse:',
':pushpin:',
':question_mark:',
':rabbit:',
':rabbit_face:',
':racing_car:',
':radio:',
':radio_button:',
':radioactive:',
':railway_car:',
':railway_track:',
':rainbow:',
':rainbow_flag:',
':raised_back_of_hand:',
':raised_back_of_hand_dark_skin_tone:',
':raised_back_of_hand_light_skin_tone:',
':raised_back_of_hand_medium-dark_skin_tone:',
':raised_back_of_hand_medium-light_skin_tone:',
':raised_back_of_hand_medium_skin_tone:',
':raised_fist:',
':raised_fist_dark_skin_tone:',
':raised_fist_light_skin_tone:',
':raised_fist_medium-dark_skin_tone:',
':raised_fist_medium-light_skin_tone:',
':raised_fist_medium_skin_tone:',
':raised_hand:',
':raised_hand_dark_skin_tone:',
':raised_hand_light_skin_tone:',
':raised_hand_medium-dark_skin_tone:',
':raised_hand_medium-light_skin_tone:',
':raised_hand_medium_skin_tone:',
':raised_hand_with_fingers_splayed:',
':raised_hand_with_fingers_splayed_dark_skin_tone:',
':raised_hand_with_fingers_splayed_light_skin_tone:',
':raised_hand_with_fingers_splayed_medium-dark_skin_tone:',
':raised_hand_with_fingers_splayed_medium_skin_tone:',
':raising_hands:',
':raising_hands_dark_skin_tone:',
':raising_hands_light_skin_tone:',
':raising_hands_medium-dark_skin_tone:',
':raising_hands_medium-light_skin_tone:',
':raising_hands_medium_skin_tone:',
':ram:',
':rat:',
':record_button:',
':recycling_symbol:',
':red_apple:',
':red_circle:',
':red_heart:',
':red_paper_lantern:',
':red_triangle_pointed_down:',
':red_triangle_pointed_up:',
':registered:',
':relieved_face:',
':reminder_ribbon:',
':repeat_button:',
':repeat_single_button:',
':rescue_worker’s_helmet:',
':restroom:',
':reverse_button:',
':revolving_hearts:',
':rhinoceros:',
':ribbon:',
':rice_ball:',
':rice_cracker:',
':right-facing_fist:',
':right-facing_fist_dark_skin_tone:',
':right-facing_fist_light_skin_tone:',
':right-facing_fist_medium-dark_skin_tone:',
':right-facing_fist_medium-light_skin_tone:',
':right-facing_fist_medium_skin_tone:',
':right-pointing_magnifying_glass:',
':right_anger_bubble:',
':right_arrow:',
':right_arrow_curving_down:',
':right_arrow_curving_left:',
':right_arrow_curving_up:',
':ring:',
':roasted_sweet_potato:',
':robot_face:',
':rocket:',
':rolled-up_newspaper:',
':roller_coaster:',
':rolling_on_the_floor_laughing:',
':rooster:',
':rose:',
':rosette:',
':round_pushpin:',
':rugby_football:',
':running_shirt:',
':running_shoe:',
':sailboat:',
':sake:',
':satellite:',
':satellite_antenna:',
':saxophone:',
':school:',
':school_backpack:',
':scissors:',
':scorpion:',
':scroll:',
':seat:',
':see-no-evil_monkey:',
':seedling:',
':selfie:',
':selfie_dark_skin_tone:',
':selfie_light_skin_tone:',
':selfie_medium-dark_skin_tone:',
':selfie_medium-light_skin_tone:',
':selfie_medium_skin_tone:',
':seven-thirty:',
':seven_o’clock:',
':shallow_pan_of_food:',
':shamrock:',
':shark:',
':shaved_ice:',
':sheaf_of_rice:',
':sheep:',
':shield:',
':shinto_shrine:',
':ship:',
':shooting_star:',
':shopping_bags:',
':shopping_cart:',
':shortcake:',
':shower:',
':shrimp:',
':shuffle_tracks_button:',
':sign_of_the_horns:',
':sign_of_the_horns_dark_skin_tone:',
':sign_of_the_horns_light_skin_tone:',
':sign_of_the_horns_medium-dark_skin_tone:',
':sign_of_the_horns_medium-light_skin_tone:',
':sign_of_the_horns_medium_skin_tone:',
':six-thirty:',
':six_o’clock:',
':skier:',
':skis:',
':skull:',
':skull_and_crossbones:',
':sleeping_face:',
':sleepy_face:',
':slightly_frowning_face:',
':slightly_smiling_face:',
':slot_machine:',
':small_airplane:',
':small_blue_diamond:',
':small_orange_diamond:',
':smiling_cat_face_with_heart-eyes:',
':smiling_cat_face_with_open_mouth:',
':smiling_face:',
':smiling_face_with_halo:',
':smiling_face_with_heart-eyes:',
':smiling_face_with_horns:',
':smiling_face_with_open_mouth:',
':smiling_face_with_open_mouth_&_closed_eyes:',
':smiling_face_with_open_mouth_&_cold_sweat:',
':smiling_face_with_open_mouth_&_smiling_eyes:',
':smiling_face_with_smiling_eyes:',
':smiling_face_with_sunglasses:',
':smirking_face:',
':snail:',
':snake:',
':sneezing_face:',
':snow-capped_mountain:',
':snowboarder:',
':snowboarder_dark_skin_tone:',
':snowboarder_light_skin_tone:',
':snowboarder_medium-dark_skin_tone:',
':snowboarder_medium-light_skin_tone:',
':snowboarder_medium_skin_tone:',
':snowflake:',
':snowman:',
':snowman_without_snow:',
':soccer_ball:',
':soft_ice_cream:',
':spade_suit:',
':spaghetti:',
':sparkle:',
':sparkler:',
':sparkles:',
':sparkling_heart:',
':speak-no-evil_monkey:',
':speaker_high_volume:',
':speaker_low_volume:',
':speaker_medium_volume:',
':speaking_head:',
':speech_balloon:',
':speedboat:',
':spider:',
':spider_web:',
':spiral_calendar:',
':spiral_notepad:',
':spiral_shell:',
':spoon:',
':sport_utility_vehicle:',
':sports_medal:',
':spouting_whale:',
':squid:',
':stadium:',
':star_and_crescent:',
':star_of_David:',
':station:',
':steaming_bowl:',
':stop_button:',
':stop_sign:',
':stopwatch:',
':straight_ruler:',
':strawberry:',
':studio_microphone:',
':stuffed_flatbread:',
':sun:',
':sun_behind_cloud:',
':sun_behind_large_cloud:',
':sun_behind_rain_cloud:',
':sun_behind_small_cloud:',
':sun_with_face:',
':sunflower:',
':sunglasses:',
':sunrise:',
':sunrise_over_mountains:',
':sunset:',
':sushi:',
':suspension_railway:',
':sweat_droplets:',
':synagogue:',
':syringe:',
':t-shirt:',
':taco:',
':tanabata_tree:',
':tangerine:',
':taxi:',
':teacup_without_handle:',
':tear-off_calendar:',
':telephone:',
':telephone_receiver:',
':telescope:',
':television:',
':ten-thirty:',
':ten_o’clock:',
':tennis:',
':tent:',
':thermometer:',
':thinking_face:',
':thought_balloon:',
':three-thirty:',
':three_o’clock:',
':thumbs_down:',
':thumbs_down_dark_skin_tone:',
':thumbs_down_light_skin_tone:',
':thumbs_down_medium-dark_skin_tone:',
':thumbs_down_medium-light_skin_tone:',
':thumbs_down_medium_skin_tone:',
':thumbs_up:',
':thumbs_up_dark_skin_tone:',
':thumbs_up_light_skin_tone:',
':thumbs_up_medium-dark_skin_tone:',
':thumbs_up_medium-light_skin_tone:',
':thumbs_up_medium_skin_tone:',
':ticket:',
':tiger:',
':tiger_face:',
':timer_clock:',
':tired_face:',
':toilet:',
':tomato:',
':tongue:',
':top_hat:',
':tornado:',
':trackball:',
':tractor:',
':trade_mark:',
':train:',
':tram:',
':tram_car:',
':triangular_flag:',
':triangular_ruler:',
':trident_emblem:',
':trolleybus:',
':trophy:',
':tropical_drink:',
':tropical_fish:',
':trumpet:',
':tulip:',
':tumbler_glass:',
':turkey:',
':turtle:',
':twelve-thirty:',
':twelve_o’clock:',
':two-hump_camel:',
':two-thirty:',
':two_hearts:',
':two_men_holding_hands:',
':two_o’clock:',
':two_women_holding_hands:',
':umbrella:',
':umbrella_on_ground:',
':umbrella_with_rain_drops:',
':unamused_face:',
':unicorn_face:',
':unlocked:',
':up-down_arrow:',
':up-left_arrow:',
':up-right_arrow:',
':up_arrow:',
':up_button:',
':upside-down_face:',
':vertical_traffic_light:',
':vibration_mode:',
':victory_hand:',
':victory_hand_dark_skin_tone:',
':victory_hand_light_skin_tone:',
':victory_hand_medium-dark_skin_tone:',
':victory_hand_medium-light_skin_tone:',
':victory_hand_medium_skin_tone:',
':video_camera:',
':video_game:',
':videocassette:',
':violin:',
':volcano:',
':volleyball:',
':vulcan_salute:',
':vulcan_salute_dark_skin_tone:',
':vulcan_salute_light_skin_tone:',
':vulcan_salute_medium-dark_skin_tone:',
':vulcan_salute_medium-light_skin_tone:',
':vulcan_salute_medium_skin_tone:',
':waning_crescent_moon:',
':waning_gibbous_moon:',
':warning:',
':wastebasket:',
':watch:',
':water_buffalo:',
':water_closet:',
':water_wave:',
':watermelon:',
':waving_hand:',
':waving_hand_dark_skin_tone:',
':waving_hand_light_skin_tone:',
':waving_hand_medium-dark_skin_tone:',
':waving_hand_medium-light_skin_tone:',
':waving_hand_medium_skin_tone:',
':wavy_dash:',
':waxing_crescent_moon:',
':waxing_gibbous_moon:',
':weary_cat_face:',
':weary_face:',
':wedding:',
':whale:',
':wheel_of_dharma:',
':wheelchair_symbol:',
':white_circle:',
':white_exclamation_mark:',
':white_flag:',
':white_flower:',
':white_heavy_check_mark:',
':white_large_square:',
':white_medium-small_square:',
':white_medium_square:',
':white_medium_star:',
':white_question_mark:',
':white_small_square:',
':white_square_button:',
':wilted_flower:',
':wind_chime:',
':wind_face:',
':wine_glass:',
':winking_face:',
':wolf_face:',
':woman:',
':woman_artist:',
':woman_artist_dark_skin_tone:',
':woman_artist_light_skin_tone:',
':woman_artist_medium-dark_skin_tone:',
':woman_artist_medium-light_skin_tone:',
':woman_artist_medium_skin_tone:',
':woman_astronaut:',
':woman_astronaut_dark_skin_tone:',
':woman_astronaut_light_skin_tone:',
':woman_astronaut_medium-dark_skin_tone:',
':woman_astronaut_medium-light_skin_tone:',
':woman_astronaut_medium_skin_tone:',
':woman_biking:',
':woman_biking_dark_skin_tone:',
':woman_biking_light_skin_tone:',
':woman_biking_medium-dark_skin_tone:',
':woman_biking_medium-light_skin_tone:',
':woman_biking_medium_skin_tone:',
':woman_bouncing_ball:',
':woman_bouncing_ball_dark_skin_tone:',
':woman_bouncing_ball_light_skin_tone:',
':woman_bouncing_ball_medium-dark_skin_tone:',
':woman_bouncing_ball_medium-light_skin_tone:',
':woman_bouncing_ball_medium_skin_tone:',
':woman_bowing:',
':woman_bowing_dark_skin_tone:',
':woman_bowing_light_skin_tone:',
':woman_bowing_medium-dark_skin_tone:',
':woman_bowing_medium-light_skin_tone:',
':woman_bowing_medium_skin_tone:',
':woman_cartwheeling:',
':woman_cartwheeling_dark_skin_tone:',
':woman_cartwheeling_light_skin_tone:',
':woman_cartwheeling_medium-dark_skin_tone:',
':woman_cartwheeling_medium-light_skin_tone:',
':woman_cartwheeling_medium_skin_tone:',
':woman_construction_worker:',
':woman_construction_worker_dark_skin_tone:',
':woman_construction_worker_light_skin_tone:',
':woman_construction_worker_medium-dark_skin_tone:',
':woman_construction_worker_medium-light_skin_tone:',
':woman_construction_worker_medium_skin_tone:',
':woman_cook:',
':woman_cook_dark_skin_tone:',
':woman_cook_light_skin_tone:',
':woman_cook_medium-dark_skin_tone:',
':woman_cook_medium-light_skin_tone:',
':woman_cook_medium_skin_tone:',
':woman_dancing:',
':woman_dancing_dark_skin_tone:',
':woman_dancing_light_skin_tone:',
':woman_dancing_medium-dark_skin_tone:',
':woman_dancing_medium-light_skin_tone:',
':woman_dancing_medium_skin_tone:',
':woman_dark_skin_tone:',
':woman_detective:',
':woman_detective_dark_skin_tone:',
':woman_detective_light_skin_tone:',
':woman_detective_medium-dark_skin_tone:',
':woman_detective_medium-light_skin_tone:',
':woman_detective_medium_skin_tone:',
':woman_facepalming:',
':woman_facepalming_dark_skin_tone:',
':woman_facepalming_light_skin_tone:',
':woman_facepalming_medium-dark_skin_tone:',
':woman_facepalming_medium-light_skin_tone:',
':woman_facepalming_medium_skin_tone:',
':woman_factory_worker:',
':woman_factory_worker_dark_skin_tone:',
':woman_factory_worker_light_skin_tone:',
':woman_factory_worker_medium-dark_skin_tone:',
':woman_factory_worker_medium-light_skin_tone:',
':woman_factory_worker_medium_skin_tone:',
':woman_farmer:',
':woman_farmer_dark_skin_tone:',
':woman_farmer_light_skin_tone:',
':woman_farmer_medium-dark_skin_tone:',
':woman_farmer_medium-light_skin_tone:',
':woman_farmer_medium_skin_tone:',
':woman_firefighter:',
':woman_firefighter_dark_skin_tone:',
':woman_firefighter_light_skin_tone:',
':woman_firefighter_medium-dark_skin_tone:',
':woman_firefighter_medium-light_skin_tone:',
':woman_firefighter_medium_skin_tone:',
':woman_frowning:',
':woman_frowning_dark_skin_tone:',
':woman_frowning_light_skin_tone:',
':woman_frowning_medium-dark_skin_tone:',
':woman_frowning_medium-light_skin_tone:',
':woman_frowning_medium_skin_tone:',
':woman_gesturing_NO:',
':woman_gesturing_NO_dark_skin_tone:',
':woman_gesturing_NO_light_skin_tone:',
':woman_gesturing_NO_medium-dark_skin_tone:',
':woman_gesturing_NO_medium-light_skin_tone:',
':woman_gesturing_NO_medium_skin_tone:',
':woman_gesturing_OK:',
':woman_gesturing_OK_dark_skin_tone:',
':woman_gesturing_OK_light_skin_tone:',
':woman_gesturing_OK_medium-dark_skin_tone:',
':woman_gesturing_OK_medium-light_skin_tone:',
':woman_gesturing_OK_medium_skin_tone:',
':woman_getting_haircut:',
':woman_getting_haircut_dark_skin_tone:',
':woman_getting_haircut_light_skin_tone:',
':woman_getting_haircut_medium-dark_skin_tone:',
':woman_getting_haircut_medium-light_skin_tone:',
':woman_getting_haircut_medium_skin_tone:',
':woman_getting_massage:',
':woman_getting_massage_dark_skin_tone:',
':woman_getting_massage_light_skin_tone:',
':woman_getting_massage_medium-dark_skin_tone:',
':woman_getting_massage_medium-light_skin_tone:',
':woman_getting_massage_medium_skin_tone:',
':woman_golfing:',
':woman_golfing_dark_skin_tone:',
':woman_golfing_light_skin_tone:',
':woman_golfing_medium-dark_skin_tone:',
':woman_golfing_medium-light_skin_tone:',
':woman_golfing_medium_skin_tone:',
':woman_guard:',
':woman_guard_dark_skin_tone:',
':woman_guard_light_skin_tone:',
':woman_guard_medium-dark_skin_tone:',
':woman_guard_medium-light_skin_tone:',
':woman_guard_medium_skin_tone:',
':woman_health_worker:',
':woman_health_worker_dark_skin_tone:',
':woman_health_worker_light_skin_tone:',
':woman_health_worker_medium-dark_skin_tone:',
':woman_health_worker_medium-light_skin_tone:',
':woman_health_worker_medium_skin_tone:',
':woman_judge:',
':woman_judge_dark_skin_tone:',
':woman_judge_light_skin_tone:',
':woman_judge_medium-dark_skin_tone:',
':woman_judge_medium-light_skin_tone:',
':woman_judge_medium_skin_tone:',
':woman_juggling:',
':woman_juggling_dark_skin_tone:',
':woman_juggling_light_skin_tone:',
':woman_juggling_medium-dark_skin_tone:',
':woman_juggling_medium-light_skin_tone:',
':woman_juggling_medium_skin_tone:',
':woman_lifting_weights:',
':woman_lifting_weights_dark_skin_tone:',
':woman_lifting_weights_light_skin_tone:',
':woman_lifting_weights_medium-dark_skin_tone:',
':woman_lifting_weights_medium-light_skin_tone:',
':woman_lifting_weights_medium_skin_tone:',
':woman_light_skin_tone:',
':woman_mechanic:',
':woman_mechanic_dark_skin_tone:',
':woman_mechanic_light_skin_tone:',
':woman_mechanic_medium-dark_skin_tone:',
':woman_mechanic_medium-light_skin_tone:',
':woman_mechanic_medium_skin_tone:',
':woman_medium-dark_skin_tone:',
':woman_medium-light_skin_tone:',
':woman_medium_skin_tone:',
':woman_mountain_biking:',
':woman_mountain_biking_dark_skin_tone:',
':woman_mountain_biking_light_skin_tone:',
':woman_mountain_biking_medium-dark_skin_tone:',
':woman_mountain_biking_medium-light_skin_tone:',
':woman_mountain_biking_medium_skin_tone:',
':woman_office_worker:',
':woman_office_worker_dark_skin_tone:',
':woman_office_worker_light_skin_tone:',
':woman_office_worker_medium-dark_skin_tone:',
':woman_office_worker_medium-light_skin_tone:',
':woman_office_worker_medium_skin_tone:',
':woman_pilot:',
':woman_pilot_dark_skin_tone:',
':woman_pilot_light_skin_tone:',
':woman_pilot_medium-dark_skin_tone:',
':woman_pilot_medium-light_skin_tone:',
':woman_pilot_medium_skin_tone:',
':woman_playing_handball:',
':woman_playing_handball_dark_skin_tone:',
':woman_playing_handball_light_skin_tone:',
':woman_playing_handball_medium-dark_skin_tone:',
':woman_playing_handball_medium-light_skin_tone:',
':woman_playing_handball_medium_skin_tone:',
':woman_playing_water_polo:',
':woman_playing_water_polo_dark_skin_tone:',
':woman_playing_water_polo_light_skin_tone:',
':woman_playing_water_polo_medium-dark_skin_tone:',
':woman_playing_water_polo_medium-light_skin_tone:',
':woman_playing_water_polo_medium_skin_tone:',
':woman_police_officer:',
':woman_police_officer_dark_skin_tone:',
':woman_police_officer_light_skin_tone:',
':woman_police_officer_medium-dark_skin_tone:',
':woman_police_officer_medium-light_skin_tone:',
':woman_police_officer_medium_skin_tone:',
':woman_pouting:',
':woman_pouting_dark_skin_tone:',
':woman_pouting_light_skin_tone:',
':woman_pouting_medium-dark_skin_tone:',
':woman_pouting_medium-light_skin_tone:',
':woman_pouting_medium_skin_tone:',
':woman_raising_hand:',
':woman_raising_hand_dark_skin_tone:',
':woman_raising_hand_light_skin_tone:',
':woman_raising_hand_medium-dark_skin_tone:',
':woman_raising_hand_medium-light_skin_tone:',
':woman_raising_hand_medium_skin_tone:',
':woman_rowing_boat:',
':woman_rowing_boat_dark_skin_tone:',
':woman_rowing_boat_light_skin_tone:',
':woman_rowing_boat_medium-dark_skin_tone:',
':woman_rowing_boat_medium-light_skin_tone:',
':woman_rowing_boat_medium_skin_tone:',
':woman_running:',
':woman_running_dark_skin_tone:',
':woman_running_light_skin_tone:',
':woman_running_medium-dark_skin_tone:',
':woman_running_medium-light_skin_tone:',
':woman_running_medium_skin_tone:',
':woman_scientist:',
':woman_scientist_dark_skin_tone:',
':woman_scientist_light_skin_tone:',
':woman_scientist_medium-dark_skin_tone:',
':woman_scientist_medium-light_skin_tone:',
':woman_scientist_medium_skin_tone:',
':woman_shrugging:',
':woman_shrugging_dark_skin_tone:',
':woman_shrugging_light_skin_tone:',
':woman_shrugging_medium-dark_skin_tone:',
':woman_shrugging_medium-light_skin_tone:',
':woman_shrugging_medium_skin_tone:',
':woman_singer:',
':woman_singer_dark_skin_tone:',
':woman_singer_light_skin_tone:',
':woman_singer_medium-dark_skin_tone:',
':woman_singer_medium-light_skin_tone:',
':woman_singer_medium_skin_tone:',
':woman_student:',
':woman_student_dark_skin_tone:',
':woman_student_light_skin_tone:',
':woman_student_medium-dark_skin_tone:',
':woman_student_medium-light_skin_tone:',
':woman_student_medium_skin_tone:',
':woman_surfing:',
':woman_surfing_dark_skin_tone:',
':woman_surfing_light_skin_tone:',
':woman_surfing_medium-dark_skin_tone:',
':woman_surfing_medium-light_skin_tone:',
':woman_surfing_medium_skin_tone:',
':woman_swimming:',
':woman_swimming_dark_skin_tone:',
':woman_swimming_light_skin_tone:',
':woman_swimming_medium-dark_skin_tone:',
':woman_swimming_medium-light_skin_tone:',
':woman_swimming_medium_skin_tone:',
':woman_teacher:',
':woman_teacher_dark_skin_tone:',
':woman_teacher_light_skin_tone:',
':woman_teacher_medium-dark_skin_tone:',
':woman_teacher_medium-light_skin_tone:',
':woman_teacher_medium_skin_tone:',
':woman_technologist:',
':woman_technologist_dark_skin_tone:',
':woman_technologist_light_skin_tone:',
':woman_technologist_medium-dark_skin_tone:',
':woman_technologist_medium-light_skin_tone:',
':woman_technologist_medium_skin_tone:',
':woman_tipping_hand:',
':woman_tipping_hand_dark_skin_tone:',
':woman_tipping_hand_light_skin_tone:',
':woman_tipping_hand_medium-dark_skin_tone:',
':woman_tipping_hand_medium-light_skin_tone:',
':woman_tipping_hand_medium_skin_tone:',
':woman_walking:',
':woman_walking_dark_skin_tone:',
':woman_walking_light_skin_tone:',
':woman_walking_medium-dark_skin_tone:',
':woman_walking_medium-light_skin_tone:',
':woman_walking_medium_skin_tone:',
':woman_wearing_turban:',
':woman_wearing_turban_dark_skin_tone:',
':woman_wearing_turban_light_skin_tone:',
':woman_wearing_turban_medium-dark_skin_tone:',
':woman_wearing_turban_medium-light_skin_tone:',
':woman_wearing_turban_medium_skin_tone:',
':woman’s_boot:',
':woman’s_clothes:',
':woman’s_hat:',
':woman’s_sandal:',
':women_with_bunny_ears_partying:',
':women_wrestling:',
':women’s_room:',
':world_map:',
':worried_face:',
':wrapped_gift:',
':wrench:',
':writing_hand:',
':writing_hand_dark_skin_tone:',
':writing_hand_light_skin_tone:',
':writing_hand_medium-dark_skin_tone:',
':writing_hand_medium-light_skin_tone:',
':writing_hand_medium_skin_tone:',
':yellow_heart:',
':yen_banknote:',
':yin_yang:',
':zipper-mouth_face:',
':zzz:',
':admission_tickets:',
':aerial_tramway:',
':airplane:',
':airplane_arriving:',
':airplane_departure:',
':alarm_clock:',
':alembic:',
':space_invader:',
':ambulance:',
':football:',
':amphora:',
':anchor:',
':anger:',
':angry:',
':anguished:',
':ant:',
':signal_strength:',
':arrows_counterclockwise:',
':aquarius:',
':aries:',
':arrow_heading_down:',
':arrow_heading_up:',
':articulated_lorry:',
':art:',
':astonished:',
':athletic_shoe:',
':atom_symbol:',
':eggplant:',
':atm:',
':car:',
':red_car:',
':baby:',
':angel:',
':baby_bottle:',
':baby_chick:',
':baby_symbol:',
':back:',
':camel:',
':badminton_racquet_and_shuttlecock:',
':baggage_claim:',
':balloon:',
':ballot_box_with_ballot:',
':ballot_box_with_check:',
':banana:',
':bank:',
':dollar:',
':euro:',
':pound:',
':yen:',
':bar_chart:',
':barber:',
':baseball:',
':basketball:',
':bath:',
':bathtub:',
':battery:',
':beach_with_umbrella:',
':bear:',
':heartbeat:',
':bed:',
':beer:',
':bell:',
':no_bell:',
':bellhop_bell:',
':bento:',
':bike:',
':bicyclist:',
':bikini:',
':8ball:',
':biohazard_sign:',
':bird:',
':birthday:',
':black_circle_for_record:',
':clubs:',
':diamonds:',
':arrow_double_down:',
':hearts:',
':black_large_square:',
':rewind:',
':black_left__pointing_double_triangle_with_vertical_bar:',
':arrow_backward:',
':black_medium_small_square:',
':black_medium_square:',
':black_nib:',
':question:',
':fast_forward:',
':black_right__pointing_double_triangle_with_vertical_bar:',
':arrow_forward:',
':black_right__pointing_triangle_with_double_vertical_bar:',
':arrow_right:',
':scissors:',
':black_small_square:',
':spades:',
':black_square_button:',
':black_square_for_stop:',
':sunny:',
':phone:',
':telephone:',
':recycle:',
':arrow_double_up:',
':blossom:',
':blowfish:',
':blue_book:',
':blue_heart:',
':boar:',
':bomb:',
':bookmark:',
':bookmark_tabs:',
':books:',
':bottle_with_popping_cork:',
':bouquet:',
':bow_and_arrow:',
':bowling:',
':boy:',
':bread:',
':bride_with_veil:',
':bridge_at_night:',
':briefcase:',
':broken_heart:',
':bug:',
':building_construction:',
':burrito:',
':bus:',
':busstop:',
':bust_in_silhouette:',
':busts_in_silhouette:',
':cactus:',
':date:',
':camera:',
':camera_with_flash:',
':camping:',
':cancer:',
':candle:',
':candy:',
':capricorn:',
':card_file_box:',
':card_index:',
':card_index_dividers:',
':carousel_horse:',
':flags:',
':cat2:',
':cat:',
':joy_cat:',
':smirk_cat:',
':chains:',
':chart_with_downwards_trend:',
':chart_with_upwards_trend:',
':chart:',
':mega:',
':cheese_wedge:',
':checkered_flag:',
':cherries:',
':cherry_blossom:',
':chestnut:',
':chicken:',
':children_crossing:',
':chipmunk:',
':chocolate_bar:',
':christmas_tree:',
':church:',
':cinema:',
':accept:',
':ideograph_advantage:',
':congratulations:',
':secret:',
':m:',
':circus_tent:',
':cityscape:',
':city_sunset:',
':clapper:',
':clap:',
':classical_building:',
':beers:',
':clipboard:',
':clock830:',
':clock8:',
':clock1130:',
':clock11:',
':clock530:',
':clock5:',
':clock430:',
':clock4:',
':clock930:',
':clock9:',
':clock130:',
':clock1:',
':clock730:',
':clock7:',
':clock630:',
':clock6:',
':clock1030:',
':clock10:',
':clock330:',
':clock3:',
':clock1230:',
':clock12:',
':clock230:',
':clock2:',
':arrows_clockwise:',
':repeat:',
':repeat_one:',
':closed_book:',
':closed_lock_with_key:',
':mailbox_closed:',
':mailbox:',
':closed_umbrella:',
':cloud:',
':cloud_with_lightning:',
':cloud_with_rain:',
':cloud_with_snow:',
':cloud_with_tornado:',
':cocktail:',
':coffin:',
':boom:',
':collision:',
':comet:',
':compression:',
':confetti_ball:',
':confounded:',
':confused:',
':construction:',
':construction_worker:',
':control_knobs:',
':convenience_store:',
':rice:',
':cookie:',
':egg:',
':copyright:',
':couch_and_lamp:',
':couple_with_heart:',
':cow2:',
':cow:',
':crab:',
':credit_card:',
':crescent_moon:',
':cricket_bat_and_ball:',
':crocodile:',
':x:',
':crossed_flags:',
':crossed_swords:',
':crown:',
':crying_cat_face:',
':cry:',
':crystal_ball:',
':curly_loop:',
':currency_exchange:',
':curry:',
':custard:',
':customs:',
':cyclone:',
':dagger_knife:',
':dancer:',
':dango:',
':dark_sunglasses:',
':dash:',
':deciduous_tree:',
':truck:',
':department_store:',
':derelict_house_building:',
':desert:',
':desert_island:',
':desktop_computer:',
':diamond_shape_with_a_dot_inside:',
':dart:',
':disappointed_relieved:',
':disappointed:',
':dizzy_face:',
':dizzy:',
':do_not_litter:',
':dog2:',
':dog:',
':dolphin:',
':flipper:',
':door:',
':loop:',
':bangbang:',
':double_vertical_bar:',
':doughnut:',
':dove_of_peace:',
':small_red_triangle_down:',
':arrow_down_small:',
':arrow_down:',
':dragon:',
':dragon_face:',
':dress:',
':dromedary_camel:',
':droplet:',
':dvd:',
':e__mail:',
':ear:',
':corn:',
':ear_of_rice:',
':earth_americas:',
':earth_asia:',
':earth_africa:',
':eight_pointed_black_star:',
':eight_spoked_asterisk:',
':eject_symbol:',
':bulb:',
':electric_plug:',
':flashlight:',
':elephant:',
':emoji_modifier_fitzpatrick_type__1__2:',
':emoji_modifier_fitzpatrick_type__3:',
':emoji_modifier_fitzpatrick_type__4:',
':emoji_modifier_fitzpatrick_type__5:',
':emoji_modifier_fitzpatrick_type__6:',
':end:',
':email:',
':envelope:',
':envelope_with_arrow:',
':european_castle:',
':european_post_office:',
':evergreen_tree:',
':interrobang:',
':expressionless:',
':alien:',
':eye:',
':eyeglasses:',
':eyes:',
':massage:',
':yum:',
':scream:',
':kissing_heart:',
':sweat:',
':face_with_head__bandage:',
':triumph:',
':mask:',
':no_good:',
':ok_woman:',
':open_mouth:',
':cold_sweat:',
':face_with_rolling_eyes:',
':stuck_out_tongue:',
':stuck_out_tongue_closed_eyes:',
':stuck_out_tongue_winking_eye:',
':joy:',
':face_with_thermometer:',
':no_mouth:',
':factory:',
':fallen_leaf:',
':family:',
':santa:',
':fax:',
':fearful:',
':ferris_wheel:',
':ferry:',
':field_hockey_stick_and_ball:',
':file_cabinet:',
':file_folder:',
':film_frames:',
':film_projector:',
':fire:',
':fire_engine:',
':sparkler:',
':fireworks:',
':first_quarter_moon:',
':first_quarter_moon_with_face:',
':fish:',
':fish_cake:',
':fishing_pole_and_fish:',
':facepunch:',
':punch:',
':flag_for_Afghanistan:',
':flag_for_Albania:',
':flag_for_Algeria:',
':flag_for_American_Samoa:',
':flag_for_Andorra:',
':flag_for_Angola:',
':flag_for_Anguilla:',
':flag_for_Antarctica:',
':flag_for_Antigua_&_Barbuda:',
':flag_for_Argentina:',
':flag_for_Armenia:',
':flag_for_Aruba:',
':flag_for_Ascension_Island:',
':flag_for_Australia:',
':flag_for_Austria:',
':flag_for_Azerbaijan:',
':flag_for_Bahamas:',
':flag_for_Bahrain:',
':flag_for_Bangladesh:',
':flag_for_Barbados:',
':flag_for_Belarus:',
':flag_for_Belgium:',
':flag_for_Belize:',
':flag_for_Benin:',
':flag_for_Bermuda:',
':flag_for_Bhutan:',
':flag_for_Bolivia:',
':flag_for_Bosnia_&_Herzegovina:',
':flag_for_Botswana:',
':flag_for_Bouvet_Island:',
':flag_for_Brazil:',
':flag_for_British_Indian_Ocean_Territory:',
':flag_for_British_Virgin_Islands:',
':flag_for_Brunei:',
':flag_for_Bulgaria:',
':flag_for_Burkina_Faso:',
':flag_for_Burundi:',
':flag_for_Cambodia:',
':flag_for_Cameroon:',
':flag_for_Canada:',
':flag_for_Canary_Islands:',
':flag_for_Cape_Verde:',
':flag_for_Caribbean_Netherlands:',
':flag_for_Cayman_Islands:',
':flag_for_Central_African_Republic:',
':flag_for_Ceuta_&_Melilla:',
':flag_for_Chad:',
':flag_for_Chile:',
':flag_for_China:',
':flag_for_Christmas_Island:',
':flag_for_Clipperton_Island:',
':flag_for_Cocos__Islands:',
':flag_for_Colombia:',
':flag_for_Comoros:',
':flag_for_Congo____Brazzaville:',
':flag_for_Congo____Kinshasa:',
':flag_for_Cook_Islands:',
':flag_for_Costa_Rica:',
':flag_for_Croatia:',
':flag_for_Cuba:',
':flag_for_Curaçao:',
':flag_for_Cyprus:',
':flag_for_Czech_Republic:',
':flag_for_Côte_d’Ivoire:',
':flag_for_Denmark:',
':flag_for_Diego_Garcia:',
':flag_for_Djibouti:',
':flag_for_Dominica:',
':flag_for_Dominican_Republic:',
':flag_for_Ecuador:',
':flag_for_Egypt:',
':flag_for_El_Salvador:',
':flag_for_Equatorial_Guinea:',
':flag_for_Eritrea:',
':flag_for_Estonia:',
':flag_for_Ethiopia:',
':flag_for_European_Union:',
':flag_for_Falkland_Islands:',
':flag_for_Faroe_Islands:',
':flag_for_Fiji:',
':flag_for_Finland:',
':flag_for_France:',
':flag_for_French_Guiana:',
':flag_for_French_Polynesia:',
':flag_for_French_Southern_Territories:',
':flag_for_Gabon:',
':flag_for_Gambia:',
':flag_for_Georgia:',
':flag_for_Germany:',
':flag_for_Ghana:',
':flag_for_Gibraltar:',
':flag_for_Greece:',
':flag_for_Greenland:',
':flag_for_Grenada:',
':flag_for_Guadeloupe:',
':flag_for_Guam:',
':flag_for_Guatemala:',
':flag_for_Guernsey:',
':flag_for_Guinea:',
':flag_for_Guinea__Bissau:',
':flag_for_Guyana:',
':flag_for_Haiti:',
':flag_for_Heard_&_McDonald_Islands:',
':flag_for_Honduras:',
':flag_for_Hong_Kong:',
':flag_for_Hungary:',
':flag_for_Iceland:',
':flag_for_India:',
':flag_for_Indonesia:',
':flag_for_Iran:',
':flag_for_Iraq:',
':flag_for_Ireland:',
':flag_for_Isle_of_Man:',
':flag_for_Israel:',
':flag_for_Italy:',
':flag_for_Jamaica:',
':flag_for_Japan:',
':flag_for_Jersey:',
':flag_for_Jordan:',
':flag_for_Kazakhstan:',
':flag_for_Kenya:',
':flag_for_Kiribati:',
':flag_for_Kosovo:',
':flag_for_Kuwait:',
':flag_for_Kyrgyzstan:',
':flag_for_Laos:',
':flag_for_Latvia:',
':flag_for_Lebanon:',
':flag_for_Lesotho:',
':flag_for_Liberia:',
':flag_for_Libya:',
':flag_for_Liechtenstein:',
':flag_for_Lithuania:',
':flag_for_Luxembourg:',
':flag_for_Macau:',
':flag_for_Macedonia:',
':flag_for_Madagascar:',
':flag_for_Malawi:',
':flag_for_Malaysia:',
':flag_for_Maldives:',
':flag_for_Mali:',
':flag_for_Malta:',
':flag_for_Marshall_Islands:',
':flag_for_Martinique:',
':flag_for_Mauritania:',
':flag_for_Mauritius:',
':flag_for_Mayotte:',
':flag_for_Mexico:',
':flag_for_Micronesia:',
':flag_for_Moldova:',
':flag_for_Monaco:',
':flag_for_Mongolia:',
':flag_for_Montenegro:',
':flag_for_Montserrat:',
':flag_for_Morocco:',
':flag_for_Mozambique:',
':flag_for_Myanmar:',
':flag_for_Namibia:',
':flag_for_Nauru:',
':flag_for_Nepal:',
':flag_for_Netherlands:',
':flag_for_New_Caledonia:',
':flag_for_New_Zealand:',
':flag_for_Nicaragua:',
':flag_for_Niger:',
':flag_for_Nigeria:',
':flag_for_Niue:',
':flag_for_Norfolk_Island:',
':flag_for_North_Korea:',
':flag_for_Northern_Mariana_Islands:',
':flag_for_Norway:',
':flag_for_Oman:',
':flag_for_Pakistan:',
':flag_for_Palau:',
':flag_for_Palestinian_Territories:',
':flag_for_Panama:',
':flag_for_Papua_New_Guinea:',
':flag_for_Paraguay:',
':flag_for_Peru:',
':flag_for_Philippines:',
':flag_for_Pitcairn_Islands:',
':flag_for_Poland:',
':flag_for_Portugal:',
':flag_for_Puerto_Rico:',
':flag_for_Qatar:',
':flag_for_Romania:',
':flag_for_Russia:',
':flag_for_Rwanda:',
':flag_for_Réunion:',
':flag_for_Samoa:',
':flag_for_San_Marino:',
':flag_for_Saudi_Arabia:',
':flag_for_Senegal:',
':flag_for_Serbia:',
':flag_for_Seychelles:',
':flag_for_Sierra_Leone:',
':flag_for_Singapore:',
':flag_for_Sint_Maarten:',
':flag_for_Slovakia:',
':flag_for_Slovenia:',
':flag_for_Solomon_Islands:',
':flag_for_Somalia:',
':flag_for_South_Africa:',
':flag_for_South_Georgia_&_South_Sandwich_Islands:',
':flag_for_South_Korea:',
':flag_for_South_Sudan:',
':flag_for_Spain:',
':flag_for_Sri_Lanka:',
':flag_for_St._Barthélemy:',
':flag_for_St._Helena:',
':flag_for_St._Kitts_&_Nevis:',
':flag_for_St._Lucia:',
':flag_for_St._Martin:',
':flag_for_St._Pierre_&_Miquelon:',
':flag_for_St._Vincent_&_Grenadines:',
':flag_for_Sudan:',
':flag_for_Suriname:',
':flag_for_Svalbard_&_Jan_Mayen:',
':flag_for_Swaziland:',
':flag_for_Sweden:',
':flag_for_Switzerland:',
':flag_for_Syria:',
':flag_for_São_Tomé_&_Príncipe:',
':flag_for_Taiwan:',
':flag_for_Tajikistan:',
':flag_for_Tanzania:',
':flag_for_Thailand:',
':flag_for_Timor__Leste:',
':flag_for_Togo:',
':flag_for_Tokelau:',
':flag_for_Tonga:',
':flag_for_Trinidad_&_Tobago:',
':flag_for_Tristan_da_Cunha:',
':flag_for_Tunisia:',
':flag_for_Turkey:',
':flag_for_Turkmenistan:',
':flag_for_Turks_&_Caicos_Islands:',
':flag_for_Tuvalu:',
':flag_for_U.S._Outlying_Islands:',
':flag_for_U.S._Virgin_Islands:',
':flag_for_Uganda:',
':flag_for_Ukraine:',
':flag_for_United_Arab_Emirates:',
':flag_for_United_Kingdom:',
':flag_for_United_States:',
':flag_for_Uruguay:',
':flag_for_Uzbekistan:',
':flag_for_Vanuatu:',
':flag_for_Vatican_City:',
':flag_for_Venezuela:',
':flag_for_Vietnam:',
':flag_for_Wallis_&_Futuna:',
':flag_for_Western_Sahara:',
':flag_for_Yemen:',
':flag_for_Zambia:',
':flag_for_Zimbabwe:',
':flag_for_Åland_Islands:',
':golf:',
':fleur__de__lis:',
':muscle:',
':floppy_disk:',
':flower_playing_cards:',
':flushed:',
':fog:',
':foggy:',
':footprints:',
':fork_and_knife:',
':fork_and_knife_with_plate:',
':fountain:',
':four_leaf_clover:',
':frame_with_picture:',
':fries:',
':fried_shrimp:',
':frog:',
':hatched_chick:',
':frowning:',
':fuelpump:',
':full_moon:',
':full_moon_with_face:',
':funeral_urn:',
':game_die:',
':gear:',
':gem:',
':gemini:',
':ghost:',
':girl:',
':globe_with_meridians:',
':star2:',
':goat:',
':golfer:',
':mortar_board:',
':grapes:',
':green_apple:',
':green_book:',
':green_heart:',
':grimacing:',
':smile_cat:',
':grinning:',
':grin:',
':heartpulse:',
':guardsman:',
':guitar:',
':haircut:',
':hamburger:',
':hammer:',
':hammer_and_pick:',
':hammer_and_wrench:',
':hamster:',
':handbag:',
':raising_hand:',
':hatching_chick:',
':headphones:',
':hear_no_evil:',
':heart_decoration:',
':cupid:',
':gift_heart:',
':heart:',
':heavy_check_mark:',
':heavy_division_sign:',
':heavy_dollar_sign:',
':exclamation:',
':heavy_exclamation_mark:',
':heavy_heart_exclamation_mark_ornament:',
':o:',
':heavy_minus_sign:',
':heavy_multiplication_x:',
':heavy_plus_sign:',
':helicopter:',
':helm_symbol:',
':helmet_with_white_cross:',
':herb:',
':hibiscus:',
':high_heel:',
':bullettrain_side:',
':bullettrain_front:',
':high_brightness:',
':zap:',
':hocho:',
':knife:',
':hole:',
':honey_pot:',
':bee:',
':traffic_light:',
':racehorse:',
':horse:',
':horse_racing:',
':hospital:',
':coffee:',
':hot_dog:',
':hot_pepper:',
':hotsprings:',
':hotel:',
':hourglass:',
':hourglass_flowing_sand:',
':house:',
':house_buildings:',
':house_with_garden:',
':hugging_face:',
':100:',
':hushed:',
':ice_cream:',
':ice_hockey_stick_and_puck:',
':ice_skate:',
':imp:',
':inbox_tray:',
':incoming_envelope:',
':information_desk_person:',
':information_source:',
':capital_abcd:',
':abc:',
':abcd:',
':1234:',
':symbols:',
':izakaya_lantern:',
':lantern:',
':jack_o_lantern:',
':japanese_castle:',
':dolls:',
':japanese_goblin:',
':japanese_ogre:',
':post_office:',
':beginner:',
':jeans:',
':joystick:',
':kaaba:',
':key:',
':keyboard:',
':keycap_asterisk:',
':keycap_digit_eight:',
':keycap_digit_five:',
':keycap_digit_four:',
':keycap_digit_nine:',
':keycap_digit_one:',
':keycap_digit_seven:',
':keycap_digit_six:',
':keycap_digit_three:',
':keycap_digit_two:',
':keycap_digit_zero:',
':keycap_number_sign:',
':keycap_ten:',
':kimono:',
':couplekiss:',
':kiss:',
':kissing_cat:',
':kissing:',
':kissing_closed_eyes:',
':kissing_smiling_eyes:',
':koala:',
':label:',
':beetle:',
':large_blue_circle:',
':large_blue_diamond:',
':large_orange_diamond:',
':red_circle:',
':last_quarter_moon:',
':last_quarter_moon_with_face:',
':latin_cross:',
':leaves:',
':ledger:',
':mag:',
':left_luggage:',
':left_right_arrow:',
':leftwards_arrow_with_hook:',
':arrow_left:',
':lemon:',
':leo:',
':leopard:',
':level_slider:',
':libra:',
':light_rail:',
':link:',
':linked_paperclips:',
':lion_face:',
':lipstick:',
':lock:',
':lock_with_ink_pen:',
':lollipop:',
':sob:',
':love_hotel:',
':love_letter:',
':low_brightness:',
':lower_left_ballpoint_pen:',
':lower_left_crayon:',
':lower_left_fountain_pen:',
':lower_left_paintbrush:',
':mahjong:',
':man:',
':couple:',
':man_in_business_suit_levitating:',
':man_with_gua_pi_mao:',
':man_with_turban:',
':mans_shoe:',
':shoe:',
':mantelpiece_clock:',
':maple_leaf:',
':meat_on_bone:',
':black_circle:',
':white_circle:',
':melon:',
':memo:',
':pencil:',
':menorah_with_nine_branches:',
':mens:',
':metro:',
':microphone:',
':microscope:',
':military_medal:',
':milky_way:',
':minibus:',
':minidisc:',
':iphone:',
':mobile_phone_off:',
':calling:',
':money__mouth_face:',
':moneybag:',
':money_with_wings:',
':monkey:',
':monkey_face:',
':monorail:',
':rice_scene:',
':mosque:',
':motor_boat:',
':motorway:',
':mount_fuji:',
':mountain:',
':mountain_bicyclist:',
':mountain_cableway:',
':mountain_railway:',
':mouse2:',
':mouse:',
':lips:',
':movie_camera:',
':moyai:',
':notes:',
':mushroom:',
':musical_keyboard:',
':musical_note:',
':musical_score:',
':nail_care:',
':name_badge:',
':national_park:',
':necktie:',
':ab:',
':negative_squared_cross_mark:',
':a:',
':b:',
':o2:',
':parking:',
':nerd_face:',
':neutral_face:',
':new_moon:',
':honeybee:',
':new_moon_with_face:',
':newspaper:',
':night_with_stars:',
':no_bicycles:',
':no_entry:',
':no_entry_sign:',
':no_mobile_phones:',
':underage:',
':no_pedestrians:',
':no_smoking:',
':non__potable_water:',
':arrow_upper_right:',
':arrow_upper_left:',
':nose:',
':notebook:',
':notebook_with_decorative_cover:',
':nut_and_bolt:',
':octopus:',
':oden:',
':office:',
':oil_drum:',
':ok_hand:',
':old_key:',
':older_man:',
':older_woman:',
':om_symbol:',
':on:',
':oncoming_automobile:',
':oncoming_bus:',
':oncoming_police_car:',
':oncoming_taxi:',
':book:',
':open_book:',
':open_file_folder:',
':open_hands:',
':unlock:',
':mailbox_with_no_mail:',
':mailbox_with_mail:',
':ophiuchus:',
':cd:',
':orange_book:',
':orthodox_cross:',
':outbox_tray:',
':ox:',
':package:',
':page_facing_up:',
':page_with_curl:',
':pager:',
':palm_tree:',
':panda_face:',
':paperclip:',
':part_alternation_mark:',
':tada:',
':passenger_ship:',
':passport_control:',
':feet:',
':paw_prints:',
':peace_symbol:',
':peach:',
':pear:',
':walking:',
':pencil2:',
':penguin:',
':pensive:',
':performing_arts:',
':persevere:',
':bow:',
':person_frowning:',
':raised_hands:',
':person_with_ball:',
':person_with_blond_hair:',
':pray:',
':person_with_pouting_face:',
':computer:',
':pick:',
':pig2:',
':pig:',
':pig_nose:',
':hankey:',
':poop:',
':shit:',
':pill:',
':bamboo:',
':pineapple:',
':pisces:',
':gun:',
':place_of_worship:',
':black_joker:',
':police_car:',
':rotating_light:',
':cop:',
':poodle:',
':popcorn:',
':postal_horn:',
':postbox:',
':stew:',
':potable_water:',
':pouch:',
':poultry_leg:',
':pouting_cat:',
':rage:',
':prayer_beads:',
':princess:',
':printer:',
':loudspeaker:',
':purple_heart:',
':purse:',
':pushpin:',
':put_litter_in_its_place:',
':rabbit2:',
':rabbit:',
':racing_car:',
':racing_motorcycle:',
':radio:',
':radio_button:',
':radioactive_sign:',
':railway_car:',
':railway_track:',
':rainbow:',
':fist:',
':hand:',
':raised_hand:',
':raised_hand_with_fingers_splayed:',
':raised_hand_with_part_between_middle_and_ring_fingers:',
':ram:',
':rat:',
':blue_car:',
':apple:',
':registered:',
':relieved:',
':reminder_ribbon:',
':restroom:',
':reversed_hand_with_middle_finger_extended:',
':revolving_hearts:',
':ribbon:',
':rice_ball:',
':rice_cracker:',
':mag_right:',
':right_anger_bubble:',
':arrow_right_hook:',
':ring:',
':sweet_potato:',
':robot_face:',
':rocket:',
':rolled__up_newspaper:',
':roller_coaster:',
':rooster:',
':rose:',
':rosette:',
':round_pushpin:',
':rowboat:',
':rugby_football:',
':runner:',
':running:',
':running_shirt_with_sash:',
':sagittarius:',
':boat:',
':sailboat:',
':sake:',
':satellite:',
':saxophone:',
':scales:',
':school:',
':school_satchel:',
':scorpion:',
':scorpius:',
':scroll:',
':seat:',
':see_no_evil:',
':seedling:',
':shamrock:',
':shaved_ice:',
':sheep:',
':shield:',
':shinto_shrine:',
':ship:',
':stars:',
':shopping_bags:',
':cake:',
':shower:',
':sign_of_the_horns:',
':japan:',
':six_pointed_star:',
':ski:',
':skier:',
':skull:',
':skull_and_crossbones:',
':sleeping_accommodation:',
':sleeping:',
':zzz:',
':sleepy:',
':sleuth_or_spy:',
':pizza:',
':slightly_frowning_face:',
':slightly_smiling_face:',
':slot_machine:',
':small_airplane:',
':small_blue_diamond:',
':small_orange_diamond:',
':heart_eyes_cat:',
':smiley_cat:',
':innocent:',
':heart_eyes:',
':smiling_imp:',
':smiley:',
':sweat_smile:',
':smile:',
':laughing:',
':satisfied:',
':blush:',
':sunglasses:',
':smirk:',
':smoking:',
':snail:',
':snake:',
':snow_capped_mountain:',
':snowboarder:',
':snowflake:',
':snowman:',
':soccer:',
':icecream:',
':soon:',
':arrow_lower_right:',
':arrow_lower_left:',
':spaghetti:',
':sparkle:',
':sparkles:',
':sparkling_heart:',
':speak_no_evil:',
':speaker:',
':mute:',
':sound:',
':loud_sound:',
':speaking_head_in_silhouette:',
':speech_balloon:',
':speedboat:',
':spider:',
':spider_web:',
':spiral_calendar_pad:',
':spiral_note_pad:',
':shell:',
':sweat_drops:',
':sports_medal:',
':whale:',
':u5272:',
':u5408:',
':u55b6:',
':u6307:',
':u6708:',
':u6709:',
':u6e80:',
':u7121:',
':u7533:',
':u7981:',
':u7a7a:',
':cl:',
':cool:',
':free:',
':id:',
':koko:',
':sa:',
':new:',
':ng:',
':ok:',
':sos:',
':up:',
':vs:',
':stadium:',
':star_and_crescent:',
':star_of_david:',
':station:',
':statue_of_liberty:',
':steam_locomotive:',
':ramen:',
':stopwatch:',
':straight_ruler:',
':strawberry:',
':studio_microphone:',
':partly_sunny:',
':sun_with_face:',
':sunflower:',
':sunrise:',
':sunrise_over_mountains:',
':city_sunrise:',
':surfer:',
':sushi:',
':suspension_railway:',
':swimmer:',
':synagogue:',
':syringe:',
':shirt:',
':tshirt:',
':table_tennis_paddle_and_ball:',
':taco:',
':tanabata_tree:',
':tangerine:',
':taurus:',
':taxi:',
':tea:',
':calendar:',
':telephone_receiver:',
':telescope:',
':tv:',
':tennis:',
':tent:',
':thermometer:',
':thinking_face:',
':thought_balloon:',
':three_button_mouse:',
':+1:',
':thumbsup:',
':__1:',
':thumbsdown:',
':thunder_cloud_and_rain:',
':ticket:',
':tiger2:',
':tiger:',
':timer_clock:',
':tired_face:',
':toilet:',
':tokyo_tower:',
':tomato:',
':tongue:',
':tophat:',
':top:',
':trackball:',
':tractor:',
':tm:',
':train2:',
':tram:',
':train:',
':triangular_flag_on_post:',
':triangular_ruler:',
':trident:',
':trolleybus:',
':trophy:',
':tropical_drink:',
':tropical_fish:',
':trumpet:',
':tulip:',
':turkey:',
':turtle:',
':twisted_rightwards_arrows:',
':two_hearts:',
':two_men_holding_hands:',
':two_women_holding_hands:',
':umbrella:',
':umbrella_on_ground:',
':unamused:',
':unicorn_face:',
':small_red_triangle:',
':arrow_up_small:',
':arrow_up_down:',
':upside__down_face:',
':arrow_up:',
':vertical_traffic_light:',
':vibration_mode:',
':v:',
':video_camera:',
':video_game:',
':vhs:',
':violin:',
':virgo:',
':volcano:',
':volleyball:',
':waning_crescent_moon:',
':waning_gibbous_moon:',
':warning:',
':wastebasket:',
':watch:',
':water_buffalo:',
':wc:',
':ocean:',
':watermelon:',
':waving_black_flag:',
':wave:',
':waving_white_flag:',
':wavy_dash:',
':waxing_crescent_moon:',
':moon:',
':waxing_gibbous_moon:',
':scream_cat:',
':weary:',
':wedding:',
':weight_lifter:',
':whale2:',
':wheel_of_dharma:',
':wheelchair:',
':point_down:',
':grey_exclamation:',
':white_flower:',
':white_frowning_face:',
':white_check_mark:',
':white_large_square:',
':point_left:',
':white_medium_small_square:',
':white_medium_square:',
':star:',
':grey_question:',
':point_right:',
':white_small_square:',
':relaxed:',
':white_square_button:',
':white_sun_behind_cloud:',
':white_sun_behind_cloud_with_rain:',
':white_sun_with_small_cloud:',
':point_up_2:',
':point_up:',
':wind_blowing_face:',
':wind_chime:',
':wine_glass:',
':wink:',
':wolf:',
':woman:',
':dancers:',
':boot:',
':womans_clothes:',
':womans_hat:',
':sandal:',
':womens:',
':world_map:',
':worried:',
':gift:',
':wrench:',
':writing_hand:',
':yellow_heart:',
':yin_yang:',
':zipper__mouth_face:',
]
| 27.460726 | 64 | 0.630646 | EMOJI_LIST = [
':1st_place_medal:',
':2nd_place_medal:',
':3rd_place_medal:',
':AB_button_(blood_type):',
':ATM_sign:',
':A_button_(blood_type):',
':Afghanistan:',
':Albania:',
':Algeria:',
':American_Samoa:',
':Andorra:',
':Angola:',
':Anguilla:',
':Antarctica:',
':Antigua_&_Barbuda:',
':Aquarius:',
':Argentina:',
':Aries:',
':Armenia:',
':Aruba:',
':Ascension_Island:',
':Australia:',
':Austria:',
':Azerbaijan:',
':BACK_arrow:',
':B_button_(blood_type):',
':Bahamas:',
':Bahrain:',
':Bangladesh:',
':Barbados:',
':Belarus:',
':Belgium:',
':Belize:',
':Benin:',
':Bermuda:',
':Bhutan:',
':Bolivia:',
':Bosnia_&_Herzegovina:',
':Botswana:',
':Bouvet_Island:',
':Brazil:',
':British_Indian_Ocean_Territory:',
':British_Virgin_Islands:',
':Brunei:',
':Bulgaria:',
':Burkina_Faso:',
':Burundi:',
':CL_button:',
':COOL_button:',
':Cambodia:',
':Cameroon:',
':Canada:',
':Canary_Islands:',
':Cancer:',
':Cape_Verde:',
':Capricorn:',
':Caribbean_Netherlands:',
':Cayman_Islands:',
':Central_African_Republic:',
':Ceuta_&_Melilla:',
':Chad:',
':Chile:',
':China:',
':Christmas_Island:',
':Christmas_tree:',
':Clipperton_Island:',
':Cocos_(Keeling)_Islands:',
':Colombia:',
':Comoros:',
':Congo_-_Brazzaville:',
':Congo_-_Kinshasa:',
':Cook_Islands:',
':Costa_Rica:',
':Croatia:',
':Cuba:',
':Curaçao:',
':Cyprus:',
':Czech_Republic:',
':Côte_d’Ivoire:',
':Denmark:',
':Diego_Garcia:',
':Djibouti:',
':Dominica:',
':Dominican_Republic:',
':END_arrow:',
':Ecuador:',
':Egypt:',
':El_Salvador:',
':Equatorial_Guinea:',
':Eritrea:',
':Estonia:',
':Ethiopia:',
':European_Union:',
':FREE_button:',
':Falkland_Islands:',
':Faroe_Islands:',
':Fiji:',
':Finland:',
':France:',
':French_Guiana:',
':French_Polynesia:',
':French_Southern_Territories:',
':Gabon:',
':Gambia:',
':Gemini:',
':Georgia:',
':Germany:',
':Ghana:',
':Gibraltar:',
':Greece:',
':Greenland:',
':Grenada:',
':Guadeloupe:',
':Guam:',
':Guatemala:',
':Guernsey:',
':Guinea:',
':Guinea-Bissau:',
':Guyana:',
':Haiti:',
':Heard_&_McDonald_Islands:',
':Honduras:',
':Hong_Kong_SAR_China:',
':Hungary:',
':ID_button:',
':Iceland:',
':India:',
':Indonesia:',
':Iran:',
':Iraq:',
':Ireland:',
':Isle_of_Man:',
':Israel:',
':Italy:',
':Jamaica:',
':Japan:',
':Japanese_acceptable_button:',
':Japanese_application_button:',
':Japanese_bargain_button:',
':Japanese_castle:',
':Japanese_congratulations_button:',
':Japanese_discount_button:',
':Japanese_dolls:',
':Japanese_free_of_charge_button:',
':Japanese_here_button:',
':Japanese_monthly_amount_button:',
':Japanese_no_vacancy_button:',
':Japanese_not_free_of_charge_button:',
':Japanese_open_for_business_button:',
':Japanese_passing_grade_button:',
':Japanese_post_office:',
':Japanese_prohibited_button:',
':Japanese_reserved_button:',
':Japanese_secret_button:',
':Japanese_service_charge_button:',
':Japanese_symbol_for_beginner:',
':Japanese_vacancy_button:',
':Jersey:',
':Jordan:',
':Kazakhstan:',
':Kenya:',
':Kiribati:',
':Kosovo:',
':Kuwait:',
':Kyrgyzstan:',
':Laos:',
':Latvia:',
':Lebanon:',
':Leo:',
':Lesotho:',
':Liberia:',
':Libra:',
':Libya:',
':Liechtenstein:',
':Lithuania:',
':Luxembourg:',
':Macau_SAR_China:',
':Macedonia:',
':Madagascar:',
':Malawi:',
':Malaysia:',
':Maldives:',
':Mali:',
':Malta:',
':Marshall_Islands:',
':Martinique:',
':Mauritania:',
':Mauritius:',
':Mayotte:',
':Mexico:',
':Micronesia:',
':Moldova:',
':Monaco:',
':Mongolia:',
':Montenegro:',
':Montserrat:',
':Morocco:',
':Mozambique:',
':Mrs._Claus:',
':Mrs._Claus_dark_skin_tone:',
':Mrs._Claus_light_skin_tone:',
':Mrs._Claus_medium-dark_skin_tone:',
':Mrs._Claus_medium-light_skin_tone:',
':Mrs._Claus_medium_skin_tone:',
':Myanmar_(Burma):',
':NEW_button:',
':NG_button:',
':Namibia:',
':Nauru:',
':Nepal:',
':Netherlands:',
':New_Caledonia:',
':New_Zealand:',
':Nicaragua:',
':Niger:',
':Nigeria:',
':Niue:',
':Norfolk_Island:',
':North_Korea:',
':Northern_Mariana_Islands:',
':Norway:',
':OK_button:',
':OK_hand:',
':OK_hand_dark_skin_tone:',
':OK_hand_light_skin_tone:',
':OK_hand_medium-dark_skin_tone:',
':OK_hand_medium-light_skin_tone:',
':OK_hand_medium_skin_tone:',
':ON!_arrow:',
':O_button_(blood_type):',
':Oman:',
':Ophiuchus:',
':P_button:',
':Pakistan:',
':Palau:',
':Palestinian_Territories:',
':Panama:',
':Papua_New_Guinea:',
':Paraguay:',
':Peru:',
':Philippines:',
':Pisces:',
':Pitcairn_Islands:',
':Poland:',
':Portugal:',
':Puerto_Rico:',
':Qatar:',
':Romania:',
':Russia:',
':Rwanda:',
':Réunion:',
':SOON_arrow:',
':SOS_button:',
':Sagittarius:',
':Samoa:',
':San_Marino:',
':Santa_Claus:',
':Santa_Claus_dark_skin_tone:',
':Santa_Claus_light_skin_tone:',
':Santa_Claus_medium-dark_skin_tone:',
':Santa_Claus_medium-light_skin_tone:',
':Santa_Claus_medium_skin_tone:',
':Saudi_Arabia:',
':Scorpius:',
':Senegal:',
':Serbia:',
':Seychelles:',
':Sierra_Leone:',
':Singapore:',
':Sint_Maarten:',
':Slovakia:',
':Slovenia:',
':Solomon_Islands:',
':Somalia:',
':South_Africa:',
':South_Georgia_&_South_Sandwich_Islands:',
':South_Korea:',
':South_Sudan:',
':Spain:',
':Sri_Lanka:',
':St._Barthélemy:',
':St._Helena:',
':St._Kitts_&_Nevis:',
':St._Lucia:',
':St._Martin:',
':St._Pierre_&_Miquelon:',
':St._Vincent_&_Grenadines:',
':Statue_of_Liberty:',
':Sudan:',
':Suriname:',
':Svalbard_&_Jan_Mayen:',
':Swaziland:',
':Sweden:',
':Switzerland:',
':Syria:',
':São_Tomé_&_Príncipe:',
':TOP_arrow:',
':Taiwan:',
':Tajikistan:',
':Tanzania:',
':Taurus:',
':Thailand:',
':Timor-Leste:',
':Togo:',
':Tokelau:',
':Tokyo_tower:',
':Tonga:',
':Trinidad_&_Tobago:',
':Tristan_da_Cunha:',
':Tunisia:',
':Turkey:',
':Turkmenistan:',
':Turks_&_Caicos_Islands:',
':Tuvalu:',
':U.S._Outlying_Islands:',
':U.S._Virgin_Islands:',
':UP!_button:',
':Uganda:',
':Ukraine:',
':United_Arab_Emirates:',
':United_Kingdom:',
':United_Nations:',
':United_States:',
':Uruguay:',
':Uzbekistan:',
':VS_button:',
':Vanuatu:',
':Vatican_City:',
':Venezuela:',
':Vietnam:',
':Virgo:',
':Wallis_&_Futuna:',
':Western_Sahara:',
':Yemen:',
':Zambia:',
':Zimbabwe:',
':admission_tickets:',
':aerial_tramway:',
':airplane:',
':airplane_arrival:',
':airplane_departure:',
':alarm_clock:',
':alembic:',
':alien:',
':alien_monster:',
':ambulance:',
':american_football:',
':amphora:',
':anchor:',
':anger_symbol:',
':angry_face:',
':angry_face_with_horns:',
':anguished_face:',
':ant:',
':antenna_bars:',
':anticlockwise_arrows_button:',
':articulated_lorry:',
':artist_palette:',
':astonished_face:',
':atom_symbol:',
':automobile:',
':avocado:',
':baby:',
':baby_angel:',
':baby_angel_dark_skin_tone:',
':baby_angel_light_skin_tone:',
':baby_angel_medium-dark_skin_tone:',
':baby_angel_medium-light_skin_tone:',
':baby_angel_medium_skin_tone:',
':baby_bottle:',
':baby_chick:',
':baby_dark_skin_tone:',
':baby_light_skin_tone:',
':baby_medium-dark_skin_tone:',
':baby_medium-light_skin_tone:',
':baby_medium_skin_tone:',
':baby_symbol:',
':backhand_index_pointing_down:',
':backhand_index_pointing_down_dark_skin_tone:',
':backhand_index_pointing_down_light_skin_tone:',
':backhand_index_pointing_down_medium-dark_skin_tone:',
':backhand_index_pointing_down_medium-light_skin_tone:',
':backhand_index_pointing_down_medium_skin_tone:',
':backhand_index_pointing_left:',
':backhand_index_pointing_left_dark_skin_tone:',
':backhand_index_pointing_left_light_skin_tone:',
':backhand_index_pointing_left_medium-dark_skin_tone:',
':backhand_index_pointing_left_medium-light_skin_tone:',
':backhand_index_pointing_left_medium_skin_tone:',
':backhand_index_pointing_right:',
':backhand_index_pointing_right_dark_skin_tone:',
':backhand_index_pointing_right_light_skin_tone:',
':backhand_index_pointing_right_medium-dark_skin_tone:',
':backhand_index_pointing_right_medium-light_skin_tone:',
':backhand_index_pointing_right_medium_skin_tone:',
':backhand_index_pointing_up:',
':backhand_index_pointing_up_dark_skin_tone:',
':backhand_index_pointing_up_light_skin_tone:',
':backhand_index_pointing_up_medium-dark_skin_tone:',
':backhand_index_pointing_up_medium-light_skin_tone:',
':backhand_index_pointing_up_medium_skin_tone:',
':bacon:',
':badminton:',
':baggage_claim:',
':baguette_bread:',
':balance_scale:',
':balloon:',
':ballot_box_with_ballot:',
':ballot_box_with_check:',
':banana:',
':bank:',
':bar_chart:',
':barber_pole:',
':baseball:',
':basketball:',
':bat:',
':bathtub:',
':battery:',
':beach_with_umbrella:',
':bear_face:',
':beating_heart:',
':bed:',
':beer_mug:',
':bell:',
':bell_with_slash:',
':bellhop_bell:',
':bento_box:',
':bicycle:',
':bikini:',
':biohazard:',
':bird:',
':birthday_cake:',
':black_circle:',
':black_flag:',
':black_heart:',
':black_large_square:',
':black_medium-small_square:',
':black_medium_square:',
':black_nib:',
':black_small_square:',
':black_square_button:',
':blond-haired_man:',
':blond-haired_man_dark_skin_tone:',
':blond-haired_man_light_skin_tone:',
':blond-haired_man_medium-dark_skin_tone:',
':blond-haired_man_medium-light_skin_tone:',
':blond-haired_man_medium_skin_tone:',
':blond-haired_person:',
':blond-haired_person_dark_skin_tone:',
':blond-haired_person_light_skin_tone:',
':blond-haired_person_medium-dark_skin_tone:',
':blond-haired_person_medium-light_skin_tone:',
':blond-haired_person_medium_skin_tone:',
':blond-haired_woman:',
':blond-haired_woman_dark_skin_tone:',
':blond-haired_woman_light_skin_tone:',
':blond-haired_woman_medium-dark_skin_tone:',
':blond-haired_woman_medium-light_skin_tone:',
':blond-haired_woman_medium_skin_tone:',
':blossom:',
':blowfish:',
':blue_book:',
':blue_circle:',
':blue_heart:',
':boar:',
':bomb:',
':bookmark:',
':bookmark_tabs:',
':books:',
':bottle_with_popping_cork:',
':bouquet:',
':bow_and_arrow:',
':bowling:',
':boxing_glove:',
':boy:',
':boy_dark_skin_tone:',
':boy_light_skin_tone:',
':boy_medium-dark_skin_tone:',
':boy_medium-light_skin_tone:',
':boy_medium_skin_tone:',
':bread:',
':bride_with_veil:',
':bride_with_veil_dark_skin_tone:',
':bride_with_veil_light_skin_tone:',
':bride_with_veil_medium-dark_skin_tone:',
':bride_with_veil_medium-light_skin_tone:',
':bride_with_veil_medium_skin_tone:',
':bridge_at_night:',
':briefcase:',
':bright_button:',
':broken_heart:',
':bug:',
':building_construction:',
':burrito:',
':bus:',
':bus_stop:',
':bust_in_silhouette:',
':busts_in_silhouette:',
':butterfly:',
':cactus:',
':calendar:',
':call_me_hand:',
':call_me_hand_dark_skin_tone:',
':call_me_hand_light_skin_tone:',
':call_me_hand_medium-dark_skin_tone:',
':call_me_hand_medium-light_skin_tone:',
':call_me_hand_medium_skin_tone:',
':camel:',
':camera:',
':camera_with_flash:',
':camping:',
':candle:',
':candy:',
':canoe:',
':card_file_box:',
':card_index:',
':card_index_dividers:',
':carousel_horse:',
':carp_streamer:',
':carrot:',
':castle:',
':cat:',
':cat_face:',
':cat_face_with_tears_of_joy:',
':cat_face_with_wry_smile:',
':chains:',
':chart_decreasing:',
':chart_increasing:',
':chart_increasing_with_yen:',
':cheese_wedge:',
':chequered_flag:',
':cherries:',
':cherry_blossom:',
':chestnut:',
':chicken:',
':children_crossing:',
':chipmunk:',
':chocolate_bar:',
':church:',
':cigarette:',
':cinema:',
':circled_M:',
':circus_tent:',
':cityscape:',
':cityscape_at_dusk:',
':clamp:',
':clapper_board:',
':clapping_hands:',
':clapping_hands_dark_skin_tone:',
':clapping_hands_light_skin_tone:',
':clapping_hands_medium-dark_skin_tone:',
':clapping_hands_medium-light_skin_tone:',
':clapping_hands_medium_skin_tone:',
':classical_building:',
':clinking_beer_mugs:',
':clinking_glasses:',
':clipboard:',
':clockwise_vertical_arrows:',
':closed_book:',
':closed_mailbox_with_lowered_flag:',
':closed_mailbox_with_raised_flag:',
':closed_umbrella:',
':cloud:',
':cloud_with_lightning:',
':cloud_with_lightning_and_rain:',
':cloud_with_rain:',
':cloud_with_snow:',
':clown_face:',
':club_suit:',
':clutch_bag:',
':cocktail_glass:',
':coffin:',
':collision:',
':comet:',
':computer_disk:',
':computer_mouse:',
':confetti_ball:',
':confounded_face:',
':confused_face:',
':construction:',
':construction_worker:',
':construction_worker_dark_skin_tone:',
':construction_worker_light_skin_tone:',
':construction_worker_medium-dark_skin_tone:',
':construction_worker_medium-light_skin_tone:',
':construction_worker_medium_skin_tone:',
':control_knobs:',
':convenience_store:',
':cooked_rice:',
':cookie:',
':cooking:',
':copyright:',
':couch_and_lamp:',
':couple_with_heart:',
':couple_with_heart_man_man:',
':couple_with_heart_woman_man:',
':couple_with_heart_woman_woman:',
':cow:',
':cow_face:',
':cowboy_hat_face:',
':crab:',
':crayon:',
':credit_card:',
':crescent_moon:',
':cricket:',
':crocodile:',
':croissant:',
':cross_mark:',
':cross_mark_button:',
':crossed_fingers:',
':crossed_fingers_dark_skin_tone:',
':crossed_fingers_light_skin_tone:',
':crossed_fingers_medium-dark_skin_tone:',
':crossed_fingers_medium-light_skin_tone:',
':crossed_fingers_medium_skin_tone:',
':crossed_flags:',
':crossed_swords:',
':crown:',
':crying_cat_face:',
':crying_face:',
':crystal_ball:',
':cucumber:',
':curly_loop:',
':currency_exchange:',
':curry_rice:',
':custard:',
':customs:',
':cyclone:',
':dagger:',
':dango:',
':dark_skin_tone:',
':dashing_away:',
':deciduous_tree:',
':deer:',
':delivery_truck:',
':department_store:',
':derelict_house:',
':desert:',
':desert_island:',
':desktop_computer:',
':detective:',
':detective_dark_skin_tone:',
':detective_light_skin_tone:',
':detective_medium-dark_skin_tone:',
':detective_medium-light_skin_tone:',
':detective_medium_skin_tone:',
':diamond_suit:',
':diamond_with_a_dot:',
':dim_button:',
':direct_hit:',
':disappointed_but_relieved_face:',
':disappointed_face:',
':dizzy:',
':dizzy_face:',
':dog:',
':dog_face:',
':dollar_banknote:',
':dolphin:',
':door:',
':dotted_six-pointed_star:',
':double_curly_loop:',
':double_exclamation_mark:',
':doughnut:',
':dove:',
':down-left_arrow:',
':down-right_arrow:',
':down_arrow:',
':down_button:',
':dragon:',
':dragon_face:',
':dress:',
':drooling_face:',
':droplet:',
':drum:',
':duck:',
':dvd:',
':e-mail:',
':eagle:',
':ear:',
':ear_dark_skin_tone:',
':ear_light_skin_tone:',
':ear_medium-dark_skin_tone:',
':ear_medium-light_skin_tone:',
':ear_medium_skin_tone:',
':ear_of_corn:',
':egg:',
':eggplant:',
':eight-pointed_star:',
':eight-spoked_asterisk:',
':eight-thirty:',
':eight_o’clock:',
':eject_button:',
':electric_plug:',
':elephant:',
':eleven-thirty:',
':eleven_o’clock:',
':envelope:',
':envelope_with_arrow:',
':euro_banknote:',
':evergreen_tree:',
':exclamation_mark:',
':exclamation_question_mark:',
':expressionless_face:',
':eye:',
':eye_in_speech_bubble:',
':eyes:',
':face_blowing_a_kiss:',
':face_savouring_delicious_food:',
':face_screaming_in_fear:',
':face_with_cold_sweat:',
':face_with_head-bandage:',
':face_with_medical_mask:',
':face_with_open_mouth:',
':face_with_open_mouth_&_cold_sweat:',
':face_with_rolling_eyes:',
':face_with_steam_from_nose:',
':face_with_stuck-out_tongue:',
':face_with_stuck-out_tongue_&_closed_eyes:',
':face_with_stuck-out_tongue_&_winking_eye:',
':face_with_tears_of_joy:',
':face_with_thermometer:',
':face_without_mouth:',
':factory:',
':fallen_leaf:',
':family:',
':family_man_boy:',
':family_man_boy_boy:',
':family_man_girl:',
':family_man_girl_boy:',
':family_man_girl_girl:',
':family_man_man_boy:',
':family_man_man_boy_boy:',
':family_man_man_girl:',
':family_man_man_girl_boy:',
':family_man_man_girl_girl:',
':family_man_woman_boy:',
':family_man_woman_boy_boy:',
':family_man_woman_girl:',
':family_man_woman_girl_boy:',
':family_man_woman_girl_girl:',
':family_woman_boy:',
':family_woman_boy_boy:',
':family_woman_girl:',
':family_woman_girl_boy:',
':family_woman_girl_girl:',
':family_woman_woman_boy:',
':family_woman_woman_boy_boy:',
':family_woman_woman_girl:',
':family_woman_woman_girl_boy:',
':family_woman_woman_girl_girl:',
':fast-forward_button:',
':fast_down_button:',
':fast_reverse_button:',
':fast_up_button:',
':fax_machine:',
':fearful_face:',
':female_sign:',
':ferris_wheel:',
':ferry:',
':field_hockey:',
':file_cabinet:',
':file_folder:',
':film_frames:',
':film_projector:',
':fire:',
':fire_engine:',
':fireworks:',
':first_quarter_moon:',
':first_quarter_moon_with_face:',
':fish:',
':fish_cake_with_swirl:',
':fishing_pole:',
':five-thirty:',
':five_o’clock:',
':flag_in_hole:',
':flashlight:',
':fleur-de-lis:',
':flexed_biceps:',
':flexed_biceps_dark_skin_tone:',
':flexed_biceps_light_skin_tone:',
':flexed_biceps_medium-dark_skin_tone:',
':flexed_biceps_medium-light_skin_tone:',
':flexed_biceps_medium_skin_tone:',
':floppy_disk:',
':flower_playing_cards:',
':flushed_face:',
':fog:',
':foggy:',
':folded_hands:',
':folded_hands_dark_skin_tone:',
':folded_hands_light_skin_tone:',
':folded_hands_medium-dark_skin_tone:',
':folded_hands_medium-light_skin_tone:',
':folded_hands_medium_skin_tone:',
':footprints:',
':fork_and_knife:',
':fork_and_knife_with_plate:',
':fountain:',
':fountain_pen:',
':four-thirty:',
':four_leaf_clover:',
':four_o’clock:',
':fox_face:',
':framed_picture:',
':french_fries:',
':fried_shrimp:',
':frog_face:',
':front-facing_baby_chick:',
':frowning_face:',
':frowning_face_with_open_mouth:',
':fuel_pump:',
':full_moon:',
':full_moon_with_face:',
':funeral_urn:',
':game_die:',
':gear:',
':gem_stone:',
':ghost:',
':girl:',
':girl_dark_skin_tone:',
':girl_light_skin_tone:',
':girl_medium-dark_skin_tone:',
':girl_medium-light_skin_tone:',
':girl_medium_skin_tone:',
':glass_of_milk:',
':glasses:',
':globe_showing_Americas:',
':globe_showing_Asia-Australia:',
':globe_showing_Europe-Africa:',
':globe_with_meridians:',
':glowing_star:',
':goal_net:',
':goat:',
':goblin:',
':gorilla:',
':graduation_cap:',
':grapes:',
':green_apple:',
':green_book:',
':green_heart:',
':green_salad:',
':grimacing_face:',
':grinning_cat_face_with_smiling_eyes:',
':grinning_face:',
':grinning_face_with_smiling_eyes:',
':growing_heart:',
':guard:',
':guard_dark_skin_tone:',
':guard_light_skin_tone:',
':guard_medium-dark_skin_tone:',
':guard_medium-light_skin_tone:',
':guard_medium_skin_tone:',
':guitar:',
':hamburger:',
':hammer:',
':hammer_and_pick:',
':hammer_and_wrench:',
':hamster_face:',
':handbag:',
':handshake:',
':hatching_chick:',
':headphone:',
':hear-no-evil_monkey:',
':heart_decoration:',
':heart_suit:',
':heart_with_arrow:',
':heart_with_ribbon:',
':heavy_check_mark:',
':heavy_division_sign:',
':heavy_dollar_sign:',
':heavy_heart_exclamation:',
':heavy_large_circle:',
':heavy_minus_sign:',
':heavy_multiplication_x:',
':heavy_plus_sign:',
':helicopter:',
':herb:',
':hibiscus:',
':high-heeled_shoe:',
':high-speed_train:',
':high-speed_train_with_bullet_nose:',
':high_voltage:',
':hole:',
':honey_pot:',
':honeybee:',
':horizontal_traffic_light:',
':horse:',
':horse_face:',
':horse_racing:',
':horse_racing_dark_skin_tone:',
':horse_racing_light_skin_tone:',
':horse_racing_medium-dark_skin_tone:',
':horse_racing_medium-light_skin_tone:',
':horse_racing_medium_skin_tone:',
':hospital:',
':hot_beverage:',
':hot_dog:',
':hot_pepper:',
':hot_springs:',
':hotel:',
':hourglass:',
':hourglass_with_flowing_sand:',
':house:',
':house_with_garden:',
':hugging_face:',
':hundred_points:',
':hushed_face:',
':ice_cream:',
':ice_hockey:',
':ice_skate:',
':inbox_tray:',
':incoming_envelope:',
':index_pointing_up:',
':index_pointing_up_dark_skin_tone:',
':index_pointing_up_light_skin_tone:',
':index_pointing_up_medium-dark_skin_tone:',
':index_pointing_up_medium-light_skin_tone:',
':index_pointing_up_medium_skin_tone:',
':information:',
':input_latin_letters:',
':input_latin_lowercase:',
':input_latin_uppercase:',
':input_numbers:',
':input_symbols:',
':jack-o-lantern:',
':jeans:',
':joker:',
':joystick:',
':kaaba:',
':key:',
':keyboard:',
':keycap_#:',
':keycap_*:',
':keycap_0:',
':keycap_1:',
':keycap_10:',
':keycap_2:',
':keycap_3:',
':keycap_4:',
':keycap_5:',
':keycap_6:',
':keycap_7:',
':keycap_8:',
':keycap_9:',
':kick_scooter:',
':kimono:',
':kiss:',
':kiss_man_man:',
':kiss_mark:',
':kiss_woman_man:',
':kiss_woman_woman:',
':kissing_cat_face_with_closed_eyes:',
':kissing_face:',
':kissing_face_with_closed_eyes:',
':kissing_face_with_smiling_eyes:',
':kitchen_knife:',
':kiwi_fruit:',
':koala:',
':label:',
':lady_beetle:',
':laptop_computer:',
':large_blue_diamond:',
':large_orange_diamond:',
':last_quarter_moon:',
':last_quarter_moon_with_face:',
':last_track_button:',
':latin_cross:',
':leaf_fluttering_in_wind:',
':ledger:',
':left-facing_fist:',
':left-facing_fist_dark_skin_tone:',
':left-facing_fist_light_skin_tone:',
':left-facing_fist_medium-dark_skin_tone:',
':left-facing_fist_medium-light_skin_tone:',
':left-facing_fist_medium_skin_tone:',
':left-pointing_magnifying_glass:',
':left-right_arrow:',
':left_arrow:',
':left_arrow_curving_right:',
':left_luggage:',
':left_speech_bubble:',
':lemon:',
':leopard:',
':level_slider:',
':light_bulb:',
':light_rail:',
':light_skin_tone:',
':link:',
':linked_paperclips:',
':lion_face:',
':lipstick:',
':litter_in_bin_sign:',
':lizard:',
':locked:',
':locked_with_key:',
':locked_with_pen:',
':locomotive:',
':lollipop:',
':loudly_crying_face:',
':loudspeaker:',
':love_hotel:',
':love_letter:',
':lying_face:',
':mahjong_red_dragon:',
':male_sign:',
':man:',
':man_and_woman_holding_hands:',
':man_artist:',
':man_artist_dark_skin_tone:',
':man_artist_light_skin_tone:',
':man_artist_medium-dark_skin_tone:',
':man_artist_medium-light_skin_tone:',
':man_artist_medium_skin_tone:',
':man_astronaut:',
':man_astronaut_dark_skin_tone:',
':man_astronaut_light_skin_tone:',
':man_astronaut_medium-dark_skin_tone:',
':man_astronaut_medium-light_skin_tone:',
':man_astronaut_medium_skin_tone:',
':man_biking:',
':man_biking_dark_skin_tone:',
':man_biking_light_skin_tone:',
':man_biking_medium-dark_skin_tone:',
':man_biking_medium-light_skin_tone:',
':man_biking_medium_skin_tone:',
':man_bouncing_ball:',
':man_bouncing_ball_dark_skin_tone:',
':man_bouncing_ball_light_skin_tone:',
':man_bouncing_ball_medium-dark_skin_tone:',
':man_bouncing_ball_medium-light_skin_tone:',
':man_bouncing_ball_medium_skin_tone:',
':man_bowing:',
':man_bowing_dark_skin_tone:',
':man_bowing_light_skin_tone:',
':man_bowing_medium-dark_skin_tone:',
':man_bowing_medium-light_skin_tone:',
':man_bowing_medium_skin_tone:',
':man_cartwheeling:',
':man_cartwheeling_dark_skin_tone:',
':man_cartwheeling_light_skin_tone:',
':man_cartwheeling_medium-dark_skin_tone:',
':man_cartwheeling_medium-light_skin_tone:',
':man_cartwheeling_medium_skin_tone:',
':man_construction_worker:',
':man_construction_worker_dark_skin_tone:',
':man_construction_worker_light_skin_tone:',
':man_construction_worker_medium-dark_skin_tone:',
':man_construction_worker_medium-light_skin_tone:',
':man_construction_worker_medium_skin_tone:',
':man_cook:',
':man_cook_dark_skin_tone:',
':man_cook_light_skin_tone:',
':man_cook_medium-dark_skin_tone:',
':man_cook_medium-light_skin_tone:',
':man_cook_medium_skin_tone:',
':man_dancing:',
':man_dancing_dark_skin_tone:',
':man_dancing_light_skin_tone:',
':man_dancing_medium-dark_skin_tone:',
':man_dancing_medium-light_skin_tone:',
':man_dancing_medium_skin_tone:',
':man_dark_skin_tone:',
':man_detective:',
':man_detective_dark_skin_tone:',
':man_detective_light_skin_tone:',
':man_detective_medium-dark_skin_tone:',
':man_detective_medium-light_skin_tone:',
':man_detective_medium_skin_tone:',
':man_facepalming:',
':man_facepalming_dark_skin_tone:',
':man_facepalming_light_skin_tone:',
':man_facepalming_medium-dark_skin_tone:',
':man_facepalming_medium-light_skin_tone:',
':man_facepalming_medium_skin_tone:',
':man_factory_worker:',
':man_factory_worker_dark_skin_tone:',
':man_factory_worker_light_skin_tone:',
':man_factory_worker_medium-dark_skin_tone:',
':man_factory_worker_medium-light_skin_tone:',
':man_factory_worker_medium_skin_tone:',
':man_farmer:',
':man_farmer_dark_skin_tone:',
':man_farmer_light_skin_tone:',
':man_farmer_medium-dark_skin_tone:',
':man_farmer_medium-light_skin_tone:',
':man_farmer_medium_skin_tone:',
':man_firefighter:',
':man_firefighter_dark_skin_tone:',
':man_firefighter_light_skin_tone:',
':man_firefighter_medium-dark_skin_tone:',
':man_firefighter_medium-light_skin_tone:',
':man_firefighter_medium_skin_tone:',
':man_frowning:',
':man_frowning_dark_skin_tone:',
':man_frowning_light_skin_tone:',
':man_frowning_medium-dark_skin_tone:',
':man_frowning_medium-light_skin_tone:',
':man_frowning_medium_skin_tone:',
':man_gesturing_NO:',
':man_gesturing_NO_dark_skin_tone:',
':man_gesturing_NO_light_skin_tone:',
':man_gesturing_NO_medium-dark_skin_tone:',
':man_gesturing_NO_medium-light_skin_tone:',
':man_gesturing_NO_medium_skin_tone:',
':man_gesturing_OK:',
':man_gesturing_OK_dark_skin_tone:',
':man_gesturing_OK_light_skin_tone:',
':man_gesturing_OK_medium-dark_skin_tone:',
':man_gesturing_OK_medium-light_skin_tone:',
':man_gesturing_OK_medium_skin_tone:',
':man_getting_haircut:',
':man_getting_haircut_dark_skin_tone:',
':man_getting_haircut_light_skin_tone:',
':man_getting_haircut_medium-dark_skin_tone:',
':man_getting_haircut_medium-light_skin_tone:',
':man_getting_haircut_medium_skin_tone:',
':man_getting_massage:',
':man_getting_massage_dark_skin_tone:',
':man_getting_massage_light_skin_tone:',
':man_getting_massage_medium-dark_skin_tone:',
':man_getting_massage_medium-light_skin_tone:',
':man_getting_massage_medium_skin_tone:',
':man_golfing:',
':man_golfing_dark_skin_tone:',
':man_golfing_light_skin_tone:',
':man_golfing_medium-dark_skin_tone:',
':man_golfing_medium-light_skin_tone:',
':man_golfing_medium_skin_tone:',
':man_guard:',
':man_guard_dark_skin_tone:',
':man_guard_light_skin_tone:',
':man_guard_medium-dark_skin_tone:',
':man_guard_medium-light_skin_tone:',
':man_guard_medium_skin_tone:',
':man_health_worker:',
':man_health_worker_dark_skin_tone:',
':man_health_worker_light_skin_tone:',
':man_health_worker_medium-dark_skin_tone:',
':man_health_worker_medium-light_skin_tone:',
':man_health_worker_medium_skin_tone:',
':man_in_business_suit_levitating:',
':man_in_business_suit_levitating_dark_skin_tone:',
':man_in_business_suit_levitating_light_skin_tone:',
':man_in_business_suit_levitating_medium-dark_skin_tone:',
':man_in_business_suit_levitating_medium-light_skin_tone:',
':man_in_business_suit_levitating_medium_skin_tone:',
':man_in_tuxedo:',
':man_in_tuxedo_dark_skin_tone:',
':man_in_tuxedo_light_skin_tone:',
':man_in_tuxedo_medium-dark_skin_tone:',
':man_in_tuxedo_medium-light_skin_tone:',
':man_in_tuxedo_medium_skin_tone:',
':man_judge:',
':man_judge_dark_skin_tone:',
':man_judge_light_skin_tone:',
':man_judge_medium-dark_skin_tone:',
':man_judge_medium-light_skin_tone:',
':man_judge_medium_skin_tone:',
':man_juggling:',
':man_juggling_dark_skin_tone:',
':man_juggling_light_skin_tone:',
':man_juggling_medium-dark_skin_tone:',
':man_juggling_medium-light_skin_tone:',
':man_juggling_medium_skin_tone:',
':man_lifting_weights:',
':man_lifting_weights_dark_skin_tone:',
':man_lifting_weights_light_skin_tone:',
':man_lifting_weights_medium-dark_skin_tone:',
':man_lifting_weights_medium-light_skin_tone:',
':man_lifting_weights_medium_skin_tone:',
':man_light_skin_tone:',
':man_mechanic:',
':man_mechanic_dark_skin_tone:',
':man_mechanic_light_skin_tone:',
':man_mechanic_medium-dark_skin_tone:',
':man_mechanic_medium-light_skin_tone:',
':man_mechanic_medium_skin_tone:',
':man_medium-dark_skin_tone:',
':man_medium-light_skin_tone:',
':man_medium_skin_tone:',
':man_mountain_biking:',
':man_mountain_biking_dark_skin_tone:',
':man_mountain_biking_light_skin_tone:',
':man_mountain_biking_medium-dark_skin_tone:',
':man_mountain_biking_medium-light_skin_tone:',
':man_mountain_biking_medium_skin_tone:',
':man_office_worker:',
':man_office_worker_dark_skin_tone:',
':man_office_worker_light_skin_tone:',
':man_office_worker_medium-dark_skin_tone:',
':man_office_worker_medium-light_skin_tone:',
':man_office_worker_medium_skin_tone:',
':man_pilot:',
':man_pilot_dark_skin_tone:',
':man_pilot_light_skin_tone:',
':man_pilot_medium-dark_skin_tone:',
':man_pilot_medium-light_skin_tone:',
':man_pilot_medium_skin_tone:',
':man_playing_handball:',
':man_playing_handball_dark_skin_tone:',
':man_playing_handball_light_skin_tone:',
':man_playing_handball_medium-dark_skin_tone:',
':man_playing_handball_medium-light_skin_tone:',
':man_playing_handball_medium_skin_tone:',
':man_playing_water_polo:',
':man_playing_water_polo_dark_skin_tone:',
':man_playing_water_polo_light_skin_tone:',
':man_playing_water_polo_medium-dark_skin_tone:',
':man_playing_water_polo_medium-light_skin_tone:',
':man_playing_water_polo_medium_skin_tone:',
':man_police_officer:',
':man_police_officer_dark_skin_tone:',
':man_police_officer_light_skin_tone:',
':man_police_officer_medium-dark_skin_tone:',
':man_police_officer_medium-light_skin_tone:',
':man_police_officer_medium_skin_tone:',
':man_pouting:',
':man_pouting_dark_skin_tone:',
':man_pouting_light_skin_tone:',
':man_pouting_medium-dark_skin_tone:',
':man_pouting_medium-light_skin_tone:',
':man_pouting_medium_skin_tone:',
':man_raising_hand:',
':man_raising_hand_dark_skin_tone:',
':man_raising_hand_light_skin_tone:',
':man_raising_hand_medium-dark_skin_tone:',
':man_raising_hand_medium-light_skin_tone:',
':man_raising_hand_medium_skin_tone:',
':man_rowing_boat:',
':man_rowing_boat_dark_skin_tone:',
':man_rowing_boat_light_skin_tone:',
':man_rowing_boat_medium-dark_skin_tone:',
':man_rowing_boat_medium-light_skin_tone:',
':man_rowing_boat_medium_skin_tone:',
':man_running:',
':man_running_dark_skin_tone:',
':man_running_light_skin_tone:',
':man_running_medium-dark_skin_tone:',
':man_running_medium-light_skin_tone:',
':man_running_medium_skin_tone:',
':man_scientist:',
':man_scientist_dark_skin_tone:',
':man_scientist_light_skin_tone:',
':man_scientist_medium-dark_skin_tone:',
':man_scientist_medium-light_skin_tone:',
':man_scientist_medium_skin_tone:',
':man_shrugging:',
':man_shrugging_dark_skin_tone:',
':man_shrugging_light_skin_tone:',
':man_shrugging_medium-dark_skin_tone:',
':man_shrugging_medium-light_skin_tone:',
':man_shrugging_medium_skin_tone:',
':man_singer:',
':man_singer_dark_skin_tone:',
':man_singer_light_skin_tone:',
':man_singer_medium-dark_skin_tone:',
':man_singer_medium-light_skin_tone:',
':man_singer_medium_skin_tone:',
':man_student:',
':man_student_dark_skin_tone:',
':man_student_light_skin_tone:',
':man_student_medium-dark_skin_tone:',
':man_student_medium-light_skin_tone:',
':man_student_medium_skin_tone:',
':man_surfing:',
':man_surfing_dark_skin_tone:',
':man_surfing_light_skin_tone:',
':man_surfing_medium-dark_skin_tone:',
':man_surfing_medium-light_skin_tone:',
':man_surfing_medium_skin_tone:',
':man_swimming:',
':man_swimming_dark_skin_tone:',
':man_swimming_light_skin_tone:',
':man_swimming_medium-dark_skin_tone:',
':man_swimming_medium-light_skin_tone:',
':man_swimming_medium_skin_tone:',
':man_teacher:',
':man_teacher_dark_skin_tone:',
':man_teacher_light_skin_tone:',
':man_teacher_medium-dark_skin_tone:',
':man_teacher_medium-light_skin_tone:',
':man_teacher_medium_skin_tone:',
':man_technologist:',
':man_technologist_dark_skin_tone:',
':man_technologist_light_skin_tone:',
':man_technologist_medium-dark_skin_tone:',
':man_technologist_medium-light_skin_tone:',
':man_technologist_medium_skin_tone:',
':man_tipping_hand:',
':man_tipping_hand_dark_skin_tone:',
':man_tipping_hand_light_skin_tone:',
':man_tipping_hand_medium-dark_skin_tone:',
':man_tipping_hand_medium-light_skin_tone:',
':man_tipping_hand_medium_skin_tone:',
':man_walking:',
':man_walking_dark_skin_tone:',
':man_walking_light_skin_tone:',
':man_walking_medium-dark_skin_tone:',
':man_walking_medium-light_skin_tone:',
':man_walking_medium_skin_tone:',
':man_wearing_turban:',
':man_wearing_turban_dark_skin_tone:',
':man_wearing_turban_light_skin_tone:',
':man_wearing_turban_medium-dark_skin_tone:',
':man_wearing_turban_medium-light_skin_tone:',
':man_wearing_turban_medium_skin_tone:',
':man_with_Chinese_cap:',
':man_with_Chinese_cap_dark_skin_tone:',
':man_with_Chinese_cap_light_skin_tone:',
':man_with_Chinese_cap_medium-dark_skin_tone:',
':man_with_Chinese_cap_medium-light_skin_tone:',
':man_with_Chinese_cap_medium_skin_tone:',
':mantelpiece_clock:',
':man’s_shoe:',
':map_of_Japan:',
':maple_leaf:',
':martial_arts_uniform:',
':meat_on_bone:',
':medical_symbol:',
':medium-dark_skin_tone:',
':medium-light_skin_tone:',
':medium_skin_tone:',
':megaphone:',
':melon:',
':memo:',
':men_with_bunny_ears_partying:',
':men_wrestling:',
':menorah:',
':men’s_room:',
':metro:',
':microphone:',
':microscope:',
':middle_finger:',
':middle_finger_dark_skin_tone:',
':middle_finger_light_skin_tone:',
':middle_finger_medium-dark_skin_tone:',
':middle_finger_medium-light_skin_tone:',
':middle_finger_medium_skin_tone:',
':military_medal:',
':milky_way:',
':minibus:',
':moai:',
':mobile_phone:',
':mobile_phone_off:',
':mobile_phone_with_arrow:',
':money-mouth_face:',
':money_bag:',
':money_with_wings:',
':monkey:',
':monkey_face:',
':monorail:',
':moon_viewing_ceremony:',
':mosque:',
':motor_boat:',
':motor_scooter:',
':motorcycle:',
':motorway:',
':mount_fuji:',
':mountain:',
':mountain_cableway:',
':mountain_railway:',
':mouse:',
':mouse_face:',
':mouth:',
':movie_camera:',
':mushroom:',
':musical_keyboard:',
':musical_note:',
':musical_notes:',
':musical_score:',
':muted_speaker:',
':nail_polish:',
':nail_polish_dark_skin_tone:',
':nail_polish_light_skin_tone:',
':nail_polish_medium-dark_skin_tone:',
':nail_polish_medium-light_skin_tone:',
':nail_polish_medium_skin_tone:',
':name_badge:',
':national_park:',
':nauseated_face:',
':necktie:',
':nerd_face:',
':neutral_face:',
':new_moon:',
':new_moon_face:',
':newspaper:',
':next_track_button:',
':night_with_stars:',
':nine-thirty:',
':nine_o’clock:',
':no_bicycles:',
':no_entry:',
':no_littering:',
':no_mobile_phones:',
':no_one_under_eighteen:',
':no_pedestrians:',
':no_smoking:',
':non-potable_water:',
':nose:',
':nose_dark_skin_tone:',
':nose_light_skin_tone:',
':nose_medium-dark_skin_tone:',
':nose_medium-light_skin_tone:',
':nose_medium_skin_tone:',
':notebook:',
':notebook_with_decorative_cover:',
':nut_and_bolt:',
':octopus:',
':oden:',
':office_building:',
':ogre:',
':oil_drum:',
':old_key:',
':old_man:',
':old_man_dark_skin_tone:',
':old_man_light_skin_tone:',
':old_man_medium-dark_skin_tone:',
':old_man_medium-light_skin_tone:',
':old_man_medium_skin_tone:',
':old_woman:',
':old_woman_dark_skin_tone:',
':old_woman_light_skin_tone:',
':old_woman_medium-dark_skin_tone:',
':old_woman_medium-light_skin_tone:',
':old_woman_medium_skin_tone:',
':om:',
':oncoming_automobile:',
':oncoming_bus:',
':oncoming_fist:',
':oncoming_fist_dark_skin_tone:',
':oncoming_fist_light_skin_tone:',
':oncoming_fist_medium-dark_skin_tone:',
':oncoming_fist_medium-light_skin_tone:',
':oncoming_fist_medium_skin_tone:',
':oncoming_police_car:',
':oncoming_taxi:',
':one-thirty:',
':one_o’clock:',
':open_book:',
':open_file_folder:',
':open_hands:',
':open_hands_dark_skin_tone:',
':open_hands_light_skin_tone:',
':open_hands_medium-dark_skin_tone:',
':open_hands_medium-light_skin_tone:',
':open_hands_medium_skin_tone:',
':open_mailbox_with_lowered_flag:',
':open_mailbox_with_raised_flag:',
':optical_disk:',
':orange_book:',
':orthodox_cross:',
':outbox_tray:',
':owl:',
':ox:',
':package:',
':page_facing_up:',
':page_with_curl:',
':pager:',
':paintbrush:',
':palm_tree:',
':pancakes:',
':panda_face:',
':paperclip:',
':part_alternation_mark:',
':party_popper:',
':passenger_ship:',
':passport_control:',
':pause_button:',
':paw_prints:',
':peace_symbol:',
':peach:',
':peanuts:',
':pear:',
':pen:',
':pencil:',
':penguin:',
':pensive_face:',
':people_with_bunny_ears_partying:',
':people_wrestling:',
':performing_arts:',
':persevering_face:',
':person_biking:',
':person_biking_dark_skin_tone:',
':person_biking_light_skin_tone:',
':person_biking_medium-dark_skin_tone:',
':person_biking_medium-light_skin_tone:',
':person_biking_medium_skin_tone:',
':person_bouncing_ball:',
':person_bouncing_ball_dark_skin_tone:',
':person_bouncing_ball_light_skin_tone:',
':person_bouncing_ball_medium-dark_skin_tone:',
':person_bouncing_ball_medium-light_skin_tone:',
':person_bouncing_ball_medium_skin_tone:',
':person_bowing:',
':person_bowing_dark_skin_tone:',
':person_bowing_light_skin_tone:',
':person_bowing_medium-dark_skin_tone:',
':person_bowing_medium-light_skin_tone:',
':person_bowing_medium_skin_tone:',
':person_cartwheeling:',
':person_cartwheeling_dark_skin_tone:',
':person_cartwheeling_light_skin_tone:',
':person_cartwheeling_medium-dark_skin_tone:',
':person_cartwheeling_medium-light_skin_tone:',
':person_cartwheeling_medium_skin_tone:',
':person_facepalming:',
':person_facepalming_dark_skin_tone:',
':person_facepalming_light_skin_tone:',
':person_facepalming_medium-dark_skin_tone:',
':person_facepalming_medium-light_skin_tone:',
':person_facepalming_medium_skin_tone:',
':person_fencing:',
':person_frowning:',
':person_frowning_dark_skin_tone:',
':person_frowning_light_skin_tone:',
':person_frowning_medium-dark_skin_tone:',
':person_frowning_medium-light_skin_tone:',
':person_frowning_medium_skin_tone:',
':person_gesturing_NO:',
':person_gesturing_NO_dark_skin_tone:',
':person_gesturing_NO_light_skin_tone:',
':person_gesturing_NO_medium-dark_skin_tone:',
':person_gesturing_NO_medium-light_skin_tone:',
':person_gesturing_NO_medium_skin_tone:',
':person_gesturing_OK:',
':person_gesturing_OK_dark_skin_tone:',
':person_gesturing_OK_light_skin_tone:',
':person_gesturing_OK_medium-dark_skin_tone:',
':person_gesturing_OK_medium-light_skin_tone:',
':person_gesturing_OK_medium_skin_tone:',
':person_getting_haircut:',
':person_getting_haircut_dark_skin_tone:',
':person_getting_haircut_light_skin_tone:',
':person_getting_haircut_medium-dark_skin_tone:',
':person_getting_haircut_medium-light_skin_tone:',
':person_getting_haircut_medium_skin_tone:',
':person_getting_massage:',
':person_getting_massage_dark_skin_tone:',
':person_getting_massage_light_skin_tone:',
':person_getting_massage_medium-dark_skin_tone:',
':person_getting_massage_medium-light_skin_tone:',
':person_getting_massage_medium_skin_tone:',
':person_golfing:',
':person_golfing_dark_skin_tone:',
':person_golfing_light_skin_tone:',
':person_golfing_medium-dark_skin_tone:',
':person_golfing_medium-light_skin_tone:',
':person_golfing_medium_skin_tone:',
':person_in_bed:',
':person_in_bed_dark_skin_tone:',
':person_in_bed_light_skin_tone:',
':person_in_bed_medium-dark_skin_tone:',
':person_in_bed_medium-light_skin_tone:',
':person_in_bed_medium_skin_tone:',
':person_juggling:',
':person_juggling_dark_skin_tone:',
':person_juggling_light_skin_tone:',
':person_juggling_medium-dark_skin_tone:',
':person_juggling_medium-light_skin_tone:',
':person_juggling_medium_skin_tone:',
':person_lifting_weights:',
':person_lifting_weights_dark_skin_tone:',
':person_lifting_weights_light_skin_tone:',
':person_lifting_weights_medium-dark_skin_tone:',
':person_lifting_weights_medium-light_skin_tone:',
':person_lifting_weights_medium_skin_tone:',
':person_mountain_biking:',
':person_mountain_biking_dark_skin_tone:',
':person_mountain_biking_light_skin_tone:',
':person_mountain_biking_medium-dark_skin_tone:',
':person_mountain_biking_medium-light_skin_tone:',
':person_mountain_biking_medium_skin_tone:',
':person_playing_handball:',
':person_playing_handball_dark_skin_tone:',
':person_playing_handball_light_skin_tone:',
':person_playing_handball_medium-dark_skin_tone:',
':person_playing_handball_medium-light_skin_tone:',
':person_playing_handball_medium_skin_tone:',
':person_playing_water_polo:',
':person_playing_water_polo_dark_skin_tone:',
':person_playing_water_polo_light_skin_tone:',
':person_playing_water_polo_medium-dark_skin_tone:',
':person_playing_water_polo_medium-light_skin_tone:',
':person_playing_water_polo_medium_skin_tone:',
':person_pouting:',
':person_pouting_dark_skin_tone:',
':person_pouting_light_skin_tone:',
':person_pouting_medium-dark_skin_tone:',
':person_pouting_medium-light_skin_tone:',
':person_pouting_medium_skin_tone:',
':person_raising_hand:',
':person_raising_hand_dark_skin_tone:',
':person_raising_hand_light_skin_tone:',
':person_raising_hand_medium-dark_skin_tone:',
':person_raising_hand_medium-light_skin_tone:',
':person_raising_hand_medium_skin_tone:',
':person_rowing_boat:',
':person_rowing_boat_dark_skin_tone:',
':person_rowing_boat_light_skin_tone:',
':person_rowing_boat_medium-dark_skin_tone:',
':person_rowing_boat_medium-light_skin_tone:',
':person_rowing_boat_medium_skin_tone:',
':person_running:',
':person_running_dark_skin_tone:',
':person_running_light_skin_tone:',
':person_running_medium-dark_skin_tone:',
':person_running_medium-light_skin_tone:',
':person_running_medium_skin_tone:',
':person_shrugging:',
':person_shrugging_dark_skin_tone:',
':person_shrugging_light_skin_tone:',
':person_shrugging_medium-dark_skin_tone:',
':person_shrugging_medium-light_skin_tone:',
':person_shrugging_medium_skin_tone:',
':person_surfing:',
':person_surfing_dark_skin_tone:',
':person_surfing_light_skin_tone:',
':person_surfing_medium-dark_skin_tone:',
':person_surfing_medium-light_skin_tone:',
':person_surfing_medium_skin_tone:',
':person_swimming:',
':person_swimming_dark_skin_tone:',
':person_swimming_light_skin_tone:',
':person_swimming_medium-dark_skin_tone:',
':person_swimming_medium-light_skin_tone:',
':person_swimming_medium_skin_tone:',
':person_taking_bath:',
':person_taking_bath_dark_skin_tone:',
':person_taking_bath_light_skin_tone:',
':person_taking_bath_medium-dark_skin_tone:',
':person_taking_bath_medium-light_skin_tone:',
':person_taking_bath_medium_skin_tone:',
':person_tipping_hand:',
':person_tipping_hand_dark_skin_tone:',
':person_tipping_hand_light_skin_tone:',
':person_tipping_hand_medium-dark_skin_tone:',
':person_tipping_hand_medium-light_skin_tone:',
':person_tipping_hand_medium_skin_tone:',
':person_walking:',
':person_walking_dark_skin_tone:',
':person_walking_light_skin_tone:',
':person_walking_medium-dark_skin_tone:',
':person_walking_medium-light_skin_tone:',
':person_walking_medium_skin_tone:',
':person_wearing_turban:',
':person_wearing_turban_dark_skin_tone:',
':person_wearing_turban_light_skin_tone:',
':person_wearing_turban_medium-dark_skin_tone:',
':person_wearing_turban_medium-light_skin_tone:',
':person_wearing_turban_medium_skin_tone:',
':pick:',
':pig:',
':pig_face:',
':pig_nose:',
':pile_of_poo:',
':pill:',
':pine_decoration:',
':pineapple:',
':ping_pong:',
':pistol:',
':pizza:',
':place_of_worship:',
':play_button:',
':play_or_pause_button:',
':police_car:',
':police_car_light:',
':police_officer:',
':police_officer_dark_skin_tone:',
':police_officer_light_skin_tone:',
':police_officer_medium-dark_skin_tone:',
':police_officer_medium-light_skin_tone:',
':police_officer_medium_skin_tone:',
':poodle:',
':pool_8_ball:',
':popcorn:',
':post_office:',
':postal_horn:',
':postbox:',
':pot_of_food:',
':potable_water:',
':potato:',
':poultry_leg:',
':pound_banknote:',
':pouting_cat_face:',
':pouting_face:',
':prayer_beads:',
':pregnant_woman:',
':pregnant_woman_dark_skin_tone:',
':pregnant_woman_light_skin_tone:',
':pregnant_woman_medium-dark_skin_tone:',
':pregnant_woman_medium-light_skin_tone:',
':pregnant_woman_medium_skin_tone:',
':prince:',
':prince_dark_skin_tone:',
':prince_light_skin_tone:',
':prince_medium-dark_skin_tone:',
':prince_medium-light_skin_tone:',
':prince_medium_skin_tone:',
':princess:',
':princess_dark_skin_tone:',
':princess_light_skin_tone:',
':princess_medium-dark_skin_tone:',
':princess_medium-light_skin_tone:',
':princess_medium_skin_tone:',
':printer:',
':prohibited:',
':purple_heart:',
':purse:',
':pushpin:',
':question_mark:',
':rabbit:',
':rabbit_face:',
':racing_car:',
':radio:',
':radio_button:',
':radioactive:',
':railway_car:',
':railway_track:',
':rainbow:',
':rainbow_flag:',
':raised_back_of_hand:',
':raised_back_of_hand_dark_skin_tone:',
':raised_back_of_hand_light_skin_tone:',
':raised_back_of_hand_medium-dark_skin_tone:',
':raised_back_of_hand_medium-light_skin_tone:',
':raised_back_of_hand_medium_skin_tone:',
':raised_fist:',
':raised_fist_dark_skin_tone:',
':raised_fist_light_skin_tone:',
':raised_fist_medium-dark_skin_tone:',
':raised_fist_medium-light_skin_tone:',
':raised_fist_medium_skin_tone:',
':raised_hand:',
':raised_hand_dark_skin_tone:',
':raised_hand_light_skin_tone:',
':raised_hand_medium-dark_skin_tone:',
':raised_hand_medium-light_skin_tone:',
':raised_hand_medium_skin_tone:',
':raised_hand_with_fingers_splayed:',
':raised_hand_with_fingers_splayed_dark_skin_tone:',
':raised_hand_with_fingers_splayed_light_skin_tone:',
':raised_hand_with_fingers_splayed_medium-dark_skin_tone:',
':raised_hand_with_fingers_splayed_medium_skin_tone:',
':raising_hands:',
':raising_hands_dark_skin_tone:',
':raising_hands_light_skin_tone:',
':raising_hands_medium-dark_skin_tone:',
':raising_hands_medium-light_skin_tone:',
':raising_hands_medium_skin_tone:',
':ram:',
':rat:',
':record_button:',
':recycling_symbol:',
':red_apple:',
':red_circle:',
':red_heart:',
':red_paper_lantern:',
':red_triangle_pointed_down:',
':red_triangle_pointed_up:',
':registered:',
':relieved_face:',
':reminder_ribbon:',
':repeat_button:',
':repeat_single_button:',
':rescue_worker’s_helmet:',
':restroom:',
':reverse_button:',
':revolving_hearts:',
':rhinoceros:',
':ribbon:',
':rice_ball:',
':rice_cracker:',
':right-facing_fist:',
':right-facing_fist_dark_skin_tone:',
':right-facing_fist_light_skin_tone:',
':right-facing_fist_medium-dark_skin_tone:',
':right-facing_fist_medium-light_skin_tone:',
':right-facing_fist_medium_skin_tone:',
':right-pointing_magnifying_glass:',
':right_anger_bubble:',
':right_arrow:',
':right_arrow_curving_down:',
':right_arrow_curving_left:',
':right_arrow_curving_up:',
':ring:',
':roasted_sweet_potato:',
':robot_face:',
':rocket:',
':rolled-up_newspaper:',
':roller_coaster:',
':rolling_on_the_floor_laughing:',
':rooster:',
':rose:',
':rosette:',
':round_pushpin:',
':rugby_football:',
':running_shirt:',
':running_shoe:',
':sailboat:',
':sake:',
':satellite:',
':satellite_antenna:',
':saxophone:',
':school:',
':school_backpack:',
':scissors:',
':scorpion:',
':scroll:',
':seat:',
':see-no-evil_monkey:',
':seedling:',
':selfie:',
':selfie_dark_skin_tone:',
':selfie_light_skin_tone:',
':selfie_medium-dark_skin_tone:',
':selfie_medium-light_skin_tone:',
':selfie_medium_skin_tone:',
':seven-thirty:',
':seven_o’clock:',
':shallow_pan_of_food:',
':shamrock:',
':shark:',
':shaved_ice:',
':sheaf_of_rice:',
':sheep:',
':shield:',
':shinto_shrine:',
':ship:',
':shooting_star:',
':shopping_bags:',
':shopping_cart:',
':shortcake:',
':shower:',
':shrimp:',
':shuffle_tracks_button:',
':sign_of_the_horns:',
':sign_of_the_horns_dark_skin_tone:',
':sign_of_the_horns_light_skin_tone:',
':sign_of_the_horns_medium-dark_skin_tone:',
':sign_of_the_horns_medium-light_skin_tone:',
':sign_of_the_horns_medium_skin_tone:',
':six-thirty:',
':six_o’clock:',
':skier:',
':skis:',
':skull:',
':skull_and_crossbones:',
':sleeping_face:',
':sleepy_face:',
':slightly_frowning_face:',
':slightly_smiling_face:',
':slot_machine:',
':small_airplane:',
':small_blue_diamond:',
':small_orange_diamond:',
':smiling_cat_face_with_heart-eyes:',
':smiling_cat_face_with_open_mouth:',
':smiling_face:',
':smiling_face_with_halo:',
':smiling_face_with_heart-eyes:',
':smiling_face_with_horns:',
':smiling_face_with_open_mouth:',
':smiling_face_with_open_mouth_&_closed_eyes:',
':smiling_face_with_open_mouth_&_cold_sweat:',
':smiling_face_with_open_mouth_&_smiling_eyes:',
':smiling_face_with_smiling_eyes:',
':smiling_face_with_sunglasses:',
':smirking_face:',
':snail:',
':snake:',
':sneezing_face:',
':snow-capped_mountain:',
':snowboarder:',
':snowboarder_dark_skin_tone:',
':snowboarder_light_skin_tone:',
':snowboarder_medium-dark_skin_tone:',
':snowboarder_medium-light_skin_tone:',
':snowboarder_medium_skin_tone:',
':snowflake:',
':snowman:',
':snowman_without_snow:',
':soccer_ball:',
':soft_ice_cream:',
':spade_suit:',
':spaghetti:',
':sparkle:',
':sparkler:',
':sparkles:',
':sparkling_heart:',
':speak-no-evil_monkey:',
':speaker_high_volume:',
':speaker_low_volume:',
':speaker_medium_volume:',
':speaking_head:',
':speech_balloon:',
':speedboat:',
':spider:',
':spider_web:',
':spiral_calendar:',
':spiral_notepad:',
':spiral_shell:',
':spoon:',
':sport_utility_vehicle:',
':sports_medal:',
':spouting_whale:',
':squid:',
':stadium:',
':star_and_crescent:',
':star_of_David:',
':station:',
':steaming_bowl:',
':stop_button:',
':stop_sign:',
':stopwatch:',
':straight_ruler:',
':strawberry:',
':studio_microphone:',
':stuffed_flatbread:',
':sun:',
':sun_behind_cloud:',
':sun_behind_large_cloud:',
':sun_behind_rain_cloud:',
':sun_behind_small_cloud:',
':sun_with_face:',
':sunflower:',
':sunglasses:',
':sunrise:',
':sunrise_over_mountains:',
':sunset:',
':sushi:',
':suspension_railway:',
':sweat_droplets:',
':synagogue:',
':syringe:',
':t-shirt:',
':taco:',
':tanabata_tree:',
':tangerine:',
':taxi:',
':teacup_without_handle:',
':tear-off_calendar:',
':telephone:',
':telephone_receiver:',
':telescope:',
':television:',
':ten-thirty:',
':ten_o’clock:',
':tennis:',
':tent:',
':thermometer:',
':thinking_face:',
':thought_balloon:',
':three-thirty:',
':three_o’clock:',
':thumbs_down:',
':thumbs_down_dark_skin_tone:',
':thumbs_down_light_skin_tone:',
':thumbs_down_medium-dark_skin_tone:',
':thumbs_down_medium-light_skin_tone:',
':thumbs_down_medium_skin_tone:',
':thumbs_up:',
':thumbs_up_dark_skin_tone:',
':thumbs_up_light_skin_tone:',
':thumbs_up_medium-dark_skin_tone:',
':thumbs_up_medium-light_skin_tone:',
':thumbs_up_medium_skin_tone:',
':ticket:',
':tiger:',
':tiger_face:',
':timer_clock:',
':tired_face:',
':toilet:',
':tomato:',
':tongue:',
':top_hat:',
':tornado:',
':trackball:',
':tractor:',
':trade_mark:',
':train:',
':tram:',
':tram_car:',
':triangular_flag:',
':triangular_ruler:',
':trident_emblem:',
':trolleybus:',
':trophy:',
':tropical_drink:',
':tropical_fish:',
':trumpet:',
':tulip:',
':tumbler_glass:',
':turkey:',
':turtle:',
':twelve-thirty:',
':twelve_o’clock:',
':two-hump_camel:',
':two-thirty:',
':two_hearts:',
':two_men_holding_hands:',
':two_o’clock:',
':two_women_holding_hands:',
':umbrella:',
':umbrella_on_ground:',
':umbrella_with_rain_drops:',
':unamused_face:',
':unicorn_face:',
':unlocked:',
':up-down_arrow:',
':up-left_arrow:',
':up-right_arrow:',
':up_arrow:',
':up_button:',
':upside-down_face:',
':vertical_traffic_light:',
':vibration_mode:',
':victory_hand:',
':victory_hand_dark_skin_tone:',
':victory_hand_light_skin_tone:',
':victory_hand_medium-dark_skin_tone:',
':victory_hand_medium-light_skin_tone:',
':victory_hand_medium_skin_tone:',
':video_camera:',
':video_game:',
':videocassette:',
':violin:',
':volcano:',
':volleyball:',
':vulcan_salute:',
':vulcan_salute_dark_skin_tone:',
':vulcan_salute_light_skin_tone:',
':vulcan_salute_medium-dark_skin_tone:',
':vulcan_salute_medium-light_skin_tone:',
':vulcan_salute_medium_skin_tone:',
':waning_crescent_moon:',
':waning_gibbous_moon:',
':warning:',
':wastebasket:',
':watch:',
':water_buffalo:',
':water_closet:',
':water_wave:',
':watermelon:',
':waving_hand:',
':waving_hand_dark_skin_tone:',
':waving_hand_light_skin_tone:',
':waving_hand_medium-dark_skin_tone:',
':waving_hand_medium-light_skin_tone:',
':waving_hand_medium_skin_tone:',
':wavy_dash:',
':waxing_crescent_moon:',
':waxing_gibbous_moon:',
':weary_cat_face:',
':weary_face:',
':wedding:',
':whale:',
':wheel_of_dharma:',
':wheelchair_symbol:',
':white_circle:',
':white_exclamation_mark:',
':white_flag:',
':white_flower:',
':white_heavy_check_mark:',
':white_large_square:',
':white_medium-small_square:',
':white_medium_square:',
':white_medium_star:',
':white_question_mark:',
':white_small_square:',
':white_square_button:',
':wilted_flower:',
':wind_chime:',
':wind_face:',
':wine_glass:',
':winking_face:',
':wolf_face:',
':woman:',
':woman_artist:',
':woman_artist_dark_skin_tone:',
':woman_artist_light_skin_tone:',
':woman_artist_medium-dark_skin_tone:',
':woman_artist_medium-light_skin_tone:',
':woman_artist_medium_skin_tone:',
':woman_astronaut:',
':woman_astronaut_dark_skin_tone:',
':woman_astronaut_light_skin_tone:',
':woman_astronaut_medium-dark_skin_tone:',
':woman_astronaut_medium-light_skin_tone:',
':woman_astronaut_medium_skin_tone:',
':woman_biking:',
':woman_biking_dark_skin_tone:',
':woman_biking_light_skin_tone:',
':woman_biking_medium-dark_skin_tone:',
':woman_biking_medium-light_skin_tone:',
':woman_biking_medium_skin_tone:',
':woman_bouncing_ball:',
':woman_bouncing_ball_dark_skin_tone:',
':woman_bouncing_ball_light_skin_tone:',
':woman_bouncing_ball_medium-dark_skin_tone:',
':woman_bouncing_ball_medium-light_skin_tone:',
':woman_bouncing_ball_medium_skin_tone:',
':woman_bowing:',
':woman_bowing_dark_skin_tone:',
':woman_bowing_light_skin_tone:',
':woman_bowing_medium-dark_skin_tone:',
':woman_bowing_medium-light_skin_tone:',
':woman_bowing_medium_skin_tone:',
':woman_cartwheeling:',
':woman_cartwheeling_dark_skin_tone:',
':woman_cartwheeling_light_skin_tone:',
':woman_cartwheeling_medium-dark_skin_tone:',
':woman_cartwheeling_medium-light_skin_tone:',
':woman_cartwheeling_medium_skin_tone:',
':woman_construction_worker:',
':woman_construction_worker_dark_skin_tone:',
':woman_construction_worker_light_skin_tone:',
':woman_construction_worker_medium-dark_skin_tone:',
':woman_construction_worker_medium-light_skin_tone:',
':woman_construction_worker_medium_skin_tone:',
':woman_cook:',
':woman_cook_dark_skin_tone:',
':woman_cook_light_skin_tone:',
':woman_cook_medium-dark_skin_tone:',
':woman_cook_medium-light_skin_tone:',
':woman_cook_medium_skin_tone:',
':woman_dancing:',
':woman_dancing_dark_skin_tone:',
':woman_dancing_light_skin_tone:',
':woman_dancing_medium-dark_skin_tone:',
':woman_dancing_medium-light_skin_tone:',
':woman_dancing_medium_skin_tone:',
':woman_dark_skin_tone:',
':woman_detective:',
':woman_detective_dark_skin_tone:',
':woman_detective_light_skin_tone:',
':woman_detective_medium-dark_skin_tone:',
':woman_detective_medium-light_skin_tone:',
':woman_detective_medium_skin_tone:',
':woman_facepalming:',
':woman_facepalming_dark_skin_tone:',
':woman_facepalming_light_skin_tone:',
':woman_facepalming_medium-dark_skin_tone:',
':woman_facepalming_medium-light_skin_tone:',
':woman_facepalming_medium_skin_tone:',
':woman_factory_worker:',
':woman_factory_worker_dark_skin_tone:',
':woman_factory_worker_light_skin_tone:',
':woman_factory_worker_medium-dark_skin_tone:',
':woman_factory_worker_medium-light_skin_tone:',
':woman_factory_worker_medium_skin_tone:',
':woman_farmer:',
':woman_farmer_dark_skin_tone:',
':woman_farmer_light_skin_tone:',
':woman_farmer_medium-dark_skin_tone:',
':woman_farmer_medium-light_skin_tone:',
':woman_farmer_medium_skin_tone:',
':woman_firefighter:',
':woman_firefighter_dark_skin_tone:',
':woman_firefighter_light_skin_tone:',
':woman_firefighter_medium-dark_skin_tone:',
':woman_firefighter_medium-light_skin_tone:',
':woman_firefighter_medium_skin_tone:',
':woman_frowning:',
':woman_frowning_dark_skin_tone:',
':woman_frowning_light_skin_tone:',
':woman_frowning_medium-dark_skin_tone:',
':woman_frowning_medium-light_skin_tone:',
':woman_frowning_medium_skin_tone:',
':woman_gesturing_NO:',
':woman_gesturing_NO_dark_skin_tone:',
':woman_gesturing_NO_light_skin_tone:',
':woman_gesturing_NO_medium-dark_skin_tone:',
':woman_gesturing_NO_medium-light_skin_tone:',
':woman_gesturing_NO_medium_skin_tone:',
':woman_gesturing_OK:',
':woman_gesturing_OK_dark_skin_tone:',
':woman_gesturing_OK_light_skin_tone:',
':woman_gesturing_OK_medium-dark_skin_tone:',
':woman_gesturing_OK_medium-light_skin_tone:',
':woman_gesturing_OK_medium_skin_tone:',
':woman_getting_haircut:',
':woman_getting_haircut_dark_skin_tone:',
':woman_getting_haircut_light_skin_tone:',
':woman_getting_haircut_medium-dark_skin_tone:',
':woman_getting_haircut_medium-light_skin_tone:',
':woman_getting_haircut_medium_skin_tone:',
':woman_getting_massage:',
':woman_getting_massage_dark_skin_tone:',
':woman_getting_massage_light_skin_tone:',
':woman_getting_massage_medium-dark_skin_tone:',
':woman_getting_massage_medium-light_skin_tone:',
':woman_getting_massage_medium_skin_tone:',
':woman_golfing:',
':woman_golfing_dark_skin_tone:',
':woman_golfing_light_skin_tone:',
':woman_golfing_medium-dark_skin_tone:',
':woman_golfing_medium-light_skin_tone:',
':woman_golfing_medium_skin_tone:',
':woman_guard:',
':woman_guard_dark_skin_tone:',
':woman_guard_light_skin_tone:',
':woman_guard_medium-dark_skin_tone:',
':woman_guard_medium-light_skin_tone:',
':woman_guard_medium_skin_tone:',
':woman_health_worker:',
':woman_health_worker_dark_skin_tone:',
':woman_health_worker_light_skin_tone:',
':woman_health_worker_medium-dark_skin_tone:',
':woman_health_worker_medium-light_skin_tone:',
':woman_health_worker_medium_skin_tone:',
':woman_judge:',
':woman_judge_dark_skin_tone:',
':woman_judge_light_skin_tone:',
':woman_judge_medium-dark_skin_tone:',
':woman_judge_medium-light_skin_tone:',
':woman_judge_medium_skin_tone:',
':woman_juggling:',
':woman_juggling_dark_skin_tone:',
':woman_juggling_light_skin_tone:',
':woman_juggling_medium-dark_skin_tone:',
':woman_juggling_medium-light_skin_tone:',
':woman_juggling_medium_skin_tone:',
':woman_lifting_weights:',
':woman_lifting_weights_dark_skin_tone:',
':woman_lifting_weights_light_skin_tone:',
':woman_lifting_weights_medium-dark_skin_tone:',
':woman_lifting_weights_medium-light_skin_tone:',
':woman_lifting_weights_medium_skin_tone:',
':woman_light_skin_tone:',
':woman_mechanic:',
':woman_mechanic_dark_skin_tone:',
':woman_mechanic_light_skin_tone:',
':woman_mechanic_medium-dark_skin_tone:',
':woman_mechanic_medium-light_skin_tone:',
':woman_mechanic_medium_skin_tone:',
':woman_medium-dark_skin_tone:',
':woman_medium-light_skin_tone:',
':woman_medium_skin_tone:',
':woman_mountain_biking:',
':woman_mountain_biking_dark_skin_tone:',
':woman_mountain_biking_light_skin_tone:',
':woman_mountain_biking_medium-dark_skin_tone:',
':woman_mountain_biking_medium-light_skin_tone:',
':woman_mountain_biking_medium_skin_tone:',
':woman_office_worker:',
':woman_office_worker_dark_skin_tone:',
':woman_office_worker_light_skin_tone:',
':woman_office_worker_medium-dark_skin_tone:',
':woman_office_worker_medium-light_skin_tone:',
':woman_office_worker_medium_skin_tone:',
':woman_pilot:',
':woman_pilot_dark_skin_tone:',
':woman_pilot_light_skin_tone:',
':woman_pilot_medium-dark_skin_tone:',
':woman_pilot_medium-light_skin_tone:',
':woman_pilot_medium_skin_tone:',
':woman_playing_handball:',
':woman_playing_handball_dark_skin_tone:',
':woman_playing_handball_light_skin_tone:',
':woman_playing_handball_medium-dark_skin_tone:',
':woman_playing_handball_medium-light_skin_tone:',
':woman_playing_handball_medium_skin_tone:',
':woman_playing_water_polo:',
':woman_playing_water_polo_dark_skin_tone:',
':woman_playing_water_polo_light_skin_tone:',
':woman_playing_water_polo_medium-dark_skin_tone:',
':woman_playing_water_polo_medium-light_skin_tone:',
':woman_playing_water_polo_medium_skin_tone:',
':woman_police_officer:',
':woman_police_officer_dark_skin_tone:',
':woman_police_officer_light_skin_tone:',
':woman_police_officer_medium-dark_skin_tone:',
':woman_police_officer_medium-light_skin_tone:',
':woman_police_officer_medium_skin_tone:',
':woman_pouting:',
':woman_pouting_dark_skin_tone:',
':woman_pouting_light_skin_tone:',
':woman_pouting_medium-dark_skin_tone:',
':woman_pouting_medium-light_skin_tone:',
':woman_pouting_medium_skin_tone:',
':woman_raising_hand:',
':woman_raising_hand_dark_skin_tone:',
':woman_raising_hand_light_skin_tone:',
':woman_raising_hand_medium-dark_skin_tone:',
':woman_raising_hand_medium-light_skin_tone:',
':woman_raising_hand_medium_skin_tone:',
':woman_rowing_boat:',
':woman_rowing_boat_dark_skin_tone:',
':woman_rowing_boat_light_skin_tone:',
':woman_rowing_boat_medium-dark_skin_tone:',
':woman_rowing_boat_medium-light_skin_tone:',
':woman_rowing_boat_medium_skin_tone:',
':woman_running:',
':woman_running_dark_skin_tone:',
':woman_running_light_skin_tone:',
':woman_running_medium-dark_skin_tone:',
':woman_running_medium-light_skin_tone:',
':woman_running_medium_skin_tone:',
':woman_scientist:',
':woman_scientist_dark_skin_tone:',
':woman_scientist_light_skin_tone:',
':woman_scientist_medium-dark_skin_tone:',
':woman_scientist_medium-light_skin_tone:',
':woman_scientist_medium_skin_tone:',
':woman_shrugging:',
':woman_shrugging_dark_skin_tone:',
':woman_shrugging_light_skin_tone:',
':woman_shrugging_medium-dark_skin_tone:',
':woman_shrugging_medium-light_skin_tone:',
':woman_shrugging_medium_skin_tone:',
':woman_singer:',
':woman_singer_dark_skin_tone:',
':woman_singer_light_skin_tone:',
':woman_singer_medium-dark_skin_tone:',
':woman_singer_medium-light_skin_tone:',
':woman_singer_medium_skin_tone:',
':woman_student:',
':woman_student_dark_skin_tone:',
':woman_student_light_skin_tone:',
':woman_student_medium-dark_skin_tone:',
':woman_student_medium-light_skin_tone:',
':woman_student_medium_skin_tone:',
':woman_surfing:',
':woman_surfing_dark_skin_tone:',
':woman_surfing_light_skin_tone:',
':woman_surfing_medium-dark_skin_tone:',
':woman_surfing_medium-light_skin_tone:',
':woman_surfing_medium_skin_tone:',
':woman_swimming:',
':woman_swimming_dark_skin_tone:',
':woman_swimming_light_skin_tone:',
':woman_swimming_medium-dark_skin_tone:',
':woman_swimming_medium-light_skin_tone:',
':woman_swimming_medium_skin_tone:',
':woman_teacher:',
':woman_teacher_dark_skin_tone:',
':woman_teacher_light_skin_tone:',
':woman_teacher_medium-dark_skin_tone:',
':woman_teacher_medium-light_skin_tone:',
':woman_teacher_medium_skin_tone:',
':woman_technologist:',
':woman_technologist_dark_skin_tone:',
':woman_technologist_light_skin_tone:',
':woman_technologist_medium-dark_skin_tone:',
':woman_technologist_medium-light_skin_tone:',
':woman_technologist_medium_skin_tone:',
':woman_tipping_hand:',
':woman_tipping_hand_dark_skin_tone:',
':woman_tipping_hand_light_skin_tone:',
':woman_tipping_hand_medium-dark_skin_tone:',
':woman_tipping_hand_medium-light_skin_tone:',
':woman_tipping_hand_medium_skin_tone:',
':woman_walking:',
':woman_walking_dark_skin_tone:',
':woman_walking_light_skin_tone:',
':woman_walking_medium-dark_skin_tone:',
':woman_walking_medium-light_skin_tone:',
':woman_walking_medium_skin_tone:',
':woman_wearing_turban:',
':woman_wearing_turban_dark_skin_tone:',
':woman_wearing_turban_light_skin_tone:',
':woman_wearing_turban_medium-dark_skin_tone:',
':woman_wearing_turban_medium-light_skin_tone:',
':woman_wearing_turban_medium_skin_tone:',
':woman’s_boot:',
':woman’s_clothes:',
':woman’s_hat:',
':woman’s_sandal:',
':women_with_bunny_ears_partying:',
':women_wrestling:',
':women’s_room:',
':world_map:',
':worried_face:',
':wrapped_gift:',
':wrench:',
':writing_hand:',
':writing_hand_dark_skin_tone:',
':writing_hand_light_skin_tone:',
':writing_hand_medium-dark_skin_tone:',
':writing_hand_medium-light_skin_tone:',
':writing_hand_medium_skin_tone:',
':yellow_heart:',
':yen_banknote:',
':yin_yang:',
':zipper-mouth_face:',
':zzz:',
':admission_tickets:',
':aerial_tramway:',
':airplane:',
':airplane_arriving:',
':airplane_departure:',
':alarm_clock:',
':alembic:',
':space_invader:',
':ambulance:',
':football:',
':amphora:',
':anchor:',
':anger:',
':angry:',
':anguished:',
':ant:',
':signal_strength:',
':arrows_counterclockwise:',
':aquarius:',
':aries:',
':arrow_heading_down:',
':arrow_heading_up:',
':articulated_lorry:',
':art:',
':astonished:',
':athletic_shoe:',
':atom_symbol:',
':eggplant:',
':atm:',
':car:',
':red_car:',
':baby:',
':angel:',
':baby_bottle:',
':baby_chick:',
':baby_symbol:',
':back:',
':camel:',
':badminton_racquet_and_shuttlecock:',
':baggage_claim:',
':balloon:',
':ballot_box_with_ballot:',
':ballot_box_with_check:',
':banana:',
':bank:',
':dollar:',
':euro:',
':pound:',
':yen:',
':bar_chart:',
':barber:',
':baseball:',
':basketball:',
':bath:',
':bathtub:',
':battery:',
':beach_with_umbrella:',
':bear:',
':heartbeat:',
':bed:',
':beer:',
':bell:',
':no_bell:',
':bellhop_bell:',
':bento:',
':bike:',
':bicyclist:',
':bikini:',
':8ball:',
':biohazard_sign:',
':bird:',
':birthday:',
':black_circle_for_record:',
':clubs:',
':diamonds:',
':arrow_double_down:',
':hearts:',
':black_large_square:',
':rewind:',
':black_left__pointing_double_triangle_with_vertical_bar:',
':arrow_backward:',
':black_medium_small_square:',
':black_medium_square:',
':black_nib:',
':question:',
':fast_forward:',
':black_right__pointing_double_triangle_with_vertical_bar:',
':arrow_forward:',
':black_right__pointing_triangle_with_double_vertical_bar:',
':arrow_right:',
':scissors:',
':black_small_square:',
':spades:',
':black_square_button:',
':black_square_for_stop:',
':sunny:',
':phone:',
':telephone:',
':recycle:',
':arrow_double_up:',
':blossom:',
':blowfish:',
':blue_book:',
':blue_heart:',
':boar:',
':bomb:',
':bookmark:',
':bookmark_tabs:',
':books:',
':bottle_with_popping_cork:',
':bouquet:',
':bow_and_arrow:',
':bowling:',
':boy:',
':bread:',
':bride_with_veil:',
':bridge_at_night:',
':briefcase:',
':broken_heart:',
':bug:',
':building_construction:',
':burrito:',
':bus:',
':busstop:',
':bust_in_silhouette:',
':busts_in_silhouette:',
':cactus:',
':date:',
':camera:',
':camera_with_flash:',
':camping:',
':cancer:',
':candle:',
':candy:',
':capricorn:',
':card_file_box:',
':card_index:',
':card_index_dividers:',
':carousel_horse:',
':flags:',
':cat2:',
':cat:',
':joy_cat:',
':smirk_cat:',
':chains:',
':chart_with_downwards_trend:',
':chart_with_upwards_trend:',
':chart:',
':mega:',
':cheese_wedge:',
':checkered_flag:',
':cherries:',
':cherry_blossom:',
':chestnut:',
':chicken:',
':children_crossing:',
':chipmunk:',
':chocolate_bar:',
':christmas_tree:',
':church:',
':cinema:',
':accept:',
':ideograph_advantage:',
':congratulations:',
':secret:',
':m:',
':circus_tent:',
':cityscape:',
':city_sunset:',
':clapper:',
':clap:',
':classical_building:',
':beers:',
':clipboard:',
':clock830:',
':clock8:',
':clock1130:',
':clock11:',
':clock530:',
':clock5:',
':clock430:',
':clock4:',
':clock930:',
':clock9:',
':clock130:',
':clock1:',
':clock730:',
':clock7:',
':clock630:',
':clock6:',
':clock1030:',
':clock10:',
':clock330:',
':clock3:',
':clock1230:',
':clock12:',
':clock230:',
':clock2:',
':arrows_clockwise:',
':repeat:',
':repeat_one:',
':closed_book:',
':closed_lock_with_key:',
':mailbox_closed:',
':mailbox:',
':closed_umbrella:',
':cloud:',
':cloud_with_lightning:',
':cloud_with_rain:',
':cloud_with_snow:',
':cloud_with_tornado:',
':cocktail:',
':coffin:',
':boom:',
':collision:',
':comet:',
':compression:',
':confetti_ball:',
':confounded:',
':confused:',
':construction:',
':construction_worker:',
':control_knobs:',
':convenience_store:',
':rice:',
':cookie:',
':egg:',
':copyright:',
':couch_and_lamp:',
':couple_with_heart:',
':cow2:',
':cow:',
':crab:',
':credit_card:',
':crescent_moon:',
':cricket_bat_and_ball:',
':crocodile:',
':x:',
':crossed_flags:',
':crossed_swords:',
':crown:',
':crying_cat_face:',
':cry:',
':crystal_ball:',
':curly_loop:',
':currency_exchange:',
':curry:',
':custard:',
':customs:',
':cyclone:',
':dagger_knife:',
':dancer:',
':dango:',
':dark_sunglasses:',
':dash:',
':deciduous_tree:',
':truck:',
':department_store:',
':derelict_house_building:',
':desert:',
':desert_island:',
':desktop_computer:',
':diamond_shape_with_a_dot_inside:',
':dart:',
':disappointed_relieved:',
':disappointed:',
':dizzy_face:',
':dizzy:',
':do_not_litter:',
':dog2:',
':dog:',
':dolphin:',
':flipper:',
':door:',
':loop:',
':bangbang:',
':double_vertical_bar:',
':doughnut:',
':dove_of_peace:',
':small_red_triangle_down:',
':arrow_down_small:',
':arrow_down:',
':dragon:',
':dragon_face:',
':dress:',
':dromedary_camel:',
':droplet:',
':dvd:',
':e__mail:',
':ear:',
':corn:',
':ear_of_rice:',
':earth_americas:',
':earth_asia:',
':earth_africa:',
':eight_pointed_black_star:',
':eight_spoked_asterisk:',
':eject_symbol:',
':bulb:',
':electric_plug:',
':flashlight:',
':elephant:',
':emoji_modifier_fitzpatrick_type__1__2:',
':emoji_modifier_fitzpatrick_type__3:',
':emoji_modifier_fitzpatrick_type__4:',
':emoji_modifier_fitzpatrick_type__5:',
':emoji_modifier_fitzpatrick_type__6:',
':end:',
':email:',
':envelope:',
':envelope_with_arrow:',
':european_castle:',
':european_post_office:',
':evergreen_tree:',
':interrobang:',
':expressionless:',
':alien:',
':eye:',
':eyeglasses:',
':eyes:',
':massage:',
':yum:',
':scream:',
':kissing_heart:',
':sweat:',
':face_with_head__bandage:',
':triumph:',
':mask:',
':no_good:',
':ok_woman:',
':open_mouth:',
':cold_sweat:',
':face_with_rolling_eyes:',
':stuck_out_tongue:',
':stuck_out_tongue_closed_eyes:',
':stuck_out_tongue_winking_eye:',
':joy:',
':face_with_thermometer:',
':no_mouth:',
':factory:',
':fallen_leaf:',
':family:',
':santa:',
':fax:',
':fearful:',
':ferris_wheel:',
':ferry:',
':field_hockey_stick_and_ball:',
':file_cabinet:',
':file_folder:',
':film_frames:',
':film_projector:',
':fire:',
':fire_engine:',
':sparkler:',
':fireworks:',
':first_quarter_moon:',
':first_quarter_moon_with_face:',
':fish:',
':fish_cake:',
':fishing_pole_and_fish:',
':facepunch:',
':punch:',
':flag_for_Afghanistan:',
':flag_for_Albania:',
':flag_for_Algeria:',
':flag_for_American_Samoa:',
':flag_for_Andorra:',
':flag_for_Angola:',
':flag_for_Anguilla:',
':flag_for_Antarctica:',
':flag_for_Antigua_&_Barbuda:',
':flag_for_Argentina:',
':flag_for_Armenia:',
':flag_for_Aruba:',
':flag_for_Ascension_Island:',
':flag_for_Australia:',
':flag_for_Austria:',
':flag_for_Azerbaijan:',
':flag_for_Bahamas:',
':flag_for_Bahrain:',
':flag_for_Bangladesh:',
':flag_for_Barbados:',
':flag_for_Belarus:',
':flag_for_Belgium:',
':flag_for_Belize:',
':flag_for_Benin:',
':flag_for_Bermuda:',
':flag_for_Bhutan:',
':flag_for_Bolivia:',
':flag_for_Bosnia_&_Herzegovina:',
':flag_for_Botswana:',
':flag_for_Bouvet_Island:',
':flag_for_Brazil:',
':flag_for_British_Indian_Ocean_Territory:',
':flag_for_British_Virgin_Islands:',
':flag_for_Brunei:',
':flag_for_Bulgaria:',
':flag_for_Burkina_Faso:',
':flag_for_Burundi:',
':flag_for_Cambodia:',
':flag_for_Cameroon:',
':flag_for_Canada:',
':flag_for_Canary_Islands:',
':flag_for_Cape_Verde:',
':flag_for_Caribbean_Netherlands:',
':flag_for_Cayman_Islands:',
':flag_for_Central_African_Republic:',
':flag_for_Ceuta_&_Melilla:',
':flag_for_Chad:',
':flag_for_Chile:',
':flag_for_China:',
':flag_for_Christmas_Island:',
':flag_for_Clipperton_Island:',
':flag_for_Cocos__Islands:',
':flag_for_Colombia:',
':flag_for_Comoros:',
':flag_for_Congo____Brazzaville:',
':flag_for_Congo____Kinshasa:',
':flag_for_Cook_Islands:',
':flag_for_Costa_Rica:',
':flag_for_Croatia:',
':flag_for_Cuba:',
':flag_for_Curaçao:',
':flag_for_Cyprus:',
':flag_for_Czech_Republic:',
':flag_for_Côte_d’Ivoire:',
':flag_for_Denmark:',
':flag_for_Diego_Garcia:',
':flag_for_Djibouti:',
':flag_for_Dominica:',
':flag_for_Dominican_Republic:',
':flag_for_Ecuador:',
':flag_for_Egypt:',
':flag_for_El_Salvador:',
':flag_for_Equatorial_Guinea:',
':flag_for_Eritrea:',
':flag_for_Estonia:',
':flag_for_Ethiopia:',
':flag_for_European_Union:',
':flag_for_Falkland_Islands:',
':flag_for_Faroe_Islands:',
':flag_for_Fiji:',
':flag_for_Finland:',
':flag_for_France:',
':flag_for_French_Guiana:',
':flag_for_French_Polynesia:',
':flag_for_French_Southern_Territories:',
':flag_for_Gabon:',
':flag_for_Gambia:',
':flag_for_Georgia:',
':flag_for_Germany:',
':flag_for_Ghana:',
':flag_for_Gibraltar:',
':flag_for_Greece:',
':flag_for_Greenland:',
':flag_for_Grenada:',
':flag_for_Guadeloupe:',
':flag_for_Guam:',
':flag_for_Guatemala:',
':flag_for_Guernsey:',
':flag_for_Guinea:',
':flag_for_Guinea__Bissau:',
':flag_for_Guyana:',
':flag_for_Haiti:',
':flag_for_Heard_&_McDonald_Islands:',
':flag_for_Honduras:',
':flag_for_Hong_Kong:',
':flag_for_Hungary:',
':flag_for_Iceland:',
':flag_for_India:',
':flag_for_Indonesia:',
':flag_for_Iran:',
':flag_for_Iraq:',
':flag_for_Ireland:',
':flag_for_Isle_of_Man:',
':flag_for_Israel:',
':flag_for_Italy:',
':flag_for_Jamaica:',
':flag_for_Japan:',
':flag_for_Jersey:',
':flag_for_Jordan:',
':flag_for_Kazakhstan:',
':flag_for_Kenya:',
':flag_for_Kiribati:',
':flag_for_Kosovo:',
':flag_for_Kuwait:',
':flag_for_Kyrgyzstan:',
':flag_for_Laos:',
':flag_for_Latvia:',
':flag_for_Lebanon:',
':flag_for_Lesotho:',
':flag_for_Liberia:',
':flag_for_Libya:',
':flag_for_Liechtenstein:',
':flag_for_Lithuania:',
':flag_for_Luxembourg:',
':flag_for_Macau:',
':flag_for_Macedonia:',
':flag_for_Madagascar:',
':flag_for_Malawi:',
':flag_for_Malaysia:',
':flag_for_Maldives:',
':flag_for_Mali:',
':flag_for_Malta:',
':flag_for_Marshall_Islands:',
':flag_for_Martinique:',
':flag_for_Mauritania:',
':flag_for_Mauritius:',
':flag_for_Mayotte:',
':flag_for_Mexico:',
':flag_for_Micronesia:',
':flag_for_Moldova:',
':flag_for_Monaco:',
':flag_for_Mongolia:',
':flag_for_Montenegro:',
':flag_for_Montserrat:',
':flag_for_Morocco:',
':flag_for_Mozambique:',
':flag_for_Myanmar:',
':flag_for_Namibia:',
':flag_for_Nauru:',
':flag_for_Nepal:',
':flag_for_Netherlands:',
':flag_for_New_Caledonia:',
':flag_for_New_Zealand:',
':flag_for_Nicaragua:',
':flag_for_Niger:',
':flag_for_Nigeria:',
':flag_for_Niue:',
':flag_for_Norfolk_Island:',
':flag_for_North_Korea:',
':flag_for_Northern_Mariana_Islands:',
':flag_for_Norway:',
':flag_for_Oman:',
':flag_for_Pakistan:',
':flag_for_Palau:',
':flag_for_Palestinian_Territories:',
':flag_for_Panama:',
':flag_for_Papua_New_Guinea:',
':flag_for_Paraguay:',
':flag_for_Peru:',
':flag_for_Philippines:',
':flag_for_Pitcairn_Islands:',
':flag_for_Poland:',
':flag_for_Portugal:',
':flag_for_Puerto_Rico:',
':flag_for_Qatar:',
':flag_for_Romania:',
':flag_for_Russia:',
':flag_for_Rwanda:',
':flag_for_Réunion:',
':flag_for_Samoa:',
':flag_for_San_Marino:',
':flag_for_Saudi_Arabia:',
':flag_for_Senegal:',
':flag_for_Serbia:',
':flag_for_Seychelles:',
':flag_for_Sierra_Leone:',
':flag_for_Singapore:',
':flag_for_Sint_Maarten:',
':flag_for_Slovakia:',
':flag_for_Slovenia:',
':flag_for_Solomon_Islands:',
':flag_for_Somalia:',
':flag_for_South_Africa:',
':flag_for_South_Georgia_&_South_Sandwich_Islands:',
':flag_for_South_Korea:',
':flag_for_South_Sudan:',
':flag_for_Spain:',
':flag_for_Sri_Lanka:',
':flag_for_St._Barthélemy:',
':flag_for_St._Helena:',
':flag_for_St._Kitts_&_Nevis:',
':flag_for_St._Lucia:',
':flag_for_St._Martin:',
':flag_for_St._Pierre_&_Miquelon:',
':flag_for_St._Vincent_&_Grenadines:',
':flag_for_Sudan:',
':flag_for_Suriname:',
':flag_for_Svalbard_&_Jan_Mayen:',
':flag_for_Swaziland:',
':flag_for_Sweden:',
':flag_for_Switzerland:',
':flag_for_Syria:',
':flag_for_São_Tomé_&_Príncipe:',
':flag_for_Taiwan:',
':flag_for_Tajikistan:',
':flag_for_Tanzania:',
':flag_for_Thailand:',
':flag_for_Timor__Leste:',
':flag_for_Togo:',
':flag_for_Tokelau:',
':flag_for_Tonga:',
':flag_for_Trinidad_&_Tobago:',
':flag_for_Tristan_da_Cunha:',
':flag_for_Tunisia:',
':flag_for_Turkey:',
':flag_for_Turkmenistan:',
':flag_for_Turks_&_Caicos_Islands:',
':flag_for_Tuvalu:',
':flag_for_U.S._Outlying_Islands:',
':flag_for_U.S._Virgin_Islands:',
':flag_for_Uganda:',
':flag_for_Ukraine:',
':flag_for_United_Arab_Emirates:',
':flag_for_United_Kingdom:',
':flag_for_United_States:',
':flag_for_Uruguay:',
':flag_for_Uzbekistan:',
':flag_for_Vanuatu:',
':flag_for_Vatican_City:',
':flag_for_Venezuela:',
':flag_for_Vietnam:',
':flag_for_Wallis_&_Futuna:',
':flag_for_Western_Sahara:',
':flag_for_Yemen:',
':flag_for_Zambia:',
':flag_for_Zimbabwe:',
':flag_for_Åland_Islands:',
':golf:',
':fleur__de__lis:',
':muscle:',
':floppy_disk:',
':flower_playing_cards:',
':flushed:',
':fog:',
':foggy:',
':footprints:',
':fork_and_knife:',
':fork_and_knife_with_plate:',
':fountain:',
':four_leaf_clover:',
':frame_with_picture:',
':fries:',
':fried_shrimp:',
':frog:',
':hatched_chick:',
':frowning:',
':fuelpump:',
':full_moon:',
':full_moon_with_face:',
':funeral_urn:',
':game_die:',
':gear:',
':gem:',
':gemini:',
':ghost:',
':girl:',
':globe_with_meridians:',
':star2:',
':goat:',
':golfer:',
':mortar_board:',
':grapes:',
':green_apple:',
':green_book:',
':green_heart:',
':grimacing:',
':smile_cat:',
':grinning:',
':grin:',
':heartpulse:',
':guardsman:',
':guitar:',
':haircut:',
':hamburger:',
':hammer:',
':hammer_and_pick:',
':hammer_and_wrench:',
':hamster:',
':handbag:',
':raising_hand:',
':hatching_chick:',
':headphones:',
':hear_no_evil:',
':heart_decoration:',
':cupid:',
':gift_heart:',
':heart:',
':heavy_check_mark:',
':heavy_division_sign:',
':heavy_dollar_sign:',
':exclamation:',
':heavy_exclamation_mark:',
':heavy_heart_exclamation_mark_ornament:',
':o:',
':heavy_minus_sign:',
':heavy_multiplication_x:',
':heavy_plus_sign:',
':helicopter:',
':helm_symbol:',
':helmet_with_white_cross:',
':herb:',
':hibiscus:',
':high_heel:',
':bullettrain_side:',
':bullettrain_front:',
':high_brightness:',
':zap:',
':hocho:',
':knife:',
':hole:',
':honey_pot:',
':bee:',
':traffic_light:',
':racehorse:',
':horse:',
':horse_racing:',
':hospital:',
':coffee:',
':hot_dog:',
':hot_pepper:',
':hotsprings:',
':hotel:',
':hourglass:',
':hourglass_flowing_sand:',
':house:',
':house_buildings:',
':house_with_garden:',
':hugging_face:',
':100:',
':hushed:',
':ice_cream:',
':ice_hockey_stick_and_puck:',
':ice_skate:',
':imp:',
':inbox_tray:',
':incoming_envelope:',
':information_desk_person:',
':information_source:',
':capital_abcd:',
':abc:',
':abcd:',
':1234:',
':symbols:',
':izakaya_lantern:',
':lantern:',
':jack_o_lantern:',
':japanese_castle:',
':dolls:',
':japanese_goblin:',
':japanese_ogre:',
':post_office:',
':beginner:',
':jeans:',
':joystick:',
':kaaba:',
':key:',
':keyboard:',
':keycap_asterisk:',
':keycap_digit_eight:',
':keycap_digit_five:',
':keycap_digit_four:',
':keycap_digit_nine:',
':keycap_digit_one:',
':keycap_digit_seven:',
':keycap_digit_six:',
':keycap_digit_three:',
':keycap_digit_two:',
':keycap_digit_zero:',
':keycap_number_sign:',
':keycap_ten:',
':kimono:',
':couplekiss:',
':kiss:',
':kissing_cat:',
':kissing:',
':kissing_closed_eyes:',
':kissing_smiling_eyes:',
':koala:',
':label:',
':beetle:',
':large_blue_circle:',
':large_blue_diamond:',
':large_orange_diamond:',
':red_circle:',
':last_quarter_moon:',
':last_quarter_moon_with_face:',
':latin_cross:',
':leaves:',
':ledger:',
':mag:',
':left_luggage:',
':left_right_arrow:',
':leftwards_arrow_with_hook:',
':arrow_left:',
':lemon:',
':leo:',
':leopard:',
':level_slider:',
':libra:',
':light_rail:',
':link:',
':linked_paperclips:',
':lion_face:',
':lipstick:',
':lock:',
':lock_with_ink_pen:',
':lollipop:',
':sob:',
':love_hotel:',
':love_letter:',
':low_brightness:',
':lower_left_ballpoint_pen:',
':lower_left_crayon:',
':lower_left_fountain_pen:',
':lower_left_paintbrush:',
':mahjong:',
':man:',
':couple:',
':man_in_business_suit_levitating:',
':man_with_gua_pi_mao:',
':man_with_turban:',
':mans_shoe:',
':shoe:',
':mantelpiece_clock:',
':maple_leaf:',
':meat_on_bone:',
':black_circle:',
':white_circle:',
':melon:',
':memo:',
':pencil:',
':menorah_with_nine_branches:',
':mens:',
':metro:',
':microphone:',
':microscope:',
':military_medal:',
':milky_way:',
':minibus:',
':minidisc:',
':iphone:',
':mobile_phone_off:',
':calling:',
':money__mouth_face:',
':moneybag:',
':money_with_wings:',
':monkey:',
':monkey_face:',
':monorail:',
':rice_scene:',
':mosque:',
':motor_boat:',
':motorway:',
':mount_fuji:',
':mountain:',
':mountain_bicyclist:',
':mountain_cableway:',
':mountain_railway:',
':mouse2:',
':mouse:',
':lips:',
':movie_camera:',
':moyai:',
':notes:',
':mushroom:',
':musical_keyboard:',
':musical_note:',
':musical_score:',
':nail_care:',
':name_badge:',
':national_park:',
':necktie:',
':ab:',
':negative_squared_cross_mark:',
':a:',
':b:',
':o2:',
':parking:',
':nerd_face:',
':neutral_face:',
':new_moon:',
':honeybee:',
':new_moon_with_face:',
':newspaper:',
':night_with_stars:',
':no_bicycles:',
':no_entry:',
':no_entry_sign:',
':no_mobile_phones:',
':underage:',
':no_pedestrians:',
':no_smoking:',
':non__potable_water:',
':arrow_upper_right:',
':arrow_upper_left:',
':nose:',
':notebook:',
':notebook_with_decorative_cover:',
':nut_and_bolt:',
':octopus:',
':oden:',
':office:',
':oil_drum:',
':ok_hand:',
':old_key:',
':older_man:',
':older_woman:',
':om_symbol:',
':on:',
':oncoming_automobile:',
':oncoming_bus:',
':oncoming_police_car:',
':oncoming_taxi:',
':book:',
':open_book:',
':open_file_folder:',
':open_hands:',
':unlock:',
':mailbox_with_no_mail:',
':mailbox_with_mail:',
':ophiuchus:',
':cd:',
':orange_book:',
':orthodox_cross:',
':outbox_tray:',
':ox:',
':package:',
':page_facing_up:',
':page_with_curl:',
':pager:',
':palm_tree:',
':panda_face:',
':paperclip:',
':part_alternation_mark:',
':tada:',
':passenger_ship:',
':passport_control:',
':feet:',
':paw_prints:',
':peace_symbol:',
':peach:',
':pear:',
':walking:',
':pencil2:',
':penguin:',
':pensive:',
':performing_arts:',
':persevere:',
':bow:',
':person_frowning:',
':raised_hands:',
':person_with_ball:',
':person_with_blond_hair:',
':pray:',
':person_with_pouting_face:',
':computer:',
':pick:',
':pig2:',
':pig:',
':pig_nose:',
':hankey:',
':poop:',
':shit:',
':pill:',
':bamboo:',
':pineapple:',
':pisces:',
':gun:',
':place_of_worship:',
':black_joker:',
':police_car:',
':rotating_light:',
':cop:',
':poodle:',
':popcorn:',
':postal_horn:',
':postbox:',
':stew:',
':potable_water:',
':pouch:',
':poultry_leg:',
':pouting_cat:',
':rage:',
':prayer_beads:',
':princess:',
':printer:',
':loudspeaker:',
':purple_heart:',
':purse:',
':pushpin:',
':put_litter_in_its_place:',
':rabbit2:',
':rabbit:',
':racing_car:',
':racing_motorcycle:',
':radio:',
':radio_button:',
':radioactive_sign:',
':railway_car:',
':railway_track:',
':rainbow:',
':fist:',
':hand:',
':raised_hand:',
':raised_hand_with_fingers_splayed:',
':raised_hand_with_part_between_middle_and_ring_fingers:',
':ram:',
':rat:',
':blue_car:',
':apple:',
':registered:',
':relieved:',
':reminder_ribbon:',
':restroom:',
':reversed_hand_with_middle_finger_extended:',
':revolving_hearts:',
':ribbon:',
':rice_ball:',
':rice_cracker:',
':mag_right:',
':right_anger_bubble:',
':arrow_right_hook:',
':ring:',
':sweet_potato:',
':robot_face:',
':rocket:',
':rolled__up_newspaper:',
':roller_coaster:',
':rooster:',
':rose:',
':rosette:',
':round_pushpin:',
':rowboat:',
':rugby_football:',
':runner:',
':running:',
':running_shirt_with_sash:',
':sagittarius:',
':boat:',
':sailboat:',
':sake:',
':satellite:',
':saxophone:',
':scales:',
':school:',
':school_satchel:',
':scorpion:',
':scorpius:',
':scroll:',
':seat:',
':see_no_evil:',
':seedling:',
':shamrock:',
':shaved_ice:',
':sheep:',
':shield:',
':shinto_shrine:',
':ship:',
':stars:',
':shopping_bags:',
':cake:',
':shower:',
':sign_of_the_horns:',
':japan:',
':six_pointed_star:',
':ski:',
':skier:',
':skull:',
':skull_and_crossbones:',
':sleeping_accommodation:',
':sleeping:',
':zzz:',
':sleepy:',
':sleuth_or_spy:',
':pizza:',
':slightly_frowning_face:',
':slightly_smiling_face:',
':slot_machine:',
':small_airplane:',
':small_blue_diamond:',
':small_orange_diamond:',
':heart_eyes_cat:',
':smiley_cat:',
':innocent:',
':heart_eyes:',
':smiling_imp:',
':smiley:',
':sweat_smile:',
':smile:',
':laughing:',
':satisfied:',
':blush:',
':sunglasses:',
':smirk:',
':smoking:',
':snail:',
':snake:',
':snow_capped_mountain:',
':snowboarder:',
':snowflake:',
':snowman:',
':soccer:',
':icecream:',
':soon:',
':arrow_lower_right:',
':arrow_lower_left:',
':spaghetti:',
':sparkle:',
':sparkles:',
':sparkling_heart:',
':speak_no_evil:',
':speaker:',
':mute:',
':sound:',
':loud_sound:',
':speaking_head_in_silhouette:',
':speech_balloon:',
':speedboat:',
':spider:',
':spider_web:',
':spiral_calendar_pad:',
':spiral_note_pad:',
':shell:',
':sweat_drops:',
':sports_medal:',
':whale:',
':u5272:',
':u5408:',
':u55b6:',
':u6307:',
':u6708:',
':u6709:',
':u6e80:',
':u7121:',
':u7533:',
':u7981:',
':u7a7a:',
':cl:',
':cool:',
':free:',
':id:',
':koko:',
':sa:',
':new:',
':ng:',
':ok:',
':sos:',
':up:',
':vs:',
':stadium:',
':star_and_crescent:',
':star_of_david:',
':station:',
':statue_of_liberty:',
':steam_locomotive:',
':ramen:',
':stopwatch:',
':straight_ruler:',
':strawberry:',
':studio_microphone:',
':partly_sunny:',
':sun_with_face:',
':sunflower:',
':sunrise:',
':sunrise_over_mountains:',
':city_sunrise:',
':surfer:',
':sushi:',
':suspension_railway:',
':swimmer:',
':synagogue:',
':syringe:',
':shirt:',
':tshirt:',
':table_tennis_paddle_and_ball:',
':taco:',
':tanabata_tree:',
':tangerine:',
':taurus:',
':taxi:',
':tea:',
':calendar:',
':telephone_receiver:',
':telescope:',
':tv:',
':tennis:',
':tent:',
':thermometer:',
':thinking_face:',
':thought_balloon:',
':three_button_mouse:',
':+1:',
':thumbsup:',
':__1:',
':thumbsdown:',
':thunder_cloud_and_rain:',
':ticket:',
':tiger2:',
':tiger:',
':timer_clock:',
':tired_face:',
':toilet:',
':tokyo_tower:',
':tomato:',
':tongue:',
':tophat:',
':top:',
':trackball:',
':tractor:',
':tm:',
':train2:',
':tram:',
':train:',
':triangular_flag_on_post:',
':triangular_ruler:',
':trident:',
':trolleybus:',
':trophy:',
':tropical_drink:',
':tropical_fish:',
':trumpet:',
':tulip:',
':turkey:',
':turtle:',
':twisted_rightwards_arrows:',
':two_hearts:',
':two_men_holding_hands:',
':two_women_holding_hands:',
':umbrella:',
':umbrella_on_ground:',
':unamused:',
':unicorn_face:',
':small_red_triangle:',
':arrow_up_small:',
':arrow_up_down:',
':upside__down_face:',
':arrow_up:',
':vertical_traffic_light:',
':vibration_mode:',
':v:',
':video_camera:',
':video_game:',
':vhs:',
':violin:',
':virgo:',
':volcano:',
':volleyball:',
':waning_crescent_moon:',
':waning_gibbous_moon:',
':warning:',
':wastebasket:',
':watch:',
':water_buffalo:',
':wc:',
':ocean:',
':watermelon:',
':waving_black_flag:',
':wave:',
':waving_white_flag:',
':wavy_dash:',
':waxing_crescent_moon:',
':moon:',
':waxing_gibbous_moon:',
':scream_cat:',
':weary:',
':wedding:',
':weight_lifter:',
':whale2:',
':wheel_of_dharma:',
':wheelchair:',
':point_down:',
':grey_exclamation:',
':white_flower:',
':white_frowning_face:',
':white_check_mark:',
':white_large_square:',
':point_left:',
':white_medium_small_square:',
':white_medium_square:',
':star:',
':grey_question:',
':point_right:',
':white_small_square:',
':relaxed:',
':white_square_button:',
':white_sun_behind_cloud:',
':white_sun_behind_cloud_with_rain:',
':white_sun_with_small_cloud:',
':point_up_2:',
':point_up:',
':wind_blowing_face:',
':wind_chime:',
':wine_glass:',
':wink:',
':wolf:',
':woman:',
':dancers:',
':boot:',
':womans_clothes:',
':womans_hat:',
':sandal:',
':womens:',
':world_map:',
':worried:',
':gift:',
':wrench:',
':writing_hand:',
':yellow_heart:',
':yin_yang:',
':zipper__mouth_face:',
]
| 0 | 0 | 0 |
d31af86c40fba5fb7d0eda60023797aec4514b11 | 1,686 | py | Python | app.py | selmargoulart08/dolarparareal | 8824b628d668037cf2981db9107d4e75d8a23ff9 | [
"MIT"
] | 2 | 2022-03-04T16:13:12.000Z | 2022-03-04T21:21:37.000Z | app.py | selmargoulart08/dolarparareal | 8824b628d668037cf2981db9107d4e75d8a23ff9 | [
"MIT"
] | null | null | null | app.py | selmargoulart08/dolarparareal | 8824b628d668037cf2981db9107d4e75d8a23ff9 | [
"MIT"
] | null | null | null | from flask import Flask,render_template,request
import requests
app = Flask(__name__)
API_KEY = 'RQM7GIDWT0ZU2WLU'
@app.route('/',methods=['GET','POST'])
if __name__ == "__main__":
app.run(debug= False) | 45.567568 | 156 | 0.603203 | from flask import Flask,render_template,request
import requests
app = Flask(__name__)
API_KEY = 'RQM7GIDWT0ZU2WLU'
@app.route('/',methods=['GET','POST'])
def home():
    """Render the converter page; on POST, perform the currency conversion.

    Reads ``amount``, ``from_c`` and ``to_c`` from the submitted form, asks
    the Alpha Vantage realtime exchange-rate endpoint for the current rate,
    and re-renders ``home.html`` with the converted value plus the currency
    codes/names and the quote timestamp.  Any failure (non-numeric amount,
    network error, unexpected payload shape) is reported as a plain
    "Bad Request" page.
    """
    if request.method != 'POST':
        # Plain GET: just show the empty conversion form.
        return render_template('home.html')
    try:
        amount = float(request.form['amount'])
        from_c = request.form['from_c']
        to_c = request.form['to_c']
        url = 'https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency={}&to_currency={}&apikey={}'.format(from_c,to_c,API_KEY)
        payload = requests.get(url=url).json()
        exchange = payload['Realtime Currency Exchange Rate']
        rate = float(exchange['5. Exchange Rate'])
        converted = rate * amount
        return render_template('home.html', result=round(converted,2), amount=amount,
                               from_c_code=exchange['1. From_Currency Code'],
                               from_c_name=exchange['2. From_Currency Name'],
                               to_c_code=exchange['3. To_Currency Code'],
                               to_c_name=exchange['4. To_Currency Name'],
                               time=exchange['6. Last Refreshed'])
    except Exception as e:
        # NOTE(review): interpolating the raw exception text into HTML is a
        # potential XSS vector if form values appear in the message — confirm
        # before exposing publicly.
        return '<h1>Bad Request : {}</h1>'.format(e)
if __name__ == "__main__":
app.run(debug= False) | 1,441 | 0 | 23 |
0ae14a3b6881e84b6465b5b3418017ee6a55395e | 4,134 | py | Python | je_verification_code/modules/generate.py | JE-Chen/Python_Generate_Verification_Code | e26869ce778b682ef098b4b4c41f9a85bdf85f97 | [
"MIT"
] | 3 | 2020-12-21T03:59:09.000Z | 2020-12-30T07:27:49.000Z | je_verification_code/modules/generate.py | JE-Chen/Python_Generate_Verification_Code | e26869ce778b682ef098b4b4c41f9a85bdf85f97 | [
"MIT"
] | null | null | null | je_verification_code/modules/generate.py | JE-Chen/Python_Generate_Verification_Code | e26869ce778b682ef098b4b4c41f9a85bdf85f97 | [
"MIT"
] | null | null | null | import base64
import os
import random
from io import BytesIO
import matplotlib.font_manager as fm
from PIL import Image, ImageDraw, ImageFont
| 33.33871 | 102 | 0.577891 | import base64
import os
import random
from io import BytesIO
import matplotlib.font_manager as fm
from PIL import Image, ImageDraw, ImageFont
class GenerateVerificationCode:
    """Builds CAPTCHA-style verification-code images.

    The image holds a random string of digits and lowercase letters drawn
    in random colours, overlaid with random lines, points and arcs as
    noise; it can be exported as a base64 ``data:`` URI for embedding in
    HTML (see :meth:`generate_base64_image`).
    """
    @staticmethod
    def generate_color(color_r: int = 255, color_g: int = 255, color_b: int = 255):
        """Return a random RGB tuple.

        Each channel is drawn uniformly from ``[0, bound]`` (inclusive).
        :param color_r: upper bound for the red channel
        :param color_g: upper bound for the green channel
        :param color_b: upper bound for the blue channel
        :return: ``(r, g, b)`` tuple
        """
        return random.randint(0, color_r), random.randint(0, color_g), random.randint(0, color_b)
    def generate_picture(self, picture_width: int = 175, picture_height: int = 55):
        """Create a blank RGB image filled with one random colour.

        :param picture_width: image width in pixels
        :param picture_height: image height in pixels
        :return: new :class:`PIL.Image.Image` used as the code background
        """
        return Image.new('RGB', (picture_width, picture_height), self.generate_color())
    @staticmethod
    def generate_string():
        """Return one random character: a digit '0'-'9' or letter 'a'-'z'.

        First builds one candidate of each kind, then picks one, so digits
        and lowercase letters are equally likely overall.
        """
        num = str(random.randint(0, 9))
        # 97..122 is the ASCII range for 'a'..'z'.
        low_alpha = chr(random.randint(97, 122))
        return random.choice([num, low_alpha])
    def generate_code_only_string(self, count: int):
        """Return a random code string without rendering any image.

        :param count: number of characters to generate
        :return: the code as a plain string
        """
        temp = []
        for i in range(count):
            chars = self.generate_string()
            temp.append(chars)
        valid = "".join(temp)
        return valid
    def generate_code(self, count: int, image, font_size: int):
        """Draw a random code of *count* characters onto *image*.

        :param count: how many characters to draw
        :param image: PIL image to draw on (mutated in place)
        :param font_size: point size of the TrueType font
        :return: ``(code_string, image)`` tuple
        """
        draw = ImageDraw.Draw(image)
        # Relative path: resolved against the current working directory.
        font_file = os.path.join('arial.ttf')
        try:
            font = ImageFont.truetype(font_file, size=font_size)
        except OSError:
            # Arial not available (e.g. on Linux): fall back to DejaVu Sans
            # located through matplotlib's font manager.
            font = ImageFont.truetype(fm.findfont(fm.FontProperties(family='DejaVu Sans')), font_size)
        temp = []
        for i in range(count):
            chars = self.generate_string()
            # Fixed 30px horizontal spacing per character, 10px left margin.
            draw.text((10 + i * 30, -2), chars, self.generate_color(), font)
            temp.append(chars)
        valid = "".join(temp)
        return valid, image
    def generate_noise(self, image,
                       picture_width: int = 175,
                       picture_height: int = 55,
                       line_count: int = 3,
                       point_count: int = 15):
        """Overlay random lines, points and small arcs on *image*.

        :param image: PIL image to draw noise on (mutated in place)
        :param picture_width: width used as the random-coordinate bound
        :param picture_height: height used as the random-coordinate bound
        :param line_count: number of random lines to draw
        :param point_count: number of random point+arc pairs to draw
        :return: the same image, after noise has been added
        """
        draw = ImageDraw.Draw(image)
        # draw Line
        for i in range(line_count):
            x1 = random.randint(0, picture_width)
            x2 = random.randint(0, picture_width)
            y1 = random.randint(0, picture_height)
            y2 = random.randint(0, picture_height)
            draw.line((x1, y1, x2, y2), fill=self.generate_color())
        # draw Point
        for point in range(point_count):
            draw.point([random.randint(0, picture_width), random.randint(0, picture_height)],
                       fill=self.generate_color())
            # Each iteration also draws a tiny 4x4 quarter arc at a second
            # random position for extra texture.
            x = random.randint(0, picture_width)
            y = random.randint(0, picture_height)
            draw.arc((x, y, x + 4, y + 4), 0, 90, fill=self.generate_color())
        return image
    def generate_base64_image(self, code_count: int, font_size: int, save: bool = False):
        """Produce a complete verification image as a base64 data URI.

        Pipeline: random background -> draw code -> add noise -> JPEG-encode
        -> base64 -> ``data:image/jpeg;base64,...`` string.

        :param code_count: number of characters in the code
        :param font_size: point size used to draw the code
        :param save: when True, also write the image to 'code_image.jpeg'
                     in the current working directory
        :return: ``(code_string, data_uri)`` tuple
        """
        code_image = self.generate_picture()
        valid, code_image = self.generate_code(code_count, code_image, font_size)
        code_image = self.generate_noise(code_image)
        if save:
            code_image.save('code_image.jpeg')
        byte = BytesIO()
        code_image.save(byte, 'jpeg')
        data = byte.getvalue()
        byte.close()
        encode64 = base64.b64encode(data)
        data = str(encode64, encoding='utf-8')
        image_data = "data:image/jpeg;base64,{data}".format(data=data)
        return valid, image_data
| 620 | 3,347 | 23 |
72feb42656b0d22751d723a497e020605e203efa | 700 | py | Python | pycargr/__init__.py | Florents-Tselai/PyCarGr | ed8ae8878d0d188d1f9ab44b62ed529764ef8e45 | [
"MIT"
] | 13 | 2017-05-07T20:40:23.000Z | 2022-03-09T12:40:02.000Z | pycargr/__init__.py | Florents-Tselai/PyCarGr | ed8ae8878d0d188d1f9ab44b62ed529764ef8e45 | [
"MIT"
] | 1 | 2021-12-08T17:45:49.000Z | 2021-12-08T17:45:49.000Z | pycargr/__init__.py | Florents-Tselai/PyCarGr | ed8ae8878d0d188d1f9ab44b62ed529764ef8e45 | [
"MIT"
] | 14 | 2017-05-08T07:45:17.000Z | 2022-03-20T07:54:28.000Z | from json import dumps
from pathlib import Path
from sqlite3 import connect
from pycargr.model import Car
DB_PATH = Path.home().joinpath('pycargr.db')
SEARCH_BASE_URL = 'https://www.car.gr/classifieds/cars/'
| 35 | 112 | 0.594286 | from json import dumps
from pathlib import Path
from sqlite3 import connect
from pycargr.model import Car
DB_PATH = Path.home().joinpath('pycargr.db')
SEARCH_BASE_URL = 'https://www.car.gr/classifieds/cars/'
def save_car(*cars):
    """Persist one or more Car objects into the local SQLite database.

    Rows sharing an existing primary key are overwritten
    (``INSERT OR REPLACE``); the image list is serialised to JSON before
    being stored.
    """
    def _as_row(car):
        # Flatten a Car into the column order of the ``cars`` table.
        return (car.car_id, car.title, car.price, car.release_date, car.km,
                car.bhp, car.url, car.color, car.fueltype, car.description,
                car.city, car.region, car.postal_code, car.transmission,
                dumps(car.images), car.html, car.scraped_at)
    rows = [_as_row(car) for car in cars]
    with connect(str(DB_PATH), timeout=10) as db:
        db.executemany("INSERT OR REPLACE INTO cars VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                       rows)
| 465 | 0 | 23 |
41bf7bbb2675b0dfe18d90074eb48a93a6f2e4c5 | 1,293 | py | Python | tests/generator_python3_marshmallow_test.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 21 | 2018-06-15T16:08:57.000Z | 2022-02-11T16:16:11.000Z | tests/generator_python3_marshmallow_test.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 14 | 2018-08-09T18:02:19.000Z | 2022-01-24T18:04:17.000Z | tests/generator_python3_marshmallow_test.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 4 | 2018-11-30T18:19:10.000Z | 2021-11-18T04:04:36.000Z | from pathlib import Path
import ast
import pytest
import astor
import warnings
import os
from json_codegen import load_schema
from json_codegen.generators.python3_marshmallow import Python3MarshmallowGenerator
SCHEMAS_DIR = Path(__file__).parent / "fixtures" / "schemas"
FIXTURES_DIR = Path(__file__).parent / "fixtures" / "python3_marshmallow"
expected_init_py = astor.dump_tree(ast.Module(body=[]))
test_params = sorted(pytest.param(f, id=f.name) for f in SCHEMAS_DIR.glob("*.schema.json"))
@pytest.mark.parametrize("schema_filename", (test_params))
| 26.9375 | 91 | 0.743233 | from pathlib import Path
import ast
import pytest
import astor
import warnings
import os
from json_codegen import load_schema
from json_codegen.generators.python3_marshmallow import Python3MarshmallowGenerator
SCHEMAS_DIR = Path(__file__).parent / "fixtures" / "schemas"
FIXTURES_DIR = Path(__file__).parent / "fixtures" / "python3_marshmallow"
expected_init_py = astor.dump_tree(ast.Module(body=[]))
test_params = sorted(pytest.param(f, id=f.name) for f in SCHEMAS_DIR.glob("*.schema.json"))
def load_fixture(name):
    """Parse the Python fixture *name* from FIXTURES_DIR into an AST."""
    return astor.parse_file(FIXTURES_DIR / (name + ".py"))
@pytest.mark.parametrize("schema_filename", (test_params))
def test_generate(schema_filename):
    """Generated AST for each schema must match its checked-in fixture."""
    stem = schema_filename.name.split(".")[0]
    fixture_filename = FIXTURES_DIR / (stem + ".py")
    schema = load_schema(schema_filename.read_text())
    try:
        fixture = astor.parse_file(fixture_filename)
    except FileNotFoundError:
        # Missing fixture means the schema isn't supported yet; warn
        # instead of failing so the suite stays green.
        warnings.warn(f"Fixture not implemented yet: {os.path.basename(fixture_filename)}")
        return
    generated = Python3MarshmallowGenerator(schema).generate().as_ast()
    # Printed source helps debugging mismatches (visible with pytest -s).
    print(astor.to_source(generated))
    assert astor.dump_tree(generated) == astor.dump_tree(fixture)
| 687 | 0 | 45 |
9b3c571c5bc94ea87ff20714b5a8b05aada9fe40 | 569 | py | Python | ABC/abc051-abc100/abc079/d.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ABC/abc051-abc100/abc079/d.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ABC/abc051-abc100/abc079/d.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
if __name__ == '__main__':
main()
| 21.074074 | 61 | 0.411248 | # -*- coding: utf-8 -*-
def main():
    """Solve AtCoder ABC079 D: minimum total magic-point cost to turn
    every digit written on an H x W wall into the digit 1.

    Input (stdin): ``H W``, then a 10x10 matrix where row i, column j is
    the cost of rewriting digit i into digit j, then H rows of W cells in
    which ``-1`` marks an empty square and ``0``-``9`` a written digit.
    Output (stdout): the minimum total cost.
    """
    height, width = map(int, input().split())
    cost = [list(map(int, input().split())) for _ in range(10)]
    wall = [list(map(int, input().split())) for _ in range(height)]
    # Floyd-Warshall over the 10 digits: rewriting may pass through
    # intermediate digits, so take the cheapest chain of rewrites.
    for mid in range(10):
        for src in range(10):
            for dst in range(10):
                via = cost[src][mid] + cost[mid][dst]
                if via < cost[src][dst]:
                    cost[src][dst] = via
    # Sum the cheapest digit->1 cost over every non-empty square.
    total = sum(cost[cell][1]
                for row in wall
                for cell in row
                if cell >= 0)
    print(total)
if __name__ == '__main__':
main()
| 473 | 0 | 25 |
cf03515cb78738608985a3bcb75662fa5776e3d7 | 638 | py | Python | Docs/Examples/howtos/glyphMath_00.py | Vectro-Type-Foundry/robofab | cd65d78292d24358c98dce53d283314cdc85878e | [
"BSD-3-Clause"
] | 61 | 2015-01-17T10:15:45.000Z | 2018-12-02T13:53:02.000Z | Docs/Examples/howtos/glyphMath_00.py | Vectro-Type-Foundry/robofab | cd65d78292d24358c98dce53d283314cdc85878e | [
"BSD-3-Clause"
] | 37 | 2015-01-05T23:44:56.000Z | 2018-03-16T19:05:28.000Z | Docs/Examples/howtos/glyphMath_00.py | Vectro-Type-Foundry/robofab | cd65d78292d24358c98dce53d283314cdc85878e | [
"BSD-3-Clause"
] | 25 | 2015-01-08T19:49:36.000Z | 2018-10-29T00:36:46.000Z | # robofab manual
# Glyphmath howto
# Fun examples
#FLM: Fun with GlyphMath
# this example is meant to run with the RoboFab Demo Font
# as the Current Font. So, if you're doing this in FontLab
# import the Demo Font UFO first.
from robofab.world import CurrentFont
from random import random
f = CurrentFont()
condensedLight = f["a#condensed_light"]
wideLight = f["a#wide_light"]
wideBold = f["a#wide_bold"]
diff = wideLight - condensedLight
destination = f.newGlyph("a#deltaexperiment")
destination.clear()
x = wideBold + (condensedLight-wideLight)*random()
destination.appendGlyph( x)
destination.width = x.width
f.update() | 22.785714 | 58 | 0.747649 | # robofab manual
# Glyphmath howto
# Fun examples
#FLM: Fun with GlyphMath
# this example is meant to run with the RoboFab Demo Font
# as the Current Font. So, if you're doing this in FontLab
# import the Demo Font UFO first.
from robofab.world import CurrentFont
from random import random
f = CurrentFont()
condensedLight = f["a#condensed_light"]
wideLight = f["a#wide_light"]
wideBold = f["a#wide_bold"]
diff = wideLight - condensedLight
destination = f.newGlyph("a#deltaexperiment")
destination.clear()
x = wideBold + (condensedLight-wideLight)*random()
destination.appendGlyph( x)
destination.width = x.width
f.update() | 0 | 0 | 0 |
0220466f686772c657c40b619251e273a74a65d7 | 1,376 | py | Python | pypy/translator/js/examples/console/docloader.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/translator/js/examples/console/docloader.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/translator/js/examples/console/docloader.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z |
""" Simple module for loading documentation of various
pypy-cs from doc directory
"""
import py
| 31.272727 | 76 | 0.582122 |
""" Simple module for loading documentation of various
pypy-cs from doc directory
"""
import py
class DocLoader(object):
    """Loads per-console HTML documentation and code snippets.

    For each console name, reads ``<console>.html`` from *docdir* and
    collects the ``test_snippet*`` methods of the matching
    ``AppTest_<mangled name>`` class from *testfile*; the snippet sources
    are interpolated into the HTML template via ``%``-formatting.
    """
    def __init__(self, consoles, docdir, testfile):
        # consoles: iterable of console names.
        # docdir: directory holding one <console>.html template each.
        # testfile: py.path to a module defining AppTest_<name> classes.
        self.consoles = consoles
        self.docdir = py.path.local(docdir)
        assert self.docdir.check(dir=1)
        self.testfile = testfile
        assert self.testfile.check()
        self.htmls = {}
        self.snippets = {}
        self.load()
    def get_html(self, console):
        """Return the rendered HTML page for *console*."""
        return self.htmls[console]
    def get_snippet(self, console, num):
        """Return snippet number *num* of *console* as source text."""
        return str(self.snippets[console][num])
    def load(self):
        """Populate ``self.htmls`` and ``self.snippets`` for all consoles."""
        def mangle_name(name):
            # Console names may contain '-' or '.', which are invalid in
            # Python identifiers; the test classes use '_' instead.
            return name.replace("-", "_").replace(".", "_")
        def mangle(source):
            # Drop surrounding blank lines, remove the 'def test_...'
            # header line, then strip the method body's indentation.
            source = source.strip()
            del source.lines[0]
            return source.deindent()
        testmod = self.testfile.pyimport()
        for console in self.consoles:
            html = self.docdir.join(console + '.html').read()
            snip_class = getattr(testmod, 'AppTest_' + mangle_name(console))
            snippets = [mangle(py.code.Source(getattr(snip_class, name)))
                        for name in
                        dir(snip_class) if name.startswith("test_snippet")]
            self.snippets[console] = snippets
            # %-interpolation: the template needs one '%s' per snippet.
            self.htmls[console] = html % tuple([str(i) for i in snippets])
| 1,144 | 3 | 130 |
9160e3ca26bcd83ce1ff19da7116c3bd5688dbed | 5,685 | py | Python | src/sst/elements/scheduler/simulations/makeInput.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 2 | 2019-06-10T15:32:03.000Z | 2019-06-11T14:17:32.000Z | src/sst/elements/scheduler/simulations/makeInput.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 39 | 2016-01-06T15:08:15.000Z | 2020-06-03T18:12:31.000Z | src/sst/elements/scheduler/simulations/makeInput.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 2 | 2021-05-23T02:28:02.000Z | 2021-09-08T13:38:46.000Z | #!/usr/bin/env python
'''
SST scheduler simulation input file generator
Input parameters are given below
Setting a parameter to "default" or "" will select the default option
'''
import os
# Input workload trace path:
traceName = 'jobtrace_files/bisection_N1.sim'
# Output file name:
outFile = 'simple_libtopomap_bisection_N1.py'
# Machine (cluster) configuration:
# mesh[xdim, ydim, zdim], torus[xdim, ydim, zdim], simple,
# dragonfly[routersPerGroup, portsPerRouter, opticalsPerRouter,
# nodesPerRouter, localTopology, globalTopology]
# localTopology:[all_to_all]
# globalTopology:[absolute,circulant,relative]
# (default: simple)
machine = 'dragonfly[8,11,2,2,all_to_all,absolute]'
# Number of machine nodes
# The script calculates the number of nodes if mesh or torus machine is provided.
# any integer. (default: 1)
numberNodes = ''
# Number of cores in each machine node
# any integer. (default: 1)
coresPerNode = '2'
# Scheduler algorithm:
# cons, delayed, easy, elc, pqueue, prioritize. (default: pqueue)
scheduler = 'easy'
# Fair start time algorithm:
# none, relaxed, strict. (default: none)
FST = ''
# Allocation algorithm:
# bestfit, constraint, energy, firstfit, genalg, granularmbs, hybrid, mbs,
# mc1x1, mm, nearest, octetmbs, oldmc1x1,random, simple, sortedfreelist,
# nearestamap, spectralamap. (default: simple)
allocator = 'simple'
# Task mapping algorithm:
# simple, rcb, random, topo, rcm, nearestamap, spectralamap. (default: simple)
taskMapper = 'topo'
# Communication overhead parameters
# a[b,c] (default: none)
timeperdistance = '.001865[.1569,0.0129]'
# Heat distribution matrix (D_matrix) input file
# file path, none. (default: none)
dMatrixFile = 'none'
# Randomization seed for communication time overhead
# none, any integer. (default: none)
randomSeed = ''
# Detailed network simulation mode
# ON, OFF (default: OFF)
detailedNetworkSim = 'ON'
# Completed jobs trace (in ember) for detailed network sim mode
# file path, none (default: none)
completedJobsTrace = 'emberCompleted.txt'
# Running jobs (in ember) for detailed network sim mode
# file path, none (default: none)
runningJobsTrace = 'emberRunning.txt'
'''
Do not modify the script after this point.
'''
import sys
if __name__ == '__main__':
if outFile == "" or outFile == "default":
print "Error: There is no default value for outFile"
sys.exit()
f = open(outFile,'w')
f.write('# scheduler simulation input file\n')
f.write('import sst\n')
f.write('\n')
f.write('# Define SST core options\n')
f.write('sst.setProgramOption("run-mode", "both")\n')
f.write('\n')
f.write('# Define the simulation components\n')
f.write('scheduler = sst.Component("myScheduler", \
"scheduler.schedComponent")\n')
f.write('scheduler.addParams({\n')
if traceName == "" or traceName == "default":
print "Error: There is no default value for traceName"
os.remove(outFile)
sys.exit()
f.write(' "traceName" : "' + traceName + '",\n')
if machine != "" and machine != "default":
f.write(' "machine" : "' + machine + '",\n')
if coresPerNode != "":
f.write(' "coresPerNode" : "' + coresPerNode + '",\n')
if scheduler != "" and scheduler != "default":
f.write(' "scheduler" : "' + scheduler + '",\n')
if FST != "" and FST != "default":
f.write(' "FST" : "' + FST + '",\n')
if allocator != "" and allocator != "default":
f.write(' "allocator" : "' + allocator + '",\n')
if taskMapper != "" and taskMapper != "default":
f.write(' "taskMapper" : "' + taskMapper + '",\n')
if timeperdistance != "" and timeperdistance != "default":
f.write(' "timeperdistance" : "' + timeperdistance + '",\n')
if dMatrixFile != "" and dMatrixFile != "default":
f.write(' "dMatrixFile" : "' + dMatrixFile + '",\n')
if randomSeed != "" and randomSeed != "default":
f.write(' "runningTimeSeed" : "' + randomSeed + '",\n')
if detailedNetworkSim != "" and detailedNetworkSim != "default":
f.write(' "detailedNetworkSim" : "' + detailedNetworkSim + '",\n')
if completedJobsTrace != "" and completedJobsTrace != "default":
f.write(' "completedJobsTrace" : "' + completedJobsTrace + '",\n')
if runningJobsTrace != "" and runningJobsTrace != "default":
f.write(' "runningJobsTrace" : "' + runningJobsTrace + '",\n')
f.seek(-2, os.SEEK_END)
f.truncate()
f.write('\n})\n')
f.write('\n')
f.write('# nodes\n')
if machine.split('[')[0] == 'mesh' or machine.split('[')[0] == 'torus':
nums = machine.split('[')[1]
nums = nums.split(']')[0]
nums = nums.split(',')
numberNodes = int(nums[0])*int(nums[1])*int(nums[2])
elif machine.split('[')[0] == 'dragonfly':
nums = machine.split('[')[1]
nums = nums.split(']')[0]
nums = nums.split(',')
numberNodes = (int(nums[0])*int(nums[2])+1) *int(nums[0])*int(nums[3])
numberNodes = int(numberNodes)
for i in range(0, numberNodes):
f.write('n' + str(i) + ' = sst.Component("n' + str(i) + \
'", "scheduler.nodeComponent")\n')
f.write('n' + str(i) + '.addParams({\n')
f.write(' "nodeNum" : "' + str(i) + '",\n')
f.write('})\n')
f.write('\n')
f.write('# define links\n')
for i in range(0, numberNodes):
f.write('l' + str(i) + ' = sst.Link("l' + str(i) + '")\n')
f.write('l' + str(i) + '.connect( (scheduler, "nodeLink' + str(i) + \
'", "0 ns"), (n' + str(i) + ', "Scheduler", "0 ns") )\n')
f.write('\n')
f.close()
| 34.664634 | 81 | 0.610554 | #!/usr/bin/env python
'''
SST scheduler simulation input file generator
Input parameters are given below
Setting a parameter to "default" or "" will select the default option
'''
import os
# Input workload trace path:
traceName = 'jobtrace_files/bisection_N1.sim'
# Output file name:
outFile = 'simple_libtopomap_bisection_N1.py'
# Machine (cluster) configuration:
# mesh[xdim, ydim, zdim], torus[xdim, ydim, zdim], simple,
# dragonfly[routersPerGroup, portsPerRouter, opticalsPerRouter,
# nodesPerRouter, localTopology, globalTopology]
# localTopology:[all_to_all]
# globalTopology:[absolute,circulant,relative]
# (default: simple)
machine = 'dragonfly[8,11,2,2,all_to_all,absolute]'
# Number of machine nodes
# The script calculates the number of nodes if mesh or torus machine is provided.
# any integer. (default: 1)
numberNodes = ''
# Number of cores in each machine node
# any integer. (default: 1)
coresPerNode = '2'
# Scheduler algorithm:
# cons, delayed, easy, elc, pqueue, prioritize. (default: pqueue)
scheduler = 'easy'
# Fair start time algorithm:
# none, relaxed, strict. (default: none)
FST = ''
# Allocation algorithm:
# bestfit, constraint, energy, firstfit, genalg, granularmbs, hybrid, mbs,
# mc1x1, mm, nearest, octetmbs, oldmc1x1,random, simple, sortedfreelist,
# nearestamap, spectralamap. (default: simple)
allocator = 'simple'
# Task mapping algorithm:
# simple, rcb, random, topo, rcm, nearestamap, spectralamap. (default: simple)
taskMapper = 'topo'
# Communication overhead parameters
# a[b,c] (default: none)
timeperdistance = '.001865[.1569,0.0129]'
# Heat distribution matrix (D_matrix) input file
# file path, none. (default: none)
dMatrixFile = 'none'
# Randomization seed for communication time overhead
# none, any integer. (default: none)
randomSeed = ''
# Detailed network simulation mode
# ON, OFF (default: OFF)
detailedNetworkSim = 'ON'
# Completed jobs trace (in ember) for detailed network sim mode
# file path, none (default: none)
completedJobsTrace = 'emberCompleted.txt'
# Running jobs (in ember) for detailed network sim mode
# file path, none (default: none)
runningJobsTrace = 'emberRunning.txt'
'''
Do not modify the script after this point.
'''
import sys


if __name__ == '__main__':
    # Validate the mandatory parameters before creating the output file, so a
    # bad configuration never leaves a half-written file behind (the previous
    # version removed the output file while it was still open).
    if outFile == "" or outFile == "default":
        print("Error: There is no default value for outFile")
        sys.exit()
    if traceName == "" or traceName == "default":
        print("Error: There is no default value for traceName")
        sys.exit()
    # Determine the number of node components. mesh/torus and dragonfly
    # machines encode it in the machine string; otherwise use the
    # user-supplied numberNodes (documented default: 1, previously crashed
    # on int('') when numberNodes was left empty).
    machineType = machine.split('[')[0]
    if machineType == 'mesh' or machineType == 'torus':
        # mesh[x, y, z] / torus[x, y, z] -> x * y * z nodes
        dims = machine.split('[')[1].split(']')[0].split(',')
        numNodes = int(dims[0]) * int(dims[1]) * int(dims[2])
    elif machineType == 'dragonfly':
        # dragonfly[routersPerGroup, portsPerRouter, opticalsPerRouter,
        #           nodesPerRouter, localTopology, globalTopology]
        dims = machine.split('[')[1].split(']')[0].split(',')
        numNodes = (int(dims[0]) * int(dims[2]) + 1) * int(dims[0]) * int(dims[3])
    elif numberNodes == "" or numberNodes == "default":
        numNodes = 1
    else:
        numNodes = int(numberNodes)
    # Collect the scheduler parameters first; joining them afterwards avoids
    # the old seek(-2)/truncate trick (which is also illegal on text-mode
    # files in Python 3). Empty values are omitted; the original semantics
    # per parameter are preserved (coresPerNode only skips the empty string).
    params = ['    "traceName" : "' + traceName + '"']
    if machine != "" and machine != "default":
        params.append('    "machine" : "' + machine + '"')
    if coresPerNode != "":
        params.append('    "coresPerNode" : "' + coresPerNode + '"')
    if scheduler != "" and scheduler != "default":
        params.append('    "scheduler" : "' + scheduler + '"')
    if FST != "" and FST != "default":
        params.append('    "FST" : "' + FST + '"')
    if allocator != "" and allocator != "default":
        params.append('    "allocator" : "' + allocator + '"')
    if taskMapper != "" and taskMapper != "default":
        params.append('    "taskMapper" : "' + taskMapper + '"')
    if timeperdistance != "" and timeperdistance != "default":
        params.append('    "timeperdistance" : "' + timeperdistance + '"')
    if dMatrixFile != "" and dMatrixFile != "default":
        params.append('    "dMatrixFile" : "' + dMatrixFile + '"')
    if randomSeed != "" and randomSeed != "default":
        params.append('    "runningTimeSeed" : "' + randomSeed + '"')
    if detailedNetworkSim != "" and detailedNetworkSim != "default":
        params.append('    "detailedNetworkSim" : "' + detailedNetworkSim + '"')
    if completedJobsTrace != "" and completedJobsTrace != "default":
        params.append('    "completedJobsTrace" : "' + completedJobsTrace + '"')
    if runningJobsTrace != "" and runningJobsTrace != "default":
        params.append('    "runningJobsTrace" : "' + runningJobsTrace + '"')
    # Emit the SST simulation input file; the context manager guarantees the
    # file is closed even if writing fails.
    with open(outFile, 'w') as f:
        f.write('# scheduler simulation input file\n')
        f.write('import sst\n')
        f.write('\n')
        f.write('# Define SST core options\n')
        f.write('sst.setProgramOption("run-mode", "both")\n')
        f.write('\n')
        f.write('# Define the simulation components\n')
        f.write('scheduler = sst.Component("myScheduler", '
                '"scheduler.schedComponent")\n')
        f.write('scheduler.addParams({\n')
        f.write(',\n'.join(params))
        f.write('\n})\n')
        f.write('\n')
        f.write('# nodes\n')
        for i in range(numNodes):
            f.write('n%d = sst.Component("n%d", "scheduler.nodeComponent")\n' % (i, i))
            f.write('n%d.addParams({\n' % i)
            f.write('    "nodeNum" : "%d",\n' % i)
            f.write('})\n')
        f.write('\n')
        f.write('# define links\n')
        for i in range(numNodes):
            f.write('l%d = sst.Link("l%d")\n' % (i, i))
            f.write('l%d.connect( (scheduler, "nodeLink%d", "0 ns"), '
                    '(n%d, "Scheduler", "0 ns") )\n' % (i, i, i))
        f.write('\n')
30654d429382de6f8edb4c2fc8cb6391f4f78fba | 2,626 | py | Python | gaul/utils/pbar.py | al-jshen/gaul | f0c8d165adc4dbec328af34e26d8988a89c5c385 | [
"Apache-2.0",
"MIT"
] | null | null | null | gaul/utils/pbar.py | al-jshen/gaul | f0c8d165adc4dbec328af34e26d8988a89c5c385 | [
"Apache-2.0",
"MIT"
] | null | null | null | gaul/utils/pbar.py | al-jshen/gaul | f0c8d165adc4dbec328af34e26d8988a89c5c385 | [
"Apache-2.0",
"MIT"
] | null | null | null | from jax import lax
from jax.experimental import host_callback
from tqdm.auto import tqdm
def progress_bar_scan(num_samples, message=None):
    """
    Progress bar for a JAX scan.

    Returns a decorator for the body function of ``lax.scan`` that drives a
    tqdm progress bar from inside the traced loop via host callbacks.

    Note: this block previously referenced ``_define_tqdm``, ``_update_tqdm``
    and ``wrapper_progress_bar`` without defining them (NameError at use);
    the missing inner helpers are restored here.
    """
    if message is None:
        message = f"Running for {num_samples:,} iterations"
    # Holds the single tqdm bar; a dict so the nested callbacks can mutate it.
    tqdm_bars = {}
    if num_samples > 20:
        print_rate = int(num_samples / 20)
    else:
        print_rate = 1
    remainder = num_samples % print_rate

    def _define_tqdm(arg, transform):
        # Host-side callback: create the bar on the first iteration.
        tqdm_bars[0] = tqdm(range(num_samples))
        tqdm_bars[0].set_description(message, refresh=False)

    def _update_tqdm(arg, transform):
        # Host-side callback: advance the bar by ``arg`` iterations.
        tqdm_bars[0].update(arg)

    def _update_progress_bar(iter_num):
        """
        Updates tqdm progress bar of a JAX scan or loop.
        """
        _ = lax.cond(
            iter_num == 0,
            lambda _: host_callback.id_tap(_define_tqdm, None, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )
        _ = lax.cond(
            # update tqdm every multiple of `print_rate` except at the end
            (iter_num % print_rate == 0) & (iter_num != num_samples - remainder),
            lambda _: host_callback.id_tap(_update_tqdm, print_rate, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )
        _ = lax.cond(
            # update tqdm by `remainder`
            iter_num == num_samples - remainder,
            lambda _: host_callback.id_tap(_update_tqdm, remainder, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )

    def _close_tqdm(arg, transform):
        # Host-side callback: dispose of the bar after the last iteration.
        tqdm_bars[0].close()

    def close_tqdm(result, iter_num):
        # Close the bar exactly once, on the final iteration.
        return lax.cond(
            iter_num == num_samples - 1,
            lambda _: host_callback.id_tap(_close_tqdm, None, result=result),
            lambda _: result,
            operand=None,
        )

    def _progress_bar_scan(func):
        """
        Decorator that adds a progress bar to `body_fun` used in `lax.scan`.
        Note that `body_fun` must either be looping over
        `np.arange(num_samples)`, or be looping over a tuple who's first
        element is `np.arange(num_samples)` This means that `iter_num`
        is the current iteration number
        """
        def wrapper_progress_bar(carry, x):
            # `x` is either the iteration index or a tuple starting with it.
            if type(x) is tuple:
                iter_num, *_ = x
            else:
                iter_num = x
            _update_progress_bar(iter_num)
            result = func(carry, x)
            return close_tqdm(result, iter_num)

        return wrapper_progress_bar

    return _progress_bar_scan
| 30.183908 | 86 | 0.589871 | from jax import lax
from jax.experimental import host_callback
from tqdm.auto import tqdm
def progress_bar_scan(num_samples, message=None):
    """
    Progress bar for a JAX scan.

    Returns a decorator for the body function of ``lax.scan`` that drives a
    tqdm progress bar from inside the traced computation via
    ``host_callback.id_tap``.

    :param num_samples: total number of scan iterations.
    :param message: optional bar description; defaults to a generic message.
    """
    if message is None:
        message = f"Running for {num_samples:,} iterations"
    # Holds the single tqdm bar; a dict so the nested callbacks can mutate it.
    tqdm_bars = {}
    # Update roughly 20 times over the whole scan (at least every iteration).
    if num_samples > 20:
        print_rate = int(num_samples / 20)
    else:
        print_rate = 1
    remainder = num_samples % print_rate
    def _define_tqdm(arg, transform):
        # Host-side callback: create the bar on the first iteration.
        tqdm_bars[0] = tqdm(range(num_samples))
        tqdm_bars[0].set_description(message, refresh=False)
    def _update_tqdm(arg, transform):
        # Host-side callback: advance the bar by ``arg`` iterations.
        tqdm_bars[0].update(arg)
    def _update_progress_bar(iter_num):
        """
        Updates tqdm progress bar of a JAX scan or loop.
        """
        # Each lax.cond keeps the branch traceable; id_tap returns iter_num
        # (result=iter_num) so the callback is sequenced with the computation.
        _ = lax.cond(
            iter_num == 0,
            lambda _: host_callback.id_tap(_define_tqdm, None, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )
        _ = lax.cond(
            # update tqdm every multiple of `print_rate` except at the end
            (iter_num % print_rate == 0) & (iter_num != num_samples - remainder),
            lambda _: host_callback.id_tap(_update_tqdm, print_rate, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )
        _ = lax.cond(
            # update tqdm by `remainder`
            iter_num == num_samples - remainder,
            lambda _: host_callback.id_tap(_update_tqdm, remainder, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )
    def _close_tqdm(arg, transform):
        # Host-side callback: dispose of the bar after the last iteration.
        tqdm_bars[0].close()
    def close_tqdm(result, iter_num):
        # Close the bar exactly once, on the final iteration; threads the
        # scan result through id_tap unchanged.
        return lax.cond(
            iter_num == num_samples - 1,
            lambda _: host_callback.id_tap(_close_tqdm, None, result=result),
            lambda _: result,
            operand=None,
        )
    def _progress_bar_scan(func):
        """
        Decorator that adds a progress bar to `body_fun` used in `lax.scan`.
        Note that `body_fun` must either be looping over
        `np.arange(num_samples)`, or be looping over a tuple who's first
        element is `np.arange(num_samples)` This means that `iter_num`
        is the current iteration number
        """
        def wrapper_progress_bar(carry, x):
            # `x` is either the iteration index itself or a tuple whose
            # first element is the iteration index.
            if type(x) is tuple:
                iter_num, *_ = x
            else:
                iter_num = x
            _update_progress_bar(iter_num)
            result = func(carry, x)
            return close_tqdm(result, iter_num)
        return wrapper_progress_bar
    return _progress_bar_scan
| 682 | 0 | 139 |
b637446c57444ed8cb2a019389bc13205a3f6424 | 671 | py | Python | regina_normalizer/dict_data.py | grammatek/regina_normalizer | 61ffeafcf1b967b44a3a9f99727013779bfeba13 | [
"Apache-2.0"
] | null | null | null | regina_normalizer/dict_data.py | grammatek/regina_normalizer | 61ffeafcf1b967b44a3a9f99727013779bfeba13 | [
"Apache-2.0"
] | null | null | null | regina_normalizer/dict_data.py | grammatek/regina_normalizer | 61ffeafcf1b967b44a3a9f99727013779bfeba13 | [
"Apache-2.0"
] | null | null | null | import logging
from pathlib import Path | 29.173913 | 85 | 0.611028 | import logging
from pathlib import Path
class PronDict:
    """Class-level, lazily loaded cache of the pronunciation-dictionary words."""

    # Shared across all instances; ``None`` means the file was never read.
    lexicon = None

    def __init__(self, lexicon_file='data/lexicon.txt'):
        """
        Initialize the lexicon containing the words from the pronunciation dictionary
        :param lexicon_file: path to lexicon file
        """
        try:
            with open(lexicon_file) as source:
                PronDict.lexicon = source.read().splitlines()
        except OSError:
            # Unreadable file: fall back to an empty word list and log it.
            PronDict.lexicon = []
            logging.error("Could not read lexicon file: " + lexicon_file)

    @staticmethod
    def get_lexicon():
        """Return the cached word list, loading it on first access."""
        words = PronDict.lexicon
        if words is None:
            PronDict()  # constructing an instance populates the class attribute
            words = PronDict.lexicon
        return words
0edcdddf3b20da72b40dd815ce537c579e072a60 | 7,088 | py | Python | skimage/future/graph/tests/test_rag.py | bvnayak/scikit-image | a6654763f1445aa198dcaab8bd77fe0e2a699c72 | [
"BSD-3-Clause"
] | null | null | null | skimage/future/graph/tests/test_rag.py | bvnayak/scikit-image | a6654763f1445aa198dcaab8bd77fe0e2a699c72 | [
"BSD-3-Clause"
] | null | null | null | skimage/future/graph/tests/test_rag.py | bvnayak/scikit-image | a6654763f1445aa198dcaab8bd77fe0e2a699c72 | [
"BSD-3-Clause"
] | 1 | 2021-02-20T14:11:39.000Z | 2021-02-20T14:11:39.000Z | import numpy as np
from skimage.future import graph
from skimage._shared.version_requirements import is_installed
from skimage import segmentation
import pytest
@pytest.mark.skipif(not is_installed('networkx'),
reason="networkx not installed")
@pytest.mark.skipif(not is_installed('networkx'),
reason="networkx not installed")
@pytest.mark.skipif(not is_installed('networkx'),
reason="networkx not installed")
@pytest.mark.skipif(not is_installed('networkx'),
reason="networkx not installed")
@pytest.mark.skipif(not is_installed('networkx'),
reason="networkx not installed")
@pytest.mark.skipif(not is_installed('networkx'),
reason="networkx not installed")
def test_ncut_stable_subgraph():
""" Test to catch an error thrown when subgraph has all equal edges. """
img = np.zeros((100, 100, 3), dtype='uint8')
labels = np.zeros((100, 100), dtype='uint8')
labels[:50, :50] = 1
labels[:50, 50:] = 2
rag = graph.rag_mean_color(img, labels, mode='similarity')
new_labels = graph.cut_normalized(labels, rag, in_place=False)
new_labels, _, _ = segmentation.relabel_sequential(new_labels)
assert new_labels.max() == 0
| 31.784753 | 76 | 0.592833 | import numpy as np
from skimage.future import graph
from skimage._shared.version_requirements import is_installed
from skimage import segmentation
import pytest
def max_edge(g, src, dst, n):
    """Merge callback: keep the larger of the two weights when both ``src``
    and ``dst`` share an edge with the common neighbour ``n``.

    Missing edges count as ``-inf`` so an existing edge always wins.
    """
    absent = {'weight': -np.inf}
    w_src = g[n].get(src, absent)['weight']
    w_dst = g[n].get(dst, absent)['weight']
    return {'weight': w_src if w_src >= w_dst else w_dst}
@pytest.mark.skipif(not is_installed('networkx'),
                    reason="networkx not installed")
def test_rag_merge():
    """Node merging: default (minimum) vs. custom (maximum) weight resolution."""
    # Five nodes in a small RAG, each carrying its own label.
    g = graph.rag.RAG()
    for i in range(5):
        g.add_node(i, {'labels': [i]})
    g.add_edge(0, 1, {'weight': 10})
    g.add_edge(1, 2, {'weight': 20})
    g.add_edge(2, 3, {'weight': 30})
    g.add_edge(3, 0, {'weight': 40})
    g.add_edge(0, 2, {'weight': 50})
    g.add_edge(3, 4, {'weight': 60})
    gc = g.copy()
    # We merge nodes and ensure that the minimum weight is chosen
    # when there is a conflict.
    g.merge_nodes(0, 2)
    assert g.adj[1][2]['weight'] == 10
    assert g.adj[2][3]['weight'] == 30
    # We specify `max_edge` as `weight_func` to ensure that the maximum
    # weight is chosen in case of conflict.
    gc.merge_nodes(0, 2, weight_func=max_edge)
    assert gc.adj[1][2]['weight'] == 20
    assert gc.adj[2][3]['weight'] == 40
    # Collapse the remaining nodes; the final out-of-place merge returns
    # the id of the surviving node, which must carry all five labels.
    g.merge_nodes(1, 4)
    g.merge_nodes(2, 3)
    n = g.merge_nodes(3, 4, in_place=False)
    assert sorted(g.node[n]['labels']) == list(range(5))
    assert list(g.edges()) == []
@pytest.mark.skipif(not is_installed('networkx'),
                    reason="networkx not installed")
def test_threshold_cut():
    """Thresholding the RAG at 10 must merge the four quadrants into two."""
    # Four 50x50 quadrants: two nearly white on top, two nearly black below,
    # each with its own initial label 0..3.
    quadrant_colors = ((255, 255, 255), (254, 254, 254), (2, 2, 2), (1, 1, 1))
    quadrant_slices = ((slice(None, 50), slice(None, 50)),
                       (slice(None, 50), slice(50, None)),
                       (slice(50, None), slice(None, 50)),
                       (slice(50, None), slice(50, None)))
    img = np.zeros((100, 100, 3), dtype='uint8')
    labels = np.zeros((100, 100), dtype='uint8')
    for label_value, (rows, cols) in enumerate(quadrant_slices):
        img[rows, cols] = quadrant_colors[label_value]
        labels[rows, cols] = label_value
    rag = graph.rag_mean_color(img, labels)
    # Out-of-place cut: bright half vs. dark half -> exactly two labels.
    new_labels = graph.cut_threshold(labels, rag, 10, in_place=False)
    assert new_labels.max() == 1
    # The in-place variant must agree.
    new_labels = graph.cut_threshold(labels, rag, 10)
    assert new_labels.max() == 1
@pytest.mark.skipif(not is_installed('networkx'),
                    reason="networkx not installed")
def test_cut_normalized():
    """Normalized cut on a four-quadrant image must leave two labels."""
    # Two nearly identical bright quadrants on top, two dark ones below.
    img = np.zeros((100, 100, 3), dtype='uint8')
    img[:50, :50] = 255, 255, 255
    img[:50, 50:] = 254, 254, 254
    img[50:, :50] = 2, 2, 2
    img[50:, 50:] = 1, 1, 1
    # One initial label per quadrant.
    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 0
    labels[:50, 50:] = 1
    labels[50:, :50] = 2
    labels[50:, 50:] = 3
    rag = graph.rag_mean_color(img, labels, mode='similarity')
    # Out-of-place cut: similar quadrants merge -> two labels (max index 1).
    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    # Two labels
    assert new_labels.max() == 1
    # The in-place variant must give the same result.
    new_labels = graph.cut_normalized(labels, rag)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    assert new_labels.max() == 1
@pytest.mark.skipif(not is_installed('networkx'),
                    reason="networkx not installed")
def test_rag_error():
    """An unknown similarity mode must raise ``ValueError``."""
    img = np.zeros((10, 10, 3), dtype='uint8')
    labels = np.zeros((10, 10), dtype='uint8')
    labels[5:, :] = 1  # top half stays 0, bottom half becomes 1
    with pytest.raises(ValueError):
        graph.rag_mean_color(img, labels, 2, 'non existant mode')
def _weight_mean_color(graph, src, dst, n):
diff = graph.node[dst]['mean color'] - graph.node[n]['mean color']
diff = np.linalg.norm(diff)
return {'weight': diff}
def _pre_merge_mean_color(graph, src, dst):
graph.node[dst]['total color'] += graph.node[src]['total color']
graph.node[dst]['pixel count'] += graph.node[src]['pixel count']
graph.node[dst]['mean color'] = (graph.node[dst]['total color'] /
graph.node[dst]['pixel count'])
def merge_hierarchical_mean_color(labels, rag, thresh, rag_copy=True,
                                  in_place_merge=False):
    """
    Convenience wrapper: hierarchical RAG merging driven by the mean-color
    callbacks (``_pre_merge_mean_color`` / ``_weight_mean_color``).
    """
    return graph.merge_hierarchical(labels, rag, thresh, rag_copy,
                                    in_place_merge, _pre_merge_mean_color,
                                    _weight_mean_color)
@pytest.mark.skipif(not is_installed('networkx'),
                    reason="networkx not installed")
def test_rag_hierarchical():
    """Hierarchical merge fuses the left quadrants; plain cut merges everything."""
    # 8x8 image: uniform right half (color 31, label 1); left half split into
    # a top quadrant (color 10, label 2) and bottom quadrant (color 20, label 3).
    img = np.zeros((8, 8, 3), dtype='uint8')
    labels = np.zeros((8, 8), dtype='uint8')
    img[:, :, :] = 31
    labels[:, :] = 1
    img[0:4, 0:4, :] = 10, 10, 10
    labels[0:4, 0:4] = 2
    img[4:, 0:4, :] = 20, 20, 20
    labels[4:, 0:4] = 3
    g = graph.rag_mean_color(img, labels)
    g2 = g.copy()
    thresh = 20  # more than 11*sqrt(3) but less than 16*sqrt(3), the distance
    # from the merged left half (mean color 15) to the right half (31).
    result = merge_hierarchical_mean_color(labels, g, thresh)
    # Left half merged into one region; right half left alone.
    assert(np.all(result[:, :4] == result[0, 0]))
    assert(np.all(result[:, 4:] == result[-1, -1]))
    # The in-place merge variant must agree.
    result = merge_hierarchical_mean_color(labels, g2, thresh,
                                           in_place_merge=True)
    assert(np.all(result[:, :4] == result[0, 0]))
    assert(np.all(result[:, 4:] == result[-1, -1]))
    # A non-hierarchical threshold cut at the same value chains the merges
    # (10-20 and 20-31 are both below thresh) and collapses everything.
    result = graph.cut_threshold(labels, g, thresh)
    assert np.all(result == result[0, 0])
@pytest.mark.skipif(not is_installed('networkx'),
                    reason="networkx not installed")
def test_ncut_stable_subgraph():
    """ Test to catch an error thrown when subgraph has all equal edges. """
    # The image is entirely black, so every labelled region has the same
    # mean color and all RAG edge weights are equal.
    img = np.zeros((100, 100, 3), dtype='uint8')
    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 1
    labels[:50, 50:] = 2
    rag = graph.rag_mean_color(img, labels, mode='similarity')
    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    # Everything collapses into a single label (max index 0).
    assert new_labels.max() == 0
def test_generic_rag_2d():
    """RAG adjacency of a 2x2 label image under 4- and 8-connectivity."""
    labels = np.array([[1, 2], [3, 4]], dtype=np.uint8)
    rag4 = graph.RAG(labels)
    # 4-connectivity: only horizontally/vertically touching labels connect.
    assert rag4.has_edge(1, 2)
    assert rag4.has_edge(2, 4)
    assert not rag4.has_edge(1, 4)
    rag8 = graph.RAG(labels, connectivity=2)
    # 8-connectivity: diagonal neighbours connect as well.
    assert rag8.has_edge(1, 2)
    assert rag8.has_edge(1, 4)
    assert rag8.has_edge(2, 3)
def test_generic_rag_3d():
    """RAG adjacency of a 2x2x2 label volume for connectivity 1, 2 and 3."""
    labels = np.arange(8, dtype=np.uint8).reshape((2, 2, 2))
    rag_faces = graph.RAG(labels)
    # connectivity=1: only face neighbours connect.
    assert rag_faces.has_edge(0, 1)
    assert rag_faces.has_edge(1, 3)
    assert not rag_faces.has_edge(0, 3)
    rag_edges = graph.RAG(labels, connectivity=2)
    # connectivity=2: edge neighbours connect too, but not the body diagonal.
    assert rag_edges.has_edge(0, 1)
    assert rag_edges.has_edge(0, 3)
    assert not rag_edges.has_edge(0, 7)
    rag_corners = graph.RAG(labels, connectivity=3)
    # connectivity=3: all 26 neighbours connect.
    assert rag_corners.has_edge(0, 1)
    assert rag_corners.has_edge(1, 2)
    assert rag_corners.has_edge(2, 5)
def test_rag_boundary():
    """Adjacency and boundary statistics of ``rag_boundary`` on a cross edge map."""
    # Cross-shaped edge map: row 8 scores 0.5, column 8 scores 1.0
    # (the crossing pixel keeps the later assignment, 1.0).
    labels = np.zeros((16, 16), dtype='uint8')
    edge_map = np.zeros_like(labels, dtype=float)
    edge_map[8, :] = 0.5
    edge_map[:, 8] = 1.0
    # Four 8x8 quadrants labelled 1..4.
    labels[:8, :8] = 1
    labels[:8, 8:] = 2
    labels[8:, :8] = 3
    labels[8:, 8:] = 4
    g = graph.rag_boundary(labels, edge_map, connectivity=1)
    # Only quadrants sharing a border are adjacent (no diagonal edge 1-4 / 2-3).
    assert set(g.nodes()) == set([1, 2, 3, 4])
    assert set(g.edges()) == set([(1, 2), (1, 3), (2, 4), (3, 4)])
    assert g[1][3]['weight'] == 0.25
    assert g[2][4]['weight'] == 0.34375
    assert g[1][3]['count'] == 16
| 5,527 | 0 | 271 |
d69fac53ce9bde7627a2d348edbe244afc9d3c48 | 5,347 | py | Python | alunos/views.py | Antonio-Neves/Gestao-Escolar | a97052beb571a32619d4e6b6f5e7c3aae3bc8e9b | [
"MIT"
] | 7 | 2021-05-21T00:23:40.000Z | 2021-12-09T12:35:00.000Z | alunos/views.py | Antonio-Neves/Gestao-Escolar | a97052beb571a32619d4e6b6f5e7c3aae3bc8e9b | [
"MIT"
] | null | null | null | alunos/views.py | Antonio-Neves/Gestao-Escolar | a97052beb571a32619d4e6b6f5e7c3aae3bc8e9b | [
"MIT"
] | 7 | 2021-08-03T22:28:36.000Z | 2022-03-13T20:08:40.000Z | from django.shortcuts import redirect, render, reverse
from django.urls import reverse_lazy
from django.contrib import messages
from django.db.models import Case, CharField, Value, When
from django.views.generic.base import TemplateView
from django.views.generic import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from unidecode import unidecode # normalize strings Csii
from alunos.models import Aluno
from alunos.forms import AlunoForm
from turmas.models import Turma
from accounts.models import CustomUser
# Classes to control admin acess and success messages
from base.base_admin_permissions import BaseAdminUsersAdSe
# Constants Vars
from base.constants import CURRENT_YEAR
def create_user_after_registration(
username, password, first_name, last_name, department):
"""
Create user after aluno registration
"""
CustomUser.objects.create_user(
username=username,
password=password,
first_name=first_name,
last_name=last_name,
department=department
)
def data_processing_user_creation(cpf, name_form, department):
"""
Processing data for user creation
"""
cpf_split_1 = cpf.split('.')
cpf_split_2 = ''.join(cpf_split_1).split('-')
cpf_join = ''.join(cpf_split_2)
name_split = name_form.split()
first_name = name_split[0]
last_name = name_split[-1]
password = f'{unidecode(first_name).lower()}{cpf_join[0:6]}'
# Test if user already exists
cpf_qs = CustomUser.objects.filter(username=cpf_join)
if not cpf_qs:
create_user_after_registration(
cpf_join, password, first_name, last_name, department)
# --- General views --- #
# --- Admin views --- #
# --- Lists views --- #
| 27.994764 | 86 | 0.734618 | from django.shortcuts import redirect, render, reverse
from django.urls import reverse_lazy
from django.contrib import messages
from django.db.models import Case, CharField, Value, When
from django.views.generic.base import TemplateView
from django.views.generic import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from unidecode import unidecode # normalize strings Csii
from alunos.models import Aluno
from alunos.forms import AlunoForm
from turmas.models import Turma
from accounts.models import CustomUser
# Classes to control admin acess and success messages
from base.base_admin_permissions import BaseAdminUsersAdSe
# Constants Vars
from base.constants import CURRENT_YEAR
def create_user_after_registration(
		username, password, first_name, last_name, department):
	"""
	Create a ``CustomUser`` account right after a registration form is saved.

	:param username: login name (the CPF digits, as built by the caller).
	:param password: initial password.
	:param first_name: user's first name.
	:param last_name: user's last name.
	:param department: department code ('al' or 're', as passed by the callers).
	"""
	account_fields = {
		'username': username,
		'password': password,
		'first_name': first_name,
		'last_name': last_name,
		'department': department,
	}
	CustomUser.objects.create_user(**account_fields)
def data_processing_user_creation(cpf, name_form, department):
	"""
	Derive account data from a CPF and a full name, then create the user.

	The CPF (as typed in the form, e.g. ``123.456.789-00``) is reduced to its
	digits and used as the username.  The initial password is the accent-free,
	lower-cased first name followed by the first six CPF digits.  Nothing is
	created if a user with that CPF already exists.

	:param cpf: CPF string; dots and dash are stripped.
	:param name_form: full name; first and last words become the user's names.
	:param department: department code stored on the new ``CustomUser``.
	"""
	# '123.456.789-00' -> '12345678900' (clearer than the old split/join chain).
	cpf_digits = cpf.replace('.', '').replace('-', '')
	name_parts = name_form.split()
	first_name = name_parts[0]
	last_name = name_parts[-1]
	# Initial password: unaccented lower-case first name + first 6 CPF digits.
	password = f'{unidecode(first_name).lower()}{cpf_digits[:6]}'
	# Only create the account if this CPF is not registered yet;
	# .exists() avoids fetching the whole queryset for a truth test.
	if not CustomUser.objects.filter(username=cpf_digits).exists():
		create_user_after_registration(
			cpf_digits, password, first_name, last_name, department)
# --- General views --- #
class AlunoIndexView(TemplateView):
	"""Landing page of the student (aluno) section; renders a static template."""
	template_name = 'alunos/index-aluno.html'
# --- Admin views --- #
class AlunoInfoView(BaseAdminUsersAdSe):
	"""Placeholder view: currently inherits all behaviour from BaseAdminUsersAdSe."""
	pass
class AlunoNewView(BaseAdminUsersAdSe, CreateView):
	"""Admin view that registers a new Aluno and creates login accounts."""
	model = Aluno
	template_name = 'alunos/aluno-novo.html'
	form_class = AlunoForm
	success_url = reverse_lazy('aluno-novo')
	# User-facing message (Portuguese): "student registered successfully".
	success_message = 'Aluno Cadastrado com sucesso'
	def post(self, request, *args, **kwargs):
		"""
		Besides saving the Aluno, create a login user for the student and
		for each filiação (parent/guardian) whose CPF was provided.
		"""
		form = self.get_form()
		if form.is_valid():
			# CPFs that may each become a username.
			cpfa = request.POST.get('aluno_cpf')
			cpf1 = request.POST.get('aluno_filiacao1_cpf')
			cpf2 = request.POST.get('aluno_filiacao2_cpf')
			# Student account (department 'al') if the student CPF was given.
			if cpfa:
				name_a_form = request.POST.get('aluno_nome')
				data_processing_user_creation(cpfa, name_a_form, 'al')
			# First filiação account (department 're').
			if cpf1:
				name1_form = request.POST.get('aluno_filiacao1_nome')
				data_processing_user_creation(cpf1, name1_form, 're')
			# Second filiação account (department 're').
			if cpf2:
				name2_form = request.POST.get('aluno_filiacao2_nome')
				data_processing_user_creation(cpf2, name2_form, 're')
			return self.form_valid(form)
		else:
			# Invalid form: re-render the page with the bound form and errors.
			context = {'form': form}
			return render(request, self.template_name, context)
class AlunoUpdateView(BaseAdminUsersAdSe, UpdateView):
	"""Admin view for editing an existing Aluno."""
	model = Aluno
	form_class = AlunoForm
	template_name = 'alunos/aluno-alterar.html'
	# User-facing message (Portuguese): "changes saved successfully".
	success_message = 'As alterações foram efectuadas com sucesso'
	def get_success_url(self):
		"""
		Redirect back to this object's own update form after saving.
		"""
		return reverse('aluno-alterar', kwargs={'pk': self.object.pk})
class AlunoDeleteView(BaseAdminUsersAdSe, DeleteView):
	"""Admin view that removes an Aluno from the database."""
	model = Aluno
	template_name = 'alunos/aluno-delete.html'
	# User-facing message (Portuguese): "student data deleted successfully".
	success_message = 'Os dados do aluno(a) foram corretamente apagados da base de dados'
	def get_success_url(self):
		"""
		Emit the success message here (DeleteView does not do it for us)
		before redirecting to the student list.
		"""
		messages.success(self.request, self.success_message)
		return reverse('alunos')
# --- Lists views --- #
class AlunosListView(BaseAdminUsersAdSe, ListView):
	"""Paginated admin list of students, with current-year class (turma) data."""
	model = Aluno
	paginate_by = 20
	template_name = 'alunos/alunos.html'
	def get_context_data(self, **kwargs):
		"""
		Add the current school year's classes to the context, translating the
		stored two-character school-year code into its display name in SQL.
		"""
		context = super().get_context_data(**kwargs)
		turmas = Turma.objects.filter(
			turma_ano_letivo=CURRENT_YEAR
		).annotate(
			# Map each code (CR, G1..G5, 1A..9A) to a human-readable label.
			ano_escolar_display=Case(
				When(turma_ano_escolar='CR', then=Value('Creche')),
				When(turma_ano_escolar='G1', then=Value('Maternal I')),
				When(turma_ano_escolar='G2', then=Value('Maternal II')),
				When(turma_ano_escolar='G3', then=Value('Maternal III')),
				When(turma_ano_escolar='G4', then=Value('Jardim I')),
				When(turma_ano_escolar='G5', then=Value('Jardim II')),
				When(turma_ano_escolar='1A', then=Value('1º Ano')),
				When(turma_ano_escolar='2A', then=Value('2º Ano')),
				When(turma_ano_escolar='3A', then=Value('3º Ano')),
				When(turma_ano_escolar='4A', then=Value('4º Ano')),
				When(turma_ano_escolar='5A', then=Value('5º Ano')),
				When(turma_ano_escolar='6A', then=Value('6º Ano')),
				When(turma_ano_escolar='7A', then=Value('7º Ano')),
				When(turma_ano_escolar='8A', then=Value('8º Ano')),
				When(turma_ano_escolar='9A', then=Value('9º Ano')),
				output_field=CharField()
			)
		).values_list(
			'ano_escolar_display',
			'turma_nome',
			'turma_etapa_basica',
			'turma_aluno'
		)
		context['turmas'] = turmas
		return context
class AlunosEfetivoListView(BaseAdminUsersAdSe, ListView):
	"""Admin list of students rendered with the 'alunos-efetivo' template."""
	model = Aluno
	template_name = 'alunos/alunos-efetivo.html'
| 1,230 | 2,311 | 158 |
3275a9b589be4d602a175fa7da9c5e68fd17c61c | 3,319 | py | Python | src/predict.py | elangovana/object-tracking | a9359ac3e3926102f9998eb20500746343e14826 | [
"Apache-2.0"
] | 1 | 2019-12-17T01:17:01.000Z | 2019-12-17T01:17:01.000Z | src/predict.py | elangovana/object-tracking | a9359ac3e3926102f9998eb20500746343e14826 | [
"Apache-2.0"
] | 2 | 2021-09-08T01:37:46.000Z | 2022-03-12T00:13:53.000Z | src/predict.py | elangovana/object-tracking | a9359ac3e3926102f9998eb20500746343e14826 | [
"Apache-2.0"
] | null | null | null | # *****************************************************************************
# * Copyright 2019 Amazon.com, Inc. and its affiliates. All Rights Reserved. *
# *
# Licensed under the Amazon Software License (the "License"). *
# You may not use this file except in compliance with the License. *
# A copy of the License is located at *
# *
# http://aws.amazon.com/asl/ *
# *
# or in the "license" file accompanying this file. This file is distributed *
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either *
# express or implied. See the License for the specific language governing *
# permissions and limitations under the License. *
# *****************************************************************************
import tempfile
import torch
from torchvision import transforms
from model_factory_service_locator import ModelFactoryServiceLocator
class Predict:
"""
Runs predictions on a given model
"""
| 38.593023 | 86 | 0.536607 | # *****************************************************************************
# * Copyright 2019 Amazon.com, Inc. and its affiliates. All Rights Reserved. *
# *
# Licensed under the Amazon Software License (the "License"). *
# You may not use this file except in compliance with the License. *
# A copy of the License is located at *
# *
# http://aws.amazon.com/asl/ *
# *
# or in the "license" file accompanying this file. This file is distributed *
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either *
# express or implied. See the License for the specific language governing *
# permissions and limitations under the License. *
# *****************************************************************************
import tempfile
import torch
from torchvision import transforms
from model_factory_service_locator import ModelFactoryServiceLocator
class Predict:
    """
    Runs predictions on a given model

    The model is loaded through the factory registered under
    ``model_factory_name`` and evaluated with gradients disabled.
    """
    def __init__(self, model_factory_name, model_dict_path, num_classes, device=None):
        # Resolve the factory by name and load the serialized model weights.
        self.model_factory_name = model_factory_name
        model_factory = ModelFactoryServiceLocator().get_factory(model_factory_name)
        model = model_factory.load_model(model_dict_path, num_classes)
        self.model = model
        # Default to the first GPU when available, otherwise CPU.
        self.device = device or ('cuda:0' if torch.cuda.is_available() else 'cpu')
    def __call__(self, input_file_or_bytes):
        """Run a single prediction; accepts a file path, raw bytes, or
        already pre-processed input data."""
        # A string is treated as a path to an image file.
        # NOTE(review): _pre_process_image hands this value straight to
        # torchvision's ToTensor, which expects a PIL image or ndarray, not a
        # path or file object — confirm callers pre-load the image.
        if isinstance(input_file_or_bytes, str):
            input_data = self._pre_process_image(input_file_or_bytes)
        # Raw bytes are spooled to a temporary file first.
        elif isinstance(input_file_or_bytes, bytes):
            with tempfile.NamedTemporaryFile("w+b") as f:
                f.write(input_file_or_bytes)
                f.seek(0)
                input_data = self._pre_process_image(f)
        else:
            # Anything else is assumed to be ready-to-use model input.
            input_data = input_file_or_bytes
        self.model.eval()
        with torch.no_grad():
            predicted_batch = self.model(input_data)
        return predicted_batch
    def predict_batch(self, data_loader):
        """Run inference over a data loader of (images, target) batches and
        return the flattened list of per-image predictions."""
        # Model Eval mode
        self.model.eval()
        predictions = []
        # No grad
        with torch.no_grad():
            for i, (images, _) in enumerate(data_loader):
                # Copy each image tensor to the configured device.
                images = list(image.to(self.device) for image in images)
                predicted_batch = self.model(images)
                predictions.extend(predicted_batch)
        return predictions
    def _pre_process_image(self, input_file_or_bytes):
        """Convert an input image to a normalized C x H x W tensor
        (ImageNet mean/std)."""
        transform_pipeline = transforms.Compose([
            # Converts to a float tensor scaled to [0, 1].
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 # torch image: C X H X W
                                 std=[0.229, 0.224, 0.225])])
        img_tensor = transform_pipeline(input_file_or_bytes)
        return img_tensor
| 1,885 | 0 | 108 |
c4fbb9422b36b275ee83b64ac351d700c80cd454 | 185 | py | Python | pythonexamples/generatepassword.py | faizmd12/ADVANCEDREFLECTIVEADOPTION | 830083a000fc76b58999de88edc668df7c16bad7 | [
"Apache-2.0"
] | null | null | null | pythonexamples/generatepassword.py | faizmd12/ADVANCEDREFLECTIVEADOPTION | 830083a000fc76b58999de88edc668df7c16bad7 | [
"Apache-2.0"
] | 3 | 2018-09-19T17:00:05.000Z | 2018-09-20T03:42:37.000Z | pythonexamples/generatepassword.py | faizmd12/ADVANCEDREFLECTIVEADOPTION | 830083a000fc76b58999de88edc668df7c16bad7 | [
"Apache-2.0"
] | 2 | 2018-09-20T03:28:36.000Z | 2018-09-20T03:31:27.000Z | import string
# Generate one random password from letters, punctuation and digits.
# NOTE(review): the stdlib `random` module is not cryptographically secure;
# for real credentials prefer random.SystemRandom (or `secrets` on Python 3).
from random import *
characters = string.ascii_letters + string.punctuation + string.digits
# Password length is itself random: 6 to 14 characters inclusive.
pswd = "".join(choice(characters) for x in range(randint(6, 14)))
# Python 2 print statement -- this script predates Python 3.
print pswd
| 30.833333 | 71 | 0.756757 | import string
# Generate one random password from letters, punctuation and digits.
# NOTE(review): the stdlib `random` module is not cryptographically secure;
# for real credentials prefer random.SystemRandom (or `secrets` on Python 3).
from random import *
characters = string.ascii_letters + string.punctuation + string.digits
# Password length is itself random: 6 to 14 characters inclusive.
pswd = "".join(choice(characters) for x in range(randint(6, 14)))
# Python 2 print statement -- this script predates Python 3.
print pswd
| 0 | 0 | 0 |
fb8ba77729ed278c59294b6a64660bff6686985f | 2,180 | py | Python | openslides/agenda/apps.py | boehlke/OpenSlides | 7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2 | [
"MIT"
] | null | null | null | openslides/agenda/apps.py | boehlke/OpenSlides | 7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2 | [
"MIT"
] | null | null | null | openslides/agenda/apps.py | boehlke/OpenSlides | 7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2 | [
"MIT"
] | null | null | null | from typing import Any, Dict, Set
from django.apps import AppConfig
def required_users(element: Dict[str, Any]) -> Set[int]:
    """
    Collect the id of every user that appears as a speaker on the given
    agenda element.
    """
    return {speaker["user_id"] for speaker in element["speakers"]}
| 32.058824 | 88 | 0.673394 | from typing import Any, Dict, Set
from django.apps import AppConfig
class AgendaAppConfig(AppConfig):
    """
    Django app configuration for the OpenSlides agenda app.

    ready() performs the startup wiring: projector elements, model signals,
    REST viewsets and the required-user hook for agenda items.
    """
    name = "openslides.agenda"
    verbose_name = "OpenSlides Agenda"
    angular_site_module = True
    def ready(self):
        """
        Wire up the agenda app once the Django app registry is populated.

        All imports are local because they touch models/apps that are not
        importable at module-load time.
        """
        # Import all required stuff.
        from django.db.models.signals import pre_delete, post_save
        from ..core.signals import permission_change
        from ..utils.rest_api import router
        from .projector import register_projector_elements
        from .signals import (
            get_permission_change_data,
            listen_to_related_object_post_delete,
            listen_to_related_object_post_save,
        )
        from .views import ItemViewSet
        from . import serializers # noqa
        from ..utils.access_permissions import required_user
        # Define projector elements.
        register_projector_elements()
        # Connect signals.
        post_save.connect(
            listen_to_related_object_post_save,
            dispatch_uid="listen_to_related_object_post_save",
        )
        pre_delete.connect(
            listen_to_related_object_post_delete,
            dispatch_uid="listen_to_related_object_post_delete",
        )
        permission_change.connect(
            get_permission_change_data, dispatch_uid="agenda_get_permission_change_data"
        )
        # Register viewsets.
        router.register(self.get_model("Item").get_collection_string(), ItemViewSet)
        # register required_users
        required_user.add_collection_string(
            self.get_model("Item").get_collection_string(), required_users
        )
    def get_config_variables(self):
        """Return the agenda config variables (imported lazily)."""
        from .config_variables import get_config_variables
        return get_config_variables()
    def get_startup_elements(self):
        """
        Yields all Cachables required on startup i. e. opening the websocket
        connection.
        """
        yield self.get_model("Item")
def required_users(element: Dict[str, Any]) -> Set[int]:
    """
    Collect the id of every user that appears as a speaker on the given
    agenda element.
    """
    return {speaker["user_id"] for speaker in element["speakers"]}
| 1,503 | 362 | 23 |
b60ba245f6e4ca2d7772d3fac4ee1aec9528d42b | 2,367 | py | Python | pysamples/pytictoc/txc13.py | ranarashadmahmood/OMNETPY | 13ab49106a3ac700aa633a8eb37acdad5e3157ab | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 31 | 2020-06-23T13:53:47.000Z | 2022-03-28T08:09:00.000Z | pysamples/pytictoc/txc13.py | ranarashadmahmood/OMNETPY | 13ab49106a3ac700aa633a8eb37acdad5e3157ab | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 8 | 2020-11-01T21:35:47.000Z | 2021-08-29T11:40:50.000Z | pysamples/pytictoc/txc13.py | ranarashadmahmood/OMNETPY | 13ab49106a3ac700aa633a8eb37acdad5e3157ab | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 8 | 2021-03-22T15:32:22.000Z | 2022-02-02T14:57:56.000Z | """
In this step the destination address is no longer node 2 -- we draw a
random destination, and we'll add the destination address to the message.
The best way is to subclass cMessage and add destination as a data member.
To make the model execute longer, after a message arrives to its destination
the destination node will generate another message with a random destination
address, and so forth.
"""
from pyopp import cSimpleModule, cMessage, EV
| 32.875 | 100 | 0.607098 | """
In this step the destination address is no longer node 2 -- we draw a
random destination, and we'll add the destination address to the message.
The best way is to subclass cMessage and add destination as a data member.
To make the model execute longer, after a message arrives to its destination
the destination node will generate another message with a random destination
address, and so forth.
"""
from pyopp import cSimpleModule, cMessage, EV
class TicTocMsg13(cMessage):
    """
    Message subclass for the tictoc step-13 example: carries the sender's
    module index, the destination module index and a hop counter.
    """
    def __init__(self, name):
        super().__init__(name)
        # Filled in by PyTxc13.generateMessage() before the message is sent.
        self.source = None
        self.destination = None
        # Incremented by every module that forwards the message.
        self.hopcount = 0
class PyTxc13(cSimpleModule):
    """
    Node in a random network: forwards TicTocMsg13 messages out random gates
    until one reaches its destination, then spawns a fresh message with a new
    random destination.
    """
    def initialize(self):
        """Kick off the simulation from module 0 with a self-message at t=0."""
        # Module 0 sends the first message
        if self.getIndex() == 0:
            # Boot the process scheduling the initial message as a self-message.
            self.scheduleAt(0.0, self.generateMessage())
    def handleMessage(self, ttmsg):
        """Consume an arrived message (and start a new one) or forward it."""
        assert isinstance(ttmsg, TicTocMsg13)
        if ttmsg.destination == self.getIndex():
            # Message arrived.
            EV << "Message " << ttmsg.getName() << " arrived after " << ttmsg.hopcount << " hops.\n"
            self.bubble("ARRIVED, starting new one!")
            self.delete(ttmsg)
            # Generate another one.
            EV << "Generating another message: "
            newmsg = self.generateMessage()
            EV << newmsg.getName() << '\n';
            self.forwardMessage(newmsg)
        else:
            # We need to forward the message.
            self.forwardMessage(ttmsg)
    def generateMessage(self):
        """Create a TicTocMsg13 addressed to a random module other than us."""
        # Produce source and destination addresses.
        src = self.getIndex() # our module index
        n = self.getVectorSize() # module vector size
        # Draw from n-1 choices, then shift to skip our own index.
        dest = self.intuniform(0, n - 2)
        if dest >= src:
            dest += 1
        # Create message object and set source and destination field.
        msg = TicTocMsg13("tic-{}-to-{}".format(src, dest))
        msg.source = src
        msg.destination = dest
        return msg
    def forwardMessage(self, msg):
        """Bump the hop count and send the message out a random gate."""
        # Increment hop count.
        msg.hopcount += 1
        # Same routing as before: random gate.
        n = self.gateSize("gate")
        k = self.intuniform(0, n-1)
        EV << "Forwarding message " << msg << " on gate[" << k << "]\n"
        self.send(msg, "gate$o", k)
| 1,717 | 15 | 180 |
ebcc63cf681a889e84bf9cc7e88caeb454c5a2b5 | 844 | py | Python | examples/metadata.py | SpotlightKid/jackclient-python | cfc1e6a11f50f98abcd351b6e372e98da8e3a06d | [
"MIT"
] | 120 | 2015-04-03T03:55:04.000Z | 2022-03-06T07:21:38.000Z | examples/metadata.py | SpotlightKid/jackclient-python | cfc1e6a11f50f98abcd351b6e372e98da8e3a06d | [
"MIT"
] | 84 | 2015-01-04T12:42:43.000Z | 2022-03-15T18:13:13.000Z | examples/metadata.py | SpotlightKid/jackclient-python | cfc1e6a11f50f98abcd351b6e372e98da8e3a06d | [
"MIT"
] | 31 | 2015-04-11T13:03:35.000Z | 2022-03-06T07:21:38.000Z | #!/usr/bin/env python3
"""Set/get/remove client/port metadata."""
from pprint import pprint
import jack
client = jack.Client('Metadata-Client')
port = client.inports.register('input')
client.set_property(client, jack.METADATA_PRETTY_NAME, 'Best Client Ever')
print('Client "pretty" name:',
jack.get_property(client, jack.METADATA_PRETTY_NAME))
client.set_property(
port, jack.METADATA_PRETTY_NAME, b'a good port', 'text/plain')
print('Port "pretty" name:',
jack.get_property(port, jack.METADATA_PRETTY_NAME))
print('All client properties:')
pprint(jack.get_properties(client))
print('All port properties:')
pprint(jack.get_properties(port))
print('All properties:')
pprint(jack.get_all_properties())
client.remove_property(port, jack.METADATA_PRETTY_NAME)
client.remove_properties(client)
client.remove_all_properties()
| 27.225806 | 74 | 0.770142 | #!/usr/bin/env python3
"""Set/get/remove client/port metadata."""
from pprint import pprint
import jack
client = jack.Client('Metadata-Client')
port = client.inports.register('input')
client.set_property(client, jack.METADATA_PRETTY_NAME, 'Best Client Ever')
print('Client "pretty" name:',
jack.get_property(client, jack.METADATA_PRETTY_NAME))
client.set_property(
port, jack.METADATA_PRETTY_NAME, b'a good port', 'text/plain')
print('Port "pretty" name:',
jack.get_property(port, jack.METADATA_PRETTY_NAME))
print('All client properties:')
pprint(jack.get_properties(client))
print('All port properties:')
pprint(jack.get_properties(port))
print('All properties:')
pprint(jack.get_all_properties())
client.remove_property(port, jack.METADATA_PRETTY_NAME)
client.remove_properties(client)
client.remove_all_properties()
| 0 | 0 | 0 |
e395b41818739ee3e695c411d1a683c3bf241ba7 | 5,284 | py | Python | trench_automation/main.py | yozoon/TrenchDepositionAutomation | 4eb1dd9fbabe7a782aa2070de144240616c00472 | [
"MIT"
] | null | null | null | trench_automation/main.py | yozoon/TrenchDepositionAutomation | 4eb1dd9fbabe7a782aa2070de144240616c00472 | [
"MIT"
] | null | null | null | trench_automation/main.py | yozoon/TrenchDepositionAutomation | 4eb1dd9fbabe7a782aa2070de144240616c00472 | [
"MIT"
] | null | null | null | import csv
from argparse import ArgumentParser, ArgumentTypeError
from os import path
from string import Template
from subprocess import Popen
from tempfile import NamedTemporaryFile
import numpy as np
import util
# This import only works if the directory where "generate_trench.so" is located is present in
# the PYTHONPATH environment variable
#import generate_trench
# Path to the ViennaTS simulator binary, relative to this project.
VIENNATS_EXE = "../../ViennaTools/ViennaTS/build/viennats-2.3.2"
PROJECT_DIRECTORY = path.dirname(__file__)
# Simulated deposition time and level-set distance resolution (bits).
PROCESS_TIME = 10
DISTANCE_BITS = 8
OUTPUT_DIR = path.join(PROJECT_DIRECTORY, "output")
# Command-line interface for the automation script.
parser = ArgumentParser(
    description="Run physical deposition simulations with different sticking probabilities.")
parser.add_argument(
    "output",
    type=str,
    default="results.csv",
    nargs="?",
    help="output CSV file for saving the results")
def check_list_input(x):
    """
    argparse `type=` callable: parse a (possibly bracketed) comma-separated
    string into a list of floats, all of which must lie in (0, 1].
    Raises ArgumentTypeError on malformed numbers, out-of-range values or an
    empty list.
    """
    stripped = x.replace("[", "").replace("]", "")
    try:
        values = [float(token) for token in stripped.split(",")]
    except ValueError as err:
        raise ArgumentTypeError(err)
    if not all(0 < value <= 1 for value in values):
        raise ArgumentTypeError(
            "The sticking probability has to have a value between 0 and 1.")
    if len(values) == 0:
        raise ArgumentTypeError("No sticking probability values provided")
    return values
# Sticking probabilities to sweep; default is 1, 1/2, ..., 1/16.
parser.add_argument(
    "--sticking-probabilities",
    dest="sticking_probabilities",
    type=check_list_input,
    default=[1/2**i for i in range(5)],
    help="list of sticking probabilities to be used during the simulation"
)
# Number of Monte-Carlo repetitions per parameter set.
parser.add_argument(
    "--repetitions",
    dest="repetitions",
    type=int,
    default=10,
    help="how often the simulation should be repeated for one set of parameters")
# Script entry point.
if __name__ == "__main__":
    main()
| 37.211268 | 112 | 0.614497 | import csv
from argparse import ArgumentParser, ArgumentTypeError
from os import path
from string import Template
from subprocess import Popen
from tempfile import NamedTemporaryFile
import numpy as np
import util
# This import only works if the directory where "generate_trench.so" is located is present in
# the PYTHONPATH environment variable
#import generate_trench
# Path to the ViennaTS simulator binary, relative to this project.
VIENNATS_EXE = "../../ViennaTools/ViennaTS/build/viennats-2.3.2"
PROJECT_DIRECTORY = path.dirname(__file__)
# Simulated deposition time and level-set distance resolution (bits).
PROCESS_TIME = 10
DISTANCE_BITS = 8
OUTPUT_DIR = path.join(PROJECT_DIRECTORY, "output")
# Command-line interface for the automation script.
parser = ArgumentParser(
    description="Run physical deposition simulations with different sticking probabilities.")
parser.add_argument(
    "output",
    type=str,
    default="results.csv",
    nargs="?",
    help="output CSV file for saving the results")
def check_list_input(x):
    """
    argparse `type=` callable: parse a (possibly bracketed) comma-separated
    string into a list of floats, all of which must lie in (0, 1].
    Raises ArgumentTypeError on malformed numbers, out-of-range values or an
    empty list.
    """
    stripped = x.replace("[", "").replace("]", "")
    try:
        values = [float(token) for token in stripped.split(",")]
    except ValueError as err:
        raise ArgumentTypeError(err)
    if not all(0 < value <= 1 for value in values):
        raise ArgumentTypeError(
            "The sticking probability has to have a value between 0 and 1.")
    if len(values) == 0:
        raise ArgumentTypeError("No sticking probability values provided")
    return values
# Sticking probabilities to sweep; default is 1, 1/2, ..., 1/16.
parser.add_argument(
    "--sticking-probabilities",
    dest="sticking_probabilities",
    type=check_list_input,
    default=[1/2**i for i in range(5)],
    help="list of sticking probabilities to be used during the simulation"
)
# Number of Monte-Carlo repetitions per parameter set.
parser.add_argument(
    "--repetitions",
    dest="repetitions",
    type=int,
    default=10,
    help="how often the simulation should be repeated for one set of parameters")
def main():
    """
    Sweep the requested sticking probabilities: for each value, render a
    ViennaTS parameter file from the template, run the simulator the requested
    number of times, and append the trench geometry and deposited-layer
    thickness profiles to <output>.csv / <output>_geom.csv.
    """
    args = parser.parse_args()
    # Read the template file into a string variable
    with open(path.join(PROJECT_DIRECTORY, "parameters.template"), "r") as f:
        template_string = f.read()
    # Enforce csv file ending and generate additional filename for csv file for saving the geometry
    basename = path.splitext(args.output)[0]
    data_fname = basename + ".csv"
    geometry_fname = basename + "_geom.csv"
    # Open the files and create csv writers for them
    with open(data_fname, "w+") as datafile, open(geometry_fname, "w+") as geomfile:
        data_writer = csv.writer(datafile)
        geometry_writer = csv.writer(geomfile)
        # Here we could generate new trench geometries using the generate_trench module...
        # tx/ty hold the trench surface; loaded lazily on the first simulator run.
        tx, ty = None, None
        geometry_id = -1
        for sticking_probability in args.sticking_probabilities:
            print(f"Sticking probability: {sticking_probability}")
            # Use the template to create the content of the parameter file
            s = Template(template_string)
            out = s.substitute(
                GEOMETRY_FILE=path.join(PROJECT_DIRECTORY, "trench.vtk"),
                DISTANCE_BITS=DISTANCE_BITS,
                # path.join(OUTPUT_DIR, f"result_{i}"),
                OUTPUT_PATH=OUTPUT_DIR,
                FD_SCHEME="LAX_FRIEDRICHS_1ST_ORDER",
                PROCESS_TIME=PROCESS_TIME,
                # ",".join([str(i) for i in range(11)]),
                OUTPUT_VOLUME=PROCESS_TIME,
                DEPOSITION_RATE="1.",
                STICKING_PROBABILITY=sticking_probability,
                STATISTICAL_ACCURACY="1000.")
            # Create a temporary file with the content we just generated
            # which can be used as an input for ViennaTS
            with NamedTemporaryFile(mode="w+") as paramfile:
                paramfile.file.write(out)
                paramfile.file.flush()
                for _ in range(args.repetitions):
                    # Call ViennaTS with the just generated temporary process definition file
                    Popen([VIENNATS_EXE, paramfile.name],
                          cwd=PROJECT_DIRECTORY).wait()
                    # Load the points along the trench surface, if they aren't already loaded
                    if tx is None:
                        tx, ty, _ = util.extract_line(
                            path.join(OUTPUT_DIR + f"_{DISTANCE_BITS}bit", "Interface_0_0.vtp"))
                        geometry_id = geometry_id + 1
                        geometry_writer.writerow(
                            [geometry_id, 0] + tx.flatten().tolist())
                        geometry_writer.writerow(
                            [geometry_id, 1] + ty.flatten().tolist())
                    # Load the points along the surface of the deposited layer
                    x, y, _ = util.extract_line(
                        path.join(OUTPUT_DIR + f"_{DISTANCE_BITS}bit", "Interface_1_0.vtp"))
                    # Calculate the layer thickness
                    dist = util.line_to_distance(tx, ty, x, y)
                    # Add the layer thickness to the array, but first append the current geometry_id and
                    # sticking probability to them
                    data_writer.writerow([geometry_id, sticking_probability] +
                                         dist.flatten().tolist())
    print("Done!")
if __name__ == "__main__":
    main()
fd3c6d601192d4d75328ddcda0cc18d339d3860f | 41,703 | py | Python | pirates/makeapirate/NameGUI.py | ksmit799/POTCO-PS | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 8 | 2017-01-24T04:33:29.000Z | 2020-11-01T08:36:24.000Z | pirates/makeapirate/NameGUI.py | ksmit799/Pirates-Online-Remake | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 1 | 2017-03-02T18:05:17.000Z | 2017-03-14T06:47:10.000Z | pirates/makeapirate/NameGUI.py | ksmit799/Pirates-Online-Remake | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 11 | 2017-03-02T18:46:07.000Z | 2020-11-01T08:36:26.000Z | # File: N (Python 2.4)
import random
import types
import string
from direct.fsm import StateData
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.gui import DirectGuiGlobals
from direct.gui.DirectGui import *
from direct.task import Task
from pandac.PandaModules import *
from pandac.PandaModules import TextEncoder
from otp.namepanel import NameCheck
from otp.otpbase import OTPLocalizer as OL
from pirates.piratesbase import PLocalizer as PL
from pirates.pirate import HumanDNA
from pirates.piratesbase import PiratesGlobals
from pirates.piratesgui import GuiButton
from pirates.piratesgui import PiratesGuiGlobals
from pirates.leveleditor import NPCList
from pirates.makeapirate.PCPickANamePattern import PCPickANamePattern
from direct.distributed.MsgTypes import *
from direct.distributed import PyDatagram
MAX_NAME_WIDTH = 9
| 41.578265 | 1,162 | 0.603362 | # File: N (Python 2.4)
import random
import types
import string
from direct.fsm import StateData
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.gui import DirectGuiGlobals
from direct.gui.DirectGui import *
from direct.task import Task
from pandac.PandaModules import *
from pandac.PandaModules import TextEncoder
from otp.namepanel import NameCheck
from otp.otpbase import OTPLocalizer as OL
from pirates.piratesbase import PLocalizer as PL
from pirates.pirate import HumanDNA
from pirates.piratesbase import PiratesGlobals
from pirates.piratesgui import GuiButton
from pirates.piratesgui import PiratesGuiGlobals
from pirates.leveleditor import NPCList
from pirates.makeapirate.PCPickANamePattern import PCPickANamePattern
from direct.distributed.MsgTypes import *
from direct.distributed import PyDatagram
MAX_NAME_WIDTH = 9
class NameGUI(DirectFrame, StateData.StateData):
NICKNAME = 'Nickname'
FIRST = 'First'
PREFIX = 'Prefix'
SUFFIX = 'Suffix'
_NameGUI__MODE_INIT = 0
_NameGUI__MODE_TYPEANAME = 1
_NameGUI__MODE_PICKANAME = 2
POSSIBLE_NAME_COMBOS = {
'first-last': [
0,
1,
1] }
text = TextNode('text')
text.setFont(PiratesGlobals.getInterfaceFont())
    def __init__(self, main = None, independent = False):
        """
        Build the naming GUI.

        :param main: owning MakeAPirate screen; in independent mode this is
                     the avatar being named (see self.avatar assignment below)
        :param independent: when True, build our own frame/book instead of
                            attaching to main's bookModel
        """
        DirectFrame.__init__(self)
        DirectFrame.initialiseoptions(self, NameGUI)
        self.charGui = loader.loadModel('models/gui/char_gui')
        self.triangleGui = loader.loadModel('models/gui/triangle')
        # Use the client repository only when it is real (not the fake one).
        if hasattr(base, 'cr') and not hasattr(base.cr, 'isFake'):
            self.cr = base.cr
        else:
            self.cr = None
        self.main = main
        self.independent = independent
        if self.independent:
            # Stand-alone mode: build our own book frame, clipped on the left.
            np = NodePath(PlaneNode('p', Plane(Vec4(1, 0, 0, 0))))
            self.mainFrame = DirectFrame(parent = base.a2dBottomRight, relief = None)
            self.bookModel = DirectFrame(parent = self.mainFrame, image = self.charGui.find('**/chargui_base'), image_pos = (-0.13, 0, 0), relief = None)
            self.bookModel.setClipPlane(np)
            np.setX(-1.1299999999999999)
            np.reparentTo(self.bookModel)
            self.mainFrame.setScale(0.41999999999999998)
            self.mainFrame.setX(-0.76000000000000001)
            self.mainFrame.setZ(1.2)
            self.parent = self.bookModel
            self.avatar = main
        else:
            # Embedded mode: reuse the MakeAPirate book and its avatar.
            self.parent = main.bookModel
            self.avatar = main.avatar
        self.mode = self._NameGUI__MODE_INIT
        self.wantTypeAName = True
        # Current name parts: [nickname, first, last-prefix, last-suffix].
        self.names = [
            '',
            '',
            '',
            '']
        # Per-gender saved selections so switching gender restores choices.
        self.savedGender = None
        self.savedMaleName = None
        self.savedMaleActiveStates = None
        self.savedFemaleName = None
        self.savedFemaleActiveStates = None
        self.customName = False
        # Index 2 is the first real entry (lists are padded with two blanks).
        self.nicknameIndex = 2
        self.firstIndex = 2
        self.prefixIndex = 2
        self.suffixIndex = 2
        self.listsCreated = 0
        self.nicknameActive = 0
        self.firstActive = 1
        self.lastActive = 1
        self.nameEntry = None
        self.pickANameGui = []
        self.typeANameGui = []
        # State machine driving the pick-a-name / type-a-name flow.
        self.fsm = ClassicFSM.ClassicFSM('NameShop', [
            State.State('Init', self.enterInit, self.exitInit, [
                'Pay']),
            State.State('Pay', self.enterPay, self.exitPay, [
                'PickAName',
                'TypeAName']),
            State.State('PickAName', self.enterPickAName, self.exitPickAName, [
                'TypeAName',
                'Done']),
            State.State('TypeAName', self.enterTypeAName, self.exitTypeAName, [
                'PickAName',
                'Approved',
                'Accepted',
                'Rejected',
                'Done']),
            State.State('Approved', self.enterApproved, self.exitApproved, [
                'PickAName',
                'Done']),
            State.State('Accepted', self.enterAccepted, self.exitAccepted, [
                'Done']),
            State.State('Rejected', self.enterRejected, self.exitRejected, [
                'TypeAName']),
            State.State('Done', self.enterDone, self.exitDone, [
                'Init',
                'Pay'])], 'Init', 'Done')
        self.fsm.enterInitialState()
        self.initNameLists()
        if self.independent or not (self.main.wantNPCViewer):
            self.makeRandomName()
    def initNameLists(self):
        """
        Build the gendered name-part lists from the localizer tables, sort
        each alphabetically, and pad both ends with blank entries (presumably
        scroll margin for the DirectScrolledLists -- confirm).
        """
        buf = [
            ' ',
            ' ']
        # Generic parts are shared; gender-specific parts are appended.
        self.nicknamesMale = PL.PirateNames_NickNamesGeneric + PL.PirateNames_NickNamesMale
        self.nicknamesFemale = PL.PirateNames_NickNamesGeneric + PL.PirateNames_NickNamesFemale
        self.firstNamesMale = PL.PirateNames_FirstNamesGeneric + PL.PirateNames_FirstNamesMale
        self.firstNamesFemale = PL.PirateNames_FirstNamesGeneric + PL.PirateNames_FirstNamesFemale
        self.lastPrefixesMale = PL.PirateNames_LastNamePrefixesGeneric + PL.PirateNames_LastNamePrefixesCapped + PL.PirateNames_LastNamePrefixesMale
        self.lastPrefixesFemale = PL.PirateNames_LastNamePrefixesGeneric + PL.PirateNames_LastNamePrefixesCapped + PL.PirateNames_LastNamePrefixesFemale
        self.lastSuffixesMale = PL.PirateNames_LastNameSuffixesGeneric + PL.PirateNames_LastNameSuffixesMale
        self.lastSuffixesFemale = PL.PirateNames_LastNameSuffixesGeneric + PL.PirateNames_LastNameSuffixesFemale
        self.nicknamesMale.sort()
        self.nicknamesFemale.sort()
        self.firstNamesMale.sort()
        self.firstNamesFemale.sort()
        self.lastPrefixesMale.sort()
        self.lastPrefixesFemale.sort()
        self.lastSuffixesMale.sort()
        self.lastSuffixesFemale.sort()
        # Pad every list with two blank entries at each end.
        self.nicknamesMale = buf + self.nicknamesMale + buf
        self.nicknamesFemale = buf + self.nicknamesFemale + buf
        self.firstNamesMale = buf + self.firstNamesMale + buf
        self.firstNamesFemale = buf + self.firstNamesFemale + buf
        self.lastPrefixesMale = buf + self.lastPrefixesMale + buf
        self.lastPrefixesFemale = buf + self.lastPrefixesFemale + buf
        self.lastSuffixesMale = buf + self.lastSuffixesMale + buf
        self.lastSuffixesFemale = buf + self.lastSuffixesFemale + buf
        self.makeRandomName()
    def enter(self):
        """
        StateData entry point: build the GUI on first use, resync the list
        contents/selections with the avatar's current gender and any saved
        per-gender choices, then enter the 'Pay' state.
        """
        if self.mode == self._NameGUI__MODE_INIT:
            # First entry: build both sub-GUIs and seed from the DNA name.
            self.loadPickAName()
            self.loadTypeAName()
            self.listsCreated = 1
            name = self.getDNA().getDNAName()
            if name:
                if not (self.independent) and self.main.isNPCEditor:
                    self._NameGUI__assignNameToTyped(name)
                    return None
                self.decipherName(name)
                if self.mode == self._NameGUI__MODE_TYPEANAME:
                    return None
            else:
                self.makeRandomName()
        elif self.mode == self._NameGUI__MODE_PICKANAME:
            self.enterPickAName()
        elif self.mode == self._NameGUI__MODE_TYPEANAME:
            self.enterTypeAName()
        if self.savedGender:
            if self.savedGender != self.getDNA().gender:
                # Gender changed since last visit: rebuild the lists.
                self.listsCreated = 0
                self.reset()
        # Point the scrolled lists at the lists matching the current gender.
        if self.getDNA().getGender() == 'f':
            self.nicknameList['items'] = self.nicknamesFemale[:]
            self.firstList['items'] = self.firstNamesFemale[:]
            self.prefixList['items'] = self.lastPrefixesFemale[:]
            self.suffixList['items'] = self.lastSuffixesFemale[:]
        else:
            self.nicknameList['items'] = self.nicknamesMale[:]
            self.firstList['items'] = self.firstNamesMale[:]
            self.prefixList['items'] = self.lastPrefixesMale[:]
            self.suffixList['items'] = self.lastSuffixesMale[:]
        self.listsCreated = 1
        # Restore the selections saved for this gender, or randomize.
        if self.getDNA().gender == 'm' and self.savedMaleName:
            (self.nicknameIndex, self.firstIndex, self.prefixIndex, self.suffixIndex) = self.savedMaleName
            (self.nicknameActive, self.firstActive, self.lastActive) = self.savedMaleActiveStates
        elif self.getDNA().gender == 'f' and self.savedFemaleName:
            (self.nicknameIndex, self.firstIndex, self.prefixIndex, self.suffixIndex) = self.savedFemaleName
            (self.nicknameActive, self.firstActive, self.lastActive) = self.savedFemaleActiveStates
        else:
            self.makeRandomName()
        self._updateLists()
        self._updateCheckBoxes()
        self.fsm.request('Pay')
def exit(self):
self.hide()
if self.cr:
self.ignore(self.cr.getWishNameResultMsg())
if hasattr(self, 'self._nameCheckCallback'):
del self._nameCheckCallback
if self.independent:
pass
1
self.main.enableRandom()
self.fsm.request('Done')
    def assignAvatar(self, avatar):
        """Late-bind the avatar whose DNA this GUI reads and names."""
        self.avatar = avatar
    def _checkNpcNames(self, name):
        """
        NameCheck plugin: return the generic rejection code OL.NCGeneric when
        the typed name matches (case-insensitively, whitespace-stripped) the
        name of any NPC in NPCList, otherwise fall through (implicit None).
        NPC-name collisions are only rejected outside the NPC editor.
        """
        def match(npcName, name = name):
            # Normalize the candidate name and compare case-insensitively.
            name = TextEncoder().encodeWtext(name)
            name = string.strip(name)
            return TextEncoder.upper(npcName) == TextEncoder.upper(name)
        for npcId in NPCList.NPC_LIST.keys():
            data = NPCList.NPC_LIST[npcId]
            if type(data) is types.DictType and HumanDNA.HumanDNA.setName in data:
                npcName = data[HumanDNA.HumanDNA.setName]
                if (self.independent or not (self.main.isNPCEditor)) and match(npcName):
                    self.notify.info('name matches NPC name "%s"' % npcName)
                    return OL.NCGeneric
                # NOTE(review): this trailing call has no effect -- it looks
                # like a decompilation artifact of the original short-circuit
                # condition; confirm against the original bytecode.
                match(npcName)
    def getTypeANameProblem(self, callback):
        """
        Validate the typed custom name asynchronously.

        Invokes callback(problem) with None when the name is acceptable, or a
        problem code otherwise. Local checks (NameCheck + NPC-name collisions)
        run synchronously; if they pass and a client repository exists, the
        name is sent to the server for an anonymous pre-check and the callback
        is deferred until _handleSetWishnameResult fires.
        """
        if not self.customName:
            # Pattern names need no approval.
            callback(None)
        else:
            problem = None
            # Normalize the entry text (decode, strip, re-encode) in place.
            name = self.nameEntry.get()
            name = TextEncoder().decodeText(name)
            name = name.strip()
            name = TextEncoder().encodeWtext(name)
            self.nameEntry.enterText(name)
            problem = NameCheck.checkName(self.nameEntry.get(), [
                self._checkNpcNames], font = self.nameEntry.getFont())
            if problem:
                callback(problem)
            elif self.cr:
                # Defer: stash the callback and ask the server to vet the name.
                self.ignore(self.cr.getWishNameResultMsg())
                self.acceptOnce(self.cr.getWishNameResultMsg(), self._handleSetWishnameResult)
                self._nameCheckCallback = callback
                self._sendSetWishname(justCheck = True)
        return None
    def _checkTypeANameAsPickAName(self):
        """
        If the typed custom name exactly matches a pick-a-name pattern,
        switch the GUI back to pick-a-name mode with the matching list
        selections (no approval needed for pattern names).
        """
        if self.customName:
            pnp = PCPickANamePattern(self.nameEntry.get(), self.getDNA().gender)
            if pnp.hasNamePattern():
                self.fsm.request('PickAName')
                pattern = pnp.getNamePattern()
                # A -1 pattern slot means that name part is unused.
                actives = [
                    0,
                    choice(pattern[1] != -1, 1, 0),
                    choice(pattern[2] != -1, 1, 0)]
                indices = pattern
                self._updateGuiToPickAName(actives, indices)
def _sendSetWishname(self, justCheck = False):
name = self.nameEntry.get()
if justCheck:
self.cr.sendWishNameAnonymous(name)
else:
self.cr.sendWishName(self.main.id, name)
def _handleSetWishnameResult(self, result, avId, name):
callback = self._nameCheckCallback
del self._nameCheckCallback
problem = OL.NCGeneric
if result in (self.cr.WishNameResult.PendingApproval, self.cr.WishNameResult.Approved):
problem = None
callback(problem)
    def save(self):
        """
        Commit the chosen name: in independent mode send it to the server
        (wish-name request for custom names, pattern-name request otherwise);
        in embedded mode just write it into the avatar's DNA.
        """
        if self.independent:
            if self.customName:
                self._sendSetWishname()
            else:
                # Pattern name: send the four list indices to the server.
                name = self.getNumericName()
                self.cr.avatarManager.sendRequestPatternName(self.main.id, name[0], name[1], name[2], name[3])
        else:
            self.avatar.dna.setName(self._getName())
    def loadPickAName(self):
        """
        Build the pick-a-name page: the title, the composed-name display, the
        four scrolled name-part lists (picked by gender) with their checkboxes
        and selection highlights, and the random/type-a-name buttons. All the
        widgets are collected in self.pickANameGui so they can be shown/hidden
        together. The page starts hidden.
        """
        self.nameFrameTitle = DirectFrame(parent = self.parent, relief = None, frameColor = (0.5, 0.5, 0.5, 0.29999999999999999), text = PL.NameFrameTitle, text_fg = (1, 1, 1, 1), text_scale = 0.17999999999999999, text_pos = (0, 0), pos = (0, 0, 0.29999999999999999), scale = 0.69999999999999996)
        # Label that shows the currently composed pirate name.
        self.pirateName = DirectLabel(parent = self.parent, relief = None, image = self.charGui.find('**/chargui_frame02'), image_scale = (15, 10, 10), text = PL.NameGUI_EmptyNameText, text_align = TextNode.ACenter, text_fg = (1, 1, 0.5, 1), text_pos = (0, 0.25), text_wordwrap = MAX_NAME_WIDTH, scale = 0.14999999999999999, pos = (0, 0, -1.1000000000000001))
        # Choose the name-part lists matching the avatar's gender.
        if self.getDNA().getGender() == 'f':
            lists = (self.nicknamesFemale, self.firstNamesFemale, self.lastPrefixesFemale, self.lastSuffixesFemale)
        else:
            lists = (self.nicknamesMale, self.firstNamesMale, self.lastPrefixesMale, self.lastSuffixesMale)
        # One scrolled list per name part; the nickname column starts stashed.
        self.nicknameList = self._makeScrolledList(items = lists[0], pos = (-0.81000000000000005, 0, -0.20000000000000001), makeExtraArgs = [
            self.NICKNAME], extraArgs = [
            0])
        self.nicknameList.stash()
        self.firstList = self._makeScrolledList(items = lists[1], pos = (-0.65000000000000002, 0, -0.20000000000000001), makeExtraArgs = [
            self.FIRST], extraArgs = [
            1])
        self.prefixList = self._makeScrolledList(items = lists[2], pos = (-0.10000000000000001, 0, -0.20000000000000001), makeExtraArgs = [
            self.PREFIX], extraArgs = [
            2])
        self.suffixList = self._makeScrolledList(items = lists[3], pos = (0.45000000000000001, 0, -0.20000000000000001), makeExtraArgs = [
            self.SUFFIX], extraArgs = [
            3])
        # Checkboxes enabling/disabling each name part.
        self.nicknameCheck = self._makeCheckbox(text = PL.NameGUI_CheckboxText[0], command = self.nicknameToggle, pos = (-0.81000000000000005, 0, 0.10000000000000001))
        self.nicknameCheck.stash()
        self.firstCheck = self._makeCheckbox(text = PL.NameGUI_CheckboxText[0], command = self.firstToggle, pos = (-0.65000000000000002, 0, 0.10000000000000001))
        self.lastCheck = self._makeCheckbox(text = PL.NameGUI_CheckboxText[0], command = self.lastToggle, pos = (-0.10000000000000001, 0, 0.10000000000000001))
        # Translucent selection highlights, one per column.
        self.nicknameHigh = self._makeHighlight((-0.81000000000000005, 0, -0.20000000000000001))
        self.nicknameHigh.hide()
        self.firstHigh = self._makeHighlight((-0.65000000000000002, 0, -0.20000000000000001))
        self.prefixHigh = self._makeHighlight((-0.10000000000000001, 0, -0.20000000000000001))
        self.suffixHigh = self._makeHighlight((0.45000000000000001, 0, -0.20000000000000001))
        self.randomNameButton = self._makeButton(text = PL.NameGUI_RandomButtonText, command = self.makeRandomName, pos = (-0.5, 0, -1.3999999999999999))
        self.randomNameButton.hide()
        # Button that switches to the type-a-name page via the FSM.
        func = lambda param = self: param.fsm.request('TypeAName')
        self.typeANameButton = self._makeButton(text = PL.NameGUI_TypeANameButtonText, command = func, pos = (0, 0, -1.7))
        self.typeANameButton.hide()
        # Register everything for collective show/hide.
        self.pickANameGui.append(self.nicknameHigh)
        self.pickANameGui.append(self.firstHigh)
        self.pickANameGui.append(self.prefixHigh)
        self.pickANameGui.append(self.suffixHigh)
        self.pickANameGui.append(self.nicknameList)
        self.pickANameGui.append(self.firstList)
        self.pickANameGui.append(self.prefixList)
        self.pickANameGui.append(self.suffixList)
        self.pickANameGui.append(self.pirateName)
        self.pickANameGui.append(self.typeANameButton)
        self.pickANameGui.append(self.nicknameCheck)
        self.pickANameGui.append(self.firstCheck)
        self.pickANameGui.append(self.lastCheck)
        self.hide()
    def loadTypeAName(self):
        """
        Build the type-a-name page: the text entry, the naming-guidelines
        label (plus a browser link to the naming policy when online), the
        pick-a-name button and the mode-dependent submit/cancel/random
        buttons. Widgets are collected in self.typeANameGui. Starts hidden.
        """
        self.nameEntry = DirectEntry(parent = self.parent, relief = DGG.FLAT, scale = 0.16, width = MAX_NAME_WIDTH, numLines = 2, focus = 0, cursorKeys = 1, autoCapitalize = 1, frameColor = (0.0, 0.0, 0.0, 0.0), text = PL.NameGUI_EmptyNameText, text_fg = (1.0, 1.0, 0.5, 1.0), pos = (-0.65000000000000002, 0.0, -0.050000000000000003), suppressKeys = 1, suppressMouse = 1, image = self.charGui.find('**/chargui_frame02'), image_scale = (15, 0.0, 8.5), image_pos = (4.3899999999999997, 0.0, -0.20000000000000001))
        self.nameEntryGuidelines = DirectLabel(parent = self.parent, relief = None, text = PL.NameGUI_Guidelines, text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG3, text_pos = (0, 0.25), text_wordwrap = 18, scale = 0.10000000000000001, pos = (-0.69999999999999996, 0, -0.5))
        if self.cr:
            # Online: link to the in-game naming policy page.
            self.nameEntryGuidelinesURL = DirectButton(parent = self.parent, relief = None, pos = (0, 0, -0.55000000000000004), command = base.popupBrowser, extraArgs = [
                launcher.getValue('GAME_INGAME_NAMING')], text = PL.NameGUI_URLText, text0_fg = PiratesGuiGlobals.TextFG2, text1_fg = PiratesGuiGlobals.TextFG2, text2_fg = PiratesGuiGlobals.TextFG1, text_font = PiratesGlobals.getInterfaceFont(), text_shadow = PiratesGuiGlobals.TextShadow, text_scale = 0.089999999999999997, text_pos = (0, -0.63500000000000001))
        # Button that switches back to the pick-a-name page via the FSM.
        func = lambda param = self: param.fsm.request('PickAName')
        self.pickANameButton = self._makeButton(text = PL.NameGUI_PickANameButtonText, command = func, pos = (0, 0, -1.7))
        if not self.independent:
            # Embedded MakeAPirate: a submit button unless editing NPCs.
            if not self.main.isNPCEditor:
                self.submitButton = self._makeButton(text = PL.NameGUI_SubmitButtonText, command = self._typedAName, pos = (0, 0, 1.7))
                self.submitButton.hide()
        else:
            # Stand-alone mode: cancel / random / submit GuiButtons on the book.
            self.cancelButton = GuiButton.GuiButton(parent = self.bookModel, text = PL.MakeAPirateCancel, text_fg = (1, 1, 1, 1), text_scale = 0.080000000000000002, text_pos = (0, -0.25 * 0.10000000000000001, 0), scale = 1.8, image_scale = 0.40000000000000002, command = self.cancel, pos = (-0.68000000000000005, 0, -2.4300000000000002))
            self.randomButton = GuiButton.GuiButton(parent = self.bookModel, text = PL.RandomButton, text_fg = (1, 1, 1, 1), text_scale = 0.080000000000000002, text_pos = (0, -0.25 * 0.10000000000000001, 0), scale = 1.8, image_scale = 0.40000000000000002, command = self.makeRandomName, pos = (0.050000000000000003, 0, -2.4300000000000002))
            self.randomButton.hide()
            self.submitButton = GuiButton.GuiButton(parent = self.bookModel, text = PL.NameGUI_SubmitButtonText, text_fg = (1, 1, 1, 1), text_scale = 0.080000000000000002, text_pos = (0, -0.25 * 0.10000000000000001, 0), scale = 1.8, image_scale = 0.40000000000000002, command = self.complete, pos = (0.78000000000000003, 0, -2.4300000000000002))
        # Register widgets for collective show/hide.
        self.typeANameGui.append(self.pickANameButton)
        self.typeANameGui.append(self.nameEntry)
        self.typeANameGui.append(self.nameEntryGuidelines)
        if self.cr:
            self.typeANameGui.append(self.nameEntryGuidelinesURL)
        self.hide()
    def _makeScrolledList(self, items, pos, makeExtraArgs, extraArgs):
        """Build a 5-row DirectScrolledList of name choices at *pos*.

        items -- strings to show; copied so the widget never aliases the
            caller's list.
        makeExtraArgs -- forwarded to _makeItemLabel for each row.
        extraArgs -- forwarded to the _listsChanged scroll callback (the
            integer list id 0-3 elsewhere in this class).
        """
        # Copy so later widget-side mutation of 'items' cannot affect the caller.
        lst = items[:]
        dsl = DirectScrolledList(parent = self.parent, relief = None, items = lst, itemMakeFunction = self._makeItemLabel, itemMakeExtraArgs = makeExtraArgs, extraArgs = extraArgs, command = self._listsChanged, pos = pos, scale = 0.080000000000000002, incButton_pos = (1.5, 0, -6), incButton_relief = None, incButton_image = (self.triangleGui.find('**/triangle'), self.triangleGui.find('**/triangle_down'), self.triangleGui.find('**/triangle_over')), incButton_image_scale = 1.8, incButton_image_hpr = (0, 0, 90), incButton_image_pos = (0, 0, -0.5), decButton_pos = (1.5, 0, 2), decButton_relief = None, decButton_image = (self.triangleGui.find('**/triangle'), self.triangleGui.find('**/triangle_down'), self.triangleGui.find('**/triangle_over')), decButton_image_scale = 1.8, decButton_image_hpr = (0, 0, 270), decButton_image_pos = (0, 0, 0.5), itemFrame_relief = None, itemFrame_pos = (-0.75, 0, 0), itemFrame_scale = 1.0, itemFrame_image = self.charGui.find('**/chargui_frame04'), itemFrame_image_scale = (14, 10, 10), itemFrame_image_pos = (2.3999999999999999, 0, -2), itemFrame_text_fg = (1, 1, 1, 1), forceHeight = 1.1000000000000001, numItemsVisible = 5)
        return dsl
    def _makeHighlight(self, pos):
        """Return a translucent white frame used to highlight the selected row."""
        return DirectFrame(parent = self.parent, relief = DGG.FLAT, frameColor = (1, 1, 1, 0.40000000000000002), frameSize = (-1.1000000000000001, 4, -2.2000000000000002, -1.1000000000000001), borderWidth = (1, 0.5), pos = pos, scale = 0.089999999999999997)
def _makeItemLabel(self, text, index, args = []):
f = DirectFrame(state = 'normal', relief = None, text = text, text_scale = 1.0, text_pos = (-0.29999999999999999, 0.14000000000000001, 0), text_align = TextNode.ALeft, text_fg = (1, 1, 1, 1), textMayChange = 0)
if len(args) > 0:
listType = args[0]
f.bind(DGG.B1PRESS, lambda x, f = f: self._nameClickedOn(listType, index))
return f
def _makeButton(self, text, command, pos):
b = DirectButton(parent = self.parent, relief = None, image = (self.charGui.find('**/chargui_frame02'), self.charGui.find('**/chargui_frame02_down'), self.charGui.find('**/chargui_frame02_over')), text = text, text_fg = (1, 1, 1, 1), text_align = TextNode.ACenter, text_scale = 0.10000000000000001, command = command, pos = pos)
return b
def _makeCheckbox(self, text, command, pos):
c = DirectCheckButton(parent = self.parent, relief = None, scale = 0.10000000000000001, boxBorder = 0.080000000000000002, boxRelief = None, pos = pos, text = text, text_fg = (1, 1, 1, 1), text_scale = 0.80000000000000004, text_pos = (0.40000000000000002, 0), indicator_pos = (0, 0, 0), indicator_text_fg = (1, 1, 1, 1), command = command, text_align = TextNode.ALeft)
return c
def _nameClickedOn(self, listType, index):
if listType == self.NICKNAME:
self.nicknameIndex = index
elif listType == self.FIRST:
self.firstIndex = index
elif listType == self.PREFIX:
self.prefixIndex = index
else:
self.suffixIndex = index
self._updateLists()
def _listsChanged(self, extraArgs):
if self.listsCreated:
if extraArgs == 0:
if self.nicknameActive:
self.enableList(self.nicknameList)
self.names[0] = self.nicknameList['items'][self.nicknameList.index + 2]['text']
self.nicknameHigh.show()
else:
self.disableList(self.nicknameList)
self.names[0] = ''
self.nicknameHigh.hide()
self.nicknameIndex = self.nicknameList.index + 2
elif extraArgs == 1:
if self.firstActive:
self.enableList(self.firstList)
self.names[1] = self.firstList['items'][self.firstList.index + 2]['text']
self.firstHigh.show()
else:
self.disableList(self.firstList)
self.names[1] = ''
self.firstHigh.hide()
self.firstIndex = self.firstList.index + 2
elif extraArgs == 2:
if self.lastActive:
self.enableList(self.prefixList)
self.names[2] = self.prefixList['items'][self.prefixList.index + 2]['text']
self.prefixHigh.show()
else:
self.disableList(self.prefixList)
self.names[2] = ''
self.prefixHigh.hide()
self.prefixIndex = self.prefixList.index + 2
elif extraArgs == 3:
if self.lastActive:
self.enableList(self.suffixList)
self.names[3] = self.suffixList['items'][self.suffixList.index + 2]['text']
self.suffixHigh.show()
else:
self.disableList(self.suffixList)
self.names[3] = ''
self.suffixHigh.hide()
self.suffixIndex = self.suffixList.index + 2
if len(self.names[0] + self.names[1] + self.names[2] + self.names[3]) > 0:
self.updateName()
def _updateLists(self):
oldIndices = [
self.nicknameIndex,
self.firstIndex,
self.prefixIndex,
self.suffixIndex]
self.firstList.scrollTo(self.firstIndex - 2)
self._restoreIndices(oldIndices)
self.prefixList.scrollTo(self.prefixIndex - 2)
self._restoreIndices(oldIndices)
self.suffixList.scrollTo(self.suffixIndex - 2)
self._restoreIndices(oldIndices)
def _getName(self):
newName = ''
if self.mode == self._NameGUI__MODE_TYPEANAME:
newName = self.nameEntry.get()
newName = TextEncoder().decodeText(newName)
newName = newName.strip()
newName = TextEncoder().encodeWtext(newName)
else:
newName += self.names[0]
if len(newName) > 0 and len(self.names[1]) > 0:
newName += ' '
newName += self.names[1]
if len(newName) > 0 and len(self.names[2]) > 0:
newName += ' '
newName += self.names[2]
if self.names[2] in PL.PirateNames_LastNamePrefixesCapped:
newName += self.names[3].capitalize()
else:
newName += self.names[3]
return newName
    def updateName(self):
        """Refresh the on-screen name label from the current GUI state."""
        self.pirateName['text'] = self._getName()
def _restoreIndices(self, indices):
self.nicknameIndex = indices[0]
self.firstIndex = indices[1]
self.prefixIndex = indices[2]
self.suffixIndex = indices[3]
def enableList(self, listToEnable):
listToEnable.show()
listToEnable.decButton['state'] = 'normal'
listToEnable.incButton['state'] = 'normal'
def disableList(self, listToDisable):
listToDisable.decButton['state'] = 'disabled'
listToDisable.incButton['state'] = 'disabled'
for item in listToDisable['items']:
if item.__class__.__name__ != 'str':
item.hide()
continue
    def unload(self):
        """Tear down the naming widgets and drop back-references.

        NOTE(review): because of the if/elif, the type-a-name widgets
        (nameEntry, guidelines, URL button) are destroyed only on the
        non-independent path; confirm the independent mainFrame.destroy()
        also reparents/destroys them, otherwise they leak.
        """
        self.nicknameCheck.destroy()
        self.nicknameList.destroy()
        if self.independent:
            self.mainFrame.destroy()
        elif self.nameEntry:
            self.nameEntry.destroy()
            self.nameEntryGuidelines.destroy()
            if self.cr:
                self.nameEntryGuidelinesURL.destroy()
        # Break reference cycles so the GUI can be garbage-collected.
        del self.main
        del self.parent
        del self.avatar
        del self.fsm
def reset(self):
for item in self.nicknameList['items'] + self.firstList['items'] + self.prefixList['items'] + self.suffixList['items']:
if item.__class__.__name__ != 'str':
item.destroy()
continue
self.nicknameIndex = 2
self.firstIndex = 2
self.prefixIndex = 2
self.suffixIndex = 2
self.nicknameList.index = 0
self.firstList.index = 0
self.prefixList.index = 0
self.suffixList.index = 0
def showPickAName(self):
self.nameFrameTitle.show()
for elt in self.pickANameGui:
if elt != self.nicknameHigh and elt != self.firstHigh and elt != self.prefixHigh and elt != self.suffixHigh:
elt.show()
continue
    def hasCustomName(self):
        """Return True when the player typed a name instead of picking one."""
        return self.customName
    def showTypeAName(self):
        """Reveal the type-a-name widgets and flag the name as custom."""
        self.customName = True
        self.nameFrameTitle.show()
        for elt in self.typeANameGui:
            elt.show()
def hide(self):
self.nameFrameTitle.hide()
for elt in self.pickANameGui:
elt.hide()
for elt in self.typeANameGui:
elt.hide()
    def makeRandomName(self):
        """Pick random pick-a-name selections and push them into the GUI.

        No-op when the player already typed a custom name in the embedded
        (non-independent) layout.  The -4/+2 arithmetic mirrors the scroll
        offset convention used by the lists elsewhere in this class.
        NOTE(review): nicknameIndex is set to '' (a string) here while it
        is an int everywhere else, and the indices list passed below uses 0
        for the nickname slot -- presumably the string value is never read;
        confirm.  Also, random.choice on dict.keys() only works on
        Python 2, where keys() returns a list.
        """
        if self.customName and not (self.independent):
            return None
        if self.getDNA().getGender() == 'f':
            self.nicknameIndex = ''
            self.firstIndex = random.choice(range(len(self.firstNamesFemale) - 4)) + 2
            self.prefixIndex = random.choice(range(len(self.lastPrefixesFemale) - 4)) + 2
            self.suffixIndex = random.choice(range(len(self.lastSuffixesFemale) - 4)) + 2
        else:
            self.nicknameIndex = ''
            self.firstIndex = random.choice(range(len(self.firstNamesMale) - 4)) + 2
            self.prefixIndex = random.choice(range(len(self.lastPrefixesMale) - 4)) + 2
            self.suffixIndex = random.choice(range(len(self.lastSuffixesMale) - 4)) + 2
        nameCombo = random.choice(self.POSSIBLE_NAME_COMBOS.keys())
        (self.nicknameActive, self.firstActive, self.lastActive) = self.POSSIBLE_NAME_COMBOS[nameCombo]
        self._updateGuiToPickAName([
            self.nicknameActive,
            self.firstActive,
            self.lastActive], [
            0,
            self.firstIndex,
            self.prefixIndex,
            self.suffixIndex])
    def _updateGuiToPickAName(self, actives, indices):
        """Load active flags and selection indices, then refresh the GUI.

        actives -- (nicknameActive, firstActive, lastActive) booleans.
        indices -- (nickname, first, prefix, suffix) indices; the nickname
            entry is unpacked but unused.

        When the scrolled lists exist, the widgets are resynced; otherwise
        (lists not built yet) the name strings are filled directly from the
        gender-appropriate name tables.
        """
        (self.nicknameActive, self.firstActive, self.lastActive) = actives
        (nickname, self.firstIndex, self.prefixIndex, self.suffixIndex) = indices
        if self.listsCreated:
            self._updateLists()
            self._updateCheckBoxes()
        elif self.getDNA().getGender() == 'f':
            self.names[0] = ''
            self.names[1] = self.firstNamesFemale[self.firstIndex]
            self.names[2] = self.lastPrefixesFemale[self.prefixIndex]
            self.names[3] = self.lastSuffixesFemale[self.suffixIndex]
        else:
            self.names[0] = ''
            self.names[1] = self.firstNamesMale[self.firstIndex]
            self.names[2] = self.lastPrefixesMale[self.prefixIndex]
            self.names[3] = self.lastSuffixesMale[self.suffixIndex]
        self.notify.debug('random name blindly generated:%s' % self._getName())
    def decipherName(self, name):
        """Classify *name* as pick-a-name selections or a typed custom name.

        Splits on whitespace and tries to match the 1-3 parts against the
        nickname / first-name / last-name tables; any combination that
        cannot be reproduced by the pick-a-name lists is routed to the
        type-a-name path via __assignNameToTyped (which also switches FSM
        state).  On a successful match the GUI is switched to pick-a-name
        mode and resynced.

        NOTE(review): conditions like ``not nameInFirst or nameInLast``
        parse as ``(not nameInFirst) or nameInLast`` -- this looks like a
        decompiler precedence artifact of ``not (nameInFirst or
        nameInLast)`` (likewise ``not nameInNick and nameInFirst and
        nameInLast`` below); confirm against the intended behavior before
        changing.
        """
        nameParts = name.split()
        if len(nameParts) == 1:
            self.nicknameEnabled = 0
            nameInFirst = self._NameGUI__checkForNameInFirstList(nameParts[0])
            nameInLast = self._NameGUI__checkForNameInLastList(nameParts[0])
            if not nameInFirst or nameInLast:
                self._NameGUI__assignNameToTyped(name)
                return None
        elif len(nameParts) == 2:
            if self._NameGUI__checkForNameInNicknameList(nameParts[0]):
                nameInFirst = self._NameGUI__checkForNameInFirstList(nameParts[1])
                nameInLast = self._NameGUI__checkForNameInLastList(nameParts[1])
                if not nameInFirst or nameInLast:
                    self._NameGUI__assignNameToTyped(name)
                    return None
            else:
                nameInFirst = self._NameGUI__checkForNameInFirstList(nameParts[0])
                nameInLast = self._NameGUI__checkForNameInLastList(nameParts[1])
                if not nameInFirst and nameInLast:
                    self._NameGUI__assignNameToTyped(name)
                    return None
        elif len(nameParts) == 3:
            nameInNick = self._NameGUI__checkForNameInNicknameList(nameParts[0])
            nameInFirst = self._NameGUI__checkForNameInFirstList(nameParts[1])
            nameInLast = self._NameGUI__checkForNameInLastList(nameParts[2])
            if not nameInNick and nameInFirst and nameInLast:
                self._NameGUI__assignNameToTyped(name)
                return None
        else:
            # Zero or 4+ parts can never come from the pick-a-name lists.
            self._NameGUI__assignNameToTyped(name)
            return None
        self.mode = self._NameGUI__MODE_PICKANAME
        self._updateLists()
        self._updateCheckBoxes()
def _NameGUI__checkForNameInNicknameList(self, name):
if self.getDNA().getGender() == 'f':
nicknameTextList = self.nicknamesFemale
else:
nicknameTextList = self.nicknamesMale
if nicknameTextList.__contains__(name):
self.nicknameEnabled = 1
self.nicknameIndex = nicknameTextList.index(name)
return True
else:
self.nicknameEnabled = 0
return False
def _NameGUI__checkForNameInFirstList(self, name):
if self.getDNA().getGender() == 'f':
firstTextList = self.firstNamesFemale
else:
firstTextList = self.firstNamesMale
if firstTextList.__contains__(name):
self.firstEnabled = 1
self.firstIndex = firstTextList.index(name)
return True
else:
self.firstEnabled = 0
return False
def _NameGUI__checkForNameInLastList(self, name):
if self.getDNA().getGender() == 'f':
prefixTextList = self.lastPrefixesFemale
suffixTextList = self.lastSuffixesFemale
else:
prefixTextList = self.lastPrefixesMale
suffixTextList = self.lastSuffixesMale
for prefix in prefixTextList:
if prefix.strip() != '' and name.startswith(prefix) and suffixTextList.__contains__(name[len(prefix):]):
self.lastEnabled = 1
self.prefixIndex = prefixTextList.index(prefix)
self.suffixIndex = suffixTextList.index(name[len(prefix):])
return True
continue
self.lastEnabled = 0
return False
    def _NameGUI__assignNameToTyped(self, name):
        """Load *name* into the text entry and switch to type-a-name mode."""
        self.nameEntry.enterText(name)
        self.mode = self._NameGUI__MODE_TYPEANAME
        self.fsm.request('Pay')
    def nicknameToggle(self, value):
        """Checkbox callback for the nickname list (value arg unused;
        state is re-read from the widget)."""
        self.nicknameActive = self.nicknameCheck['indicatorValue']
        self._listsChanged(0)
        if self.nicknameActive:
            self.nicknameList.refresh()
        self._updateCheckBoxes()
    def firstToggle(self, value):
        """Checkbox callback for the first-name list.

        NOTE(review): ``not self.firstActive or self.lastActive`` parses as
        ``(not firstActive) or lastActive``, which forces the first name
        back on whenever a last name is active.  The symmetric code in
        lastToggle suggests the intent was ``not (firstActive or
        lastActive)`` (i.e. never allow zero name parts) -- likely a
        decompiler precedence artifact; confirm before changing.
        """
        self.firstActive = self.firstCheck['indicatorValue']
        if not self.firstActive or self.lastActive:
            self.firstActive = 1
            self.notify.debug(random.choice(PL.NameGUI_NoNameWarnings))
        self._listsChanged(1)
        if self.firstActive:
            self.firstList.refresh()
        self._updateCheckBoxes()
    def lastToggle(self, value):
        """Checkbox callback for the last-name (prefix + suffix) lists.

        NOTE(review): same precedence concern as firstToggle -- the
        condition parses as ``(not firstActive) or lastActive``; assigning
        lastActive = 1 when lastActive is already true is a no-op, so the
        effective guard is ``not firstActive``.  Presumably the intent is
        "keep at least one of first/last on"; confirm.
        """
        self.lastActive = self.lastCheck['indicatorValue']
        if not self.firstActive or self.lastActive:
            self.lastActive = 1
            self.notify.debug(random.choice(PL.NameGUI_NoNameWarnings))
        self._listsChanged(2)
        self._listsChanged(3)
        if self.lastActive:
            self.prefixList.refresh()
            self.suffixList.refresh()
        self._updateCheckBoxes()
def _updateCheckBoxes(self):
self.nicknameCheck['indicatorValue'] = self.nicknameActive
self.nicknameCheck['text'] = PL.NameGUI_CheckboxText[int(self.nicknameActive)]
self.nicknameCheck.setIndicatorValue()
self.firstCheck['indicatorValue'] = self.firstActive
self.firstCheck['text'] = PL.NameGUI_CheckboxText[int(self.firstActive)]
self.firstCheck.setIndicatorValue()
self.lastCheck['indicatorValue'] = self.lastActive
self.lastCheck['text'] = PL.NameGUI_CheckboxText[int(self.lastActive)]
self.lastCheck.setIndicatorValue()
    def enterInit(self):
        """FSM hook: nothing to do on entering Init."""
        pass
    def exitInit(self):
        """FSM hook: nothing to do on leaving Init."""
        pass
def enterPay(self):
if self.mode == self._NameGUI__MODE_TYPEANAME:
self.fsm.request('TypeAName')
else:
self.fsm.request('PickAName')
def exitPay(self):
pass
    def enterPickAName(self):
        """FSM hook: show the pick-a-name layout and resync its widgets."""
        if self.independent:
            self.randomButton.show()
        else:
            self.main.enableRandom()
        self.mode = self._NameGUI__MODE_PICKANAME
        # Entering pick-a-name discards any custom (typed) name flag.
        self.customName = False
        self.showPickAName()
        self._updateLists()
        self._updateCheckBoxes()
    def exitPickAName(self):
        """FSM hook: hide the pick-a-name layout (and its random button)."""
        if self.independent:
            self.randomButton.hide()
        self.hide()
    def enterTypeAName(self):
        """FSM hook: show the type-a-name layout and focus the text entry."""
        self.mode = self._NameGUI__MODE_TYPEANAME
        if not self.independent:
            self.main.disableRandom()
            self.typeANameButton.hide()
        self.showTypeAName()
        self.nameEntry['focus'] = 1
    def _typedAName(self, *args):
        """Submit handler for the typed name: normalize, validate, advance.

        The entry text is round-tripped through TextEncoder to strip
        surrounding whitespace safely, then checked by NameCheck; on any
        problem the entry is cleared, otherwise the FSM moves to Approved.
        """
        self.nameEntry['focus'] = 0
        name = self.nameEntry.get()
        name = TextEncoder().decodeText(name)
        name = name.strip()
        name = TextEncoder().encodeWtext(name)
        self.nameEntry.enterText(name)
        self.notify.debug('Chosen name: %s' % self.nameEntry.get())
        problem = NameCheck.checkName(name, [
            self._checkNpcNames], font = self.nameEntry.getFont())
        if problem:
            # Python 2 print statement; rejected names clear the entry.
            print problem
            self.nameEntry.enterText('')
        else:
            self.fsm.request('Approved')
    def exitTypeAName(self):
        """FSM hook: restore the type-a-name button and hide the layout."""
        self.typeANameButton.show()
        self.hide()
    def enterApproved(self):
        """FSM hook: approved names move straight to Accepted."""
        self.fsm.request('Accepted')
    def exitApproved(self):
        """FSM hook: nothing to do on leaving Approved."""
        pass
    def enterRejected(self):
        """FSM hook: nothing to do on entering Rejected."""
        pass
    def exitRejected(self):
        """FSM hook: nothing to do on leaving Rejected."""
        pass
    def enterAccepted(self):
        """FSM hook: nothing to do on entering Accepted."""
        pass
    def exitAccepted(self):
        """FSM hook: nothing to do on leaving Accepted."""
        pass
    def enterDone(self):
        """FSM hook: persist the finished selection.

        Independent layout: save and announce completion.  Embedded layout:
        stash the per-gender selection so it survives gender switches.
        NOTE(review): this reads ``getDNA().gender`` while other methods
        call ``getDNA().getGender()`` -- presumably both exist on the DNA
        object; confirm.
        """
        self.notify.debug('Entering done state')
        if self.independent:
            self.save()
            messenger.send('NameGUIFinished', [
                1])
            return None
        if self.getDNA().gender == 'm':
            self.savedMaleActiveStates = (self.nicknameActive, self.firstActive, self.lastActive)
            self.savedMaleName = [
                self.nicknameIndex,
                self.firstIndex,
                self.prefixIndex,
                self.suffixIndex]
            self.savedGender = 'm'
        elif self.getDNA().gender == 'f':
            self.savedFemaleName = [
                self.nicknameIndex,
                self.firstIndex,
                self.prefixIndex,
                self.suffixIndex]
            self.savedFemaleActiveStates = (self.nicknameActive, self.firstActive, self.lastActive)
            self.savedGender = 'f'
    def exitDone(self):
        """FSM hook: nothing to do on leaving Done."""
        pass
    def complete(self):
        """Submit button handler: normalize the typed name and finish.

        Custom (typed) names are validated by NameCheck first -- on any
        problem the entry is cleared; pick-a-name selections skip
        validation and go straight to Done.
        """
        self.nameEntry['focus'] = 0
        name = self.nameEntry.get()
        name = TextEncoder().decodeText(name)
        name = name.strip()
        name = TextEncoder().encodeWtext(name)
        self.nameEntry.enterText(name)
        self.notify.debug('Chosen name: %s' % name)
        if self.customName:
            problem = NameCheck.checkName(name, [
                self._checkNpcNames], font = self.nameEntry.getFont())
            if problem:
                # Python 2 print statement; rejected names clear the entry.
                print problem
                self.nameEntry.enterText('')
            else:
                self.fsm.request('Done')
        else:
            self.fsm.request('Done')
    def cancel(self):
        """Cancel button handler: announce an unsuccessful finish (0)."""
        messenger.send('NameGUIFinished', [
            0])
def getNumericName(self):
nick = 0
first = 0
pre = 0
suff = 0
if self.firstActive:
first = self.firstIndex
if self.lastActive:
pre = self.prefixIndex
suff = self.suffixIndex
return (nick, first, pre, suff)
def findWidestInList(self, nameList):
maxWidth = 0
maxName = ''
for name in nameList:
width = self.text.calcWidth(name)
if width > maxWidth:
maxWidth = self.text.calcWidth(name)
maxName = name
continue
print maxName + ' ' + str(maxWidth)
return maxName
def findWidestName(self):
longestBoyTitle = self.findWidestInList(self.nicknamesMale[:])
longestGirlTitle = self.findWidestInList(self.nicknamesFemale[:])
longestBoyFirst = self.findWidestInList(self.firstNamesMale[:])
longestGirlFirst = self.findWidestInList(self.firstNamesFemale[:])
longestLastPrefix = self.findWidestInList(self.lastPrefixesFemale[:] + self.lastPrefixesMale[:])
longestLastSuffix = self.findWidestInList(self.lastSuffixesFemale[:] + self.lastSuffixesMale[:])
longestBoyName = longestBoyTitle + ' ' + longestBoyFirst + ' ' + longestLastPrefix + longestLastSuffix
longestGirlName = longestGirlTitle + ' ' + longestGirlFirst + ' ' + longestLastPrefix + longestLastSuffix
longestName = self.findWidestInList([
longestBoyName,
longestGirlName])
return longestName
def getDNA(self):
if self.independent:
return self.main.dna
else:
return self.main.pirate.style
| 38,199 | 2,622 | 23 |
9feb5e9b65602d98fbbe238994e65df1e102a0e9 | 972 | py | Python | app.py | BelminD/bobby | 8763fa9e12dd911dfe8e279bd33db65495ec067b | [
"MIT"
] | 1 | 2020-03-02T14:50:11.000Z | 2020-03-02T14:50:11.000Z | app.py | BelminD/bobby | 8763fa9e12dd911dfe8e279bd33db65495ec067b | [
"MIT"
] | null | null | null | app.py | BelminD/bobby | 8763fa9e12dd911dfe8e279bd33db65495ec067b | [
"MIT"
] | 1 | 2020-03-26T08:56:06.000Z | 2020-03-26T08:56:06.000Z | import argparse
import config
import utils
from chat import ChatSession
from utils import Color
if __name__ == '__main__':
main()
| 22.604651 | 140 | 0.609053 | import argparse
import config
import utils
from chat import ChatSession
from utils import Color
def parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'-f',
'--file_name',
type=str,
help='chatlog output filename, .txt format'
)
return parser.parse_args()
def main():
session = ChatSession()
utils.create_folder()
session.send_message("The friendly chatbot is here! 🤖")
args = parser()
file_name = utils.generate_file_name(args.file_name)
# Main loop
try:
while True:
session.run(
read_buffer=b'',
file_name=file_name,
path=config.PATH,
print_flag=config.PRINT_CHAT
)
except KeyboardInterrupt:
print(f'\n{Color.OKGREEN}Great session! The chatlogs has been saved to `{config.PATH}/{file_name}`{Color.ENDC}') # noqa: E501, E999
if __name__ == '__main__':
main()
| 790 | 0 | 46 |
005e24a95240d597e4128dcd7bd42257cd1d34bb | 486 | py | Python | tests/checkio/home/test_most_wanted_letter.py | zoido/checkio_python_solutions | 858cc7eafbbf55c8506e14cce260d17406fbf09c | [
"MIT"
] | null | null | null | tests/checkio/home/test_most_wanted_letter.py | zoido/checkio_python_solutions | 858cc7eafbbf55c8506e14cce260d17406fbf09c | [
"MIT"
] | 2 | 2017-10-14T17:44:17.000Z | 2018-04-06T18:53:37.000Z | tests/checkio/home/test_most_wanted_letter.py | zoido/checkio_python_solutions | 858cc7eafbbf55c8506e14cce260d17406fbf09c | [
"MIT"
] | null | null | null | from checkio.home.most_wanted_letter import checkio
| 37.384615 | 68 | 0.598765 | from checkio.home.most_wanted_letter import checkio
def test_checkio():
assert checkio("Hello World!") == "l", "Hello test"
assert checkio("How do you do?") == "o", "O is most wanted"
assert checkio("One") == "e", "All letter only once."
assert checkio("Oops!") == "o", "Don't forget about lower case."
assert checkio("AAaooo!!!!") == "a", "Only letters."
assert checkio("abe") == "a", "The First."
assert checkio("a" * 9000 + "b" * 1000) == "a", "Long."
| 410 | 0 | 23 |
f22e830fb9aacfaedbbdc3927d8137c30da1348f | 3,346 | py | Python | train.py | takahiro-777/tf-dqn-reversi | 35875c593e58b60173c290b0a04544dfa288289f | [
"MIT"
] | null | null | null | train.py | takahiro-777/tf-dqn-reversi | 35875c593e58b60173c290b0a04544dfa288289f | [
"MIT"
] | 3 | 2017-11-04T05:55:09.000Z | 2017-11-04T11:49:21.000Z | train.py | takahiro-777/tf-dqn-reversi | 35875c593e58b60173c290b0a04544dfa288289f | [
"MIT"
] | null | null | null | import copy
from Reversi import Reversi
from dqn_agent import DQNAgent
if __name__ == "__main__":
# parameters
#n_epochs = 1000
n_epochs = 5
# environment, agent
env = Reversi()
# playerID
playerID = [env.Black, env.White, env.Black]
# player agent
players = []
# player[0]= env.Black
players.append(DQNAgent(env.enable_actions, env.name, env.screen_n_rows, env.screen_n_cols))
# player[1]= env.White
players.append(DQNAgent(env.enable_actions, env.name, env.screen_n_rows, env.screen_n_cols))
for e in range(n_epochs):
# reset
env.reset()
terminal = False
while terminal == False: # 1エピソードが終わるまでループ
for i in range(0, len(players)):
state = env.screen
#print(state)
targets = env.get_enables(playerID[i])
exploration = (n_epochs - e + 20)/(n_epochs + 20)
#exploration = 0.1
if len(targets) > 0:
# どこかに置く場所がある場合
#すべての手をトレーニングする
for tr in targets:
tmp = copy.deepcopy(env)
tmp.update(tr, playerID[i])
#終了判定
win = tmp.winner()
end = tmp.isEnd()
#次の状態
state_X = tmp.screen
target_X = tmp.get_enables(playerID[i+1])
if len(target_X) == 0:
target_X = tmp.get_enables(playerID[i])
# 両者トレーニング
for j in range(0, len(players)):
reword = 0
if end == True:
if win == playerID[j]:
# 勝ったら報酬1を得る
reword = 1
players[j].store_experience(state, targets, tr, reword, state_X, target_X, end)
#print(state)
#print(state_X)
#if e > n_epochs*0.2:
# players[j].experience_replay()
# 行動を選択
action = players[i].select_action(state, targets, exploration)
# 行動を実行
env.update(action, playerID[i])
# for log
loss = players[i].current_loss
Q_max, Q_action = players[i].select_enable_action(state, targets)
print("player:{:1d} | pos:{:2d} | LOSS: {:.4f} | Q_MAX: {:.4f} | Q_ACTION: {:.4f}".format(
playerID[i], action, loss, Q_max, Q_action))
# 行動を実行した結果
terminal = env.isEnd()
for j in range(0, len(players)):
if e > n_epochs*0.3:
for k in range(25):
players[j].experience_replay()
elif e > n_epochs*0.1:
for k in range(5):
players[j].experience_replay()
w = env.winner()
print("EPOCH: {:03d}/{:03d} | WIN: player{:1d}".format(
e, n_epochs, w))
# 保存は後攻のplayer2 を保存する。
if e%50 == 0:
players[1].save_model(e)
| 32.173077 | 110 | 0.443515 | import copy
from Reversi import Reversi
from dqn_agent import DQNAgent
if __name__ == "__main__":
# parameters
#n_epochs = 1000
n_epochs = 5
# environment, agent
env = Reversi()
# playerID
playerID = [env.Black, env.White, env.Black]
# player agent
players = []
# player[0]= env.Black
players.append(DQNAgent(env.enable_actions, env.name, env.screen_n_rows, env.screen_n_cols))
# player[1]= env.White
players.append(DQNAgent(env.enable_actions, env.name, env.screen_n_rows, env.screen_n_cols))
for e in range(n_epochs):
# reset
env.reset()
terminal = False
while terminal == False: # 1エピソードが終わるまでループ
for i in range(0, len(players)):
state = env.screen
#print(state)
targets = env.get_enables(playerID[i])
exploration = (n_epochs - e + 20)/(n_epochs + 20)
#exploration = 0.1
if len(targets) > 0:
# どこかに置く場所がある場合
#すべての手をトレーニングする
for tr in targets:
tmp = copy.deepcopy(env)
tmp.update(tr, playerID[i])
#終了判定
win = tmp.winner()
end = tmp.isEnd()
#次の状態
state_X = tmp.screen
target_X = tmp.get_enables(playerID[i+1])
if len(target_X) == 0:
target_X = tmp.get_enables(playerID[i])
# 両者トレーニング
for j in range(0, len(players)):
reword = 0
if end == True:
if win == playerID[j]:
# 勝ったら報酬1を得る
reword = 1
players[j].store_experience(state, targets, tr, reword, state_X, target_X, end)
#print(state)
#print(state_X)
#if e > n_epochs*0.2:
# players[j].experience_replay()
# 行動を選択
action = players[i].select_action(state, targets, exploration)
# 行動を実行
env.update(action, playerID[i])
# for log
loss = players[i].current_loss
Q_max, Q_action = players[i].select_enable_action(state, targets)
print("player:{:1d} | pos:{:2d} | LOSS: {:.4f} | Q_MAX: {:.4f} | Q_ACTION: {:.4f}".format(
playerID[i], action, loss, Q_max, Q_action))
# 行動を実行した結果
terminal = env.isEnd()
for j in range(0, len(players)):
if e > n_epochs*0.3:
for k in range(25):
players[j].experience_replay()
elif e > n_epochs*0.1:
for k in range(5):
players[j].experience_replay()
w = env.winner()
print("EPOCH: {:03d}/{:03d} | WIN: player{:1d}".format(
e, n_epochs, w))
# 保存は後攻のplayer2 を保存する。
if e%50 == 0:
players[1].save_model(e)
| 0 | 0 | 0 |