hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6a306b67e04bd9ee4518bc5dd6e295cbacb63b7c | 321 | py | Python | aoclib/edge.py | BrendanLeber/aoclib | 67c6939cf045f812855339f496db55eb441cfe7f | [
"MIT"
] | null | null | null | aoclib/edge.py | BrendanLeber/aoclib | 67c6939cf045f812855339f496db55eb441cfe7f | [
"MIT"
] | null | null | null | aoclib/edge.py | BrendanLeber/aoclib | 67c6939cf045f812855339f496db55eb441cfe7f | [
"MIT"
] | 1 | 2019-03-21T16:21:03.000Z | 2019-03-21T16:21:03.000Z | # -*- coding: utf-8 -*-
from __future__ import annotations
from dataclasses import dataclass
@dataclass
| 17.833333 | 38 | 0.604361 | # -*- coding: utf-8 -*-
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed graph edge from vertex ``u`` to vertex ``v``."""

    u: int  # the "from" vertex
    v: int  # the "to" vertex

    def __str__(self) -> str:
        """Render the edge as ``u -> v``."""
        return "{} -> {}".format(self.u, self.v)

    def reversed(self) -> Edge:
        """Return a new edge pointing in the opposite direction."""
        return Edge(self.v, self.u)
| 85 | 106 | 22 |
cb875a5067dc949e30897b549f24e12ff78ddb3c | 218 | py | Python | src/fractal/world/_realizing/_op/flowing.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | [
"MIT"
] | null | null | null | src/fractal/world/_realizing/_op/flowing.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | [
"MIT"
] | null | null | null | src/fractal/world/_realizing/_op/flowing.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | [
"MIT"
] | null | null | null | """
Flow
====
Time-related operations for a realizing world.
"""
from ..realizing import Realizing
| 10.9 | 46 | 0.623853 | """
Flow
====
Time-related operations for a realizing world.
"""
from ..realizing import Realizing
class Flowing(
    Realizing,
):
    """Time-related ("flow") operations for a realizing world.

    Both hooks are currently unimplemented stubs.
    """

    def at_dawn(self):
        # Stub: presumably invoked at the start of a world day -- no behavior yet.
        pass

    def has_world_ended(self):
        # Stub: presumably reports whether the world's time is up -- no behavior yet.
        pass
| 28 | 11 | 76 |
cdec793af3e3d4f747ac579ba41367a9a96a9dcd | 3,312 | py | Python | tests/test_option_binding.py | clidoc/clidoc.python | cbc2717aa3ba6cd279c8106784bc711febbef29c | [
"MIT"
] | null | null | null | tests/test_option_binding.py | clidoc/clidoc.python | cbc2717aa3ba6cd279c8106784bc711febbef29c | [
"MIT"
] | 3 | 2015-05-10T06:45:37.000Z | 2015-05-15T08:26:14.000Z | tests/test_option_binding.py | clidoc/clidoc.python | cbc2717aa3ba6cd279c8106784bc711febbef29c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from clidoc_option_binding import *
from utils import generate_key_checker, CLIDOC_TEST_MODE
# Build the shared outcome validator.  The three sets appear to group the
# expected outcome keys by kind -- judging from the assertions in the tests:
# flags/commands, single-value options and positionals, list-valued options.
# (Grouping semantics not visible here; confirm against generate_key_checker.)
key_checker = generate_key_checker(
    {
        "-c",
        "--long-4",
        "command",
    },
    {
        "-a",
        "-b",
        "--long-1",
        "--long-2",
        "<p3>",
    },
    {
        "-d",
        "-e",
        "--long-3",
    },
)
| 21.647059 | 66 | 0.513587 | # -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from clidoc_option_binding import *
from utils import generate_key_checker, CLIDOC_TEST_MODE
# Build the shared outcome validator.  The three sets appear to group the
# expected outcome keys by kind -- judging from the assertions in the tests:
# flags/commands, single-value options and positionals, list-valued options.
# (Grouping semantics not visible here; confirm against generate_key_checker.)
key_checker = generate_key_checker(
    {
        "-c",
        "--long-4",
        "command",
    },
    {
        "-a",
        "-b",
        "--long-1",
        "--long-2",
        "<p3>",
    },
    {
        "-d",
        "-e",
        "--long-3",
    },
)
def test_option_a():
    """-a consumes the following token as its argument."""
    result = clidoc(["utility_name", "-a", "value"], CLIDOC_TEST_MODE)
    key_checker(result)
    assert result["-a"] == "value"
def test_option_b():
    """-b consumes the following token as its argument."""
    result = clidoc(["utility_name", "-b", "value"], CLIDOC_TEST_MODE)
    key_checker(result)
    assert result["-b"] == "value"
def test_option_c_p3():
    """-c is a bare flag; the trailing token binds to positional <p3>."""
    result = clidoc(["utility_name", "-c", "value"], CLIDOC_TEST_MODE)
    key_checker(result)
    assert result["-c"]
    assert result["<p3>"] == "value"
def test_option_e():
    """-e accumulates repeated / attached values into a single list."""
    def run(argv):
        result = clidoc(["utility_name"] + argv, CLIDOC_TEST_MODE)
        key_checker(result)
        return result

    # Separate tokens after -e are gathered until another option appears.
    assert run(["-e", "a", "b", "c"])["-e"] == ["a", "b", "c"]
    # Attached forms (-eb, -ec) contribute to the same list.
    assert run(["-e", "a", "-eb", "-ec"])["-e"] == ["a", "b", "c"]
    # Without a second -e, "command" is swallowed as a value.
    assert run(["-e", "a", "command", "b"])["-e"] == ["a", "command", "b"]
    # With a second -e, "command" is recognised as the command.
    result = run(["-e", "a", "command", "-e", "b", "c"])
    assert result["-e"] == ["a", "b", "c"]
    assert result["command"]
def test_guideline_8():
    """Comma-separated values split per POSIX guideline 8 unless disabled."""
    def run(argv, mode=CLIDOC_TEST_MODE):
        result = clidoc(["utility_name"] + argv, mode)
        key_checker(result)
        return result

    # Default: split on commas, dropping empty fields.
    assert run(["-e", "a,b,,c,"])["-e"] == ["a", "b", "c"]
    # With the guideline switched off the raw token is kept verbatim.
    assert run(["-e", "a,b,,c,"], CLIDOC_TEST_MODE | GUIDELINE_8_OFF)["-e"] == ["a,b,,c,"]
    # Repeated -e keeps each raw token when values are already list-valued.
    assert run(["-e", "c,d", "-e", "a,b,,c,"])["-e"] == ["c,d", "a,b,,c,"]
def test_option_long_1():
    """--long-1 accepts an arbitrary, punctuation-heavy argument."""
    result = clidoc(["utility_name", "--long-1", "!@#^&$!"], CLIDOC_TEST_MODE)
    key_checker(result)
    assert result["--long-1"] == "!@#^&$!"
def test_option_long_2():
    """--long-2 accepts an arbitrary, punctuation-heavy argument."""
    result = clidoc(["utility_name", "--long-2", "!@#^&$!"], CLIDOC_TEST_MODE)
    key_checker(result)
    assert result["--long-2"] == "!@#^&$!"
def test_option_long_3():
    """--long-3 collects multiple following tokens into a list."""
    result = clidoc(["utility_name", "--long-3", "a", "b", "c"], CLIDOC_TEST_MODE)
    key_checker(result)
    assert result["--long-3"] == ["a", "b", "c"]
def test_option_long_4():
    """-f and --long-4 are aliases for the same boolean flag."""
    for flag in ("-f", "--long-4"):
        result = clidoc(["utility_name", flag], CLIDOC_TEST_MODE)
        key_checker(result)
        assert result["--long-4"]
| 2,605 | 0 | 207 |
4034a523b36517e6a0bdf04a9888a83e2ea95364 | 252 | py | Python | pyuri/__init__.py | nick-allen/python-uri | a526316270b83cd76651baec016d1509aab68981 | [
"MIT"
] | 1 | 2016-07-29T01:29:04.000Z | 2016-07-29T01:29:04.000Z | pyuri/__init__.py | nick-allen/pyuri | a526316270b83cd76651baec016d1509aab68981 | [
"MIT"
] | 1 | 2016-10-24T15:03:30.000Z | 2016-11-01T18:01:52.000Z | pyuri/__init__.py | nick-allen/pyuri | a526316270b83cd76651baec016d1509aab68981 | [
"MIT"
] | null | null | null | """Python URI Handling"""
from pkg_resources import get_distribution, DistributionNotFound
from .uri import URI
__all__ = ['URI']

# Expose the installed distribution's version; fall back to a placeholder
# when the package is not installed (e.g. running from a source checkout).
try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    __version__ = '0.0.0-dev'
| 18 | 64 | 0.753968 | """Python URI Handling"""
from pkg_resources import get_distribution, DistributionNotFound
from .uri import URI
__all__ = ['URI']

# Expose the installed distribution's version; fall back to a placeholder
# when the package is not installed (e.g. running from a source checkout).
try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    __version__ = '0.0.0-dev'
| 0 | 0 | 0 |
404465605a931d46f6e31c6a291d217087149125 | 1,731 | py | Python | guts/migration/driver.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | guts/migration/driver.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | guts/migration/driver.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Aptira Pty Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from guts.i18n import _
class MigrationDriver(object):
    """Base class for migration drivers.

    Each hook below raises NotImplementedError so that a concrete driver
    that forgets to override it fails loudly instead of silently no-op'ing.
    """

    def initialize(self, connection_dict):
        """Initialize Migration Driver.

        This is for drivers that don't implement initialize().

        :param connection_dict: source-hypervisor connection parameters
            (exact schema is driver-specific -- not visible here).
        :raises NotImplementedError: always, in this base class.
        """
        msg = _("Initialize source hypervisor is not "
                "implemented by the driver.")
        raise NotImplementedError(msg)

    def get_vms_list(self):
        """Get all VMs stub.

        This is for drivers that don't implement get_vms_list().

        :raises NotImplementedError: always, in this base class.
        """
        msg = _("Get VMs list from source hypervisor is not "
                "implemented by the driver.")
        raise NotImplementedError(msg)

    def download_vm_disks(self, context, vm_uuid, base_path):
        """Download VM disks stub.

        This is for drivers that don't implement download_vm_disks().

        :param context: request context (opaque here).
        :param vm_uuid: identifier of the VM whose disks to download.
        :param base_path: destination directory for the disk images.
        :raises NotImplementedError: always, in this base class.
        """
        msg = _("Method to download VM disks from source hypervisor to "
                "base_path is not implemented by the driver.")
        raise NotImplementedError(msg)
| 33.288462 | 78 | 0.657423 | # Copyright (c) 2015 Aptira Pty Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from guts.i18n import _
class MigrationDriver(object):
    """Base class for migration drivers.

    Concrete drivers subclass this and override each hook; the base
    implementations always raise NotImplementedError.
    """

    def __init__(self, *args, **kwargs):
        # Accept (and ignore) arbitrary constructor arguments so subclasses
        # with richer signatures can still chain up.
        pass

    def initialize(self, connection_dict):
        """Initialize Migration Driver.

        This is for drivers that don't implement initialize().
        """
        raise NotImplementedError(
            _("Initialize source hypervisor is not implemented by the driver."))

    def get_vms_list(self):
        """Get all VMs stub.

        This is for drivers that don't implement get_vms_list().
        """
        raise NotImplementedError(
            _("Get VMs list from source hypervisor is not implemented by the driver."))

    def download_vm_disks(self, context, vm_uuid, base_path):
        """Download VM disks stub.

        This is for drivers that don't implement download_vm_disks().
        """
        raise NotImplementedError(
            _("Method to download VM disks from source hypervisor to "
              "base_path is not implemented by the driver."))
| 28 | 0 | 27 |
8ac95dbd0a9032681d9dfaccf24e28f0495f5ae1 | 4,019 | py | Python | base_config.py | zclonely/ACNet | 1fa157c5b38be2ecfc4a79f8ebe8e7cb30c71561 | [
"MIT"
] | 2 | 2019-12-23T03:03:24.000Z | 2019-12-23T03:03:26.000Z | base_config.py | zclonely/ACNet | 1fa157c5b38be2ecfc4a79f8ebe8e7cb30c71561 | [
"MIT"
] | null | null | null | base_config.py | zclonely/ACNet | 1fa157c5b38be2ecfc4a79f8ebe8e7cb30c71561 | [
"MIT"
] | null | null | null | from collections import namedtuple
from model_map import get_dataset_name_by_model_name
# Immutable per-run training configuration.  Fields group roughly as:
# model/data, optimizer, LR schedule, warmup, checkpoint/logging periods,
# output paths, weight init/save, validation cadence, gradient accumulation,
# architecture extras (deps -- presumably per-layer widths, TODO confirm --
# and se_reduce_scale for squeeze-and-excitation).
BaseConfigByEpoch = namedtuple('BaseConfigByEpoch', ['network_type', 'dataset_name', 'dataset_subset', 'global_batch_size', 'num_node', 'device',
                                                     'weight_decay', 'weight_decay_bias', 'optimizer_type', 'momentum',
                                                     'bias_lr_factor', 'max_epochs', 'base_lr', 'lr_epoch_boundaries', 'lr_decay_factor', 'linear_final_lr',
                                                     'warmup_epochs', 'warmup_method', 'warmup_factor',
                                                     'ckpt_iter_period', 'tb_iter_period',
                                                     'output_dir', 'tb_dir',
                                                     'init_weights', 'save_weights',
                                                     'val_epoch_period', 'grad_accum_iters',
                                                     'deps',
                                                     'se_reduce_scale'])
| 83.729167 | 181 | 0.622294 | from collections import namedtuple
from model_map import get_dataset_name_by_model_name
# Immutable per-run training configuration.  Fields group roughly as:
# model/data, optimizer, LR schedule, warmup, checkpoint/logging periods,
# output paths, weight init/save, validation cadence, gradient accumulation,
# architecture extras (deps -- presumably per-layer widths, TODO confirm --
# and se_reduce_scale for squeeze-and-excitation).
BaseConfigByEpoch = namedtuple('BaseConfigByEpoch', ['network_type', 'dataset_name', 'dataset_subset', 'global_batch_size', 'num_node', 'device',
                                                     'weight_decay', 'weight_decay_bias', 'optimizer_type', 'momentum',
                                                     'bias_lr_factor', 'max_epochs', 'base_lr', 'lr_epoch_boundaries', 'lr_decay_factor', 'linear_final_lr',
                                                     'warmup_epochs', 'warmup_method', 'warmup_factor',
                                                     'ckpt_iter_period', 'tb_iter_period',
                                                     'output_dir', 'tb_dir',
                                                     'init_weights', 'save_weights',
                                                     'val_epoch_period', 'grad_accum_iters',
                                                     'deps',
                                                     'se_reduce_scale'])
def get_baseconfig_by_epoch(network_type, dataset_name, dataset_subset, global_batch_size, num_node,
                            weight_decay, optimizer_type, momentum,
                            max_epochs, base_lr, lr_epoch_boundaries, lr_decay_factor, linear_final_lr,
                            warmup_epochs, warmup_method, warmup_factor,
                            ckpt_iter_period, tb_iter_period,
                            output_dir, tb_dir, save_weights,
                            device='cuda', weight_decay_bias=0, bias_lr_factor=2, init_weights=None, val_epoch_period=-1, grad_accum_iters=1,
                            deps=None,
                            se_reduce_scale=0):
    """Assemble a BaseConfigByEpoch for a training run.

    Side effect: prints the learning-rate schedule to stdout so it shows
    up in training logs.  NOTE(review): lr_epoch_boundaries (step decay)
    and linear_final_lr (linear decay) look mutually exclusive -- confirm
    with the training loop.
    """
    print('----------------- show lr schedule --------------')
    print('base_lr:', base_lr)
    print('max_epochs:', max_epochs)
    print('lr_epochs:', lr_epoch_boundaries)
    print('lr_decay:', lr_decay_factor)
    print('linear_final_lr:', linear_final_lr)
    print('-------------------------------------------------')
    # Periods are coerced to int so callers may pass floats (e.g. 1e4).
    return BaseConfigByEpoch(network_type=network_type,dataset_name=dataset_name,dataset_subset=dataset_subset,global_batch_size=global_batch_size,num_node=num_node, device=device,
                             weight_decay=weight_decay,weight_decay_bias=weight_decay_bias,optimizer_type=optimizer_type,momentum=momentum,bias_lr_factor=bias_lr_factor,
                             max_epochs=max_epochs, base_lr=base_lr, lr_epoch_boundaries=lr_epoch_boundaries,lr_decay_factor=lr_decay_factor, linear_final_lr=linear_final_lr,
                             warmup_epochs=warmup_epochs,warmup_method=warmup_method,warmup_factor=warmup_factor,
                             ckpt_iter_period=int(ckpt_iter_period),tb_iter_period=int(tb_iter_period),
                             output_dir=output_dir, tb_dir=tb_dir,
                             init_weights=init_weights, save_weights=save_weights,
                             val_epoch_period=val_epoch_period, grad_accum_iters=grad_accum_iters, deps=deps, se_reduce_scale=se_reduce_scale)
def get_baseconfig_for_test(network_type, dataset_subset, global_batch_size, init_weights, device='cuda', deps=None, se_reduce_scale=0):
    """Build a minimal BaseConfigByEpoch for evaluation-only runs.

    All training-related fields (optimizer, LR schedule, warmup,
    checkpointing, output paths, validation cadence) are left as None
    because no training happens at test time.
    """
    # Fields irrelevant for inference; dict.fromkeys defaults them to None.
    untrained = dict.fromkeys((
        'weight_decay', 'weight_decay_bias', 'optimizer_type', 'momentum',
        'bias_lr_factor', 'max_epochs', 'base_lr', 'lr_epoch_boundaries',
        'lr_decay_factor', 'linear_final_lr', 'warmup_epochs', 'warmup_method',
        'warmup_factor', 'ckpt_iter_period', 'tb_iter_period', 'output_dir',
        'tb_dir', 'save_weights', 'val_epoch_period', 'grad_accum_iters',
    ))
    return BaseConfigByEpoch(
        network_type=network_type,
        dataset_name=get_dataset_name_by_model_name(network_type),
        dataset_subset=dataset_subset,
        global_batch_size=global_batch_size,
        num_node=1,
        device=device,
        init_weights=init_weights,
        deps=deps,
        se_reduce_scale=se_reduce_scale,
        **untrained,
    )
52edcb7aea820f4031994c44df4c877102b60d05 | 385 | py | Python | Chapter 4 - Lists & Tuples/01_list.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | null | null | null | Chapter 4 - Lists & Tuples/01_list.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | null | null | null | Chapter 4 - Lists & Tuples/01_list.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | 1 | 2021-04-21T10:23:08.000Z | 2021-04-21T10:23:08.000Z | #Create a list using []
a = [1,2,3,7,66]
#print the list using print() function
print(a)
#Access using index using a[0], a[1], ....
print(a[2])
#Changing the value of the list
a[0] = 777
print(a)
#We can create a list with items of different type
b = [77,"Root",False,6.9]
print(b)
#List Slicing
friends = ["Root","Groot","Sam","Alex",99]
print(friends[0:3])
print(friends[-4:])
| 16.73913 | 50 | 0.649351 | #Create a list using []
a = [1,2,3,7,66]
#print the list using print() function
print(a)
#Access using index using a[0], a[1], ....
print(a[2])
#Changing the value of the list
a[0] = 777
print(a)
#We can create a list with items of different type
b = [77,"Root",False,6.9]
print(b)
#List Slicing
friends = ["Root","Groot","Sam","Alex",99]
print(friends[0:3])
print(friends[-4:])
| 0 | 0 | 0 |
cbb881db08ca129cbba2949277f9b0b3fed4d340 | 138 | py | Python | Data Scientist Career Path/3. Python Fundamentals/11. Python Files/6. with.py | myarist/Codecademy | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | [
"MIT"
] | 23 | 2021-06-06T15:35:55.000Z | 2022-03-21T06:53:42.000Z | Data Scientist Career Path/3. Python Fundamentals/11. Python Files/6. with.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | null | null | null | Data Scientist Career Path/3. Python Fundamentals/11. Python Files/6. with.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | 9 | 2021-06-08T01:32:04.000Z | 2022-03-18T15:38:09.000Z | with open('fun_file.txt') as close_this_file:
setup = close_this_file.readline()
punchline = close_this_file.readline()
print(setup)
| 23 | 45 | 0.768116 | with open('fun_file.txt') as close_this_file:
setup = close_this_file.readline()
punchline = close_this_file.readline()
print(setup)
| 0 | 0 | 0 |
feda459c8123407cf53096f28c805421e375f313 | 3,141 | py | Python | guides/python/pysample/crypto/aes/aes_enc.py | ToraNova/library | 20b321302868e8c2ce8723c808aa9e7a313e2cb8 | [
"MIT"
] | null | null | null | guides/python/pysample/crypto/aes/aes_enc.py | ToraNova/library | 20b321302868e8c2ce8723c808aa9e7a313e2cb8 | [
"MIT"
] | null | null | null | guides/python/pysample/crypto/aes/aes_enc.py | ToraNova/library | 20b321302868e8c2ce8723c808aa9e7a313e2cb8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# used to encrypt a byte string
# used as a tool for testing
# AES encryption
from Crypto.Cipher import AES
import base64
import os
import sys
import binascii
# CLI driver: aes_enc <key:base64 16 bytes> <msg> [flags]
# Encrypts <msg> under the given key, prints the ciphertext in several
# encodings, then decrypts it again as a round-trip sanity check.
if __name__ == "__main__":
    e_key = None
    plain = None
    allz = False  # NOTE(review): never used below
    AES_mode = AES.MODE_CBC  # default mode
    #input parsing
    if(len(sys.argv) < 3):
        print("Please specify key and msg")
        print("aes_enc <key:base64 16bytes> <msg> <flag>")
        print("b flag to use ECB mode")
        print("c flag to use CTR mode")
        print("p flag to encrypt ONLY 15bytes of the message (used with ECB)")
        print("default mode is CBC")
        print("flags are appended together, i.e : zb")
        exit(1)
    try:
        e_key = base64.b64decode(sys.argv[1])
        plain = sys.argv[2]
    except Exception as e:
        print("Please specify key as base64 input !",str(e))
        exit(1)
    #additionaly flag parsing
    # NOTE(review): flag nesting reconstructed -- 'b'/'p'/'c' treated as
    # independent flags under the argv-length guard; confirm with upstream.
    if(len(sys.argv) > 3):
        if('b' in sys.argv[3]):
            AES_mode = AES.MODE_ECB
        if('p' in sys.argv[3] and len(plain)>15 ):
            plain = plain[:15]
        if('c' in sys.argv[3]):
            AES_mode = AES.MODE_CTR
    #input sanitized (partially)
    padded = rawpad( plain , AES.block_size) # input padding, AES block size is fixed to 16 bytes
    iv = os.urandom( AES.block_size ) # initialization vector
    print("Encrypting {} ({} bytes) with key 0x{}".format(plain,len(plain),gethex_bstring(e_key)))
    print("Padded Base64 :",base64.b64encode(padded.encode('utf-8')).decode('utf-8'))
    print("Padded Hex :",gethex_sstring(padded))
    print("Post padding length : {} bytes".format(len(padded)))
    if(AES_mode == AES.MODE_ECB):
        Ecipher = AES.new( e_key, AES.MODE_ECB) # ECB mode does not use IV
    else:
        Ecipher = AES.new( e_key, AES_mode, iv) # encrypting cipher obj
    # NOTE(review): padded is a str here; modern PyCryptodome requires bytes
    # for encrypt() -- this likely only works on legacy PyCrypto. Verify.
    block = Ecipher.encrypt(padded)
    cipher = iv + block # append the block behind the iv
    print("\nPure cipherblock output")
    print("Base64 :",base64.b64encode(block).decode('utf-8'))
    print("Hex :",gethex_bstring(block))
    print("Length : {} bytes".format(len(block)))
    print("\nCiphertext with IV inserted:")
    print("Base64 :",base64.b64encode(cipher).decode('utf-8'))
    print("Hex :",gethex_bstring(cipher))
    print("Length : {} bytes".format(len(cipher)))
    # Decryption checking
    print("\nDecryption checkback...")
    # extract the iv out
    iv = cipher[:AES.block_size]
    cipher = cipher[AES.block_size:]
    if(AES_mode == AES.MODE_ECB):
        Dcipher = AES.new( e_key, AES.MODE_ECB)
    else:
        Dcipher = AES.new( e_key, AES_mode, iv)
    plain = Dcipher.decrypt(cipher)
    plain = rawunpad(plain)
    print("Decrypted plaintext :",plain.decode('utf-8')," Length : {} bytes".format(len(plain)))
| 29.083333 | 95 | 0.687042 | #!/usr/bin/python3
# used to encrypt a byte string
# used as a tool for testing
# AES encryption
from Crypto.Cipher import AES
import base64
import os
import sys
import binascii
def rawpad(s, BLOCK_SIZE):
    """PKCS#7-style pad: append N copies of chr(N) so len is a multiple
    of BLOCK_SIZE (a full extra block when already aligned)."""
    pad_len = BLOCK_SIZE - len(s) % BLOCK_SIZE
    return s + chr(pad_len) * pad_len
def rawunpad(s):
    """Strip PKCS#7-style padding: the last element encodes the pad length."""
    pad_len = ord(s[len(s) - 1:])
    return s[:len(s) - pad_len]
def gethex_bstring(s):
    """Uppercase hex representation of a byte string (as a str)."""
    return "".join(format(byte, "02X") for byte in s)
def gethex_sstring(s):
    """Uppercase hex representation of a text string's code points."""
    return "".join(format(ord(ch), "02X") for ch in s)
# CLI driver: aes_enc <key:base64 16 bytes> <msg> [flags]
# Encrypts <msg> under the given key, prints the ciphertext in several
# encodings, then decrypts it again as a round-trip sanity check.
if __name__ == "__main__":
    e_key = None
    plain = None
    allz = False  # NOTE(review): never used below
    AES_mode = AES.MODE_CBC  # default mode
    #input parsing
    if(len(sys.argv) < 3):
        print("Please specify key and msg")
        print("aes_enc <key:base64 16bytes> <msg> <flag>")
        print("b flag to use ECB mode")
        print("c flag to use CTR mode")
        print("p flag to encrypt ONLY 15bytes of the message (used with ECB)")
        print("default mode is CBC")
        print("flags are appended together, i.e : zb")
        exit(1)
    try:
        e_key = base64.b64decode(sys.argv[1])
        plain = sys.argv[2]
    except Exception as e:
        print("Please specify key as base64 input !",str(e))
        exit(1)
    #additionaly flag parsing
    # NOTE(review): flag nesting reconstructed -- 'b'/'p'/'c' treated as
    # independent flags under the argv-length guard; confirm with upstream.
    if(len(sys.argv) > 3):
        if('b' in sys.argv[3]):
            AES_mode = AES.MODE_ECB
        if('p' in sys.argv[3] and len(plain)>15 ):
            plain = plain[:15]
        if('c' in sys.argv[3]):
            AES_mode = AES.MODE_CTR
    #input sanitized (partially)
    padded = rawpad( plain , AES.block_size) # input padding, AES block size is fixed to 16 bytes
    iv = os.urandom( AES.block_size ) # initialization vector
    print("Encrypting {} ({} bytes) with key 0x{}".format(plain,len(plain),gethex_bstring(e_key)))
    print("Padded Base64 :",base64.b64encode(padded.encode('utf-8')).decode('utf-8'))
    print("Padded Hex :",gethex_sstring(padded))
    print("Post padding length : {} bytes".format(len(padded)))
    if(AES_mode == AES.MODE_ECB):
        Ecipher = AES.new( e_key, AES.MODE_ECB) # ECB mode does not use IV
    else:
        Ecipher = AES.new( e_key, AES_mode, iv) # encrypting cipher obj
    # NOTE(review): padded is a str here; modern PyCryptodome requires bytes
    # for encrypt() -- this likely only works on legacy PyCrypto. Verify.
    block = Ecipher.encrypt(padded)
    cipher = iv + block # append the block behind the iv
    print("\nPure cipherblock output")
    print("Base64 :",base64.b64encode(block).decode('utf-8'))
    print("Hex :",gethex_bstring(block))
    print("Length : {} bytes".format(len(block)))
    print("\nCiphertext with IV inserted:")
    print("Base64 :",base64.b64encode(cipher).decode('utf-8'))
    print("Hex :",gethex_bstring(cipher))
    print("Length : {} bytes".format(len(cipher)))
    # Decryption checking
    print("\nDecryption checkback...")
    # extract the iv out
    iv = cipher[:AES.block_size]
    cipher = cipher[AES.block_size:]
    if(AES_mode == AES.MODE_ECB):
        Dcipher = AES.new( e_key, AES.MODE_ECB)
    else:
        Dcipher = AES.new( e_key, AES_mode, iv)
    plain = Dcipher.decrypt(cipher)
    plain = rawunpad(plain)
    print("Decrypted plaintext :",plain.decode('utf-8')," Length : {} bytes".format(len(plain)))
| 484 | 0 | 92 |
6390b3c93147251c06317ddc9b994d40186364a9 | 424 | py | Python | aoc2020/day9/day9_part2.py | GetPastTheMonkey/advent-of-code | db80be6d87baba4d5315cc69276905c55762da86 | [
"MIT"
] | 1 | 2019-09-15T16:37:24.000Z | 2019-09-15T16:37:24.000Z | aoc2020/day9/day9_part2.py | GetPastTheMonkey/advent-of-code | db80be6d87baba4d5315cc69276905c55762da86 | [
"MIT"
] | null | null | null | aoc2020/day9/day9_part2.py | GetPastTheMonkey/advent-of-code | db80be6d87baba4d5315cc69276905c55762da86 | [
"MIT"
] | null | null | null | from utils import get_input_lines
# Advent of Code 2020 day 9 part 2: find a contiguous run of input numbers
# summing to the part-1 invalid number, then report min + max of that run.
n = 20874512
# sums[idx] accumulates the total of ints[idx:] -- i.e. every still-open
# contiguous range that starts at index idx and ends at the current line.
sums = []
ints = []
solved = False
for line in get_input_lines(__file__):
    i = int(line)
    # Open a new range starting at this element, then extend all ranges.
    sums.append(0)
    ints.append(i)
    for idx in range(len(sums)):
        sums[idx] = sums[idx] + i
        if sums[idx] == n:
            # Range [idx, current] hits the target exactly.
            subset = ints[idx:]
            print(min(subset) + max(subset))
            solved = True
            break
    if solved:
        break
| 19.272727 | 44 | 0.540094 | from utils import get_input_lines
# Advent of Code 2020 day 9 part 2: find a contiguous run of input numbers
# summing to the part-1 invalid number, then report min + max of that run.
n = 20874512
# sums[idx] accumulates the total of ints[idx:] -- i.e. every still-open
# contiguous range that starts at index idx and ends at the current line.
sums = []
ints = []
solved = False
for line in get_input_lines(__file__):
    i = int(line)
    # Open a new range starting at this element, then extend all ranges.
    sums.append(0)
    ints.append(i)
    for idx in range(len(sums)):
        sums[idx] = sums[idx] + i
        if sums[idx] == n:
            # Range [idx, current] hits the target exactly.
            subset = ints[idx:]
            print(min(subset) + max(subset))
            solved = True
            break
    if solved:
        break
| 0 | 0 | 0 |
47f18ea67a40d6e9e4db44332a0499fd963249fe | 579 | py | Python | ch02/conditionals.py | skinisbizapps/learning-python | 3c77fb94d978dd77057d1f0f4450d887dd389486 | [
"Apache-2.0"
] | null | null | null | ch02/conditionals.py | skinisbizapps/learning-python | 3c77fb94d978dd77057d1f0f4450d887dd389486 | [
"Apache-2.0"
] | null | null | null | ch02/conditionals.py | skinisbizapps/learning-python | 3c77fb94d978dd77057d1f0f4450d887dd389486 | [
"Apache-2.0"
] | null | null | null |
if __name__ == "__main__":
main() | 19.3 | 79 | 0.533679 |
def compare(x, y):
    """Describe how x relates to y (three-way comparison)."""
    if x < y:
        return "x is less than y"
    if x == y:
        return "x is equal to y"
    return "x is greater than y"
def another_compare(x, y):
    """Describe how x relates to y (two-way comparison only)."""
    if x < y:
        return "x is less than y"
    return "x is greater than or the same as y"
def main():
    """Exercise both helpers with x below, equal to and above y."""
    y = 100
    for x in (10, 100, 1000):
        print(compare(x, y))
        print(another_compare(x, y))
if __name__ == "__main__":
main() | 470 | 0 | 69 |
4eaca5bc138f355d112c860003e0c72d36d9f501 | 2,412 | py | Python | pyredditlive.py | nbr23/pyredditlive | c8ebccef8671201806682d2768a14e707e9814b5 | [
"MIT"
] | null | null | null | pyredditlive.py | nbr23/pyredditlive | c8ebccef8671201806682d2768a14e707e9814b5 | [
"MIT"
] | null | null | null | pyredditlive.py | nbr23/pyredditlive | c8ebccef8671201806682d2768a14e707e9814b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import asyncio
import websockets
import requests
import json
import sys
import os
import yaml
import urllib
import time
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| 30.15 | 180 | 0.609867 | #!/usr/bin/env python3
import asyncio
import websockets
import requests
import json
import sys
import os
import yaml
import urllib
import time
def load_config(config_path):
    """Load settings from a YAML file, with environment-variable overrides.

    PYRL_TELEGRAM_BOT / PYRL_TELEGRAM_CHAT_ID override (or supply) the
    'telegram_bot' / 'telegram_chat_id' keys.  Raises Exception when either
    key is still missing afterwards.
    """
    config = {}
    if os.path.exists(config_path):
        with open(config_path) as fh:
            config = yaml.load(fh, Loader=yaml.SafeLoader)
    for env_name, key in (
        ("PYRL_TELEGRAM_BOT", "telegram_bot"),
        ("PYRL_TELEGRAM_CHAT_ID", "telegram_chat_id"),
    ):
        if env_name in os.environ:
            config[key] = os.environ[env_name]
    if "telegram_bot" not in config or "telegram_chat_id" not in config:
        raise Exception("No configuration file found or environment variable set")
    return config
def get_ws_url(url):
    """Return the websocket URL of a live reddit thread.

    Fetches <url>about.json and reads its 'data' payload; raises Exception
    when the thread is not in the 'live' state.
    """
    about = requests.get(
        f"{url}about.json", headers={"User-agent": "Mozilla/5.0"}
    ).json()
    data = about.get("data", {})
    if data.get("state") != "live":
        raise Exception(f"Livethread state is {data.get('state')}")
    return data.get("websocket_url")
def post_update(update, config):
    """Relay the body of a livethread 'update' event to the Telegram chat.

    Non-'update' events and updates without a body are ignored.
    """
    if update.get("type") != "update":
        return
    body = update.get("payload", {}).get("data", {}).get("body")
    if body is None:
        return
    print(f"POSTING {body}")
    text = urllib.parse.quote_plus(body)
    requests.get(
        f'https://api.telegram.org/{config["telegram_bot"]}/sendMessage?chat_id={config.get("telegram_chat_id")}&text={text}'
    )
async def livethread(url, config):
    """Follow a reddit live thread's websocket forever, relaying updates.

    :param url: base URL of the live thread; 'about.json' is appended by
        get_ws_url, so it presumably must end with '/' -- TODO confirm.
    :param config: dict providing 'telegram_bot' and 'telegram_chat_id'.
    """
    while True:
        # Re-resolve the websocket URL on every (re)connection attempt.
        ws_url = get_ws_url(url)
        try:
            async with websockets.connect(ws_url) as websocket:
                # Announce the successful connection in the Telegram chat.
                requests.get(
                    f'https://api.telegram.org/{config["telegram_bot"]}/sendMessage?chat_id={config.get("telegram_chat_id")}&text={urllib.parse.quote_plus("Connected to "+ws_url)}'
                )
                print(f"Connected to {ws_url}")
                while True:
                    update = await websocket.recv()
                    print(f"RAW JSON:\n{update}")
                    post_update(json.loads(update), config)
        except websockets.ConnectionClosed:
            # Server dropped the socket; loop around and reconnect.
            continue
def main():
    """Load the configuration and watch the live thread named in argv[1].

    Retries forever when the connection attempt times out.
    """
    config = load_config("./config.yml")
    while True:
        try:
            asyncio.run(livethread(sys.argv[1], config))
        except asyncio.exceptions.TimeoutError:
            # Back off briefly before reconnecting.
            time.sleep(5)
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| 2,098 | 0 | 115 |
cf8a7bb7e19ae6d2eb73dfe3eefc77adf2a57cf3 | 1,069 | py | Python | pychecker/checker.py | PyVCEchecker/PyVCEchecker | 879756927c82a2612c817bdffbae25234ff62558 | [
"MIT"
] | null | null | null | pychecker/checker.py | PyVCEchecker/PyVCEchecker | 879756927c82a2612c817bdffbae25234ff62558 | [
"MIT"
] | null | null | null | pychecker/checker.py | PyVCEchecker/PyVCEchecker | 879756927c82a2612c817bdffbae25234ff62558 | [
"MIT"
] | null | null | null | import argparse
from pychecker.check import check_project, check_pkgver, print_results
# Command-line interface for PyChecker.
parser = argparse.ArgumentParser(
    description="PyChecker: check whether your project's Require-Python is correct"
)
# Options for checking a published PyPI package.
package_group = parser.add_argument_group("package")
for _flags, _help in (
    (("-p", "--package"), "Package name"),
    (("-v", "--version"), "Version of the package"),
):
    package_group.add_argument(*_flags, help=_help)
# Options for checking a local project tree.
project_group = parser.add_argument_group("project")
for _flags, _help in (
    (("-r", "--root"), "Root path of the project"),
    (("-c", "--python_requires"), "python_requires expression"),
):
    project_group.add_argument(*_flags, help=_help)
| 34.483871 | 88 | 0.724041 | import argparse
from pychecker.check import check_project, check_pkgver, print_results
# Command-line interface for PyChecker.
parser = argparse.ArgumentParser(
    description="PyChecker: check whether your project's Require-Python is correct"
)
# Options for checking a published PyPI package.
package_group = parser.add_argument_group("package")
for _flags, _help in (
    (("-p", "--package"), "Package name"),
    (("-v", "--version"), "Version of the package"),
):
    package_group.add_argument(*_flags, help=_help)
# Options for checking a local project tree.
project_group = parser.add_argument_group("project")
for _flags, _help in (
    (("-r", "--root"), "Root path of the project"),
    (("-c", "--python_requires"), "python_requires expression"),
):
    project_group.add_argument(*_flags, help=_help)
def main(args=None):
    """Entry point: dispatch to package-mode or project-mode checking.

    :param args: argument list for argparse; None means sys.argv[1:].
    """
    args = parser.parse_args(args)
    if args.package and args.version:
        # check a PyPI package
        results = check_pkgver(args.package, args.version)
    elif args.root and args.python_requires:
        # check a local project
        # BUG FIX: the parser defines --root (dest 'root'); there is no
        # 'path' attribute, so args.path raised AttributeError here.
        results = check_project(args.root, args.python_requires)
    else:
        # print_help() writes to stdout itself and returns None, so do not
        # wrap it in print() (which used to emit a stray "None" line).
        parser.print_help()
        results = None
    if results:
        print_results(results)
| 419 | 0 | 23 |
4de13b8171f09853c55e978ef311746acd908c93 | 597 | py | Python | setup.py | rohit18115/OQMscores-python | 96cddadbd8eb2b12df72a216e6093e2f7d642531 | [
"MIT"
] | null | null | null | setup.py | rohit18115/OQMscores-python | 96cddadbd8eb2b12df72a216e6093e2f7d642531 | [
"MIT"
] | null | null | null | setup.py | rohit18115/OQMscores-python | 96cddadbd8eb2b12df72a216e6093e2f7d642531 | [
"MIT"
] | null | null | null | from setuptools import setup
# Use the README verbatim as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(name='oqmscore',
      version='0.1',
      description='Objective quality measure score for speech',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url="https://github.com/rohit18115/OQMscores-python",
      packages=['oqmscore'],
      # Runtime dependencies for computing the quality measures.
      install_requires=[
          'pesq',
          'numpy',
          'librosa',
          'scipy'
      ],
      author='Rohit Arora',
      author_email='rohit18115@iiitd.ac.in',
      zip_safe=False)
| 27.136364 | 63 | 0.639866 | from setuptools import setup
# Use the README verbatim as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(name='oqmscore',
      version='0.1',
      description='Objective quality measure score for speech',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url="https://github.com/rohit18115/OQMscores-python",
      packages=['oqmscore'],
      # Runtime dependencies for computing the quality measures.
      install_requires=[
          'pesq',
          'numpy',
          'librosa',
          'scipy'
      ],
      author='Rohit Arora',
      author_email='rohit18115@iiitd.ac.in',
      zip_safe=False)
| 0 | 0 | 0 |
444a8d70b910bb9557b6b49213f60ccdb90ea5bf | 2,039 | py | Python | core/etl/models/service.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | core/etl/models/service.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | core/etl/models/service.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# ServiceModel
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional, List
from datetime import datetime
# NOC modules
from .base import BaseModel
from .typing import Reference
from .serviceprofile import ServiceProfile
from .managedobject import ManagedObject
from .subscriber import Subscriber
| 26.828947 | 85 | 0.574301 | # ----------------------------------------------------------------------
# ServiceModel
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional, List
from datetime import datetime
# NOC modules
from .base import BaseModel
from .typing import Reference
from .serviceprofile import ServiceProfile
from .managedobject import ManagedObject
from .subscriber import Subscriber
class Service(BaseModel):
    """ETL row model for a service record.

    Fields mirror the CSV layout in _csv_fields; Reference[...] fields
    link to other ETL models by remote id. Optional fields may be absent
    in the extracted data.
    """

    id: str
    parent: Optional[Reference["Service"]]
    subscriber: Optional[Reference["Subscriber"]]
    profile: Reference["ServiceProfile"]
    ts: Optional[datetime]
    # Workflow state
    state: Optional[str]
    # Last state change
    state_changed: Optional[datetime]
    # Workflow event
    event: Optional[str]
    agreement_id: Optional[str]
    order_id: Optional[str]
    stage_id: Optional[str]
    stage_name: Optional[str]
    stage_start: Optional[datetime]
    account_id: Optional[str]
    address: Optional[str]
    managed_object: Optional[Reference["ManagedObject"]]
    nri_port: Optional[str]
    # CPE (customer-premises equipment) attributes
    cpe_serial: Optional[str]
    cpe_mac: Optional[str]
    cpe_model: Optional[str]
    cpe_group: Optional[str]
    labels: Optional[List[str]]
    description: Optional[str] = None

    class Config:
        # Map model field names onto the legacy extractor column names.
        fields = {"state_changed": "logical_status_start", "state": "logical_status"}
        allow_population_by_field_name = True

    # Column order used when (de)serializing this model to CSV.
    _csv_fields = [
        "id",
        "parent",
        "subscriber",
        "profile",
        "ts",
        "state",
        "state_changed",
        "agreement_id",
        "order_id",
        "stage_id",
        "stage_name",
        "stage_start",
        "account_id",
        "address",
        "managed_object",
        "nri_port",
        "cpe_serial",
        "cpe_mac",
        "cpe_model",
        "cpe_group",
        "description",
        "labels",
    ]
| 0 | 1,439 | 23 |
b47ee23494a4382d2a0cc60f01cc0616a4774df2 | 1,955 | py | Python | pubsub/animation/scanning-pixel.py | yanigisawa/coffee-scale | 8131f5485646fa6b9803ca13b3e20d02d9debfce | [
"MIT"
] | 13 | 2015-07-29T12:07:40.000Z | 2018-06-29T13:21:41.000Z | pubsub/animation/scanning-pixel.py | yanigisawa/coffee-scale | 8131f5485646fa6b9803ca13b3e20d02d9debfce | [
"MIT"
] | 4 | 2015-05-29T11:44:43.000Z | 2018-07-25T20:20:10.000Z | pubsub/animation/scanning-pixel.py | yanigisawa/coffee-scale | 8131f5485646fa6b9803ca13b3e20d02d9debfce | [
"MIT"
] | 7 | 2016-01-19T21:56:08.000Z | 2018-04-25T04:34:32.000Z | #!/usr/bin/env python
from samplebase import SampleBase
# Main function
# NOTE(review): this fragment looks like a truncated duplicate of the script
# below — SimpleSquare is referenced without a preceding definition here.
if __name__ == "__main__":
    simple_square = SimpleSquare()
    # process() parses CLI options and invokes run(); False means bad args.
    if (not simple_square.process()):
        simple_square.print_help()
| 34.910714 | 92 | 0.526854 | #!/usr/bin/env python
from samplebase import SampleBase
class SimpleSquare(SampleBase):
    """Animate a single lit pixel scanning across an RGB LED matrix.

    The pixel sweeps horizontally, reversing at the left/right edges,
    while its row advances downward and wraps back to the top.
    """

    def __init__(self, *args, **kwargs):
        # SampleBase owns argument parsing and the self.matrix handle.
        super(SimpleSquare, self).__init__(*args, **kwargs)

    def run(self):
        # Double-buffer: draw off-screen, then swap on vsync (no tearing).
        offset_canvas = self.matrix.CreateFrameCanvas()
        x, y = 0, 0
        min_x, max_x = 0, offset_canvas.width
        min_y, max_y = 0, offset_canvas.height
        direction = 1  # +1 = moving right, -1 = moving left
        while True:
            self.usleep(50000)  # 50 ms per frame (~20 fps)
            # Repaint every pixel; only (x, y) is lit (orange-ish).
            for i in range(0, max_x):
                for j in range(0, max_y):
                    if i == x and j == y:
                        offset_canvas.SetPixel(i, j, 150, 50, 0)
                    else:
                        offset_canvas.SetPixel(i, j, 0, 0, 0)
            x = x + 1 * direction
            # NOTE(review): x may momentarily equal max_x / min_x - 1 (one
            # step past the drawable range) before reversing; that frame the
            # pixel is simply not drawn.
            if x > max_x or x < min_x:
                direction = direction * -1
            y = y + 1
            if y > max_y:
                y = 0
            # (kept) earlier drawing experiments:
            # for x in range(0, self.matrix.width):
            # offset_canvas.SetPixel(x, x, 255, 255, 255)
            # offset_canvas.SetPixel(offset_canvas.height - 1 - x, x, 255, 0, 255)
            # for x in range(0, offset_canvas.width):
            # offset_canvas.SetPixel(x * 2, x, 255, 255, 255)
            # offset_canvas.SetPixel((offset_canvas.height - 1 - x) * 2, x, 255, 0, 255)
            # for x in range(0, offset_canvas.width):
            # offset_canvas.SetPixel(x, 0, 255, 0, 0)
            # offset_canvas.SetPixel(x, offset_canvas.height - 1, 255, 255, 0)
            # for y in range(0, offset_canvas.height):
            # offset_canvas.SetPixel(0, y, 0, 0, 255)
            # offset_canvas.SetPixel(offset_canvas.width - 1, y, 0, 255, 0)
            offset_canvas = self.matrix.SwapOnVSync(offset_canvas)
# Main function
if __name__ == "__main__":
    simple_square = SimpleSquare()
    # process() parses CLI options and invokes run(); False means bad args.
    if (not simple_square.process()):
        simple_square.print_help()
| 1,659 | 10 | 76 |
8142df55e374bc2702c74ff5c5b846221102fd7e | 66,710 | py | Python | statspark/r_inspired.py | joon3216/statspark | 5c0c5b31694ed76bf43251eab8806bbf9f79a50e | [
"MIT"
] | null | null | null | statspark/r_inspired.py | joon3216/statspark | 5c0c5b31694ed76bf43251eab8806bbf9f79a50e | [
"MIT"
] | null | null | null | statspark/r_inspired.py | joon3216/statspark | 5c0c5b31694ed76bf43251eab8806bbf9f79a50e | [
"MIT"
] | null | null | null |
from functools import reduce
from patsy import dmatrices
from scipy.optimize import curve_fit, fmin
from scipy.stats import chi2
from sklearn.metrics import roc_curve
from statsmodels.stats.outliers_influence import variance_inflation_factor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.api as sm
# Complete
class Interval():
    '''A connected 1-d Interval.'''

    def __init__(self, start, stop = None):
        '''(Interval, args[, number]) -> None

        Initialize an Interval.

        Possible start:
        1) one number num: initializes (-inf, num];
        if num == np.inf, then it initializes (-inf, inf).
        2) [num0, num1]: initializes [num0, num1] if num0 <= num1;
        if num1 == np.inf, then it initializes [num0, inf).
        3) (num0, num1): initializes (num0, num1) if num0 <= num1

        If both start and stop are specified, then it initializes
        (start, stop] given start <= stop. If stop == np.inf,
        then this initializes (start, inf).

        >>> int1 = Interval(.45)
        >>> int1
        (-inf, 0.45]
        >>> int2 = Interval([.96, 1.03])
        >>> int2
        [.96, 1.03]
        >>> int3 = Interval((2.1, 5))
        >>> int3
        (2.1, 5)
        >>> int4 = Interval(2.1, 5)
        >>> int4
        (2.1, 5]
        '''
        if stop is None:
            if isinstance(start, (float, int)):
                # bool is a subclass of int; normalize True/False to 1/0
                # so endpoints are plain numbers.
                ep = int(start) if isinstance(start, bool) else start
                self.__lower = -np.inf
                self.__upper = ep
                self.loweropen = True
                # an infinite endpoint is always treated as open
                self.upperopen = True if ep == np.inf else False
            elif isinstance(start, (list, tuple)):
                assert len(start) == 2, \
                    "The length of an argument must be 2, not " +\
                    str(len(start)) + "."
                assert isinstance(start[0], (float, int)) and \
                    isinstance(start[1], (float, int)), \
                    'If two endpoints are given, then both points ' +\
                    'must be a number. Currently, they are of ' +\
                    str(type(start[0])) + ' and ' +\
                    str(type(start[1])) + '.'
                assert start[0] <= start[1], \
                    "Numbers in iterables must be ordered."
                self.__lower = int(start[0]) if isinstance(start[0], bool)\
                    else start[0]
                self.__upper = int(start[1]) if isinstance(start[1], bool)\
                    else start[1]
                # list => closed interval on both ends; tuple => open.
                self.loweropen = False if isinstance(start, list) else True
                self.upperopen = False if isinstance(start, list) else True
            else:
                msg = "Interval is initialized with a number, list, or " +\
                    "tuple; don't know how to initialize " +\
                    str(type(start)) + "."
                raise TypeError(msg)
        else:
            # Two positional endpoints => half-open interval (start, stop].
            assert isinstance(start, (float, int)) and \
                isinstance(stop, (float, int)), \
                'If two endpoints are given, then both points ' +\
                'must be a number. Currently, they are of ' +\
                '{0} and {1}.'.format(type(start), type(stop))
            assert start <= stop, \
                'The given endpoints are ' + str(start) +\
                ' and ' + str(stop) + ', in that order. ' +\
                'Change the order of the two and try again.'
            ep0 = int(start) if isinstance(start, bool) else start
            ep1 = int(stop) if isinstance(stop, bool) else stop
            self.__lower = ep0
            self.__upper = ep1
            self.loweropen = True
            self.upperopen = True if stop == np.inf else False
class Pipe():
    '''A class that enables you to Pipe.'''

    def __init__(self, obj):
        '''Wrap obj so that callables can be chained onto it.'''
        self.obj = obj

    def __repr__(self):
        '''Delegate the printed form to the wrapped object.'''
        return str(self.obj)

    def collect(self):
        '''Unwrap and return the piped value.'''
        return self.obj

    def pipe(self, func, *args, **kwargs):
        '''Apply func(obj, *args, **kwargs) and re-wrap the result.'''
        piped_value = func(self.obj, *args, **kwargs)
        return Pipe(piped_value)
def npmap(func, *iterable):
    '''(function, *iterable) -> np.array

    Map func over the given iterable(s) in lockstep and return the
    results as a np.array. (A named def instead of a lambda assignment,
    per PEP 8 E731; callers still see the same callable named npmap.)
    '''
    return np.array(list(map(func, *iterable)))
def add_intercept(data, int_name = 'Intercept', loc = 0, inplace = False):
    '''(pd.DataFrame[, str, int, bool]) -> pd.DataFrame

    Precondition:
    1. -(len(data.columns) + 1) <= loc <= len(data.columns)
    2. int_name not in data.columns

    Insert a column of 1s named int_name into data at position loc.
    Mutates data (and returns None) when inplace is True (False by
    default); otherwise returns a modified copy, leaving data untouched.
    '''
    n_cols = len(data.columns)
    assert int_name not in list(data.columns), \
        '{0} already exists in data. Try different int_name.'\
        .format(int_name)
    assert -(n_cols + 1) <= loc <= n_cols, \
        'loc must be in between {0} and {1}. Current loc is {2}.'\
        .format(-(n_cols + 1), n_cols, loc)
    # Negative loc counts from the right, including the new slot.
    loc = loc + n_cols + 1 if loc < 0 else loc
    target = data if inplace else data.copy()
    target.insert(loc, int_name, 1)
    if not inplace:
        return target
def additive_terms(terms):
    '''([str]) -> str

    Return the additive (R/patsy-style) part of a formula built from
    terms, i.e. the terms joined by ' + '. Returns '' for an empty list.

    >>> additive_terms(['a', 'b', 'c'])
    'a + b + c'
    '''
    # str.join replaces the original append-then-truncate ([:-3]) idiom;
    # it produces identical output for all inputs, including [] and
    # single-element lists.
    return ' + '.join(terms)
def csum_N_pois(pmf, support, lambd, eps = 1e-05):
    '''(function, np.array, number[, float]) -> np.array

    Preconditions:
    1. pmf is a pmf of X_i where the random summation S = X_1 + ... + X_N
    with N ~ Pois(lambd) has 0, 1, ..., M - 1 as the first M element of
    its support.
    2. pmf is a function whose output is np.array whenever the input is
    np.array.
    3. support == np.arange(0, l + 1), where l is the largest number of
    the support of pmf.
    4. lambd > 0
    5. 0 < eps < 1

    Return the approximate probability mass function of S, i.e.
    P(S = x | S < M) for some appropriate integer M determined by
    P(S >= M) < eps, where S is the sum of iid X_i's with
    i = 1, ..., N ~ Pois(lambd), X_i ~ pmf, and X_i's support is
    a subset of np.arange(0, l + 1) (= support) with l being the largest
    element of X_i's support.

    >>> def dY(y):
    ...     def pY(d):
    ...         if d in [1, 4]:
    ...             return .25
    ...         elif d == 2:
    ...             return .5
    ...         else:
    ...             return 0
    ...     if not hasattr(y, '__iter__'):
    ...         return pY(y)
    ...     return npmap(pY, y)
    ...
    >>> result_Y = csum_N_pois(dY, np.arange(0, 5), 3)
    >>> M_Y = len(result_Y)
    >>> print(M_Y, sum(result_Y))
    39 0.9999999999999998
    >>> result_Y[0:4]
    array([0.04978729, 0.03734044, 0.08868328, 0.05951115])
    '''
    pmf_vec = pmf(support)
    # Define the pgf of X_i: g(t) = E[t^X] = sum_d t^d * P(X = d)
    g = lambda t: npmap(lambda d: sum(d ** support * pmf_vec), t)
    # Find M via a Chernoff-type tail bound: minimize over t > 1 the bound
    # on M such that P(S >= M) <= exp(lambd*(g(t)-1)) / t^M < eps.
    Ms = lambda t: (-lambd * (1 - g(t)) - np.log(eps)) / np.log(t)
    M = np.ceil(fmin(Ms, 1.001, full_output = True, disp = False)[1])
    # Append 0's so the DFT length covers the support of S up to M - 1.
    pmf_vec = np.append(pmf_vec, np.zeros(int(M - len(pmf_vec))))
    # Apply DFT and inverse DFT: the pgf of the compound Poisson sum is
    # g_S(t) = exp(-lambd * (1 - g(t))), evaluated at the M roots of unity.
    gtks = np.fft.fft(pmf_vec)
    gS_gtks = np.exp(-lambd * (1 - gtks))
    pS_tks = np.fft.ifft(gS_gtks).real
    return pS_tks
def dcast(data, formula, value_var = None):
    '''(pd.DataFrame, str[, str]) -> pd.DataFrame

    Return the grouped DataFrame based on data and formula (an R
    reshape2::dcast-style string 'row_var1 + row_var2 ~ col_var1 + ...').
    If value_var is specified, then it is used to populate the output
    DataFrame; if not specified, then it is guessed from data and formula
    (the single column not mentioned in the formula).
    '''
    all_cols = list(data.columns)
    # Left of '~': variables that become the row index.
    indices_input = []
    indices = formula[:(formula.index('~'))].split('+')
    if len(indices) == 1:
        indices = indices[0].strip()
        indices_input.append(data[indices])
        cols_used = [indices]
    else:
        indices = list(map(lambda x: x.strip(), indices))
        for ind in indices:
            indices_input.append(data[ind])
        cols_used = indices[:]
    # Right of '~': variables that become the columns.
    cols_input = []
    cols = formula[(formula.index('~') + 1):].split('+')
    if len(cols) == 1:
        cols = cols[0].strip()
        cols_input.append(data[cols])
        cols_used.append(cols)
    else:
        cols = list(map(lambda x: x.strip(), cols))
        for c in cols:
            cols_input.append(data[c])
        cols_used.extend(cols)
    # The value column is whatever the formula did not mention.
    value_col = list(set(all_cols).difference(set(cols_used)))
    assert len(value_col) == 1 or value_var is not None, \
        'value column ambiguous; should be one of: {0}'.format(value_col)
    # NOTE(review): when exactly one candidate remains, it wins even if
    # value_var was also supplied — confirm this precedence is intended.
    if len(value_col) == 1:
        value_col = value_col[0]
    elif value_var is not None:
        value_col = value_var
    # aggfunc is the identity: cells hold raw values, not aggregates.
    return pd.crosstab(
        index = indices_input,
        columns = cols_input,
        values = data[value_col],
        aggfunc = lambda x: x
    )
def determine_type(actual, pred, p_thres):
    '''(np.array, np.array, float) -> np.array

    Threshold pred at p_thres and label each observation via
    classifier() (defined elsewhere in this module) as one of
    'tpn', 'fp', or 'fn'.
    '''
    predicted_positive = pred > p_thres
    labels = map(classifier, actual, predicted_positive)
    return np.array(list(labels))
def dist_to_point(X, point):
    '''(pd.DataFrame or np.array, np.array) -> float or np.array

    Precondition: X.shape[1] == len(point)

    Return the Euclidean distance from each row of X to point.
    '''
    if 'pandas' in str(type(X)):
        X = X.values
    distances = [np.linalg.norm(row - point) for row in X]
    return np.array(distances)
def dpmf(x, pmf_vec, support_vec = None):
    '''(object or *iterable, *iterable[, *iterable]) -> number or np.array

    Preconditions:
    1. Elements of x are of the same type as elements of support_vec,
    if support_vec is specified. If support_vec is not specified, then
    x must be a number or an iterable object with numeric elements.
    2. sum(pmf_vec) == 1
    3. len(pmf_vec) == len(support_vec) if support_vec is specified.
    4. If support_vec is specified, then each element of support_vec
    must be hashable, i.e. element.__hash__ is not None

    Evaluate the pmf defined by (support_vec[i] -> pmf_vec[i]) at each
    element of x; points outside the support get probability 0. When
    support_vec is omitted, it defaults to np.arange(0, len(pmf_vec)).

    >>> # Example 1
    >>> pmf_eg1 = [0.25, 0.5 , 0.25]
    >>> support_eg1 = np.array([1, 2, 4])
    >>> dpmf(1, pmf_eg1, support_eg1)
    0.25
    >>> dpmf([3, 4, 6], pmf_eg1, support_eg1)
    array([0.  , 0.25, 0.  ])

    >>> # Example 2
    >>> pmf_eg2 = (.25, .4, .35)
    >>> support_eg2 = ['apple', 'orange', 'neither']
    >>> dpmf('orange', pmf_eg2, support_eg2)
    0.4
    >>> dpmf(['apple', 'neither'], pmf_eg2, support_eg2)
    array([0.25, 0.35])
    '''
    if support_vec is None:
        support_vec = np.arange(0, len(pmf_vec))
    # One lookup table from support point to its probability.
    prob_of = dict(zip(support_vec, pmf_vec))
    look_up = lambda d: prob_of[d] if d in prob_of else 0
    # A string is iterable but represents a single support point.
    if hasattr(x, '__iter__') and type(x) != str:
        return np.array([look_up(d) for d in x])
    return look_up(x)
def fft_curve(tt, yy, only_sin = False):
    '''(array-like, array-like, bool) -> {str: number, lambda, or tuple}

    Estimate sin + cos curve of yy through the input time sequence tt,
    and return fitting parameters "amp", "omega", "phase", "offset",
    "freq", "period", and "fitfunc". Set only_sin = True to fit only a
    sine curve.
    Reference: https://stackoverflow.com/questions/16716302/how-do-i-fit-a-sine-curve-to-my-data-with-pylab-and-numpy

    Note: sinfunc and curve are model functions defined elsewhere in this
    module; presumably sinfunc(t, A, w, p, c) = A*sin(w*t + p) + c and
    curve adds a second sinusoid — confirm against their definitions.
    '''
    tt = np.array(tt)
    yy = np.array(yy)
    assert len(set(np.diff(tt))) == 1, \
        'tt does not have an uniform spacing.'
    ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0]))  # assume uniform spacing
    Fyy = abs(np.fft.fft(yy))
    # excluding the zero frequency "peak", which is related to offset
    guess_freq = abs(ff[np.argmax(Fyy[1:]) + 1])
    # std * sqrt(2) is the amplitude of a pure sinusoid with that std.
    guess_amp = np.std(yy) * 2. ** 0.5
    guess_offset = np.mean(yy)
    # Initial parameters [amplitude, angular frequency, phase, offset].
    guess = [
        guess_amp,
        2. * np.pi * guess_freq,
        0.,
        guess_offset
    ]
    if only_sin:
        guess = np.array(guess)
        popt, pcov = curve_fit(sinfunc, tt, yy, p0 = guess)
        A_1, w_1, p_1, c = popt
        fitfunc = lambda t: sinfunc(t, A_1, w_1, p_1, c)
        # No second component in the pure-sine fit.
        A_2, w_2, p_2 = 0, 0, 0
    else:
        # Add a second (amplitude, omega, phase) triple and halve the
        # guesses so the two components start with equal shares.
        guess.extend([
            guess_amp,
            2. * np.pi * guess_freq,
            0.
        ])
        guess = np.array(guess) / 2
        popt, pcov = curve_fit(curve, tt, yy, p0 = guess)
        A_1, w_1, p_1, c, A_2, w_2, p_2 = popt
        fitfunc = lambda t: curve(t, A_1, w_1, p_1, c, A_2, w_2, p_2)
    return {
        "amp": [A_1, A_2],
        "omega": [w_1, w_2],
        "phase": [p_1, p_2],
        "offset": c,
        "fitfunc": fitfunc,
        "maxcov": np.max(pcov),
        "rawres": (guess, popt, pcov)
    }
def fusion_estimates(y, lambd, theta = None, max_iter = 1000, eps = 1e-05):
    '''(np.array, number[, np.array, int, number]) ->
    {str: np.array or number}

    Preconditions:
    1. len(y) == len(theta) if theta specified.
    2. lambd > 0 and eps > 0
    3. max_iter > 1

    Calculate the fusion estimates theta_i's in y_i = theta_i + error_i.
    Return the dictionary that stores:
    - 'theta', the fusion estimates of y iterated from theta with the
    maximum iteration max_iter and the cost difference threshold eps.
    - 'phi', the differences of each 'theta'
    - 'lambd', the lambd specified
    - 'iteration', the number of iterations, and
    - 'costs', the cost function evaluated at each iteration where the
    first cost is calculated at iteration 0.

    See https://joon3216.github.io/research_materials/2018/non_separable_penalty
    for details.
    '''
    n = len(y)
    if theta is None:
        theta = y.copy()
    # phi[j] = theta[j + 1] - theta[j]; the L1 penalty is on these jumps.
    phi = np.diff(theta)
    phisums_old = np.cumsum(phi)
    # Closed-form update of the first coordinate given current jumps.
    theta_1_new = (sum(y) - sum(phisums_old)) / n
    # Objective: squared error + lambd * total variation of theta.
    cost = sum((y - theta) ** 2) + lambd * sum(abs(phi))
    costs = []
    costs.append(cost)
    there_is_a_progress = True
    iteration = 0
    while there_is_a_progress and iteration < max_iter:
        # Coordinate descent over phi_1..phi_{n-1}; slot 0 is a dummy
        # that is sliced off after the sweep.
        phi_new = np.zeros(n)
        for j in range(1, n):
            phisums_new = np.cumsum(phi_new)
            req = sum(
                phisums_old[(j - 1):(n - 1)] -\
                phisums_old[j - 1] + phisums_new[j - 1]
            )
            discri = sum(y[j:n]) - (n - (j + 1) + 1) * theta_1_new - req
            # Soft-thresholding at lambd / 2: |discri| <= lambd / 2
            # leaves phi_new[j] at 0.
            if discri < -lambd / 2:
                phi_new[j] = (discri + lambd / 2) / (n - (j + 1) + 1)
            elif discri > lambd / 2:
                phi_new[j] = (discri - lambd / 2) / (n - (j + 1) + 1)
        # Drop the dummy slot; phisums_new is the cumsum from the last
        # inner iteration (computed before phi_new[n - 1] was set —
        # NOTE(review): confirm this lag matches the derivation).
        phi_new = phi_new[1:]
        phisums_new = phisums_new[1:]
        theta = np.append(theta_1_new, theta_1_new + phisums_new)
        cost = sum((y - theta) ** 2) + lambd * sum(abs(phi_new))
        theta_1_new = (sum(y) - sum(phisums_new)) / n
        phisums_old = phisums_new
        iteration += 1
        costs.append(cost)
        # Stop once the cost improvement falls below eps.
        there_is_a_progress = not (abs(costs[iteration - 1] - cost) <= eps)
    return {
        'theta': theta,
        'phi': phi_new,
        'lambd': lambd,
        'iteration': iteration,
        'costs': np.array(costs)
    }
def gauss_seidel(y, B = None, theta = None, lambd = None, max_iter = 50,
                 eps = 1e-08):
    '''(1d-array[, 2d-array, 1d-array, float, int, float]) ->
    {str: np.array and str: number}

    Preconditions:
    1. If B is None, then lambd must not be None and lambd > 0, as well as
    len(y) >= 5.
    2. If B is not None, then B must be either strictly diagonally
    dominant, symmetric positive definite, or both.
    3. If theta is not None, then len(y) == len(theta).
    4. eps > 0
    5. max_iter >= 1

    Approximate theta that solves the linear equation y = B @ theta,
    where len(y) == n and B is n-by-n, using the Gauss-Seidel method.
    If B is specified, then lambd is ignored; if B is not specified,
    then lambd must be positive and be specified since the following
    B will be used in the equation:

    >>> n = len(y) # must be at least 5
    >>> B_lambd = np.zeros(n ** 2).reshape(n, n)
    >>> B_lambd[0, [0, 1, 2]] = [1, -2, 1]
    >>> B_lambd[1, [0, 1, 2, 3]] = [-2, 5, -4, 1]
    >>> for j in range(2, n - 2):
    ...     B_lambd[j, [j - 2, j - 1, j, j + 1, j + 2]] = [1, -4, 6, -4, 1]
    ...
    >>> B_lambd[n - 2, [-4, -3, -2, -1]] = [1, -4, 5, -2]
    >>> B_lambd[n - 1, [-3, -2, -1]] = [1, -2, 1]
    >>> B_lambd = lambd * B_lambd
    >>> B = B_lambd + np.identity(n)

    If theta is None, then the initial guess starts with theta = y.

    Returns a dict with keys 'theta' (the approximation), 'lambd',
    'iteration' (number of sweeps performed), and 'errors'
    (np.array of ||B @ theta - y|| per iteration, including iteration 0).
    '''
    assert eps > 0, 'eps must be positive. Current value: ' + str(eps)
    max_iter = int(max_iter)
    assert max_iter >= 1, \
        'max_iter must be at least 1. Current value: ' + str(max_iter)
    y = np.array(y)
    n = len(y)
    if B is None:
        msg = 'If B is None, then lambd must be '
        assert lambd is not None, msg + 'specified.'
        assert lambd > 0, msg + 'positive. Current lambd == ' + str(lambd)
        assert n >= 5, \
            'If B is None, then len(y) must be at least 5. ' +\
            'Currently, len(y) == ' + str(n) + '.'
        # Second-difference penalty matrix (smoothing-spline-like band
        # structure), scaled by lambd, plus the identity.
        B_lambd = np.zeros(n ** 2).reshape(n, n)
        B_lambd[0, [0, 1, 2]] = [1, -2, 1]
        B_lambd[1, [0, 1, 2, 3]] = [-2, 5, -4, 1]
        for j in range(2, n - 2):
            B_lambd[j, [j - 2, j - 1, j, j + 1, j + 2]] = [1, -4, 6, -4, 1]
        B_lambd[n - 2, [-4, -3, -2, -1]] = [1, -4, 5, -2]
        B_lambd[n - 1, [-3, -2, -1]] = [1, -2, 1]
        B_lambd = lambd * B_lambd
        B = B_lambd + np.identity(n)
    else:
        B = np.array(B).copy()
        assert B.shape == (n, n), \
            'B.shape == {0}, not {1}'.format(B.shape, (n, n))
        # Gauss-Seidel converges when B is strictly diagonally dominant
        # (by columns or rows) or symmetric positive definite; verify one.
        if (abs(B).sum(axis = 0) - 2 * abs(B).diagonal() < 0).all():
            pass
        elif (abs(B).sum(axis = 1) - 2 * abs(B).diagonal() < 0).all():
            pass
        else:
            msg2 =\
                'B given is neither strictly diagonally dominant ' +\
                'nor symmetric positive definite.'
            if (B.T == B).all():
                # A symmetric matrix is positive definite iff its
                # Cholesky factorization exists; catch only the failure
                # of that factorization (the original bare except also
                # swallowed KeyboardInterrupt/SystemExit).
                try:
                    np.linalg.cholesky(B)
                except np.linalg.LinAlgError as err:
                    raise ValueError(msg2) from err
            else:
                raise ValueError(msg2)
    # Split B = (L + D) + U; each sweep solves (L + D) theta_new = y - U theta.
    LD = np.tril(B)
    U = B - LD
    if theta is None:
        theta = y.copy()
    else:
        theta = np.array(theta)
        assert len(y) == len(theta), \
            'If the initial theta is specified, then the length ' +\
            'of theta must be the same as y. Currently, ' +\
            'len(y) == {0} != {1} == len(theta)'.format(len(y), len(theta))
    iteration = 0
    errors = [np.linalg.norm(B @ theta - y)]
    no_conv = True
    while no_conv:
        theta = np.linalg.inv(LD) @ (y - (U @ theta))
        errors.append(np.linalg.norm(B @ theta - y))
        iteration += 1
        # Stop on convergence or on hitting the iteration budget.
        if errors[-1] < eps or iteration == max_iter:
            no_conv = False
    errors = np.array(errors)
    return {
        'theta': theta,
        'lambd': lambd,
        'iteration': iteration,
        'errors': errors
    }
def get_p_thres(roc_tbl, criterion = None):
    '''(returning pd.DataFrame of produce_roc_table[, [str, number]])
    -> float

    Precondition: criterion in [('tpr', x), ('fpr', y)]
    for some 0 < x < 1 and 0 < y < 1 (criterion need not be a tuple).

    Return the probability threshold from roc_tbl based on criterion.
    By default, the function returns the threshold that yields the
    minimum distance from the roc curve to the point (fpr, tpr) = (0, 1).
    If criterion == ('tpr', x) for some 0 < x < 1, then it returns a
    probability threshold that achieves the true positive rate of at
    least x and has the minimum false positive rate;
    if criterion == ('fpr', y) for some 0 < y < 1, then it returns a
    probability threshold that achieves the false positive rate of at
    most y and has the maximum true positive rate.
    '''
    if criterion is None:
        # Closest-to-(0, 1) rule: pick the row minimizing the distance
        # to the ideal corner of ROC space.
        dtp = roc_tbl['dist_to_optimal_point']
        p_thres = roc_tbl\
            .loc[lambda x: x['dist_to_optimal_point'] == np.min(dtp)]\
            ['thresholds']\
            .values[0]
    else:
        # Validate criterion: a 2-element, non-string array-like of
        # ('fpr' or 'tpr', number in (0, 1)).
        msg = 'If criterion is specified, '
        assert len(criterion) == 2, \
            msg + 'the length of criterion must be 2, not ' +\
            str(len(criterion)) + '.'
        assert type(criterion) != str, \
            msg + 'then it must be an array-like object, not a string.'
        assert criterion[0] in ['fpr', 'tpr'], \
            msg + 'then the first element must be exactly one of ' +\
            '"fpr" or "tpr", not ' + str(criterion[0]) + '.'
        type1 = str(type(criterion[1]))
        assert 'float' in type1 or 'int' in type1, \
            msg + 'then the second element must be a number, not ' +\
            type1 + '.'
        assert 0 < criterion[1] < 1, \
            msg + 'then the second element must be a number on the ' +\
            'interval (0, 1), not ' + str(criterion[1]) + '.'
        if criterion[0] == 'tpr':
            # Optimal p_thres is values[0], but it sometimes does not
            # result in a desired tpr. This is because produce_roc_table()
            # uses sklearn roc_curve with drop_intermediate = True, and
            # a very small change (around a scale of 1e-09) in the
            # threshold affects tpr. values[1] is less optimal, but always
            # achieves the desired tpr.
            p_thres = roc_tbl\
                .loc[lambda x: x['tpr'] >= criterion[1]]\
                ['thresholds']\
                .values[1]
        else:
            # Optimal p_thres is values[-1], but values[-2] is used
            # by the same reasoning as above.
            p_thres = roc_tbl\
                .loc[lambda x: x['fpr'] <= criterion[1]]\
                ['thresholds']\
                .values[-2]
    return p_thres
def get_response(mod):
    '''(sm.GLMResultsWrapper) -> str

    Extract the name of the response (dependent) variable from the text
    of mod.summary().
    '''
    text = str(mod.summary())
    start = text.index('Dep. Variable')
    stop = text.index('No. Observations:')
    field = text[start:stop].strip()
    # Drop the 'Dep. Variable:' label (14 characters) and padding.
    return field[14:].strip()
def hsm(x, tau = .5):
    '''(pd.Series, float) -> float

    Precondition: 0 < tau < 1

    Estimate the mode of x by the half sample mode method: repeatedly
    keep the densest window containing a tau-fraction of the sorted
    sample, and return the mean once at most two points remain.
    '''
    n = len(x)
    x = x.sort_values()
    # Window size: ceil for tau <= .5, floor otherwise.
    m = int(np.ceil(tau * n)) if tau <= .5 else int(np.floor(tau * n))
    m1 = int(m - 1)
    # Positional slices (Series[int:int] slices by position): x2 and x1
    # are the right and left endpoints of every m-wide window.
    x2 = x[(m - 1):n]
    x1 = x[0:(n - m1)]
    k = np.arange(1, n - m1 + 1)
    # Keep the window(s) of minimum width (densest region).
    k = k[x2.values - x1.values == min(x2.values - x1.values)]
    # Ties are broken at random — this makes the estimate
    # non-deterministic unless np.random is seeded by the caller.
    k = np.random.choice(k, 1)[0] if len(k) > 1 else k[0]
    x = x[int(k - 1):int(k + m1)]
    # Recurse on the selected window until <= 2 points remain.
    r = x.mean() if len(x) <= 2 else hsm(x, tau = tau)
    return r
def impute_em(X, max_iter = 3000, eps = 1e-08):
    '''(np.array, int, number) -> {str: np.array or int}

    Precondition: max_iter >= 1 and eps > 0

    Return the dictionary with five keys where:
    - Key 'mu' stores the mean estimate of the imputed data.
    - Key 'Sigma' stores the variance estimate of the imputed data.
    - Key 'X_imputed' stores the imputed data that is mutated from X using
    the EM algorithm.
    - Key 'C' stores the np.array that specifies the original missing
    entries of X.
    - Key 'iteration' stores the number of iteration used to compute
    'X_imputed' based on max_iter and eps specified.
    '''
    nr, nc = X.shape
    # C[i, j] is True where X[i, j] is observed.
    C = np.isnan(X) == False
    # Collect M_i and O_i's: per-row column indices of missing (M) and
    # observed (O) entries; -1 marks "not in this set".
    one_to_nc = np.arange(1, nc + 1, step = 1)
    M = one_to_nc * (C == False) - 1
    O = one_to_nc * C - 1
    # Generate Mu_0 and Sigma_0 from the observed values; fall back to a
    # diagonal covariance if no fully-observed row exists.
    Mu = np.nanmean(X, axis = 0)
    observed_rows = np.where(np.isnan(sum(X.T)) == False)[0]
    S = np.cov(X[observed_rows, ].T)
    if np.isnan(S).any():
        S = np.diag(np.nanvar(X, axis = 0))
    # Start updating
    Mu_tilde, S_tilde = {}, {}
    X_tilde = X.copy()
    no_conv = True
    iteration = 0
    while no_conv and iteration < max_iter:
        for i in range(nr):
            S_tilde[i] = np.zeros(nc ** 2).reshape(nc, nc)
            if set(O[i, ]) != set(one_to_nc - 1): # missing vals exist
                M_i, O_i = M[i, ][M[i, ] != -1], O[i, ][O[i, ] != -1]
                # Partition Sigma into missing/observed blocks.
                S_MM = S[np.ix_(M_i, M_i)]
                S_MO = S[np.ix_(M_i, O_i)]
                S_OM = S_MO.T
                S_OO = S[np.ix_(O_i, O_i)]
                # E-step: conditional mean of the missing entries given
                # the observed ones (Gaussian conditioning formula).
                Mu_tilde[i] = Mu[np.ix_(M_i)] +\
                    S_MO @ np.linalg.inv(S_OO) @\
                    (X_tilde[i, O_i] - Mu[np.ix_(O_i)])
                X_tilde[i, M_i] = Mu_tilde[i]
                # Conditional covariance of the missing block.
                S_MM_O = S_MM - S_MO @ np.linalg.inv(S_OO) @ S_OM
                S_tilde[i][np.ix_(M_i, M_i)] = S_MM_O
        # M-step: update the mean and covariance; the covariance adds the
        # averaged conditional covariances from the E-step.
        Mu_new = np.mean(X_tilde, axis = 0)
        S_new = np.cov(X_tilde.T, bias = 1) +\
            reduce(np.add, S_tilde.values()) / nr
        # Converged once both parameter updates are smaller than eps.
        no_conv =\
            np.linalg.norm(Mu - Mu_new) >= eps or\
            np.linalg.norm(S - S_new, ord = 2) >= eps
        Mu = Mu_new
        S = S_new
        iteration += 1
    return {
        'mu': Mu,
        'Sigma': S,
        'X_imputed': X_tilde,
        'C': C,
        'iteration': iteration
    }
def kde(x, samples, **kwargs):
    '''(float or *iterable, *iterable[, arguments of KDEUnivariate])
    -> np.array

    Fit a univariate kernel density estimate on samples (kwargs are
    forwarded to KDEUnivariate.fit) and evaluate the density at x.
    '''
    fitted = sm.nonparametric.KDEUnivariate(samples)
    fitted.fit(**kwargs)
    return fitted.evaluate(x)
def kde_mult(X, samples, **kwargs):
    '''(*iterable, *iterable[, arguments of KDEMultivariate]) -> np.array

    Precondition: number of columns of X == number of columns of samples

    Fit a multivariate kernel density estimate on samples, treating
    every column as continuous, and evaluate the density at each row
    of X.
    '''
    var_types = 'c' * X.shape[1]
    fitted = sm.nonparametric.KDEMultivariate(
        samples, var_type = var_types, **kwargs
    )
    return fitted.pdf(X)
def logarithmic_scoring(mod, data, get_sum = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, bool]) -> float or np.array

    Return the logarithmic scoring of mod onto the data, computed as
    y * log(phat) + (1 - y) * log(1 - phat) per observation. The higher,
    the better. Set get_sum = True (default) to get the sum over
    observations instead of the per-observation vector.
    '''
    # Reuse get_response() instead of duplicating its summary-text
    # parsing inline (the two copies previously had to be kept in sync).
    response = get_response(mod)
    assert response in data.columns, \
        'response "' + response + '" does not exist in data. Needs one.'
    # Feature names come from the fitted model's coefficient table.
    features = list(mod.conf_int().index)
    ys = data[response].values
    phats = mod.predict(data[features]).values
    result = ys * np.log(phats) + (1 - ys) * np.log(1 - phats)
    return sum(result) if get_sum else result
def plot_lm(mod, mfrow = (2, 2), hspace = .5, wspace = .3):
    '''(sm.RegressionResultsWrapper[, (int, int), float, float]) -> None

    Preconditions:
    1. mfrow[0] * mfrow[1] == 4
    2. len(mfrow) == 2

    Draw the four standard regression diagnostics of mod, arranged in
    the mfrow grid, in this order:
    * Residuals vs. Fitted plot
    * Normal Q-Q plot
    * Scale-Location plot
    * Residuals vs. Leverage plot
    hspace and wspace are passed to fig.subplots_adjust() to control the
    margins between subplots.
    '''
    fig = plt.figure()
    diagnostics = [plot_rf, plot_qq, plot_sl, plot_rlev]
    for position, draw in enumerate(diagnostics, start = 1):
        plt.subplot(mfrow[0], mfrow[1], position)
        draw(mod)
    fig.subplots_adjust(hspace = hspace, wspace = wspace)
    plt.show()
def plot_op(mod, response, num_breaks = None, breaks = None,
            xlab = 'Predicted Probability',
            ylab = 'Observed Proportion'):
    '''(sm.GLMResultsWrapper, array-like[, int, np.array, str, str])
    -> None

    Plot the grouped observed proportions vs. predicted probabilities
    of mod that used `response` argument as the reponse.
    Specify `num_breaks` to divide linear predictors into that much of
    intervals of equal length.
    Specify `breaks` to have different bins for linear predictors;
    `num_breaks` is ignored if `breaks` is specified.
    '''
    logit = lambda p: np.log(p / (1 - p))
    predprob = mod.predict()
    # Bin on the linear-predictor (logit) scale.
    linpred = logit(predprob)
    if breaks is None:
        if num_breaks is None:
            # Default: roughly 50 observations per bin.
            num_breaks = int(len(response) / 50)
        # Quantile-based break points; np.unique guards against
        # duplicated quantiles collapsing a bin.
        breaks = np.unique(
            np.quantile(linpred, np.linspace(0, 1, num = num_breaks + 1))
        )
    bins = pd.cut(linpred, breaks)
    # Per bin: observed successes, counts, and mean predicted probability.
    df =\
        pd.DataFrame({
            'y': response,
            'count': 1,
            'predprob': predprob,
            'bins': bins
        })\
        .groupby('bins')\
        .agg(
            y = ('y', 'sum'),
            counts = ('count', 'sum'),
            ppred = ('predprob', 'mean')
        )\
        .dropna()
    # Binomial standard error of the predicted proportion per bin;
    # ymin/ymax give an approximate 95% (+/- 2 SE) band.
    df['se_fit'] = np.sqrt(df['ppred'] * (1 - df['ppred']) / df['counts'])
    df['ymin'] = df['y'] / df['counts'] - 2 * df['se_fit']
    df['ymax'] = df['y'] / df['counts'] + 2 * df['se_fit']
    x = np.linspace(min(df['ppred']), max(df['ppred']))
    plt.scatter(df['ppred'], df['y'] / df['counts'])
    plt.vlines(
        df['ppred'], df['ymin'], df['ymax'],
        alpha = .3, color = '#1F77B4'
    )
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    # Reference line y = x: perfect calibration.
    plt.plot(x, x, color = '#FF7F0E', alpha = .4)
    plt.show()
def plot_qq(mod):
    '''(sm.RegressionResultsWrapper) -> None

    Plot a QQ-plot of mod. Numbers in the plot indicate outliers. For
    example, if `17` is plotted besides a point, then it means that the
    observation at index 17, or the 18th observation, of the training data
    is considered a possible outlier.
    '''
    influence = mod.get_influence()
    rstandard = influence.resid_studentized_internal[:]
    # probplot returns ((theoretical quantiles, ordered values),
    # (slope, intercept, r)) for the reference line.
    arrays = stats.probplot(rstandard, dist = 'norm')
    theoretical_q, sorted_rstandard = arrays[0]
    slope, intercept, r = arrays[1]
    # Pair each residual with its original observation index, then sort
    # by residual so rows align with the sorted theoretical quantiles.
    rstandard2 = list(enumerate(rstandard))
    rstandard2.sort(key = lambda x: x[1])
    rstandard2 = np.array(rstandard2)
    # |standardized residual| > 2 flags a potential outlier.
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard2)
    outliers = np.array(list(outliers))
    # dat columns: [obs index, residual, theoretical quantile, is_outlier]
    dat = np.c_[rstandard2, theoretical_q, outliers]
    x = np.linspace(min(theoretical_q), max(theoretical_q))
    plt.scatter(dat[:, 2], dat[:, 1])
    plt.plot(
        x, slope * x + intercept, linestyle = 'dashed', color = 'grey'
    )
    plt.title('Normal Q-Q')
    plt.xlabel('Theoretical quantiles')
    plt.ylabel('Standardized residuals')
    # Label flagged points with their original observation index.
    dat2 = list(filter(lambda row: row[-1] == 1, dat))
    for item in dat2:
        plt.text(item[2], item[1], str(int(item[0])))
    plt.show()
def plot_rf(mod):
    '''(sm.RegressionResultsWrapper) -> None

    Plot a Residual vs. Fitted plot of mod. Numbers in the plot indicate
    outliers. For example, if `17` is plotted besides a point, then it
    means that the observation at index 17, or the 18th observation, of
    the training data is considered a possible outlier.
    '''
    residuals = mod.resid
    fitted = mod.predict()
    # Lowess smoother to reveal trends in the residuals.
    lowess_line = sm.nonparametric.lowess(residuals, fitted)
    influence = mod.get_influence()
    rstandard = influence.resid_studentized_internal[:]
    # Pair each standardized residual with its observation index.
    rstandard = np.array(list(enumerate(rstandard)))
    # |standardized residual| > 2 flags a potential outlier.
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard)
    outliers = np.array(list(outliers))
    # dat columns: [obs index, std resid, fitted, residual, is_outlier]
    dat = np.c_[rstandard, fitted, residuals, outliers]
    outlier_ids = dat[dat[:, -1] == 1]
    x = np.linspace(min(fitted), max(fitted))
    plt.scatter(fitted, residuals)
    plt.plot(lowess_line[:, 0], lowess_line[:, 1], color = 'red')
    # Horizontal zero line for reference.
    plt.plot(x, np.zeros(len(x)), linestyle = 'dashed', color = 'grey')
    plt.title('Residuals vs. Fitted')
    plt.xlabel('Fitted values')
    plt.ylabel('Residuals')
    # Label flagged points at (fitted, residual) with their index.
    for item in outlier_ids:
        plt.text(item[2], item[3], str(int(item[0])))
    plt.show()
def plot_rlev(mod):
    '''(sm.RegressionResultsWrapper) -> None
    Plot a Residuals vs. Leverage plot of mod, with dashed reference
    lines at residual 0 and leverage 0. Observations whose internally
    studentized residual exceeds 2 in absolute value are treated as
    possible outliers and labelled with their 0-based index in the
    training data.
    '''
    influence = mod.get_influence()
    lev = np.asarray(influence.hat_matrix_diag)
    rstd = np.asarray(influence.resid_studentized_internal[:])
    outlier_idx = np.where(np.abs(rstd) > 2)[0]
    plt.scatter(lev, rstd)
    xs = np.linspace(0, max(lev))
    ys = np.linspace(min(rstd), max(rstd))
    plt.plot(xs, np.zeros(len(xs)), linestyle = 'dashed', color = 'grey')
    plt.plot(np.zeros(len(ys)), ys, linestyle = 'dashed', color = 'grey')
    plt.title('Residuals vs. Leverage')
    plt.xlabel('Leverage')
    plt.ylabel('Standardized residuals')
    for i in outlier_idx:
        plt.text(lev[i], rstd[i], str(int(i)))
    plt.show()
def plot_sl(mod):
    '''(sm.RegressionResultsWrapper) -> None
    Plot a Scale-Location plot of mod: sqrt(|standardized residuals|)
    against fitted values, with a lowess smoother. Observations whose
    internally studentized residual exceeds 2 in absolute value are
    treated as possible outliers and labelled with their 0-based index
    in the training data.
    '''
    fitted = np.asarray(mod.predict())
    rstd = np.asarray(mod.get_influence().resid_studentized_internal[:])
    # Outliers are flagged on the raw standardized residuals, before
    # the sqrt(|.|) transform used for plotting.
    outlier_idx = np.where(np.abs(rstd) > 2)[0]
    scaled = np.abs(rstd) ** .5
    smooth = sm.nonparametric.lowess(scaled, fitted)
    plt.scatter(fitted, scaled)
    plt.plot(smooth[:, 0], smooth[:, 1], color = 'red')
    plt.title('Scale-Location')
    plt.xlabel('Fitted values')
    plt.ylabel(r'$\sqrt{|Standardized\/\/residuals|}$')
    for i in outlier_idx:
        plt.text(fitted[i], scaled[i], str(int(i)))
    plt.show()
def produce_roc_table(mod, train, response = None):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, str]) -> pd.DataFrame
    Remarks:
        1. train must be the data that is used to fit mod.
        2. Regardless of whether response is specified or not, train
            must contain the endogenous variable used to fit mod:
            + 2.1. If response is None, then the function assumes that
                train has Dep. Variable specified in mod.summary() with
                exactly the same name.
            + 2.2. If response is specified, then the function assumes that
                the endogenous variable with the same name as the
                specified response value is one of the columns of train,
                and is used to fit mod.
    Return DataFrame that contains informations of fpr, tpr, and the
    corresponding probability thresholds based on mod and train, plus
    the distance of each (fpr, tpr) point to the optimum (0, 1).
    '''
    # BUG FIX: the docstring (and at least one caller) expects an
    # optional `response` argument, but the signature previously lacked
    # it; calling with three arguments raised a TypeError.
    if response is None:
        response = get_response(mod)
    actuals_train = train[response]
    preds_train = mod.predict()
    fpr, tpr, threses = roc_curve(actuals_train, preds_train)
    roc_tbl = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'thresholds': threses})
    dtp = dist_to_point(roc_tbl[['fpr', 'tpr']], np.array([0, 1]))
    roc_tbl['dist_to_optimal_point'] = dtp
    return roc_tbl
def random_word(n, type = 'alpha'):
    '''(int, str) -> str
    Precondition: type in ['alnum', 'alpha', 'lower', 'numeric', 'upper']
    Return a random combination of characters of length n and of
    type `type`:
        * 'alnum': lower-case alphabets, capitals, and integers
        * 'alpha': lower-case alphabets and capitals
        * 'lower': lower-case alphabets
        * 'numeric': integers
        * 'upper': capitals
    '''
    assert type in ['alnum', 'alpha', 'lower', 'numeric', 'upper'], \
        "type must be one of 'alnum', 'alpha', 'lower', 'numeric', or " +\
        "'upper', not " + str(type) + "."
    uppers = [chr(c) for c in range(ord('A'), ord('Z') + 1)]
    lowers = [c.lower() for c in uppers]
    digits = [str(d) for d in range(10)]
    if type == 'alnum':
        support = uppers + lowers + digits
    elif type == 'alpha':
        support = uppers + lowers
    elif type == 'lower':
        support = lowers
    elif type == 'numeric':
        support = digits
    else:  # 'upper'
        support = uppers
    # dchar is the character pmf defined elsewhere in this module.
    return ''.join(rpmf(n, dchar, support))
def rpmf(n, pmf, support, **kwargs):
    '''(int, function, *iterable[, **kwargs]) -> np.array
    Precondition:
        1. n >= 1
        2. support is the support of pmf.
    Return n random samples from the specified pmf with support 'support'
    and additional arguments of pmf in **kwargs if required. Since this
    function uses **kwargs, any additional arguments of pmf you want to
    specify must be named.
    >>> # Example 1: dX
    >>> np.random.seed(1024)
    >>> rpmf(n = 20, pmf = dX, support = np.arange(0, 6))
    array([5, 5, 5, 5, 5, 5, 1, 0, 1, 5, 5, 5, 5, 3, 5, 5, 5, 2, 5, 1])
    >>>
    >>> # Example 2: S_Y = Y_1 + ... + Y_N
    >>> np.random.seed(1024)
    >>> # recall dY in csum_N_pois example
    >>> result_S_Y = csum_N_pois(dY, np.arange(0, 5), 3)
    >>> result_S_Y = result_S_Y / sum(result_S_Y)
    >>> M_S_Y = len(result_S_Y)
    >>> rpmf(10, dpmf, np.arange(0, M_S_Y), pmf_vec = result_S_Y)
    array([ 8, 22,  6,  8,  7,  9,  2,  0,  2,  9])
    >>>
    >>> # Example 3: dfruit in dpmf example
    >>> np.random.seed(2048)
    >>> rpmf(7, dfruit, ['apple', 'orange', 'neither'])
    array(['orange', 'apple', 'neither', 'neither', 'neither', 'orange',
           'apple'], dtype='<U7')
    '''
    # Inverse-CDF sampling: for a uniform draw u, the sample is
    # support[j] where cmf_vec[j] <= u < cmf_vec[j + 1].
    cmf_vec = np.append(0, np.cumsum(pmf(support, **kwargs)))
    unif_01 = np.random.random(n)
    # searchsorted(..., side = 'right') returns j + 1 for the interval
    # above (and handles zero-probability atoms, i.e. repeated cmf
    # values, the same way the naive scan does). This replaces the
    # previous O(n * len(support)) double loop.
    idx = np.searchsorted(cmf_vec, unif_01, side = 'right') - 1
    # Draws falling beyond the last cmf value (possible only when the
    # pmf sums to slightly less than 1 due to rounding) are dropped,
    # matching the original behaviour.
    idx = idx[idx < len(cmf_vec) - 1]
    return np.array([support[j] for j in idx])
# In development
def anova(*args):
    '''(sm.GLMResultsWrappers) -> pd.DataFrame
    Precondition: at least two nested models, ordered from the most
    restricted to the least restricted; each model needs `.df_resid`
    and `.deviance` attributes.
    Return the LRT results of models given to *args. If more than two
    models are given, then sequential LRT results (model 1 vs. model 2,
    model 2 vs. model 3, ...) are returned.
    '''
    models = [*args]
    assert len(models) != 1, \
        'Functionality not yet available for only one model; ' +\
        'need at least two.'
    # First rows of the 'Df'/'Deviance'/'Pr(>Chi)' columns are blank
    # (there is no comparison for the first model), mirroring R's anova().
    result = {
        'Resid. Df': [],
        'Resid. Dev': [],
        'Df': [''],
        'Deviance': [''],
        'Pr(>Chi)': ['']
    }
    for mod in models:
        result['Resid. Df'].append(mod.df_resid)
        result['Resid. Dev'].append(mod.deviance)
    # Sequential pairwise LRTs: the deviance difference is asymptotically
    # chi-squared with df equal to the difference in residual df.
    for mod0, mod1 in zip(models, models[1:]):
        diff_df = mod0.df_resid - mod1.df_resid
        diff_dev = mod0.deviance - mod1.deviance
        result['Df'].append(diff_df)
        result['Deviance'].append(diff_dev)
        result['Pr(>Chi)'].append(1 - chi2.cdf(diff_dev, df = diff_df))
    return pd.DataFrame(result)
def classify_terbin(mod_terbin, data):
    '''(return value of terbin_model(), pd.DataFrame)
        -> {str: np.array and/or str: pd.DataFrame}
    Compute the probability for each observations of data, and classify
    according to mod_terbin.
    mod_terbin is the dict returned by terbin_model():
        * 'mod_ternary': [fitted MNLogit, its response, its features]
        * 'mod_binary': [fitted GLM, its response Series, its features]
        * 'p_threses': np.array of [p_thres_fn, p_thres, p_thres_fp]
    If data contains the binary response column, the return value also
    includes the confusion counts and accuracy; otherwise only the
    predicted types, 0/1 classifications, and thresholds are returned.
    '''
    # Check: does data have all features of mod_ternary and mod_binary?
    data_cols = data.columns
    ter_features = mod_terbin['mod_ternary'][2].columns
    bin_response = mod_terbin['mod_binary'][1].name
    bin_features = mod_terbin['mod_binary'][2].columns
    assert set(ter_features).issubset(set(data_cols)), \
        'data does not have all the features of mod_ternary. ' +\
        'The following are missing: ' +\
        str(list(set(ter_features).difference(set(data_cols))))
    assert set(bin_features).issubset(set(data_cols)), \
        'data does not have all the features of mod_binary. ' +\
        'The following are missing: ' +\
        str(list(set(bin_features).difference(set(data_cols))))
    # Check: does data have a binary response column?
    # If no, just return the classification result.
    # If yes, then return classification result and case counts
    data_has_bin_response = bin_response in data.columns
    # Predict types: fn, fp, or tpn
    # The ternary model emits per-class probabilities per row; argmax
    # picks the most likely of ['fn', 'fp', 'tpn'] (this order must
    # match the category encoding used when the MNLogit was fitted).
    types = Pipe(lambda row: ['fn', 'fp', 'tpn'][np.argmax(row)])\
        .pipe(
            map,
            mod_terbin['mod_ternary'][0]\
                .predict(data[ter_features])\
                .values
        )\
        .pipe(list)\
        .pipe(np.array)\
        .collect()
    # Predict probabilities
    probs = mod_terbin['mod_binary'][0].predict(data[bin_features]).values
    # Classify using different probability thresholds
    # p_threses order matches terbin_model(): [fn, tpn(base), fp].
    types_probs = np.array(list(zip(types, probs)))
    p_threses = {
        'fn': mod_terbin['p_threses'][0],
        'tpn': mod_terbin['p_threses'][1],
        'fp': mod_terbin['p_threses'][2]
    }
    # row = (predicted type, probability as str); np.array of mixed
    # types stringifies entries, hence the float() cast.
    result = np.array(list(map(
        lambda row: float(row[1]) > p_threses[row[0]],
        types_probs
    )))
    result = np.array(list(map(int, result)))
    if not data_has_bin_response:
        return {
            'predicted_types': types,
            'result': result,
            'p_threses': mod_terbin['p_threses']
        }
    else:
        # Confusion counts against the observed binary response.
        actuals = data[bin_response].values
        total_neg = np.sum(actuals == 0)
        total_pos = len(actuals) - total_neg
        tn = sum((actuals == 0) & (result == 0))
        fp = total_neg - tn
        tp = sum((actuals == 1) & (result == 1))
        fn = total_pos - tp
        case_counts = pd.DataFrame({
            'class': [0, 0, 1, 1],
            'classified': [0, 1, 0, 1],
            'class_total': [total_neg, total_neg, total_pos, total_pos],
            'counts': [tn, fp, fn, tp]
        })
        # perc: counts as a proportion of the corresponding class total
        case_counts['perc'] =\
            np.array([tn, fp, fn, tp]) /\
            np.array([total_neg, total_neg, total_pos, total_pos])
        accuracy = (tp + tn) / (total_pos + total_neg)
        return {
            'predicted_types': types,
            'result': result,
            'counts': case_counts,
            'accuracy': accuracy,
            'p_threses': mod_terbin['p_threses']
        }
def count_cases(mod, data, train = None, p_thres = None, criterion = None):
    '''(sm.GLMResultsWrapper or return value of terbin_model(),
        pd.DataFrame,
        [pd.DataFrame , float, [str, number]])
        -> {str: pd.DataFrame and/or str: number}
    Precondition:
        1. response of mod consists of 0s and 1s.
        2. data contains the response column specified by mod
        3. data contains all or more feature columns of mod, including the
            intercept if applicable.
        4. train must be specified if mod is of class GLMResultsWrapper
        5. 0 < p_thres < 1
    Count the number of true negatives, false positives, false negatives,
    and true positives in data classified by mod and p_thres; train must
    be the dataset that is used to fit mod. If p_thres is None, then
    it uses the probability threshold that yields the minimum distance
    between the ROC curve and the point (fpr, tpr) = (0, 1); if p_thres is
    specified, then criterion (used as an argument of get_p_thres()) is
    ignored. If mod is not of class sm.GLMResultsWrapper, then every
    argument except mod and data are ignored.
    Return a dict with the confusion 'counts' DataFrame, the 'accuracy',
    and the threshold(s) used.
    '''
    if 'GLMResultsWrapper' in str(type(mod)):
        assert train is not None, \
            'If a given mod is of class GLMResultsWrapper, then ' +\
            'train must be specified.'
        # Get the (binary) response column of mod; delegated to
        # get_response for consistency with the other helpers here.
        response = get_response(mod)
        # Checks
        all_features_of_data = set(data.columns)
        assert response in all_features_of_data, \
            'data does not have the response: "' + response + '".'
        all_features_of_data.remove(response) # leave only predictors
        mod_features = mod.cov_params().columns
        mod_features_set = set(mod_features)
        assert mod_features_set.issubset(all_features_of_data), \
            'data does not have all the features used in mod; data ' +\
            'requires the following: {0}'\
            .format(
                list(mod_features_set.difference(all_features_of_data))
            )
        mod_features = list(mod_features)
        # Compute p_thres if not specified
        actuals = data[response].values
        preds = mod.predict(data[mod_features]).values
        if p_thres is None: # p_thres must come from train, not data
            # BUG FIX: was called with a third `response` argument,
            # which produce_roc_table's signature does not accept;
            # it derives the response from mod itself.
            roc_tbl = produce_roc_table(mod, train)
            p_thres = get_p_thres(roc_tbl, criterion)
        classifieds = preds > p_thres
        classifieds = np.array(list(map(int, classifieds)))
        # Binary classification result
        total_neg = np.sum(actuals == 0)
        total_pos = len(actuals) - total_neg
        tn = sum((actuals == 0) & (classifieds == 0))
        fp = total_neg - tn
        tp = sum((actuals == 1) & (classifieds == 1))
        fn = total_pos - tp
        result = pd.DataFrame({
            'class': [0, 0, 1, 1],
            'classified': [0, 1, 0, 1],
            'class_total': [total_neg, total_neg, total_pos, total_pos],
            'counts': [tn, fp, fn, tp]
        })
        # perc: counts as a proportion of the corresponding class total
        result['perc'] =\
            np.array([tn, fp, fn, tp]) /\
            np.array([total_neg, total_neg, total_pos, total_pos])
        accuracy = (tp + tn) / (total_pos + total_neg)
        return {
            'counts': result,
            'accuracy': accuracy,
            'p_thres': p_thres
        }
    else:
        # terbin models carry their own thresholds; drop the per-row
        # classification vector and keep only the summary parts.
        result = classify_terbin(mod, data)
        del result['result']
        return result
def drop1(mod, train, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, bool]) -> pd.DataFrame
    Conduct a LRT of mod minus one feature vs. mod for every feature
    used in mod, trained by train.
    The first row ('<none>') reports the full model; each following row
    reports the model refit without that one feature (binomial GLM),
    with R-style significance codes in the unnamed last column.
    Set show_progress = True to print each comparison as it runs.
    '''
    response = get_response(mod)
    assert response in train.columns, \
        'response "' + response + '" does not exist in train. Needs one.'
    # Find the intercept column: the (first) column of train that is
    # identically 1 among mod's features.
    int_name = ''
    all_features = list(mod.conf_int().index)
    for col in all_features:
        if (train[col] == 1).all():
            int_name += col
            break
    assert int_name != '', \
        'An intercept column does not exist in train. Needs one.'
    all_features_minus_int = all_features[:]
    all_features_minus_int.remove(int_name)
    # First row corresponds to the full model; comparison columns blank.
    result = {
        'Removed': ['<none>'],
        'Df': [''],
        'Deviance': [mod.deviance],
        'AIC': [mod.aic],
        'LRT': [''],
        'Pr(>Chi)': [''],
        '': ['']
    }
    for item in all_features_minus_int:
        # Refit with every feature except `item` (intercept always kept).
        afmi = all_features_minus_int[:]
        afmi.remove(item)
        if show_progress:
            print('LRT: mod - {0} vs. mod'.format(item))
        mod_minus1_features = [int_name] + afmi
        mod_1dropped = sm.GLM(
            train[response],
            train[mod_minus1_features],
            family = sm.families.Binomial()
        )\
            .fit()
        # anova() row 0 is the restricted model, row 1 the comparison.
        aov = anova(mod_1dropped, mod)
        result['Removed'].append(item)
        result['Df'].append(aov['Df'][1])
        result['Deviance'].append(aov['Resid. Dev'][0])
        result['AIC'].append(mod_1dropped.aic)
        result['LRT'].append(aov['Deviance'][1])
        p_val = aov['Pr(>Chi)'][1]
        result['Pr(>Chi)'].append(p_val)
        # R-style significance codes for the p-value.
        sig = ''
        if p_val <= .001:
            sig += '***'
        elif p_val <= .01:
            sig += '** '
        elif p_val <= .05:
            sig += '*  '
        elif p_val <= .1:
            sig += '.  '
        result[''].append(sig)
    return pd.DataFrame(result)
def model_by_lrt(mod, train, pval_thres = .05, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, float, bool])
        -> sm.GLMResultsWrapper
    Precondition: 0 < pval_thres < 1
    Sequentially remove a feature that has a maximum p-value from
    drop1(mod, train), trained by train, until every feature has a
    p-value less than pval_thres. Return sm.GLMResultsWrapper object
    that only contains such features. Set show_progress = True to see
    the removal process.
    '''
    assert 0 < pval_thres < 1, \
        'pval_thres argument must be between 0 and 1, not ' +\
        str(pval_thres) + '.'
    response = get_response(mod)
    assert response in train.columns, \
        'response "' + response + '" does not exist in train. Needs one.'
    features = list(mod.conf_int().index)
    drop1_result = drop1(mod, train, show_progress)
    # Row 0 of drop1_result is the full model; only the per-feature
    # rows (1:) carry p-values.
    not_all_less_than_thres =\
        not (drop1_result.iloc[1:, :]['Pr(>Chi)'] < pval_thres).all()
    if not not_all_less_than_thres:
        # Every feature is already significant; nothing to remove.
        return mod
    i = 0
    while not_all_less_than_thres:
        i += 1
        # Remove the feature with the largest p-value, then refit.
        ordered = drop1_result.iloc[1:, :]\
            .sort_values('Pr(>Chi)', ascending = False)
        to_remove = ordered['Removed'].values[0]
        pval_of_removed = ordered['Pr(>Chi)'].values[0]
        if show_progress:
            msg = 'Iteration {0}: removed {1} (p-val: {2})'
            msg = msg.format(i, to_remove, pval_of_removed)
            print(msg)
        features.remove(to_remove)
        mod_new = sm.GLM(
            train[response],
            train[features],
            family = sm.families.Binomial()
        )\
            .fit()
        # IDIOM FIX: side effect moved out of a conditional expression.
        if show_progress:
            print(anova(mod_new, mod))
        drop1_result =\
            drop1(mod_new, train[[response] + features], show_progress)
        not_all_less_than_thres =\
            not (drop1_result.iloc[1:, :]['Pr(>Chi)'] < pval_thres).all()
    return mod_new
def model_by_vif(mod, train, vif_thres = 5, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, float, bool])
        -> {str: sm.GLMResultsWrapper and str: {str: float}}
    Precondition: vif_thres > 0
    Sequentially remove a feature that has a maximum VIF from mod,
    trained by train, until every feature has a VIF less than vif_thres.
    Return sm.GLMResultsWrapper object that only contains such features.
    Set show_progress = True to see the removal process.
    train itself is never mutated by this function.
    '''
    assert vif_thres > 0, \
        "vif_thres argument must be positive, not " + str(vif_thres) + "."
    # Remove response
    response = get_response(mod)
    all_cols = list(train.columns)
    if response in all_cols:
        all_cols.remove(response)
        # .copy() avoids chained-assignment warnings on the later
        # column insertion/deletion.
        X = train.loc[:, all_cols].copy()
    else:
        # BUG FIX: was `X = train`, which aliased the caller's frame so
        # the `X[int_name] = 1` and `del X[k_to_remove]` below mutated
        # train in place.
        X = train.copy()
        # NOTE(review): in this branch `train[response]` at the end will
        # raise KeyError -- the response column must be in train for the
        # final refit. Preserved as-is; confirm intended usage.
    # Let Intercept be the first predictor
    int_name = ''
    for c in all_cols:
        if (X[c].values == 1).all(): # Try to find Intercept
            int_name += c
            break
    if int_name == '': # Intercept column doesn't exist; make one
        int_name += 'Intercept'
        assert int_name not in X.columns, \
            '"Intercept", the column in train that ' +\
            'is NOT the column of 1s and yet uses the name ' +\
            '"Intercept", already exists in train. User inspection ' +\
            'is required.'
        X[int_name] = 1
        all_cols2 = [int_name]
        all_cols2.extend(all_cols)
        all_cols = all_cols2
        X = X.loc[:, all_cols]
        all_cols.remove(int_name)
    # X = train minus response
    # i.e. X.columns = [Intercept, *features]
    # all_cols: train.columns minus response minus Intercept
    # i.e. all_cols = [*features]
    vifs = dict(zip(
        (c for c in all_cols),
        (variance_inflation_factor(X.values, j) \
            for j in range(1, X.values.shape[1])) # except Intercept
    ))
    not_all_vifs_less_than_thres =\
        not (np.array(list(vifs.values())) < vif_thres).all()
    i = 0
    while not_all_vifs_less_than_thres:
        i += 1
        # Drop the feature with the (first) maximum VIF and recompute.
        current_max = max(vifs.values())
        k_to_remove = ''
        for k, v in vifs.items():
            if v == current_max:
                k_to_remove += k
                break
        v_removed = vifs.pop(k_to_remove) # same as current_max
        if show_progress:
            msg = 'Iteration {0}: removed {1} (VIF: {2})'\
                .format(i, k_to_remove, v_removed)
            print(msg)
        del X[k_to_remove]
        all_cols.remove(k_to_remove)
        vifs = dict(zip(
            (c for c in all_cols),
            (variance_inflation_factor(X.values, j) \
                for j in range(1, X.values.shape[1]))
        ))
        not_all_vifs_less_than_thres =\
            not (np.array(list(vifs.values())) < vif_thres).all()
    features = [int_name]
    features.extend(all_cols)
    if show_progress:
        msg2 = 'Features used: {0}'.format(features)
        print(msg2)
    # Refit the binomial GLM on the surviving features.
    mod_reduced =\
        sm.GLM(
            train[response],
            train.loc[:, features],
            family = sm.families.Binomial()
        )\
        .fit()
    return {'model': mod_reduced, 'vifs': vifs}
def model_matrix(data, formula):
    '''(pd.DataFrame, str) -> pd.DataFrame
    Design data according to formula (a patsy-style formula string).
    The returned DataFrame has the response column first, followed by
    the designed feature columns.
    '''
    lhs, rhs = dmatrices(formula, data)
    response = pd.DataFrame(lhs, columns = lhs.design_info.column_names)
    features = pd.DataFrame(rhs, columns = rhs.design_info.column_names)
    features.insert(0, response.columns[0], response)
    return features
def mutate(data, colname, lambd = None, lambd_df = None):
    '''(pd.DataFrame, str[, (str, function), function]) -> pd.DataFrame
    Return a copy of data with an added column named colname, computed
    either by lambd or by lambd_df (data itself is not modified):
        * lambd is a (column_name, function) tuple; the function is
          applied elementwise to data[column_name].
        * lambd_df is a function applied to the whole DataFrame; it must
          return the new column's values.
    If lambd is specified, then lambd_df is ignored.
    >>> df = pd.DataFrame({
            'basiscol': ['aba', 'bba', 'cce'],
            'extra': [1, 2, 3]
        })
    >>> df1 =\\
    ...     mutate(
    ...         df,
    ...         'newcolname',
    ...         ('basiscol', lambda x: x[:2])
    ...     )
    ...
    >>> df2 =\\
    ...     mutate(
    ...         df,
    ...         'newcolname',
    ...         lambd_df = lambda x: x['basiscol'].apply(lambda y: y[:2])
    ...     )
    ...
    >>> df1.equals(df2)
    True
    '''
    out = data.copy()
    assert not (lambd is None and lambd_df is None), \
        'Either one of lambd or lambd_df has to be specified.'
    if lambd is not None:
        source_col, func = lambd
        out[colname] = out[source_col].apply(func)
    else:
        out[colname] = lambd_df(out)
    return out
def plot_rl(mod, num_breaks = None, breaks = None,
            xlab = 'Linear predictor',
            ylab = 'Deviance residuals'):
    '''(sm.GLMResultsWrapper[, int, np.array, str, str]) -> None
    Plot the means of grouped deviance residuals vs. grouped linear
    predictors of mod (a binned residual plot for logistic models).
    Specify `num_breaks` to divide linear predictors into that much of
    intervals of equal length.
    Specify `breaks` to have different bins for linear predictors;
    `num_breaks` is ignored if `breaks` is specified.
    '''
    logit = lambda p: np.log(p / (1 - p))
    residuals = mod.resid_deviance
    # mod.predict() returns fitted probabilities; logit recovers the
    # linear predictor.
    linpred = logit(mod.predict())
    if breaks is None:
        if num_breaks is None:
            # ROBUSTNESS FIX: int(len/50) is 0 for fewer than 50
            # observations, which made np.linspace produce a single
            # break and pd.cut fail; keep at least one bin.
            num_breaks = max(1, int(len(residuals) / 50))
        # Quantile-based breaks; np.unique guards against duplicate
        # quantiles from ties.
        breaks = np.unique(
            np.quantile(linpred, np.linspace(0, 1, num = num_breaks + 1))
        )
    bins = pd.cut(linpred, breaks)
    df = pd.DataFrame(
        {'residuals': residuals, 'linpred': linpred, 'bins': bins}
    )
    # One point per bin: mean residual vs. mean linear predictor.
    df = df.groupby('bins')\
        .agg(
            residuals = ('residuals', 'mean'),
            linpred = ('linpred', 'mean')
        )
    plt.scatter(df['linpred'], df['residuals'])
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.show()
def simulate_nan(X, nan_rate):
    '''(np.array, number) -> {str: np.array or number}
    Preconditions:
        1. np.isnan(X_complete).any() == False
        2. 0 <= nan_rate <= 1
        3. X has a float dtype (assigning np.nan to an int array raises).
    Return the dictionary with four keys where:
        - Key 'X' stores a np.array where some of the entries in X
            are replaced with np.nan based on nan_rate specified.
        - Key 'C' stores a np.array where each entry is False if the
            corresponding entry in the key 'X''s np.array is np.nan, and True
            otherwise.
        - Key 'nan_rate' stores nan_rate specified.
        - Key 'nan_rate_actual' stores the actual proportion of np.nan
            in the key 'X''s np.array.
    Every row of the result is guaranteed to keep at least one
    observed (non-nan) entry. X itself is not modified.
    '''
    # Create C matrix; entry is False if missing, and True if observed
    X_complete = X.copy()
    nr, nc = X_complete.shape
    C = np.random.random(nr * nc).reshape(nr, nc) > nan_rate
    # Rows where every component would become missing
    # (C.sum(axis = 1) replaces the previous builtin-sum-over-transpose).
    all_missing_rows = np.where(C.sum(axis = 1) == 0)[0]
    # Randomly "revive" some components in fully-missing rows so every
    # X_i keeps at least one observed component. (No-op when there are
    # no such rows, so the two original branches collapse into one.)
    for index in all_missing_rows:
        reviving_components = np.random.choice(
            nc,
            int(np.ceil(nc * np.random.random())),
            replace = False
        )
        C[index, np.ix_(reviving_components)] = True
    # IDIOM: ~C instead of C == False.
    X_complete[~C] = np.nan
    return {
        'X': X_complete,
        'C': C,
        'nan_rate': nan_rate,
        'nan_rate_actual': np.sum(~C) / (nr * nc)
    }
def terbin_model(mod, train, p_thres = None, criterion = None,
                 ter_features = None, train_ter = None, **kwargs):
    '''(sm.GLMResultsWrapper, pd.DataFrame
        [, number, (str, float), [str], pd.DataFrame,
        arguments to sm.MNLogit.fit(...)])
        -> {str: results}
    Precondition:
        1. mod is fitted using train.
        2. train contains the response column specified in mod.summary().
        3. 0 < p_thres < 1
        4. set(ter_features).issubset(set(train.columns)) if train_ter is None\
            else set(ter_features).issubset(set(train_ter.columns))
    Fit a compounded model, or a terbin (ternary-binary) model, based on
    mod and train.
    * If p_thres is None, then it uses the probability threshold that
        yields the minimum distance between the ROC curve and the point
        (fpr, tpr) = (0, 1); if p_thres is specified, then criterion
        (used as an argument of get_p_thres()) is ignored.
    * Specify ter_features to fit a multinomial logit model using those
        features. If not specified, then the same formula as mod is used.
    * If train_ter is specified, then this training set is used to fit a
        multinomial logit model. If not specified, then train works as
        train_ter.
    The returned dict (consumed by classify_terbin / count_cases) has:
        'mod_ternary': [fitted MNLogit, its response, its features],
        'mod_binary': [mod, train[response], train[mod_features]],
        'p_threses': np.array([p_thres_fn, p_thres, p_thres_fp]).
    '''
    # Get the (binary) response column; len('Dep. Variable') == 14
    response = get_response(mod)
    # Checks
    all_features_of_train = set(train.columns)
    assert response in all_features_of_train, \
        'train does not have the response "' + response + '" specified ' +\
        'in mod.'
    all_features_of_train.remove(response) # leave only predictors
    mod_features = mod.cov_params().columns # features used in mod
    mod_features_set = set(mod_features)
    assert mod_features_set.issubset(all_features_of_train), \
        'train does not have all the features used in mod; train ' +\
        'requires the following: {0}'\
        .format(list(mod_features_set.difference(all_features_of_train)))
    mod_features = list(mod_features)
    if ter_features is not None:
        if train_ter is None:
            assert set(ter_features).issubset(set(train.columns)), \
                'ter_features must be a subset of train.columns if ' +\
                'train_ter is not specified. train.columns requires ' +\
                'the following: ' +\
                str(list(set(ter_features).difference(set(train.columns))))
        else:
            assert set(ter_features).issubset(set(train_ter.columns)), \
                'ter_features must be a subset of train_ter.columns if ' +\
                'both train_features and train_ter are specified. ' +\
                'train_ter.columns requires the following: ' +\
                str(list(set(ter_features).difference(set(train_ter.columns))))
    else:
        ter_features = mod_features
    train_ter = train if train_ter is None else train_ter
    # Compute p_thres if not specified
    if p_thres is None:
        roc_tbl = produce_roc_table(mod, train)
        p_thres = get_p_thres(roc_tbl, criterion)
    # Ternary model: classify each training row as 'fn'/'fp'/'tpn'
    # (determine_type is defined elsewhere in this module) and fit a
    # multinomial logit on those labels.
    actuals = train[response].values
    preds = mod.predict(train[mod_features]).values
    response_ter = determine_type(actuals, preds, p_thres)
    mod_ter =\
        sm.MNLogit(response_ter, train_ter[ter_features])\
        .fit(**kwargs)
    # Get p_thres_fn and p_thres_fp
    # Per-type thresholds: 10th percentile of predicted probabilities
    # among false negatives, 90th among false positives.
    p_thres_fn = np.quantile(
        mod.predict(train.loc[response_ter == 'fn', mod_features]),
        .1
    )
    p_thres_fp = np.quantile(
        mod.predict(train.loc[response_ter == 'fp', mod_features]),
        .9
    )
    return {
        'mod_ternary': [mod_ter, response_ter, train_ter[ter_features]],
        'mod_binary': [mod, train[response], train[mod_features]],
        'p_threses': np.array([p_thres_fn, p_thres, p_thres_fp])
    }
| 36.553425 | 117 | 0.566272 |
from functools import reduce
from patsy import dmatrices
from scipy.optimize import curve_fit, fmin
from scipy.stats import chi2
from sklearn.metrics import roc_curve
from statsmodels.stats.outliers_influence import variance_inflation_factor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.api as sm
# Complete
class Interval():
    '''A connected 1-d Interval, i.e. a set of the form (a, b), [a, b],
    (a, b], or [a, b] over the extended reals. Supports membership tests
    for numbers, other Intervals, and list/tuple endpoint pairs via `in`.
    '''
    def __init__(self, start, stop = None):
        '''(Interval, args[, number]) -> None
        Initialize an Interval.
        Possible start:
            1) one number num: initializes (-inf, num];
                if num == np.inf, then it initializes (-inf, inf).
            2) [num0, num1]: initializes [num0, num1] if num0 <= num1;
                if num1 == np.inf, then it initializes [num0, inf).
            3) (num0, num1): initializes (num0, num1) if num0 <= num1
        If both start and end are specified, then it initializes
        (start, stop] given start <= stop. If stop == np.inf,
        then this initializes (start, inf).
        >>> int1 = Interval(.45)
        >>> int1
        (-inf, 0.45]
        >>> int2 = Interval([.96, 1.03])
        >>> int2
        [.96, 1.03]
        >>> int3 = Interval((2.1, 5))
        >>> int3
        (2.1, 5)
        >>> int4 = Interval(2.1, 5)
        >>> int4
        (2.1, 5]
        '''
        # NOTE(review): for a list with an infinite upper endpoint
        # (e.g. Interval([1, np.inf])) the code below sets
        # upperopen = False, i.e. [1, inf] rather than the [num0, inf)
        # the docstring promises -- confirm intended behaviour.
        if stop is None:
            if isinstance(start, (float, int)):
                # bool is a subclass of int; normalize to int.
                ep = int(start) if isinstance(start, bool) else start
                self.__lower = -np.inf
                self.__upper = ep
                self.loweropen = True
                self.upperopen = True if ep == np.inf else False
            elif isinstance(start, (list, tuple)):
                assert len(start) == 2, \
                    "The length of an argument must be 2, not " +\
                    str(len(start)) + "."
                assert isinstance(start[0], (float, int)) and \
                    isinstance(start[1], (float, int)), \
                    'If two endpoints are given, then both points ' +\
                    'must be a number. Currently, they are of ' +\
                    str(type(start[0])) + ' and ' +\
                    str(type(start[1])) + '.'
                assert start[0] <= start[1], \
                    "Numbers in iterables must be ordered."
                self.__lower = int(start[0]) if isinstance(start[0], bool)\
                    else start[0]
                self.__upper = int(start[1]) if isinstance(start[1], bool)\
                    else start[1]
                # list literal => closed interval; tuple => open interval
                self.loweropen = False if isinstance(start, list) else True
                self.upperopen = False if isinstance(start, list) else True
            else:
                msg = "Interval is initialized with a number, list, or " +\
                    "tuple; don't know how to initialize " +\
                    str(type(start)) + "."
                raise TypeError(msg)
        else:
            assert isinstance(start, (float, int)) and \
                isinstance(stop, (float, int)), \
                'If two endpoints are given, then both points ' +\
                'must be a number. Currently, they are of ' +\
                '{0} and {1}.'.format(type(start), type(stop))
            assert start <= stop, \
                'The given endpoints are ' + str(start) +\
                ' and ' + str(stop) + ', in that order. ' +\
                'Change the order of the two and try again.'
            ep0 = int(start) if isinstance(start, bool) else start
            ep1 = int(stop) if isinstance(stop, bool) else stop
            # Two-argument form builds the half-open interval (start, stop].
            self.__lower = ep0
            self.__upper = ep1
            self.loweropen = True
            self.upperopen = True if stop == np.inf else False
    def __contains__(self, item):
        '''(Interval, number or Interval or list/tuple) -> bool
        Return True iff item (a number, an Interval, or a list/tuple
        interpreted as an Interval) lies entirely within self, honouring
        open/closed endpoints.
        '''
        a = self.get_lower()
        b = self.get_upper()
        if isinstance(item, (float, int)):
            # Point membership: endpoints count only if closed.
            if item < a:
                return False
            elif item == a:
                return False if self.loweropen else True
            elif a < item < b:
                return True
            elif item == b:
                return False if self.upperopen else True
            else:
                return False
        elif isinstance(item, Interval):
            # Subset test: item ⊆ self.
            c = item.get_lower()
            d = item.get_upper()
            if a > c or b < d:
                return False
            elif a < c and b > d:
                return True
            else:
                # Shared endpoint(s): an open self endpoint cannot
                # contain a closed item endpoint at the same value.
                if c == a:
                    if self.loweropen and not item.loweropen:
                        return False
                    else:
                        if d < b:
                            return True
                        else:
                            if self.upperopen and not item.upperopen:
                                return False
                            else:
                                return True
                else:
                    if self.upperopen and not item.upperopen:
                        return False
                    else:
                        return True
        elif isinstance(item, (list, tuple)):
            # Interpret the pair with Interval's own list/tuple rules.
            return Interval(item) in self
        else:
            return False
    def __repr__(self):
        '''(Interval) -> str
        Same as str(self).
        '''
        return str(self)
    def __str__(self):
        '''(Interval) -> str
        Render with bracket style matching the open/closed endpoints.
        '''
        left = '(' if self.loweropen else '['
        right = ')' if self.upperopen else ']'
        result = left + '{0}, {1}' + right
        return result.format(self.__lower, self.__upper)
    def get_lower(self):
        '''(Interval) -> number
        Return the lower endpoint.
        '''
        return self.__lower
    def get_upper(self):
        '''(Interval) -> number
        Return the upper endpoint.
        '''
        return self.__upper
    def set_lower(self, value):
        '''(Interval, number) -> None
        Set the lower endpoint; must not exceed the upper endpoint.
        '''
        assert isinstance(value, (float, int)), \
            "A lower bound of Interval must be a number, not " +\
            str(type(value)) + "."
        value = int(value) if isinstance(value, bool) else value
        assert value <= self.__upper, \
            "lower bound <= upper bound not satisfied. " +\
            "Your value is " + str(value) + ", whereas the " +\
            "upper bound is " + str(self.__upper) + "."
        self.__lower = value
    def set_upper(self, value):
        '''(Interval, number) -> None
        Set the upper endpoint; must not be below the lower endpoint.
        '''
        assert isinstance(value, (float, int)), \
            "An upper bound of Interval must be a number, not " +\
            str(type(value)) + "."
        value = int(value) if isinstance(value, bool) else value
        assert value >= self.__lower, \
            "upper bound >= lower bound not satisfied. " +\
            "Your value is " + str(value) + ", whereas the " +\
            "lower bound is " + str(self.__lower) + "."
        self.__upper = value
class Pipe():
    '''A class that enables you to Pipe: Pipe(x).pipe(f).pipe(g).collect()
    evaluates g(f(x)), letting call chains read left-to-right.'''
    def __init__(self, obj):
        '''
        Initialize the function piping mechanism; obj is the value
        carried through the pipe.
        '''
        self.obj = obj
    def __repr__(self):
        '''
        Print the representation of self (the str() of the carried
        value).
        '''
        return str(self.obj)
    def collect(self):
        '''
        Collect the result of piping (the carried value itself).
        '''
        return self.obj
    def pipe(self, func, *args, **kwargs):
        '''
        Pipe: return a new Pipe wrapping func(self.obj, *args, **kwargs).
        '''
        return Pipe(func(self.obj, *args, **kwargs))
def npmap(func, *iterable):
    '''(function, *iterables) -> np.array
    Apply func elementwise over the given iterable(s), like map(), and
    return the results as a np.array.
    '''
    # IDIOM FIX (PEP 8 E731): def instead of a lambda assignment, which
    # also gives the function a proper __name__ and a docstring.
    return np.array(list(map(func, *iterable)))
def add_intercept(data, int_name = 'Intercept', loc = 0, inplace = False):
    '''(pd.DataFrame[, str, int, bool]) -> pd.DataFrame or None
    Precondition:
        1. -(len(data.columns) + 1) <= loc <= len(data.columns)
        2. int_name not in data.columns
    Add the column of 1s with the name int_name to data at the
    specified loc (negative locs count from the end). If inplace is
    True, data is mutated and None is returned; otherwise data is left
    untouched and the augmented copy is returned.
    '''
    ncol = len(data.columns)
    assert int_name not in list(data.columns), \
        '{0} already exists in data. Try different int_name.'\
        .format(int_name)
    assert -(ncol + 1) <= loc <= ncol, \
        'loc must be in between {0} and {1}. Current loc is {2}.'\
        .format(-(ncol + 1), ncol, loc)
    pos = loc + ncol + 1 if loc < 0 else loc
    if inplace:
        data.insert(pos, int_name, 1)
        return None
    augmented = data.copy()
    augmented.insert(pos, int_name, 1)
    return augmented
def additive_terms(terms):
    '''([str]) -> str
    Return the additive terms of the formula with terms, joined by
    ' + '. An empty list yields the empty string.
    >>> additive_terms(['a', 'b', 'c'])
    'a + b + c'
    '''
    return ' + '.join(terms)
def csum_N_pois(pmf, support, lambd, eps = 1e-05):
    '''(function, np.array, number[, float]) -> np.array

    Preconditions:
    1. pmf is a pmf of X_i where the random summation S = X_1 + ... + X_N
    with N ~ Pois(lambd) has 0, 1, ..., M - 1 as the first M element of
    its support.
    2. pmf is a function whose output is np.array whenever the input is
    np.array.
    3. support == np.arange(0, l + 1), where l is the largest number of
    the support of pmf.
    4. lambd > 0
    5. 0 < eps < 1

    Return the approximate probability mass function of S, i.e.
    P(S = x | S < M) for some appropriate integer M determined by
    P(S >= M) < eps, where S is the sum of iid X_i's with
    i = 1, ..., N ~ Pois(lambd), X_i ~ pmf, and X_i's support is
    a subset of np.arange(0, l + 1) (= support) with l being the largest
    element of X_i's support.

    >>> def dY(y):
    ...     def pY(d):
    ...         if d in [1, 4]:
    ...             return .25
    ...         elif d == 2:
    ...             return .5
    ...         else:
    ...             return 0
    ...     if not hasattr(y, '__iter__'):
    ...         return pY(y)
    ...     return npmap(pY, y)
    ...
    >>> result_Y = csum_N_pois(dY, np.arange(0, 5), 3)
    >>> M_Y = len(result_Y)
    >>> print(M_Y, sum(result_Y))
    39 0.9999999999999998
    >>> result_Y[0:4]
    array([0.04978729, 0.03734044, 0.08868328, 0.05951115])
    '''
    # Evaluate the pmf of X_i once over its entire support
    pmf_vec = pmf(support)
    # Define the pgf of X_i: g(t) = sum_d t ** d * P(X = d)
    g = lambda t: npmap(lambda d: sum(d ** support * pmf_vec), t)
    # Find M so that P(S >= M) < eps; fmin with full_output = True
    # returns (xopt, fopt, ...), so index 1 is the minimized bound
    Ms = lambda t: (-lambd * (1 - g(t)) - np.log(eps)) / np.log(t)
    M = np.ceil(fmin(Ms, 1.001, full_output = True, disp = False)[1])
    # Append 0's: zero-pad pmf_vec to length M for the DFT
    pmf_vec = np.append(pmf_vec, np.zeros(int(M - len(pmf_vec))))
    # Apply DFT, transform through the compound-Poisson pgf
    # exp(-lambd * (1 - g(t))), then apply the inverse DFT
    gtks = np.fft.fft(pmf_vec)
    gS_gtks = np.exp(-lambd * (1 - gtks))
    pS_tks = np.fft.ifft(gS_gtks).real
    return pS_tks
def dcast(data, formula, value_var = None):
    '''(pd.DataFrame, str[, str]) -> pd.DataFrame

    Return the grouped DataFrame based on data and formula. If value_var
    is specified, then it is used to populate the output DataFrame; if
    not specified, then it is guessed from data and formula.

    NOTE(review): value_var is actually consulted only when more than
    one candidate value column remains after removing the formula
    columns; with exactly one candidate, value_var is ignored — confirm
    against callers before relying on value_var to override.
    '''
    all_cols = list(data.columns)
    indices_input = []
    # Left-hand side of '~': the row index column(s), '+'-separated
    indices = formula[:(formula.index('~'))].split('+')
    if len(indices) == 1:
        indices = indices[0].strip()
        indices_input.append(data[indices])
        cols_used = [indices]
    else:
        indices = list(map(lambda x: x.strip(), indices))
        for ind in indices:
            indices_input.append(data[ind])
        cols_used = indices[:]
    cols_input = []
    # Right-hand side of '~': the output column(s), '+'-separated
    cols = formula[(formula.index('~') + 1):].split('+')
    if len(cols) == 1:
        cols = cols[0].strip()
        cols_input.append(data[cols])
        cols_used.append(cols)
    else:
        cols = list(map(lambda x: x.strip(), cols))
        for c in cols:
            cols_input.append(data[c])
        cols_used.extend(cols)
    # Whatever column the formula did not mention is the value column
    value_col = list(set(all_cols).difference(set(cols_used)))
    assert len(value_col) == 1 or value_var is not None, \
        'value column ambiguous; should be one of: {0}'.format(value_col)
    if len(value_col) == 1:
        value_col = value_col[0]
    elif value_var is not None:
        value_col = value_var
    # aggfunc is the identity: each cell is expected to hold one value
    return pd.crosstab(
        index = indices_input,
        columns = cols_input,
        values = data[value_col],
        aggfunc = lambda x: x
    )
def determine_type(actual, pred, p_thres):
    '''(np.array, np.array, float) -> np.array

    Determine classification types ('tpn', 'fp', or 'fn') using
    actual, pred, and p_thres. 'tpn' collapses true positives and
    true negatives into a single label.
    '''
    def label(y, yhat):
        if ((y == 0) & (yhat == 0)) | ((y == 1) & (yhat == 1)):
            return 'tpn'
        if (y == 0) & (yhat == 1):
            return 'fp'
        return 'fn'
    # Threshold the predicted probabilities into 0/1 labels
    yhats = pred > p_thres
    return np.array([label(y, yhat) for y, yhat in zip(actual, yhats)])
def dist_to_point(X, point):
    '''(pd.DataFrame or np.array, np.array) -> float or np.array

    Precondition: X.shape[1] == len(point)

    Calculate the distance from each row of X to the point.
    '''
    # Pull the raw ndarray out of a DataFrame; ndarrays pass through
    rows = X.values if 'pandas' in str(type(X)) else X
    distances = [np.linalg.norm(row - point) for row in rows]
    return np.array(distances)
def dpmf(x, pmf_vec, support_vec = None):
    '''(object or *iterable, *iterable[, *iterable]) -> number or np.array

    Preconditions:
    1. Elements of x are of the same type as elements of support_vec,
    if support_vec is specified. If support_vec is not specified, then
    x must be a number or an iterable object with numeric elements.
    2. sum(pmf_vec) == 1
    3. len(pmf_vec) == len(support_vec) if support_vec is specified.
    4. If support_vec is specified, then each element of support_vec
    must be hashable, i.e. element.__hash__ is not None

    Return the probability evaluated at each element of x based on
    probabilities in pmf_vec and elements of support_vec if support_vec
    is specified (each element of support_vec is the input that corresponds
    to the probability in pmf_vec). If not specified, then support_vec will
    be replaced with np.arange(0, len(pmf_vec)).

    >>> # Example 1
    >>> pmf_eg1 = [0.25, 0.5 , 0.25]
    >>> support_eg1 = np.array([1, 2, 4])
    >>> dpmf(1, pmf_eg1, support_eg1)
    0.25
    >>> dpmf([3, 4, 6], pmf_eg1, support_eg1)
    array([0.  , 0.25, 0.  ])
    >>> dpmf(np.array([3, 4, 6]), pmf_eg1, support_eg1)
    array([0.  , 0.25, 0.  ])
    >>>
    >>> # Example 2
    >>> pmf_eg2 = (.25, .4, .35)
    >>> support_eg2 = ['apple', 'orange', 'neither']
    >>> dfruit = lambda x: dpmf(x, pmf_eg2, support_eg2)
    >>> dfruit(['apple', 'neither'])
    array([0.25, 0.35])
    >>> dfruit('orange')
    0.4
    >>> dfruit(np.array(['orange', 'hello']))
    array([0.4, 0. ])
    '''
    M = len(pmf_vec)
    if support_vec is None:
        support_vec = np.arange(0, M)
    # Map each support point to its probability; points outside the
    # support get probability 0 via dict.get's default.
    D = dict(zip(support_vec, pmf_vec))
    finder = lambda d: D.get(d, 0)
    if hasattr(x, '__iter__'):
        # Strings are iterable but represent a single support point
        # (isinstance, not type ==, so str subclasses behave the same).
        if isinstance(x, str):
            return finder(x)
        return np.array(list(map(finder, x)))
    return finder(x)
def fft_curve(tt, yy, only_sin = False):
    '''(array-like, array-like, bool) -> {str: number, lambda, or tuple}

    Estimate sin + cos curve of yy through the input time sequence tt,
    and return fitting parameters "amp", "omega", "phase", "offset",
    "freq", "period", and "fitfunc". Set only_sin = True to fit only a
    sine curve.

    Reference: https://stackoverflow.com/questions/16716302/how-do-i-fit-a-sine-curve-to-my-data-with-pylab-and-numpy
    '''
    tt = np.array(tt)
    yy = np.array(yy)
    assert len(set(np.diff(tt))) == 1, \
        'tt does not have an uniform spacing.'
    ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0]))   # assume uniform spacing
    Fyy = abs(np.fft.fft(yy))
    # excluding the zero frequency "peak", which is related to offset
    guess_freq = abs(ff[np.argmax(Fyy[1:]) + 1])
    guess_amp = np.std(yy) * 2. ** 0.5
    guess_offset = np.mean(yy)
    # Initial guesses: [amplitude, angular frequency, phase, offset]
    guess = [
        guess_amp,
        2. * np.pi * guess_freq,
        0.,
        guess_offset
    ]
    def sinfunc(t, A_1, w_1, p_1, c):
        # Pure sine model with vertical offset c
        return A_1 * np.sin(w_1 * t + p_1) + c
    if only_sin:
        guess = np.array(guess)
        popt, pcov = curve_fit(sinfunc, tt, yy, p0 = guess)
        A_1, w_1, p_1, c = popt
        fitfunc = lambda t: sinfunc(t, A_1, w_1, p_1, c)
        # No cosine component was fitted; report zeros for it
        A_2, w_2, p_2 = 0, 0, 0
    else:
        def curve(t, A_1, w_1, p_1, c, A_2, w_2, p_2):
            # sin + cos model sharing a single offset c
            return sinfunc(t, A_1, w_1, p_1, c) +\
                A_2 * np.cos(w_2 * t + p_2)
        guess.extend([
            guess_amp,
            2. * np.pi * guess_freq,
            0.
        ])
        # Halve all guesses since the signal is split between the
        # sine and cosine components
        guess = np.array(guess) / 2
        popt, pcov = curve_fit(curve, tt, yy, p0 = guess)
        A_1, w_1, p_1, c, A_2, w_2, p_2 = popt
        fitfunc = lambda t: curve(t, A_1, w_1, p_1, c, A_2, w_2, p_2)
    return {
        "amp": [A_1, A_2],
        "omega": [w_1, w_2],
        "phase": [p_1, p_2],
        "offset": c,
        "fitfunc": fitfunc,
        "maxcov": np.max(pcov),
        "rawres": (guess, popt, pcov)
    }
def fusion_estimates(y, lambd, theta = None, max_iter = 1000, eps = 1e-05):
    '''(np.array, number[, np.array, int, number]) ->
        {str: np.array or number}

    Preconditions:
    1. len(y) == len(theta) if theta specified.
    2. lambd > 0 and eps > 0
    3. max_iter > 1

    Calculate the fusion estimates theta_i's in y_i = theta_i + error_i.
    Return the dictionary that stores:
    - 'theta', the fusion estimates of y iterated from theta with the
        maximum iteration max_iter and the cost difference threshold eps.
    - 'phi', the differences of each 'theta'
    - 'lambd', the lambd specified
    - 'iteration', the number of iterations, and
    - 'costs', the cost function evaluated at each iteration where the
        first cost is calculated at iteration 0.

    See https://joon3216.github.io/research_materials/2018/non_separable_penalty
    for details.
    '''
    n = len(y)
    if theta is None:
        theta = y.copy()
    # phi parametrizes theta via successive differences:
    # theta_j = theta_1 + sum(phi_1..phi_{j-1})
    phi = np.diff(theta)
    phisums_old = np.cumsum(phi)
    theta_1_new = (sum(y) - sum(phisums_old)) / n
    # Objective: squared error plus L1 penalty on the differences
    cost = sum((y - theta) ** 2) + lambd * sum(abs(phi))
    costs = []
    costs.append(cost)
    there_is_a_progress = True
    iteration = 0
    while there_is_a_progress and iteration < max_iter:
        phi_new = np.zeros(n)
        for j in range(1, n):
            phisums_new = np.cumsum(phi_new)
            req = sum(
                phisums_old[(j - 1):(n - 1)] -\
                phisums_old[j - 1] + phisums_new[j - 1]
            )
            # Coordinate-wise update: phi_new[j] stays 0 unless the
            # discriminant exceeds the +/- lambd / 2 threshold
            discri = sum(y[j:n]) - (n - (j + 1) + 1) * theta_1_new - req
            if discri < -lambd / 2:
                phi_new[j] = (discri + lambd / 2) / (n - (j + 1) + 1)
            elif discri > lambd / 2:
                phi_new[j] = (discri - lambd / 2) / (n - (j + 1) + 1)
        # Drop the leading placeholder zero at index 0
        phi_new = phi_new[1:]
        phisums_new = phisums_new[1:]
        # Reconstruct theta from the current theta_1 and cumulative phis
        theta = np.append(theta_1_new, theta_1_new + phisums_new)
        cost = sum((y - theta) ** 2) + lambd * sum(abs(phi_new))
        # Update theta_1 for the NEXT iteration (after costing theta)
        theta_1_new = (sum(y) - sum(phisums_new)) / n
        phisums_old = phisums_new
        iteration += 1
        costs.append(cost)
        # Stop once the cost improvement falls within eps
        there_is_a_progress = not (abs(costs[iteration - 1] - cost) <= eps)
    return {
        'theta': theta,
        'phi': phi_new,
        'lambd': lambd,
        'iteration': iteration,
        'costs': np.array(costs)
    }
def gauss_seidel(y, B = None, theta = None, lambd = None, max_iter = 50,
                 eps = 1e-08):
    '''(1d-array[, 2d-array, 1d-array, float, int, float]) ->
        {str: np.array and str: number}

    Preconditions:
    1. If B is None, then lambd must not be None and lambd > 0, as well as
    len(y) >= 5.
    2. If B is not None, then B must be either strictly diagonally
    dominant, symmetric positive definite, or both.
    3. If theta is not None, then len(y) == len(theta).
    4. eps > 0
    5. max_iter >= 1

    Approximate theta that solves the linear equation y = B @ theta,
    where len(y) == n and B is n-by-n, using the Gauss-Seidel method.
    If B is specified, then lambd is ignored; if B is not specified,
    then lambd must be positive and be specified since the following
    B will be used in the equation:

    >>> n = len(y) # must be at least 5
    >>> B_lambd = np.zeros(n ** 2).reshape(n, n)
    >>> B_lambd[0, [0, 1, 2]] = [1, -2, 1]
    >>> B_lambd[1, [0, 1, 2, 3]] = [-2, 5, -4, 1]
    >>> for j in range(2, n - 2):
    ...     B_lambd[j, [j - 2, j - 1, j, j + 1, j + 2]] = [1, -4, 6, -4, 1]
    ...
    >>> B_lambd[n - 2, [-4, -3, -2, -1]] = [1, -4, 5, -2]
    >>> B_lambd[n - 1, [-3, -2, -1]] = [1, -2, 1]
    >>> B_lambd = lambd * B_lambd
    >>> B = B_lambd + np.identity(n)

    If theta is None, then the initial guess starts with theta = y.
    '''
    assert eps > 0, 'eps must be positive. Current value: ' + str(eps)
    max_iter = int(max_iter)
    assert max_iter >= 1, \
        'max_iter must be at least 1. Current value: ' + str(max_iter)
    y = np.array(y)
    n = len(y)
    if B is None:
        msg = 'If B is None, then lambd must be '
        assert lambd is not None, msg + 'specified.'
        assert lambd > 0, msg + 'positive. Current lambd == ' + str(lambd)
        assert n >= 5, \
            'If B is None, then len(y) must be at least 5. ' +\
            'Currently, len(y) == ' + str(n) + '.'
        # Second-difference penalty matrix (see docstring)
        B_lambd = np.zeros(n ** 2).reshape(n, n)
        B_lambd[0, [0, 1, 2]] = [1, -2, 1]
        B_lambd[1, [0, 1, 2, 3]] = [-2, 5, -4, 1]
        for j in range(2, n - 2):
            B_lambd[j, [j - 2, j - 1, j, j + 1, j + 2]] = [1, -4, 6, -4, 1]
        B_lambd[n - 2, [-4, -3, -2, -1]] = [1, -4, 5, -2]
        B_lambd[n - 1, [-3, -2, -1]] = [1, -2, 1]
        B_lambd = lambd * B_lambd
        B = B_lambd + np.identity(n)
    else:
        B = np.array(B).copy()
        assert B.shape == (n, n), \
            'B.shape == {0}, not {1}'.format(B.shape, (n, n))
        # Convergence check: strict diagonal dominance by column,
        # by row, or symmetric positive definiteness (via Cholesky)
        if (abs(B).sum(axis = 0) - 2 * abs(B).diagonal() < 0).all():
            pass
        elif (abs(B).sum(axis = 1) - 2 * abs(B).diagonal() < 0).all():
            pass
        else:
            msg2 =\
                'B given is neither strictly diagonally dominant ' +\
                'nor symmetric positive definite.'
            if (B.T == B).all():
                try:
                    np.linalg.cholesky(B)
                # Narrowed from a bare except: cholesky raises
                # LinAlgError when B is not positive definite
                except np.linalg.LinAlgError:
                    raise ValueError(msg2)
            else:
                raise ValueError(msg2)
    # Splitting B = (L + D) + U for the Gauss-Seidel iteration
    LD = np.tril(B)
    U = B - LD
    # B is fixed, so invert the lower-triangular factor once instead
    # of recomputing np.linalg.inv(LD) on every iteration
    LD_inv = np.linalg.inv(LD)
    if theta is None:
        theta = y.copy()
    else:
        theta = np.array(theta)
        assert len(y) == len(theta), \
            'If the initial theta is specified, then the length ' +\
            'of theta must be the same as y. Currently, ' +\
            'len(y) == {0} != {1} == len(theta)'.format(len(y), len(theta))
    iteration = 0
    # Track the residual norm ||B @ theta - y|| at every step
    errors = [np.linalg.norm(B @ theta - y)]
    no_conv = True
    while no_conv:
        theta = LD_inv @ (y - (U @ theta))
        errors.append(np.linalg.norm(B @ theta - y))
        iteration += 1
        if errors[-1] < eps or iteration == max_iter:
            no_conv = False
    errors = np.array(errors)
    return {
        'theta': theta,
        'lambd': lambd,
        'iteration': iteration,
        'errors': errors
    }
def get_p_thres(roc_tbl, criterion = None):
    '''(returning pd.DataFrame of produce_roc_table[, [str, number]])
        -> float

    Precondition: criterion in [('tpr', x), ('fpr', y)]
    for some 0 < x < 1 and 0 < y < 1 (criterion need not be a tuple).

    Return the probability threshold from roc_tbl based on criterion.
    By default, the function returns the threshold that yields the
    minimum distance from the roc curve to the point (fpr, tpr) = (0, 1).
    If criterion == ('tpr', x) for some 0 < x < 1, then it returns a
    probability threshold that achieves the true positive rate of at
    least x and has the minimum false positive rate;
    if criterion == ('fpr', y) for some 0 < y < 1, then it returns a
    probability threshold that achieves the false positive rate of at
    most y and has the maximum true positive rate.
    '''
    if criterion is None:
        # Default: threshold closest to the ideal corner (0, 1)
        dtp = roc_tbl['dist_to_optimal_point']
        p_thres = roc_tbl\
            .loc[lambda x: x['dist_to_optimal_point'] == np.min(dtp)]\
            ['thresholds']\
            .values[0]
    else:
        # Validate the (name, level) pair before using it
        msg = 'If criterion is specified, '
        assert len(criterion) == 2, \
            msg + 'the length of criterion must be 2, not ' +\
            str(len(criterion)) + '.'
        assert type(criterion) != str, \
            msg + 'then it must be an array-like object, not a string.'
        assert criterion[0] in ['fpr', 'tpr'], \
            msg + 'then the first element must be exactly one of ' +\
            '"fpr" or "tpr", not ' + str(criterion[0]) + '.'
        type1 = str(type(criterion[1]))
        assert 'float' in type1 or 'int' in type1, \
            msg + 'then the second element must be a number, not ' +\
            type1 + '.'
        assert 0 < criterion[1] < 1, \
            msg + 'then the second element must be a number on the ' +\
            'interval (0, 1), not ' + str(criterion[1]) + '.'
        if criterion[0] == 'tpr':
            # Optimal p_thres is values[0], but it sometimes does not
            # result in a desired tpr. This is because produce_roc_table()
            # uses sklearn roc_curve with drop_intermediate = True, and
            # a very small change (around a scale of 1e-09) in the
            # threshold affects tpr. values[1] is less optimal, but always
            # achieves the desired tpr.
            p_thres = roc_tbl\
                .loc[lambda x: x['tpr'] >= criterion[1]]\
                ['thresholds']\
                .values[1]
        else:
            # Optimal p_thres is values[-1], but values[-2] is used
            # by the same reasoning as above.
            p_thres = roc_tbl\
                .loc[lambda x: x['fpr'] <= criterion[1]]\
                ['thresholds']\
                .values[-2]
    return p_thres
def get_response(mod):
    '''(sm.GLMResultsWrapper) -> str

    Get the name of response column of mod by parsing the text of
    mod.summary().
    '''
    text = str(mod.summary())
    start = text.index('Dep. Variable')
    stop = text.index('No. Observations:')
    fragment = text[start:stop].strip()
    # Drop the leading 'Dep. Variable:' label (14 characters)
    return fragment[14:].strip()
def hsm(x, tau = .5):
    '''(pd.Series, float) -> float

    Precondition: 0 < tau < 1

    Estimate the mode of x by the half sample mode method: repeatedly
    keep the narrowest window covering a tau-fraction of the sorted
    sample, and recurse until at most two points remain.
    '''
    n = len(x)
    x = x.sort_values()
    # Window size kept at this step
    m = int(np.ceil(tau * n)) if tau <= .5 else int(np.floor(tau * n))
    m1 = int(m - 1)
    # Positional slices of the sorted series: window end / start points
    x2 = x[(m - 1):n]
    x1 = x[0:(n - m1)]
    k = np.arange(1, n - m1 + 1)
    # Start position(s) of the narrowest window of size m
    k = k[x2.values - x1.values == min(x2.values - x1.values)]
    # Ties are broken at random
    k = np.random.choice(k, 1)[0] if len(k) > 1 else k[0]
    x = x[int(k - 1):int(k + m1)]
    # Recurse on the kept window; average once <= 2 points remain
    r = x.mean() if len(x) <= 2 else hsm(x, tau = tau)
    return r
def impute_em(X, max_iter = 3000, eps = 1e-08):
    '''(np.array, int, number) -> {str: np.array or int}

    Precondition: max_iter >= 1 and eps > 0

    Return the dictionary with five keys where:
    - Key 'mu' stores the mean estimate of the imputed data.
    - Key 'Sigma' stores the variance estimate of the imputed data.
    - Key 'X_imputed' stores the imputed data that is mutated from X using
        the EM algorithm.
    - Key 'C' stores the np.array that specifies the original missing
        entries of X.
    - Key 'iteration' stores the number of iteration used to compute
        'X_imputed' based on max_iter and eps specified.
    '''
    nr, nc = X.shape
    # C[i, j] is True where X[i, j] is observed (not NaN)
    C = np.isnan(X) == False
    # Collect M_i and O_i's: per-row missing / observed column indices,
    # encoded so that -1 marks "not in this set"
    one_to_nc = np.arange(1, nc + 1, step = 1)
    M = one_to_nc * (C == False) - 1
    O = one_to_nc * C - 1
    # Generate Mu_0 and Sigma_0 from the observed values
    Mu = np.nanmean(X, axis = 0)
    observed_rows = np.where(np.isnan(sum(X.T)) == False)[0]
    S = np.cov(X[observed_rows, ].T)
    if np.isnan(S).any():
        # No fully-observed rows: fall back to a diagonal covariance
        S = np.diag(np.nanvar(X, axis = 0))
    # Start updating
    Mu_tilde, S_tilde = {}, {}
    X_tilde = X.copy()
    no_conv = True
    iteration = 0
    while no_conv and iteration < max_iter:
        # E-step: fill each row's missing entries with the conditional
        # mean given its observed entries
        for i in range(nr):
            S_tilde[i] = np.zeros(nc ** 2).reshape(nc, nc)
            if set(O[i, ]) != set(one_to_nc - 1): # missing vals exist
                M_i, O_i = M[i, ][M[i, ] != -1], O[i, ][O[i, ] != -1]
                # Partition the covariance by missing/observed blocks
                S_MM = S[np.ix_(M_i, M_i)]
                S_MO = S[np.ix_(M_i, O_i)]
                S_OM = S_MO.T
                S_OO = S[np.ix_(O_i, O_i)]
                Mu_tilde[i] = Mu[np.ix_(M_i)] +\
                    S_MO @ np.linalg.inv(S_OO) @\
                    (X_tilde[i, O_i] - Mu[np.ix_(O_i)])
                X_tilde[i, M_i] = Mu_tilde[i]
                # Conditional covariance contribution of the missing block
                S_MM_O = S_MM - S_MO @ np.linalg.inv(S_OO) @ S_OM
                S_tilde[i][np.ix_(M_i, M_i)] = S_MM_O
        # M-step: update the mean and covariance estimates
        Mu_new = np.mean(X_tilde, axis = 0)
        S_new = np.cov(X_tilde.T, bias = 1) +\
            reduce(np.add, S_tilde.values()) / nr
        # Converged once both parameter updates move less than eps
        no_conv =\
            np.linalg.norm(Mu - Mu_new) >= eps or\
            np.linalg.norm(S - S_new, ord = 2) >= eps
        Mu = Mu_new
        S = S_new
        iteration += 1
    return {
        'mu': Mu,
        'Sigma': S,
        'X_imputed': X_tilde,
        'C': C,
        'iteration': iteration
    }
def kde(x, samples, **kwargs):
    '''(float or *iterable, *iterable[, arguments of KDEUnivariate])
        -> np.array

    Return the value of kernel density estimate evaluated at x. kde is
    fitted using samples; extra keyword arguments are forwarded to
    KDEUnivariate.fit().
    '''
    estimator = sm.nonparametric.KDEUnivariate(samples)
    estimator.fit(**kwargs)
    return estimator.evaluate(x)
def kde_mult(X, samples, **kwargs):
    '''(*iterable, *iterable[, arguments of KDEMultivariate]) -> np.array

    Precondition: number of columns of X == number of columns of samples

    Return the value of multidimensional kde evaluated at each row of X.
    kde is fitted using samples.
    '''
    # One continuous ('c') variable type flag per column of X
    var_types = 'c' * X.shape[1]
    estimator = sm.nonparametric.KDEMultivariate(
        samples, var_type = var_types, **kwargs
    )
    return estimator.pdf(X)
def logarithmic_scoring(mod, data, get_sum = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, bool]) -> float

    Return the logarithmic scoring of mod onto the data, computed as
    y * log(phat) + (1 - y) * log(1 - phat). The higher, the better.
    Set get_sum = True to get
    sum(y * log(phat) + (1 - y) * log(1 - phat)) instead of a vector.
    '''
    # Consistency: reuse get_response() rather than re-parsing
    # mod.summary() inline (the removed code duplicated it verbatim).
    response = get_response(mod)
    assert response in data.columns, \
        'response "' + response + '" does not exist in data. Needs one.'
    # Features are the rows of the coefficient table
    features = list(mod.conf_int().index)
    ys = data[response].values
    phats = mod.predict(data[features]).values
    result = ys * np.log(phats) + (1 - ys) * np.log(1 - phats)
    return sum(result) if get_sum else result
def plot_lm(mod, mfrow = (2, 2), hspace = .5, wspace = .3):
    '''(sm.RegressionResultsWrapper[, (int, int), float, float]) -> None

    Preconditions:
    1. mfrow[0] * mfrow[1] == 4
    2. len(mfrow) == 2

    Plot the following plots of mod in the shape of mfrow, in this order:
        * Residuals vs. Fitted plot
        * Normal Q-Q plot
        * Scale-Location plot
        * Residuals vs. Leverage plot

    Specify hspace and wspace (arguments of fig.subplots_adjust() where
    fig = plt.figure()) to adjust margins between subplots.
    '''
    fig = plt.figure()
    # One diagnostic panel per subplot, in the documented order
    plot_funcs = [plot_rf, plot_qq, plot_sl, plot_rlev]
    i = 0
    for func in plot_funcs:
        i += 1
        plt.subplot(mfrow[0], mfrow[1], i)
        func(mod)
    fig.subplots_adjust(hspace = hspace, wspace = wspace)
    plt.show()
def plot_op(mod, response, num_breaks = None, breaks = None,
            xlab = 'Predicted Probability',
            ylab = 'Observed Proportion'):
    '''(sm.GLMResultsWrapper, array-like[, int, np.array, str, str])
        -> None

    Plot the grouped observed proportions vs. predicted probabilities
    of mod that used `response` argument as the reponse.

    Specify `num_breaks` to divide linear predictors into that much of
    intervals of equal length.

    Specify `breaks` to have different bins for linear predictors;
    `num_breaks` is ignored if `breaks` is specified.
    '''
    # Recover the linear predictor from the predicted probabilities
    logit = lambda p: np.log(p / (1 - p))
    predprob = mod.predict()
    linpred = logit(predprob)
    if breaks is None:
        # Default: roughly 50 observations per bin
        if num_breaks is None:
            num_breaks = int(len(response) / 50)
        breaks = np.unique(
            np.quantile(linpred, np.linspace(0, 1, num = num_breaks + 1))
        )
    bins = pd.cut(linpred, breaks)
    # Per-bin totals: observed successes, counts, mean predicted prob
    df =\
        pd.DataFrame({
            'y': response,
            'count': 1,
            'predprob': predprob,
            'bins': bins
        })\
        .groupby('bins')\
        .agg(
            y = ('y', 'sum'),
            counts = ('count', 'sum'),
            ppred = ('predprob', 'mean')
        )\
        .dropna()
    # Binomial standard error per bin; bars span +/- 2 SE
    df['se_fit'] = np.sqrt(df['ppred'] * (1 - df['ppred']) / df['counts'])
    df['ymin'] = df['y'] / df['counts'] - 2 * df['se_fit']
    df['ymax'] = df['y'] / df['counts'] + 2 * df['se_fit']
    x = np.linspace(min(df['ppred']), max(df['ppred']))
    plt.scatter(df['ppred'], df['y'] / df['counts'])
    plt.vlines(
        df['ppred'], df['ymin'], df['ymax'],
        alpha = .3, color = '#1F77B4'
    )
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    # 45-degree reference line: perfect calibration
    plt.plot(x, x, color = '#FF7F0E', alpha = .4)
    plt.show()
def plot_qq(mod):
    '''(sm.RegressionResultsWrapper) -> None

    Plot a QQ-plot of mod. Numbers in the plot indicate outliers. For
    example, if `17` is plotted besides a point, then it means that the
    observation at index 17, or the 18th observation, of the training data
    is considered a possible outlier.
    '''
    influence = mod.get_influence()
    rstandard = influence.resid_studentized_internal[:]
    # Theoretical normal quantiles vs. ordered standardized residuals
    arrays = stats.probplot(rstandard, dist = 'norm')
    theoretical_q, sorted_rstandard = arrays[0]
    slope, intercept, r = arrays[1]
    # Keep each residual paired with its original observation index
    rstandard2 = list(enumerate(rstandard))
    rstandard2.sort(key = lambda x: x[1])
    rstandard2 = np.array(rstandard2)
    # Flag |standardized residual| > 2 as a potential outlier
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard2)
    outliers = np.array(list(outliers))
    # Columns of dat: id, sorted rstandard, theoretical quantile, flag
    dat = np.c_[rstandard2, theoretical_q, outliers]
    x = np.linspace(min(theoretical_q), max(theoretical_q))
    plt.scatter(dat[:, 2], dat[:, 1])
    plt.plot(
        x, slope * x + intercept, linestyle = 'dashed', color = 'grey'
    )
    plt.title('Normal Q-Q')
    plt.xlabel('Theoretical quantiles')
    plt.ylabel('Standardized residuals')
    # Annotate only the flagged points with their observation index
    dat2 = list(filter(lambda row: row[-1] == 1, dat))
    for item in dat2:
        plt.text(item[2], item[1], str(int(item[0])))
    plt.show()
def plot_rf(mod):
    '''(sm.RegressionResultsWrapper) -> None

    Plot a Residual vs. Fitted plot of mod. Numbers in the plot indicate
    outliers. For example, if `17` is plotted besides a point, then it
    means that the observation at index 17, or the 18th observation, of
    the training data is considered a possible outlier.
    '''
    residuals = mod.resid
    fitted = mod.predict()
    # Smoothed trend of the residuals against the fitted values
    lowess_line = sm.nonparametric.lowess(residuals, fitted)
    influence = mod.get_influence()
    rstandard = influence.resid_studentized_internal[:]
    # Pair each standardized residual with its observation index
    rstandard = np.array(list(enumerate(rstandard)))
    # Flag |standardized residual| > 2 as a potential outlier
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard)
    outliers = np.array(list(outliers))
    # Columns of dat: id, rstandard, fitted, residual, outlier flag
    dat = np.c_[rstandard, fitted, residuals, outliers]
    outlier_ids = dat[dat[:, -1] == 1]
    x = np.linspace(min(fitted), max(fitted))
    plt.scatter(fitted, residuals)
    plt.plot(lowess_line[:, 0], lowess_line[:, 1], color = 'red')
    plt.plot(x, np.zeros(len(x)), linestyle = 'dashed', color = 'grey')
    plt.title('Residuals vs. Fitted')
    plt.xlabel('Fitted values')
    plt.ylabel('Residuals')
    # Annotate flagged points at (fitted, residual)
    for item in outlier_ids:
        plt.text(item[2], item[3], str(int(item[0])))
    plt.show()
def plot_rlev(mod):
    '''(sm.RegressionResultsWrapper) -> None

    Plot a Residuals vs. Leverage plot of mod. Numbers in the plot indicate
    outliers. For example, if `17` is plotted besides a point, then it
    means that the observation at index 17, or the 18th observation, of the
    training data is considered a possible outlier.
    '''
    influence = mod.get_influence()
    # Leverage: diagonal of the hat matrix
    leverage = influence.hat_matrix_diag
    # cooks_d = influence.cooks_distance
    rstandard = influence.resid_studentized_internal[:]
    # Pair each standardized residual with its observation index
    rstandard = np.array(list(enumerate(rstandard)))
    # Flag |standardized residual| > 2 as a potential outlier
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard)
    outliers = np.array(list(outliers))
    # Columns of dat: id, rstandard, leverage, outlier flag
    dat = np.c_[rstandard, leverage, outliers]#, cooks_d[0]]
    outlier_ids = dat[dat[:, -1] == 1]
    x = np.linspace(0, max(leverage))
    y = np.linspace(min(rstandard[:, 1]), max(rstandard[:, 1]))
    plt.scatter(dat[:, 2], dat[:, 1])
    plt.plot(x, np.zeros(len(x)), linestyle = 'dashed', color = 'grey')
    plt.plot(np.zeros(len(y)), y, linestyle = 'dashed', color = 'grey')
    plt.title('Residuals vs. Leverage')
    plt.xlabel('Leverage')
    plt.ylabel('Standardized residuals')
    # Annotate flagged points at (leverage, rstandard)
    for item in outlier_ids:
        plt.text(item[2], item[1], str(int(item[0])))
    plt.show()
def plot_sl(mod):
    '''(sm.RegressionResultsWrapper) -> None

    Plot a Scale-Location plot of mod. Numbers in the plot indicate
    outliers. For example, if `17` is plotted besides a point, then it
    means that the observation at index 17, or the 18th observation, of the
    training data is considered a possible outlier.
    '''
    fitted = mod.predict()
    influence = mod.get_influence()
    rstandard = influence.resid_studentized_internal[:]
    # Pair each standardized residual with its observation index
    rstandard = np.array(list(enumerate(rstandard)))
    # Flag |standardized residual| > 2 BEFORE the sqrt transform below
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard)
    outliers = np.array(list(outliers))
    # Transform residuals to sqrt(|rstandard|) for the y-axis
    rstandard[:, 1] = abs(rstandard[:, 1]) ** .5
    dat = np.c_[rstandard, fitted, outliers] # id, resid, fitted, outliers
    lowess_line = sm.nonparametric.lowess(dat[:, 1], dat[:, 2])
    outlier_ids = dat[dat[:, -1] == 1]
    plt.scatter(dat[:, 2], dat[:, 1])
    plt.plot(lowess_line[:, 0], lowess_line[:, 1], color = 'red')
    plt.title('Scale-Location')
    plt.xlabel('Fitted values')
    plt.ylabel(r'$\sqrt{|Standardized\/\/residuals|}$')
    # Annotate flagged points at (fitted, transformed residual)
    for item in outlier_ids:
        plt.text(item[2], item[1], str(int(item[0])))
    plt.show()
def produce_roc_table(mod, train):
    '''(sm.GLMResultsWrapper, pd.DataFrame) -> pd.DataFrame

    Remarks:
    1. train must be the data that is used to fit mod.
    2. train must contain the endogenous (response) variable used to
        fit mod, under the same name as the Dep. Variable reported by
        mod.summary() (the name is recovered via get_response()).

    Return DataFrame that contains informations of fpr, tpr, and the
    corresponding probability thresholds based on mod and train, plus
    the distance from each (fpr, tpr) point to the optimal corner
    (fpr, tpr) == (0, 1).
    '''
    response = get_response(mod)
    actuals_train = train[response]
    preds_train = mod.predict()
    # sklearn's roc_curve (defaults apply, incl. drop_intermediate)
    fpr, tpr, threses = roc_curve(actuals_train, preds_train)
    roc_tbl = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'thresholds': threses})
    # Distance of each ROC point to the ideal corner (0, 1)
    dtp = dist_to_point(roc_tbl[['fpr', 'tpr']], np.array([0, 1]))
    roc_tbl['dist_to_optimal_point'] = dtp
    return roc_tbl
def random_word(n, type = 'alpha'):
    '''(int, str) -> str

    Precondition: type in ['alnum', 'alpha', 'lower', 'numeric', 'upper']

    Return a random combination of characters of length n and of
    type `type`:
        * 'alnum': lower-case alphabets, capitals, and integers
        * 'alpha': lower-case alphabets and capitals
        * 'lower': lower-case alphabets
        * 'numeric': integers
        * 'upper': capitals
    '''
    # NOTE: the parameter name `type` shadows the builtin, but it is
    # kept for backward compatibility with keyword callers.
    import string
    assert type in ['alnum', 'alpha', 'lower', 'numeric', 'upper'], \
        "type must be one of 'alnum', 'alpha', 'lower', 'numeric', or " +\
        "'upper', not " + str(type) + "."
    # Use the stdlib character constants instead of hand-typed lists
    alphabets_upper = list(string.ascii_uppercase)
    alphabets_lower = list(string.ascii_lowercase)
    integers = list(string.digits)
    support =\
        alphabets_upper + alphabets_lower + integers if type == 'alnum' \
        else alphabets_upper + alphabets_lower if type == 'alpha' \
        else alphabets_lower if type == 'lower' \
        else integers if type == 'numeric' \
        else alphabets_upper
    def dchar(x):
        # Uniform pmf over the chosen character support
        return dpmf(
            x,
            [1 / len(support)] * len(support),
            support
        )
    return ''.join(rpmf(n, dchar, support))
def rpmf(n, pmf, support, **kwargs):
    '''(int, function, *iterable[, **kwargs]) -> np.array

    Precondition: 
    1. n >= 1
    2. support is the support of pmf.

    Return n random samples from the specified pmf with support 'support'
    and additional arguments of pmf in **kwargs if required. Since this
    function uses **kwargs, any additional arguments of pmf you want to
    specify must be named.

    >>> # Example 1: dX
    >>> np.random.seed(1024)
    >>> rpmf(n = 20, pmf = dX, support = np.arange(0, 6))
    array([5, 5, 5, 5, 5, 5, 1, 0, 1, 5, 5, 5, 5, 3, 5, 5, 5, 2, 5, 1])
    >>>
    >>> # Example 2: S_Y = Y_1 + ... + Y_N
    >>> np.random.seed(1024)
    >>> # recall dY in csum_N_pois example
    >>> result_S_Y = csum_N_pois(dY, np.arange(0, 5), 3)
    >>> result_S_Y = result_S_Y / sum(result_S_Y)
    >>> M_S_Y = len(result_S_Y)
    >>> rpmf(10, dpmf, np.arange(0, M_S_Y), pmf_vec = result_S_Y)
    array([ 8, 22,  6,  8,  7,  9,  2,  0,  2,  9])
    >>>
    >>> # Example 3: dfruit in dpmf example
    >>> np.random.seed(2048)
    >>> rpmf(7, dfruit, ['apple', 'orange', 'neither'])
    array(['orange', 'apple', 'neither', 'neither', 'neither', 'orange',
        'apple'], dtype='<U7')
    '''
    cmf_vec = np.append(0, np.cumsum(pmf(support, **kwargs)))
    unif_01 = np.random.random(n)
    # Invert the cmf with a binary search instead of the original
    # O(n * M) double loop: searchsorted(..., side = 'right') - 1 gives
    # the j with cmf_vec[j] <= u < cmf_vec[j + 1], i.e. the same j the
    # linear scan selected (zero-width intervals can never satisfy the
    # strict upper inequality, in either formulation).
    # Draws at or beyond cmf_vec[-1] -- possible when the pmf sums to
    # slightly less than 1 in floating point -- are dropped, matching
    # the previous behavior of appending nothing for such draws.
    valid = unif_01 < cmf_vec[-1]
    positions = np.searchsorted(cmf_vec, unif_01[valid], side = 'right') - 1
    return np.array(support)[positions]
# In development
def anova(*args):
    '''(sm.GLMResultsWrappers) -> pd.DataFrame

    Precondition: at least two models are given.

    Return the LRT results of models given to *args. If more than two
    models are given, then sequential LRT results are returned. Each
    model only needs .df_resid and .deviance attributes.
    '''
    result = {
        'Resid. Df': [],
        'Resid. Dev': [],
        'Df': [''],
        'Deviance': [''],
        'Pr(>Chi)': ['']
    }
    models = [*args]
    # Require >= 2 (the old `!= 1` check let zero models through and
    # produced a malformed DataFrame with unequal column lengths)
    assert len(models) >= 2, \
        'Functionality not yet available for only one model; ' +\
        'need at least two.'
    for mod in models:
        result['Resid. Df'].append(mod.df_resid)
        result['Resid. Dev'].append(mod.deviance)
    # Compare each consecutive pair of models with a LRT
    mod_pairs =\
        [tuple(models[i:(i + 2)]) for i in range(len(models) - 1)]
    for mod0, mod1 in mod_pairs:
        df_diff = mod0.df_resid - mod1.df_resid
        dev_diff = mod0.deviance - mod1.deviance
        result['Df'].append(df_diff)
        result['Deviance'].append(dev_diff)
        # Deviance difference compared to a chi-squared(df_diff) tail
        result['Pr(>Chi)'].append(1 - chi2.cdf(dev_diff, df = df_diff))
    return pd.DataFrame(result)
def classify_terbin(mod_terbin, data):
    '''(return value of terbin_model(), pd.DataFrame)
        -> {str: np.array and/or str: pd.DataFrame}

    Compute the probability for each observations of data, and classify
    according to mod_terbin. If data also contains the binary response
    column, the returned dict additionally holds per-class case counts
    and accuracy.
    '''
    # Check: does data have all features of mod_ternary and mod_binary?
    data_cols = data.columns
    ter_features = mod_terbin['mod_ternary'][2].columns
    bin_response = mod_terbin['mod_binary'][1].name
    bin_features = mod_terbin['mod_binary'][2].columns
    assert set(ter_features).issubset(set(data_cols)), \
        'data does not have all the features of mod_ternary. ' +\
        'The following are missing: ' +\
        str(list(set(ter_features).difference(set(data_cols))))
    assert set(bin_features).issubset(set(data_cols)), \
        'data does not have all the features of mod_binary. ' +\
        'The following are missing: ' +\
        str(list(set(bin_features).difference(set(data_cols))))
    # Check: does data have a binary response column?
    # If no, just return the classification result.
    # If yes, then return classification result and case counts
    data_has_bin_response = bin_response in data.columns
    # Predict types: fn, fp, or tpn (argmax over the ternary model's
    # class probabilities, in that column order)
    types = Pipe(lambda row: ['fn', 'fp', 'tpn'][np.argmax(row)])\
        .pipe(
            map, 
            mod_terbin['mod_ternary'][0]\
                .predict(data[ter_features])\
                .values
        )\
        .pipe(list)\
        .pipe(np.array)\
        .collect()
    # Predict probabilities
    probs = mod_terbin['mod_binary'][0].predict(data[bin_features]).values
    # Classify using different probability thresholds per type
    types_probs = np.array(list(zip(types, probs)))
    p_threses = {
        'fn': mod_terbin['p_threses'][0], 
        'tpn': mod_terbin['p_threses'][1],
        'fp': mod_terbin['p_threses'][2]
    }
    result = np.array(list(map(
        lambda row: float(row[1]) > p_threses[row[0]], 
        types_probs
    )))
    result = np.array(list(map(int, result)))
    if not data_has_bin_response:
        return {
            'predicted_types': types, 
            'result': result, 
            'p_threses': mod_terbin['p_threses']
        }
    else:
        # Confusion-matrix tallies against the actual binary response
        actuals = data[bin_response].values
        total_neg = np.sum(actuals == 0)
        total_pos = len(actuals) - total_neg
        tn = sum((actuals == 0) & (result == 0))
        fp = total_neg - tn
        tp = sum((actuals == 1) & (result == 1))
        fn = total_pos - tp
        case_counts = pd.DataFrame({
            'class': [0, 0, 1, 1],
            'classified': [0, 1, 0, 1],
            'class_total': [total_neg, total_neg, total_pos, total_pos],
            'counts': [tn, fp, fn, tp]
        })
        # Per-class proportions and overall accuracy
        case_counts['perc'] =\
            np.array([tn, fp, fn, tp]) /\
            np.array([total_neg, total_neg, total_pos, total_pos])
        accuracy = (tp + tn) / (total_pos + total_neg)
        return {
            'predicted_types': types, 
            'result': result, 
            'counts': case_counts,
            'accuracy': accuracy,
            'p_threses': mod_terbin['p_threses']
        }
def count_cases(mod, data, train = None, p_thres = None, criterion = None):
    '''(sm.GLMResultsWrapper or return value of terbin_model(),
        pd.DataFrame,
        [pd.DataFrame , float, [str, number]])
            -> {str: pd.DataFrame or float or np.array}
    Precondition:
        1. response of mod consists of 0s and 1s.
        2. data contains the response column specified by mod
        3. data contains all or more feature columns of mod, including the
            intercept if applicable.
        4. train must be specified if mod is of class GLMResultsWrapper
        5. 0 < p_thres < 1
    Count the number of true negatives, false positives, false negatives,
    and true positives in data classified by mod and p_thres; train must
    be the dataset that is used to fit mod. If p_thres is None, then
    it uses the probability threshold that yields the minimum distance
    between the ROC curve and the point (fpr, tpr) = (0, 1); if p_thres is
    specified, then criterion (used as an argument of get_p_thres()) is
    ignored. If mod is not of class sm.GLMResultsWrapper, then every
    argument except mod and data are ignored.
    Returns a dict with keys 'counts' (pd.DataFrame confusion-matrix
    table), 'accuracy' (float), and the probability threshold(s) used
    ('p_thres' in the GLM branch, 'p_threses' in the terbin branch).
    '''
    if 'GLMResultsWrapper' in str(type(mod)):
        assert train is not None, \
            'If a given mod is of class GLMResultsWrapper, then ' +\
            'train must be specified.'
        # Get the (binary) response column; len('Dep. Variable') == 14
        # NOTE(review): the response name is scraped from the text of
        # mod.summary(); this relies on statsmodels' summary layout —
        # confirm it still holds for the statsmodels version in use.
        summary_str = str(mod.summary())
        response = summary_str[
            summary_str.index('Dep. Variable'):\
            summary_str.index('No. Observations:')
        ].strip()
        response = response[14:].strip()
        # Checks
        all_features_of_data = set(data.columns)
        assert response in all_features_of_data, \
            'data does not have the response: "' + response + '".'
        all_features_of_data.remove(response) # leave only predictors
        mod_features = mod.cov_params().columns
        mod_features_set = set(mod_features)
        assert mod_features_set.issubset(all_features_of_data), \
            'data does not have all the features used in mod; data ' +\
            'requires the following: {0}'\
            .format(
                list(mod_features_set.difference(all_features_of_data))
            )
        mod_features = list(mod_features)
        # Compute p_thres if not specified
        actuals = data[response].values
        preds = mod.predict(data[mod_features]).values
        if p_thres is None: # p_thres must come from train, not data
            roc_tbl = produce_roc_table(mod, train, response)
            p_thres = get_p_thres(roc_tbl, criterion)
        # Binarize predicted probabilities at p_thres.
        classifieds = preds > p_thres
        classifieds = np.array(list(map(int, classifieds)))
        # Binary classification result: 2x2 confusion-matrix counts.
        total_neg = np.sum(actuals == 0)
        total_pos = len(actuals) - total_neg
        tn = sum((actuals == 0) & (classifieds == 0))
        fp = total_neg - tn
        tp = sum((actuals == 1) & (classifieds == 1))
        fn = total_pos - tp
        result = pd.DataFrame({
            'class': [0, 0, 1, 1],
            'classified': [0, 1, 0, 1],
            'class_total': [total_neg, total_neg, total_pos, total_pos],
            'counts': [tn, fp, fn, tp]
        })
        # Per-class proportions (tn/fp over negatives, fn/tp over positives).
        result['perc'] =\
            np.array([tn, fp, fn, tp]) /\
            np.array([total_neg, total_neg, total_pos, total_pos])
        accuracy = (tp + tn) / (total_pos + total_neg)
        return {
            'counts': result,
            'accuracy': accuracy,
            'p_thres': p_thres
        }
    else:
        # terbin branch: delegate to classify_terbin and strip the raw
        # per-observation result, keeping only the summary.
        result = classify_terbin(mod, data)
        del result['result']
        return result
def drop1(mod, train, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame) -> pd.DataFrame
    Conduct a LRT of mod minus one feature vs. mod for every feature
    used in mod, trained by train.
    The returned table mirrors R's drop1(): the first row ('<none>') is
    the full model, and each following row reports the deviance, AIC,
    LRT statistic, p-value and a significance code for the model refit
    without that one feature.
    '''
    response = get_response(mod)
    assert response in train.columns, \
        'response "' + response + '" does not exist in train. Needs one.'
    # Locate the intercept: the first feature column that is all 1s.
    int_name = ''
    all_features = list(mod.conf_int().index)
    for col in all_features:
        if (train[col] == 1).all():
            int_name += col
            break
    assert int_name != '', \
        'An intercept column does not exist in train. Needs one.'
    all_features_minus_int = all_features[:]
    all_features_minus_int.remove(int_name)
    # Row 0 describes the full model; per-feature rows are appended below.
    result = {
        'Removed': ['<none>'],
        'Df': [''],
        'Deviance': [mod.deviance],
        'AIC': [mod.aic],
        'LRT': [''],
        'Pr(>Chi)': [''],
        '': ['']
    }
    for item in all_features_minus_int:
        # Refit with every feature except `item` (intercept always kept).
        afmi = all_features_minus_int[:]
        afmi.remove(item)
        if show_progress:
            print('LRT: mod - {0} vs. mod'.format(item))
        mod_minus1_features = [int_name] + afmi
        mod_1dropped = sm.GLM(
            train[response],
            train[mod_minus1_features],
            family = sm.families.Binomial()
        )\
        .fit()
        # NOTE(review): the columns read from aov assume the layout of
        # this module's anova() table — confirm if anova() changes.
        aov = anova(mod_1dropped, mod)
        result['Removed'].append(item)
        result['Df'].append(aov['Df'][1])
        result['Deviance'].append(aov['Resid. Dev'][0])
        result['AIC'].append(mod_1dropped.aic)
        result['LRT'].append(aov['Deviance'][1])
        p_val = aov['Pr(>Chi)'][1]
        result['Pr(>Chi)'].append(p_val)
        # R-style significance codes: *** .001, ** .01, * .05, . .1
        sig = ''
        if p_val <= .001:
            sig += '***'
        elif p_val <= .01:
            sig += '** '
        elif p_val <= .05:
            sig += '* '
        elif p_val <= .1:
            sig += '. '
        result[''].append(sig)
    return pd.DataFrame(result)
def model_by_lrt(mod, train, pval_thres = .05, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, float, bool])
        -> sm.GLMResultsWrapper
    Precondition: 0 < pval_thres < 1
    Sequentially remove a feature that has a maximum p-value from
    drop1(mod, train), trained by train, until every feature has a
    p-value less than pval_thres. Return sm.GLMResultsWrapper object
    that only contains such features. Set show_progress = True to see
    the removal process.
    '''
    assert 0 < pval_thres < 1, \
        'pval_thres argument must be between 0 and 1, not ' +\
        str(pval_thres) + '.'
    response = get_response(mod)
    assert response in train.columns, \
        'response "' + response + '" does not exist in train. Needs one.'
    features = list(mod.conf_int().index)
    drop1_result = drop1(mod, train, show_progress)
    # Row 0 of the drop1 table is the '<none>' (full-model) row; skip it.
    not_all_less_than_thres =\
        not (drop1_result.iloc[1:, :]['Pr(>Chi)'] < pval_thres).all()
    if not not_all_less_than_thres:
        # Every feature is already significant; nothing to remove.
        return mod
    i = 0
    while not_all_less_than_thres:
        i += 1
        # Drop the least significant feature (largest p-value) first.
        ordered = drop1_result.iloc[1:, :]\
            .sort_values('Pr(>Chi)', ascending = False)
        to_remove = ordered['Removed'].values[0]
        pval_of_removed = ordered['Pr(>Chi)'].values[0]
        if show_progress:
            msg = 'Iteration {0}: removed {1} (p-val: {2})'
            msg = msg.format(i, to_remove, pval_of_removed)
            print(msg)
        features.remove(to_remove)
        mod_new = sm.GLM(
            train[response],
            train[features],
            family = sm.families.Binomial()
        )\
        .fit()
        if show_progress:
            # LRT of the reduced model vs. the original model.
            print(anova(mod_new, mod))
        drop1_result =\
            drop1(mod_new, train[[response] + features], show_progress)
        not_all_less_than_thres =\
            not (drop1_result.iloc[1:, :]['Pr(>Chi)'] < pval_thres).all()
    return mod_new
def model_by_vif(mod, train, vif_thres = 5, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, float, bool])
        -> {str: sm.GLMResultsWrapper and str: {str: float}}
    Precondition: vif_thres > 0
    Sequentially remove a feature that has a maximum VIF from mod,
    trained by train, until every feature has a VIF less than vif_thres.
    Return sm.GLMResultsWrapper object that only contains such features.
    Set show_progress = True to see the removal process.
    The result dict holds the refit model under 'model' and the final
    per-feature VIFs (intercept excluded) under 'vifs'.
    '''
    assert vif_thres > 0, \
        "vif_thres argument must be positive, not " + str(vif_thres) + "."
    # Remove response
    response = get_response(mod)
    all_cols = list(train.columns)
    if response in all_cols:
        all_cols.remove(response)
        X = train.loc[:, all_cols]
    else:
        X = train
    # Let Intercept be the first predictor
    int_name = ''
    for c in all_cols:
        if (X[c].values == 1).all(): # Try to find Intercept
            int_name += c
            break
    if int_name == '': # Intercept column doesn't exist; make one
        int_name += 'Intercept'
        assert int_name not in X.columns, \
            '"Intercept", the column in train that ' +\
            'is NOT the column of 1s and yet uses the name ' +\
            '"Intercept", already exists in train. User inspection ' +\
            'is required.'
        # NOTE(review): X may be a view of train here, so this assignment
        # can trigger pandas' SettingWithCopyWarning — confirm intended.
        X[int_name] = 1
        all_cols2 = [int_name]
        all_cols2.extend(all_cols)
        all_cols = all_cols2
    X = X.loc[:, all_cols]
    all_cols.remove(int_name)
    # X = train minus response
    #     i.e. X.columns = [Intercept, *features]
    # all_cols: train.columns minus response minus Intercept
    #     i.e. all_cols = [*features]
    # Column j = 0 is the intercept, hence range(1, ...) below.
    vifs = dict(zip(
        (c for c in all_cols),
        (variance_inflation_factor(X.values, j) \
            for j in range(1, X.values.shape[1])) # except Intercept
    ))
    not_all_vifs_less_than_thres =\
        not (np.array(list(vifs.values())) < vif_thres).all()
    i = 0
    while not_all_vifs_less_than_thres:
        i += 1
        # Find (the first) feature with the maximum VIF and drop it.
        current_max = max(vifs.values())
        k_to_remove = ''
        for k, v in vifs.items():
            if v == current_max:
                k_to_remove += k
                break
        v_removed = vifs.pop(k_to_remove) # same as current_max
        if show_progress:
            msg = 'Iteration {0}: removed {1} (VIF: {2})'\
                .format(i, k_to_remove, v_removed)
            print(msg)
        del X[k_to_remove]
        all_cols.remove(k_to_remove)
        # VIFs must be recomputed from scratch after every removal.
        vifs = dict(zip(
            (c for c in all_cols),
            (variance_inflation_factor(X.values, j) \
                for j in range(1, X.values.shape[1]))
        ))
        not_all_vifs_less_than_thres =\
            not (np.array(list(vifs.values())) < vif_thres).all()
    features = [int_name]
    features.extend(all_cols)
    if show_progress:
        msg2 = 'Features used: {0}'.format(features)
        print(msg2)
    # Refit the binomial GLM on the surviving features only.
    mod_reduced =\
        sm.GLM(
            train[response],
            train.loc[:, features],
            family = sm.families.Binomial()
        )\
        .fit()
    return {'model': mod_reduced, 'vifs': vifs}
def model_matrix(data, formula):
    '''(pd.DataFrame, str) -> pd.DataFrame
    Design data according to formula: build the patsy design matrices
    and return a single DataFrame whose first column is the response,
    followed by the (named) feature columns.
    '''
    def as_frame(dmat):
        # patsy design matrices carry their column names in design_info.
        return pd.DataFrame(dmat, columns = dmat.design_info.column_names)
    y, X = dmatrices(formula, data)
    y = as_frame(y)
    X = as_frame(X)
    X.insert(0, y.columns[0], y)
    return X
def mutate(data, colname, lambd = None, lambd_df = None):
    '''(pd.DataFrame, str[, (str, function), function]) -> pd.DataFrame
    Return a copy of `data` with a new column `colname` appended.
    `lambd` is a (source_column, function) pair: the function is applied
    element-wise to that column. `lambd_df` is a function applied to the
    whole DataFrame instead. Exactly one of the two must be given; when
    both are given, `lambd` wins and `lambd_df` is ignored. The input
    DataFrame is never modified.
    '''
    assert not (lambd is None and lambd_df is None), \
        'Either one of lambd or lambd_df has to be specified.'
    out = data.copy()
    if lambd is not None:
        src_col, fn = lambd
        out[colname] = out[src_col].apply(fn)
    else:
        out[colname] = lambd_df(out)
    return out
def plot_rl(mod, num_breaks = None, breaks = None,
            xlab = 'Linear predictor',
            ylab = 'Deviance residuals'):
    '''(sm.GLMResultsWrapper[, int, np.array, str, str]) -> None
    Plot the means of grouped residuals vs. linear predictors of mod.
    Specify `num_breaks` to divide linear predictors into that many
    quantile-based intervals (roughly equal numbers of observations per
    bin, not equal width).
    Specify `breaks` to have different bins for linear predictors;
    `num_breaks` is ignored if `breaks` is specified.
    '''
    # mod.predict() returns fitted probabilities; map them back to the
    # linear-predictor scale via the logit link.
    logit = lambda p: np.log(p / (1 - p))
    residuals = mod.resid_deviance
    linpred = logit(mod.predict())
    if breaks is None:
        if num_breaks is None:
            # Default: aim for roughly 50 observations per bin.
            num_breaks = int(len(residuals) / 50)
        # np.unique drops duplicate quantile edges (ties in linpred).
        breaks = np.unique(
            np.quantile(linpred, np.linspace(0, 1, num = num_breaks + 1))
        )
    bins = pd.cut(linpred, breaks)
    df = pd.DataFrame(
        {'residuals': residuals, 'linpred': linpred, 'bins': bins}
    )
    # One point per bin: mean residual vs. mean linear predictor.
    df = df.groupby('bins')\
        .agg(
            residuals = ('residuals', 'mean'),
            linpred = ('linpred', 'mean')
        )
    plt.scatter(df['linpred'], df['residuals'])
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.show()
def simulate_nan(X, nan_rate):
    '''(np.array, number) -> {str: np.array or number}
    Preconditions:
        1. np.isnan(X).any() == False
        2. 0 <= nan_rate <= 1
        3. X has a float dtype (np.nan cannot be stored otherwise).
    Return the dictionary with four keys where:
    - Key 'X' stores a np.array where some of the entries in X
      are replaced with np.nan based on nan_rate specified.
    - Key 'C' stores a np.array where each entry is False if the
      corresponding entry in the key 'X''s np.array is np.nan, and True
      otherwise.
    - Key 'nan_rate' stores nan_rate specified.
    - Key 'nan_rate_actual' stores the actual proportion of np.nan
      in the key 'X''s np.array.
    Every row of the result keeps at least one observed (non-nan) entry.
    '''
    X_complete = X.copy()
    nr, nc = X_complete.shape
    # C[i, j] is True iff entry (i, j) stays observed (mask matrix).
    C = np.random.random((nr, nc)) > nan_rate
    # Rows where every component would become missing.
    all_missing_rows = np.where(C.sum(axis=1) == 0)[0]
    for index in all_missing_rows:
        # Randomly "revive" a uniform number of components in this row;
        # max(1, ...) guards the (measure-zero) case random() == 0.0.
        n_revive = max(1, int(np.ceil(nc * np.random.random())))
        reviving_components = np.random.choice(nc, n_revive, replace = False)
        C[index, reviving_components] = True
    X_complete[~C] = np.nan
    return {
        'X': X_complete,
        'C': C,
        'nan_rate': nan_rate,
        'nan_rate_actual': (~C).sum() / (nr * nc)
    }
def terbin_model(mod, train, p_thres = None, criterion = None,
                 ter_features = None, train_ter = None, **kwargs):
    '''(sm.GLMResultsWrapper, pd.DataFrame
        [, number, (str, float), [str], pd.DataFrame,
        arguments to sm.MNLogit.fit(...)])
            -> {str: results}
    Precondition:
        1. mod is fitted using train.
        2. train contains the response column specified in mod.summary().
        3. 0 < p_thres < 1
        4. set(ter_features).issubset(set(train.columns)) if train_ter is None\
            else set(ter_features).issubset(set(train_ter.columns))
    Fit a compounded model, or a terbin (ternary-binary) model, based on
    mod and train.
    * If p_thres is None, then it uses the probability threshold that
      yields the minimum distance between the ROC curve and the point
      (fpr, tpr) = (0, 1); if p_thres is specified, then criterion
      (used as an argument of get_p_thres()) is ignored.
    * Specify ter_features to fit a multinomial logit model using those
      features. If not specified, then the same formula as mod is used.
    * If train_ter is specified, then this training set is used to fit a
      multinomial logit model. If not specified, then train works as
      train_ter.
    '''
    # Get the (binary) response column; len('Dep. Variable') == 14
    response = get_response(mod)
    # Checks
    all_features_of_train = set(train.columns)
    assert response in all_features_of_train, \
        'train does not have the response "' + response + '" specified ' +\
        'in mod.'
    all_features_of_train.remove(response) # leave only predictors
    mod_features = mod.cov_params().columns # features used in mod
    mod_features_set = set(mod_features)
    assert mod_features_set.issubset(all_features_of_train), \
        'train does not have all the features used in mod; train ' +\
        'requires the following: {0}'\
        .format(list(mod_features_set.difference(all_features_of_train)))
    mod_features = list(mod_features)
    if ter_features is not None:
        if train_ter is None:
            assert set(ter_features).issubset(set(train.columns)), \
                'ter_features must be a subset of train.columns if ' +\
                'train_ter is not specified. train.columns requires ' +\
                'the following: ' +\
                str(list(set(ter_features).difference(set(train.columns))))
        else:
            assert set(ter_features).issubset(set(train_ter.columns)), \
                'ter_features must be a subset of train_ter.columns if ' +\
                'both train_features and train_ter are specified. ' +\
                'train_ter.columns requires the following: ' +\
                str(list(set(ter_features).difference(set(train_ter.columns))))
    else:
        ter_features = mod_features
    train_ter = train if train_ter is None else train_ter
    # Compute p_thres if not specified
    if p_thres is None:
        roc_tbl = produce_roc_table(mod, train)
        p_thres = get_p_thres(roc_tbl, criterion)
    # Ternary model: each observation is labeled 'fn', 'fp' or 'tpn' by
    # comparing mod's classification at p_thres with the actual response.
    actuals = train[response].values
    preds = mod.predict(train[mod_features]).values
    response_ter = determine_type(actuals, preds, p_thres)
    mod_ter =\
        sm.MNLogit(response_ter, train_ter[ter_features])\
        .fit(**kwargs)
    # Get p_thres_fn and p_thres_fp: type-specific thresholds taken as
    # hard-coded .1 / .9 quantiles of the predicted probabilities within
    # the 'fn' / 'fp' groups respectively.
    p_thres_fn = np.quantile(
        mod.predict(train.loc[response_ter == 'fn', mod_features]),
        .1
    )
    p_thres_fp = np.quantile(
        mod.predict(train.loc[response_ter == 'fp', mod_features]),
        .9
    )
    return {
        'mod_ternary': [mod_ter, response_ter, train_ter[ter_features]],
        'mod_binary': [mod, train[response], train[mod_features]],
        'p_threses': np.array([p_thres_fn, p_thres, p_thres_fp])
    }
| 3,133 | 0 | 307 |
d464ec8b2a69b1c7cc0adb6879fa792dc9074552 | 7,044 | py | Python | wsserver.py | stanleykao72/Deepfake-Detection | 417de0a0c7756397cf3a611b26008b7ed64727e9 | [
"Apache-2.0"
] | 1 | 2020-09-30T09:33:28.000Z | 2020-09-30T09:33:28.000Z | wsserver.py | stanleykao72/Deepfake-Detection | 417de0a0c7756397cf3a611b26008b7ed64727e9 | [
"Apache-2.0"
] | null | null | null | wsserver.py | stanleykao72/Deepfake-Detection | 417de0a0c7756397cf3a611b26008b7ed64727e9 | [
"Apache-2.0"
] | null | null | null | import asyncio
import os
import secrets
import weakref
import aiohttp.web
from aiohttp import web
import aioredis
from aiohttp import WSCloseCode
import json
from detector_inference import detector_inference
import logging
LOGGING_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
DATE_FORMAT = '%Y%m%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT, datefmt=DATE_FORMAT)
logger = logging.getLogger(__name__)
WS_DOMAIN = os.getenv("WS_DOMAIN", "localhost")
WS_HOST = os.getenv("WS_HOST", "0.0.0.0")
WS_PORT = int(os.getenv("WS_PORT", 9999))
routes = web.RouteTableDef()
# app.router.add_get("/ws/{channel_id}", ws_handler)
@routes.get('/ws/{channel_id}')
# app.router.add_get("/api/rtm.connect", wsticket_handler)
@routes.get('/api/rtm.connect')
# app.router.add_post("/api/rtm.push/{channel_id}", wspush_handler)
@routes.post('/api/rtm.push/{channel_id}')
@aiohttp.web.middleware
if __name__ == "__main__":
main()
| 35.575758 | 167 | 0.642675 | import asyncio
import os
import secrets
import weakref
import aiohttp.web
from aiohttp import web
import aioredis
from aiohttp import WSCloseCode
import json
from detector_inference import detector_inference
import logging
# Module-wide logging setup.
LOGGING_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
DATE_FORMAT = '%Y%m%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT, datefmt=DATE_FORMAT)
logger = logging.getLogger(__name__)
# Server binding/advertising configuration, overridable via environment.
WS_DOMAIN = os.getenv("WS_DOMAIN", "localhost")
WS_HOST = os.getenv("WS_HOST", "0.0.0.0")
WS_PORT = int(os.getenv("WS_PORT", 9999))
# Route table populated by the decorated handlers below.
routes = web.RouteTableDef()
# app.router.add_get("/ws/{channel_id}", ws_handler)
@routes.get('/ws/{channel_id}')
async def ws_handler(request):
    """Accept a websocket connection for `channel_id` and service it.

    Registers the connection in app["websockets"], then concurrently
    consumes jobs from the socket and forwards Redis pub/sub messages
    to it; the registration is removed once either task finishes.
    """
    channel_id = request.match_info.get("channel_id")
    ws = aiohttp.web.WebSocketResponse()
    await ws.prepare(request)
    # Keep a reference to the live connection object in process memory.
    request.app["websockets"][channel_id] = ws
    logger.info(f"Client connected: {channel_id}")
    try:
        await asyncio.gather(
            listen_to_websocket(ws, request.app, channel_id), listen_to_redis(request.app, channel_id), return_exceptions=True
        )
    finally:
        request.app["websockets"].pop(channel_id, None)
        logger.info("Websocket connection closed")
# app.router.add_get("/api/rtm.connect", wsticket_handler)
@routes.get('/api/rtm.connect')
async def wsticket_handler(request):
    """Generate a fresh channel id and reply with its websocket URL."""
    # Authentication could be checked here; on success a short-lived
    # ticket would be generated and stored (auth cache/database) for
    # other services to look up.
    # Finally reply with the time-limited websocket url.
    channel_id = secrets.token_urlsafe(32)
    logger.info(f"/api/rtm.connect: channel_id -> {channel_id}")
    ws_url = (
        f"ws://{WS_DOMAIN}:{WS_PORT}/ws/{channel_id}"
    )  # should be encrypted wss in a production environment
    logger.info(f"/api/rtm.connect: ws_url -> {ws_url}")
    return aiohttp.web.json_response({"url": ws_url, "channel_id": channel_id})
# app.router.add_post("/api/rtm.push/{channel_id}", wspush_handler)
@routes.post('/api/rtm.push/{channel_id}')
async def wspush_handler(request):
    """Publish the POSTed body onto Redis channel ``ch:<channel_id>``."""
    channel_id = request.match_info.get("channel_id")
    ch_name = f"ch:{channel_id}"
    data = await request.text()
    # NOTE(review): the acquired connection is never released back to
    # the pool in this handler — confirm whether this leaks connections.
    conn = await request.app["redis_pool"].acquire()
    await conn.execute("publish", ch_name, data)
    logger.info("#" * 50)
    logger.info(f"Message pushed to {channel_id} data is: {data}")
    logger.info("#" * 50)
    # Raising HTTPOk ends the handler with a 200 response in aiohttp.
    raise aiohttp.web.HTTPOk()
async def listen_to_websocket(ws, app, channel_id):
    """Consume inference jobs from the websocket until the client leaves.

    Each incoming message is a JSON object describing a deepfake-detection
    job; the job is run via detector_inference() and 'Done!!' is sent
    back on the channel's registered websocket when it finishes. Returns
    the websocket so asyncio.gather() callers receive it as the result.
    """
    try:
        async for msg in ws:
            # One JSON job description per message.
            params = json.loads(msg.data)
            model_name = params["model_name"]
            video_path = params["video_path"]
            model_path = params["model_path"]
            output_path = params["output_path"]
            threshold = params['threshold']
            cam = params['cam']
            start_frame = params["start_frame"]
            end_frame = params["end_frame"]
            cuda = params["cuda"]
            chat_id = params["chat_id"]        # currently unused; kept so a missing key still fails fast
            message_id = params["message_id"]  # currently unused; kept so a missing key still fails fast
            predit_video = params["predit_video"]
            cam_video = params["cam_video"]
            cam_model = params["cam_model"]
            await detector_inference(model_name, video_path, model_path,
                                     output_path, threshold, cam, cam_model,
                                     predit_video, cam_video, start_frame,
                                     end_frame, cuda)
            # Notify the client over its registered websocket.
            ws_client = app["websockets"][channel_id]
            logger.info(f'ws_client in websocket: {ws_client}')
            await ws_client.send_str('Done!!')
            logger.info("#" * 50)
    except Exception:
        # The previous ``finally: return ws`` silently discarded every
        # exception (including task cancellation); log failures instead
        # while still keeping the listener from crashing the handler.
        logger.exception("Error while processing websocket messages")
    return ws
async def listen_to_redis(app, channel_id):
    """Forward every message published on Redis channel ``ch:<channel_id>``
    to the corresponding websocket until the channel stops delivering.
    """
    conn = await app["redis_pool"].acquire()
    ch_name = f"ch:{channel_id}"
    try:
        await conn.execute_pubsub("subscribe", ch_name)
        channel = conn.pubsub_channels[ch_name]
        logger.info(f"Channel created: {ch_name}")
        ws = app["websockets"][channel_id]
        logger.info(f'ws in redis: {ws}')
        # Relay each pub/sub payload verbatim to the websocket client.
        while await channel.wait_message():
            msg = await channel.get(encoding="utf-8")
            logger.info(f'print redis msg:{msg}')
            await ws.send_str(msg)
        await conn.execute_pubsub("unsubscribe", ch_name)
    except Exception:
        # logger.info(e) dropped the traceback; log it with full context.
        logger.exception("Redis listener for %s failed", ch_name)
async def on_startup(app):
    """Create the Redis connection pool before the app starts serving."""
    address = ('localhost', 6379)
    encoding = 'utf-8'
    app["redis_pool"] = await aioredis.create_pool(
        address=address,
        encoding=encoding,
        create_connection_timeout=1.5,
        minsize=1,
        maxsize=1000,
    )
async def on_shutdown(app):
    """Close all websockets and the Redis connections on shutdown."""
    # NOTE(review): pool.close() is never called before wait_closed();
    # confirm the pool is actually being shut down here.
    await app["redis_pool"].wait_closed()
    for ws in app["websockets"].values():
        await ws.close(code=WSCloseCode.GOING_AWAY, message="Server shutdown")
@aiohttp.web.middleware
async def auth_middleware(request, handler):
    """Token-authentication hook for /api/ routes.

    Currently a stub: it only logs and always lets the request through.
    """
    if request.path.startswith("/api/"):
        logger.info(f"check token....for {handler}")
        pass
    # Token verification would go here; on verification failure:
    # raise aiohttp.web.HTTPForbidden()
    return await handler(request)
def main():
    """Wire up routes and lifecycle hooks, then run the aiohttp server."""
    app = aiohttp.web.Application(middlewares=[auth_middleware])
    # Reference dict mapping each {channel_id} to its live ws connection;
    # WeakValueDictionary lets closed connections be garbage-collected.
    app["websockets"] = weakref.WeakValueDictionary()
    app.add_routes(routes)
    app.on_startup.append(on_startup)
    app.on_shutdown.append(on_shutdown)
    aiohttp.web.run_app(app, host=WS_HOST, port=WS_PORT)
# Script entry point.
if __name__ == "__main__":
    main()
| 6,126 | 0 | 203 |
28fad9c62f12f1efa9baf4ec914dbb8dd458948e | 3,337 | py | Python | xai_court/interpret/saliency_interpreters/attention.py | michaeljneely/court-of-xai | 37eded49f46b3a05ad56986c1a9bb22eee3ac4b1 | [
"MIT"
] | 4 | 2021-05-07T09:40:11.000Z | 2022-03-27T18:19:07.000Z | xai_court/interpret/saliency_interpreters/attention.py | michaeljneely/court-of-xai | 37eded49f46b3a05ad56986c1a9bb22eee3ac4b1 | [
"MIT"
] | 1 | 2021-05-10T09:31:05.000Z | 2021-05-10T09:31:05.000Z | xai_court/interpret/saliency_interpreters/attention.py | michaeljneely/court-of-xai | 37eded49f46b3a05ad56986c1a9bb22eee3ac4b1 | [
"MIT"
] | 1 | 2021-06-06T18:45:39.000Z | 2021-06-06T18:45:39.000Z | from typing import List, Dict, Iterable, Optional, Type, Union
import logging
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from xai_court.config import Config
from xai_court.models.modules.attention.attention import AttentionAnalysisMethods, AttentionAggregator
class AttentionModelPredictor():
"""
Interface for predictors with models that are to be interpreted through their attention mechanism.
"""
def get_attention_based_salience_for_instance(
self,
labeled_instance: Instance,
analysis_method: AttentionAnalysisMethods,
aggregate_method: AttentionAggregator
) -> Dict[str, Iterable[float]]:
"""
Returns a dictionary with for each TextField in the instance, an iterable with the attention paid
to the tokens in that field.
"""
raise NotImplementedError()
def get_suitable_aggregators(self) -> Iterable[Type[Union[None, AttentionAggregator]]]:
"""
Returns one or more suitable aggregator types, if no aggregation is necessary the iterable
should include NoneType.
"""
raise NotImplementedError()
@SaliencyInterpreter.register("attention-interpreter")
| 37.920455 | 120 | 0.697333 | from typing import List, Dict, Iterable, Optional, Type, Union
import logging
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from xai_court.config import Config
from xai_court.models.modules.attention.attention import AttentionAnalysisMethods, AttentionAggregator
class AttentionModelPredictor():
    """
    Interface for predictors with models that are to be interpreted through their attention mechanism.
    Implementations must override both methods below.
    """
    def get_attention_based_salience_for_instance(
        self,
        labeled_instance: Instance,
        analysis_method: AttentionAnalysisMethods,
        aggregate_method: AttentionAggregator
    ) -> Dict[str, Iterable[float]]:
        """
        Returns a dictionary with for each TextField in the instance, an iterable with the attention paid
        to the tokens in that field.
        """
        raise NotImplementedError()
    def get_suitable_aggregators(self) -> Iterable[Type[Union[None, AttentionAggregator]]]:
        """
        Returns one or more suitable aggregator types, if no aggregation is necessary the iterable
        should include NoneType.
        """
        raise NotImplementedError()
@SaliencyInterpreter.register("attention-interpreter")
class AttentionInterpreter(SaliencyInterpreter):
    """Saliency interpreter that scores tokens by model attention."""
    def __init__(
        self,
        predictor: AttentionModelPredictor,
        analysis_method: AttentionAnalysisMethods,
        aggregate_method: Optional[AttentionAggregator] = None
    ):
        """
        Build an interpreter around `predictor`, using `analysis_method`
        to extract attention and, optionally, `aggregate_method` to
        aggregate it; warns if the aggregator does not suit the predictor.
        """
        if not isinstance(predictor, AttentionModelPredictor):
            raise TypeError("predictor must be of :class:`~.interpret.saliency_interpreters.AttentionModelPredictor`")
        super().__init__(predictor)
        self.logger = logging.getLogger(Config.logger_name)
        self.analysis_method = analysis_method
        # Make sure aggregate_method is suitable for predictor
        if not any(isinstance(aggregate_method, suitable) for suitable in predictor.get_suitable_aggregators()):
            self.logger.warning("The supplied aggregator is not suitable for this predictor!")
        self.aggregate_method = aggregate_method
        # e.g. "attn_<aggregator>_<analysis-method>" (aggregator optional).
        agg_method = f"{self.aggregate_method.id}_" if self.aggregate_method else ""
        self._id = f"attn_{agg_method}{self.analysis_method.value}"
    @property
    def id(self):
        """Stable identifier derived from the chosen analysis/aggregation."""
        return self._id
    def saliency_interpret_instances(self, labeled_instances: Iterable[Instance]) -> JsonDict:
        """Return per-instance, per-field attention scores as a JsonDict."""
        instances_with_attn = dict()
        for i_idx, instance in enumerate(labeled_instances):
            attn_scores = self.predictor.get_attention_based_salience_for_instance(
                instance,
                analysis_method=self.analysis_method,
                aggregate_method=self.aggregate_method
            )
            instances_with_attn[f'instance_{i_idx+1}'] = {}
            # AllenNLP SaliencyInterpreters index the input sequences in reverse order.
            for f_idx, field in enumerate(reversed(list(attn_scores.keys()))):
                instances_with_attn[f'instance_{i_idx+1}'][f'{self.analysis_method}_{f_idx}'] = list(attn_scores[field])
        return sanitize(instances_with_attn)
| 837 | 1,138 | 22 |
ad543e43ec057bc0f0a91c2c2ade3000ce81112d | 1,267 | py | Python | main.py | Davnit/capidaptor | 9e03c263fed9555d5863114fd84deb195795d0e4 | [
"MIT"
] | 5 | 2018-10-06T10:00:43.000Z | 2020-02-02T03:49:53.000Z | main.py | Davnit/capidaptor | 9e03c263fed9555d5863114fd84deb195795d0e4 | [
"MIT"
] | 3 | 2018-10-01T11:43:00.000Z | 2019-10-30T00:31:06.000Z | main.py | Davnit/capidaptor | 9e03c263fed9555d5863114fd84deb195795d0e4 | [
"MIT"
] | 3 | 2018-10-01T06:57:58.000Z | 2018-10-04T03:19:38.000Z | #!/usr/bin/env python3
from server import Server
import argparse
parser = argparse.ArgumentParser(prog='capidaptor')
parser.add_argument('--interface', help='Specifies the interface and port to listen on')
parser.add_argument('--debug', help='Enables debugging mode', action='store_true')
parser.add_argument('--ignore-unsupported', help='Silently drops unsupported commands', action='store_true')
parser.add_argument('--do-version-check', help='Sends version check requests to clients.', action='store_true')
parser.add_argument('--out-format', help='Specifies the format to use when printing console messages.')
parser.add_argument('--debug-format', help='Specifies the format to use when printing debug messages.')
args = parser.parse_args()
if args.interface is None:
s = Server()
else:
if ':' in args.interface:
iface = tuple(args.interface.split(':', maxsplit=1))
s = Server(iface[1], iface[0])
else:
s = Server(args.interface)
if args.debug:
s.debug = True
if args.ignore_unsupported:
s.ignore_unsupported_commands = True
if args.do_version_check:
s.do_version_check = True
if args.out_format:
s.out_format = args.out_format
if args.debug_format:
s.debug_format = args.debug_format
s.start()
| 29.465116 | 111 | 0.729282 | #!/usr/bin/env python3
from server import Server
import argparse
parser = argparse.ArgumentParser(prog='capidaptor')
parser.add_argument('--interface', help='Specifies the interface and port to listen on')
parser.add_argument('--debug', help='Enables debugging mode', action='store_true')
parser.add_argument('--ignore-unsupported', help='Silently drops unsupported commands', action='store_true')
parser.add_argument('--do-version-check', help='Sends version check requests to clients.', action='store_true')
parser.add_argument('--out-format', help='Specifies the format to use when printing console messages.')
parser.add_argument('--debug-format', help='Specifies the format to use when printing debug messages.')
args = parser.parse_args()
if args.interface is None:
s = Server()
else:
if ':' in args.interface:
iface = tuple(args.interface.split(':', maxsplit=1))
s = Server(iface[1], iface[0])
else:
s = Server(args.interface)
if args.debug:
s.debug = True
if args.ignore_unsupported:
s.ignore_unsupported_commands = True
if args.do_version_check:
s.do_version_check = True
if args.out_format:
s.out_format = args.out_format
if args.debug_format:
s.debug_format = args.debug_format
s.start()
| 0 | 0 | 0 |
4e91758f971a4d37da7cfb22510a76d626e4c3c8 | 8,883 | py | Python | tests/test_client.py | pasystem/pyqiwi | 138ae75350ef50ed7643e3ab9ae4b8e6e7c81663 | [
"MIT"
] | 17 | 2016-10-28T11:25:54.000Z | 2021-03-07T20:23:59.000Z | tests/test_client.py | pasystem/pyqiwi | 138ae75350ef50ed7643e3ab9ae4b8e6e7c81663 | [
"MIT"
] | 1 | 2016-10-28T11:36:08.000Z | 2016-10-31T16:40:16.000Z | tests/test_client.py | pasystem/pyqiwi | 138ae75350ef50ed7643e3ab9ae4b8e6e7c81663 | [
"MIT"
] | 3 | 2017-02-06T15:49:00.000Z | 2017-04-15T21:52:39.000Z | # coding: utf-8
from datetime import datetime
from decimal import Decimal
from unittest import TestCase
import httpretty
from pyqiwi import QiwiError, Qiwi
@httpretty.activate
| 32.184783 | 125 | 0.547675 | # coding: utf-8
from datetime import datetime
from decimal import Decimal
from unittest import TestCase
import httpretty
from pyqiwi import QiwiError, Qiwi
class QiwiErrorTestCase(TestCase):
    """Behaviour of the QiwiError exception type."""

    def test_error_code(self):
        # The numeric code passed to the constructor is exposed as ``.code``.
        self.assertEqual(QiwiError(143).code, 143)
@httpretty.activate
class QiwiClientTestCase(TestCase):
shop_id = '123'
api_id = '456'
api_password = '123qwe'
notification_password = 'qwe123'
def setUp(self):
self.client = Qiwi(self.shop_id, self.api_id, self.api_password, self.notification_password)
def tearDown(self):
httpretty.reset()
def parse(self, data):
if isinstance(data, bytes):
data = data.decode('utf-8')
return dict(map(lambda x: x.split('='), data.split('&')))
def test__get_invoice_url(self):
self.assertEqual(
self.client._get_invoice_url('10001'),
'https://api.qiwi.com/api/v2/prv/123/bills/10001'
)
def test__get_refund_url(self):
self.assertEqual(
self.client._get_refund_url('1', '002'),
'https://api.qiwi.com/api/v2/prv/123/bills/1/refund/002'
)
def test_url_encode(self):
encoded = self.client._urlencode({
'foo': 'bar',
'ext': '',
'user': 'tel:+79998887766',
})
self.assertEqual(self.parse(encoded), {
'foo': 'bar',
'user': 'tel%3A%2B79998887766',
})
def test_make_auth(self):
self.assertEqual(
self.client._make_auth('user1', 'password'),
b'Basic dXNlcjE6cGFzc3dvcmQ='
)
self.assertEqual(
self.client._make_auth('123456', 'zLQkZDdRvBNUkf9spassword'),
b'Basic MTIzNDU2OnpMUWtaRGRSdkJOVWtmOXNwYXNzd29yZA=='
)
def test__make_signature(self):
signature = self.client._make_signature({
'b': 'bar',
'a': 'foo',
'some': 'param',
'comment': u'Заказ №101'
})
self.assertEqual(signature, b'7nHZIf/w6DLq+CuvzV2BmhT71xA=')
def test__request(self):
url = 'http://example.com'
auth = self.client._make_auth(self.api_id, self.api_password).decode('utf-8')
httpretty.register_uri(httpretty.GET, url, '{"response": {"result_code": 0}}')
response = self.client._request(url)
request = httpretty.HTTPretty.last_request
self.assertEqual(response, {'result_code': 0})
self.assertEqual(request.headers.get('Accept'), 'application/json')
self.assertEqual(request.headers.get('Authorization'), auth)
httpretty.register_uri(httpretty.PUT, url, '{"response": {"result_code": 0}}')
response = self.client._request(url, {'user': 'tel:+79998887766'})
request = httpretty.HTTPretty.last_request
self.assertEqual(response, {'result_code': 0})
self.assertEqual(request.headers.get('Accept'), 'application/json')
self.assertEqual(request.headers.get('Authorization'), auth)
self.assertEqual(request.headers.get('Content-Type'), 'application/x-www-form-urlencoded')
self.assertEqual(request.body, b'user=tel%3A%2B79998887766')
httpretty.reset()
httpretty.register_uri(
httpretty.GET, url, '{"response": {"result_code": 33}}', status=400)
try:
self.client._request(url)
except QiwiError as e:
self.assertEqual(e.code, 33)
else:
self.fail('QiwiError not raised')
def test_create_invoice(self):
invoice_id = '101'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.PUT, url, body="""{
"response": {
"result_code": 0,
"bill": {
"invoice_id": "101"
}
}
}""")
invoice = self.client.create_invoice(
invoice_id=invoice_id,
amount=Decimal('22.00'),
currency='RUB',
comment='Order #101',
user='tel:+79998887766',
lifetime=datetime(2017, 1, 2, 15, 22, 33),
)
self.assertEqual(invoice, {'invoice_id': '101'})
self.assertEqual(self.parse(httpretty.HTTPretty.last_request.body), {
'amount': '22.00',
'ccy': 'RUB',
'comment': 'Order+%23101',
'user': 'tel%3A%2B79998887766',
'lifetime': '2017-01-02T15%3A22%3A33',
})
def test_cancel_invoice(self):
invoice_id = '102'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.PATCH, url, body="""{
"response": {
"result_code": 0,
"bill": {
"invoice_id": "102",
"status": "rejected"
}
}
}""")
invoice = self.client.cancel_invoice(invoice_id)
self.assertEqual(invoice, {
'invoice_id': '102',
'status': "rejected",
})
self.assertEqual(
httpretty.HTTPretty.last_request.body,
b'status=rejected'
)
def test_get_invoice(self):
invoice_id = '103'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.GET, url, body="""{
"response": {
"result_code": 0,
"bill": {
"invoice_id": "103",
"status": "paid"
}
}
}""")
invoice = self.client.get_invoice(invoice_id)
self.assertEqual(invoice, {
'invoice_id': '103',
'status': "paid",
})
def test_create_refund(self):
invoice_id = '104'
refund_id = '1'
url = self.client._get_refund_url(invoice_id, refund_id)
httpretty.register_uri(httpretty.PUT, url, body="""{
"response": {
"result_code": 0,
"refund": {
"invoice_id": "104",
"refund_id": "1",
"amount": "100.00"
}
}
}""")
refund = self.client.create_refund(invoice_id, refund_id, Decimal('100.00'))
self.assertEqual(refund, {
'invoice_id': '104',
'refund_id': '1',
'amount': '100.00',
})
self.assertEqual(
httpretty.HTTPretty.last_request.body,
b'amount=100.00'
)
def test_get_refund(self):
invoice_id = '105'
refund_id = '1'
url = self.client._get_refund_url(invoice_id, refund_id)
httpretty.register_uri(httpretty.GET, url, body="""{
"response": {
"result_code": 0,
"refund": {
"invoice_id": "104",
"refund_id": "1",
"amount": "100.00",
"status": "fail"
}
}
}""")
refund = self.client.get_refund(invoice_id, refund_id)
self.assertEqual(refund, {
'invoice_id': '104',
'refund_id': '1',
'amount': '100.00',
'status': 'fail',
})
def test_get_invoice_url(self):
url = self.client.get_invoice_url('106')
expected = 'https://bill.qiwi.com/order/external/main.action?' + self.client._urlencode({
'shop': self.client.shop_id,
'transaction': '106',
})
self.assertEqual(url, expected)
url = self.client.get_invoice_url('107', True, 'http://google.com/success', 'http://google.com/fail', 'iframe', 'qw')
expected = 'https://bill.qiwi.com/order/external/main.action?' + self.client._urlencode({
'shop': self.client.shop_id,
'transaction': '107',
'iframe': True,
'success_url': 'http://google.com/success',
'fail_url': 'http://google.com/fail',
'target': 'iframe',
'pay_source': 'qw',
})
self.assertEqual(url, expected)
def test_check_auth(self):
self.assertFalse(self.client.check_auth(''))
self.assertFalse(self.client.check_auth(None))
self.assertFalse(self.client.check_auth(b'Basic MTExOjIyMg=='))
self.assertTrue(self.client.check_auth(b'Basic MTIzOnF3ZTEyMw=='))
def test_check_signature(self):
self.assertFalse(self.client.check_signature('', {}))
self.assertFalse(self.client.check_signature('', {'foo': 'bar'}))
self.assertFalse(self.client.check_signature(b'W18ltrPJoSb2N7AEM5Iik02wE10=', {'foo': '111'}))
self.assertTrue(self.client.check_signature(b'4C8pyw0rweDE0gZDYWT3E1B92aQ=', {
'foo': 'bar',
'commend': u'Заказ №102',
}))
| 8,055 | 590 | 71 |
619fabdffb06414f6acd82749f20ecc78ce52e20 | 1,206 | py | Python | frontmatter/__init__.py | kylekirkby/frontmatter | 68bfd0f76bd4ddeb60fc7c28320db03490c9a516 | [
"ISC"
] | null | null | null | frontmatter/__init__.py | kylekirkby/frontmatter | 68bfd0f76bd4ddeb60fc7c28320db03490c9a516 | [
"ISC"
] | null | null | null | frontmatter/__init__.py | kylekirkby/frontmatter | 68bfd0f76bd4ddeb60fc7c28320db03490c9a516 | [
"ISC"
] | null | null | null | import re
import yaml
| 29.414634 | 74 | 0.566335 | import re
import yaml
class Frontmatter:
    """Split a document into YAML front matter and body content.

    Front matter is a block delimited by ``---`` or ``+++`` fences at the
    top of the document; everything after the closing fence is the body.
    """

    _yaml_delim = r'(?:---|\+\+\+)'
    _yaml = r'(.*?)'
    _content = r'\s*(.+)$'
    _re_pattern = r'^\s*' + _yaml_delim + _yaml + _yaml_delim + _content
    _regex = re.compile(_re_pattern, re.S | re.M)

    @classmethod
    def read_file(cls, path):
        """Reads file at path and returns dict with separated frontmatter.

        See read() for more info on dict return value.
        """
        with open(path, encoding="utf-8") as file:
            file_contents = file.read()
        return cls.read(file_contents)

    @classmethod
    def read(cls, string):
        """Returns dict with separated frontmatter from string.

        Returned dict keys:
        attributes -- extracted YAML attributes in dict form
                      (None when no front matter is present).
        body -- string contents below the YAML separators
        frontmatter -- string representation of YAML
        """
        fmatter = ""
        body = ""
        result = cls._regex.search(string)

        if result:
            fmatter = result.group(1)
            body = result.group(2)
        return {
            # safe_load avoids arbitrary Python object construction from
            # untrusted documents and the PyYAML >= 5.1 warning that a
            # Loader-less yaml.load() emits. safe_load("") is None, matching
            # the previous behaviour for documents without front matter.
            "attributes": yaml.safe_load(fmatter),
            "body": body,
            "frontmatter": fmatter,
        }
| 0 | 1,161 | 23 |
c47be3b1dc8ec4a07a8b95d594288ba031947a6e | 7,895 | py | Python | src/fastnn/processors/cv/object_detection.py | aychang95/fastnn | 93f02b860845959a8c625f6c99267f103756318b | [
"MIT"
] | 7 | 2020-12-03T07:04:47.000Z | 2022-03-25T11:51:15.000Z | src/fastnn/processors/cv/object_detection.py | aychang95/fastnn | 93f02b860845959a8c625f6c99267f103756318b | [
"MIT"
] | null | null | null | src/fastnn/processors/cv/object_detection.py | aychang95/fastnn | 93f02b860845959a8c625f6c99267f103756318b | [
"MIT"
] | null | null | null | import logging
from typing import Optional, List, Union, Tuple, Callable
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import ConvertImageDtype
from PIL import Image, ImageDraw
from PIL import ImageFont
import numpy as np
from fastnn.utils.cv import ImageDataset
from fastnn.processors.base_processing import Processor
logger = logging.getLogger(__name__)
class ObjectDetectionProcessor(Processor):
"""Object Detection processor dealing with image files or 3xHxW formatted images and boxes, scores, labels out processing.
Since most resizing and padding transforms are done by the object detection models in PyTorch, datasets and dataloaders willl
generate batches of images as lists.
Usage:
```python
>>> processor = ObjectDetectionProcessor()
>>> processor.process(file_paths=["file_path.png"])
**Parameters:**
* **label_strings** - List of strings that specify label strings with index as key for this specific processor
```
"""
def process(
self,
dir_path: str,
transforms: Optional[Callable] = ConvertImageDtype(torch.float),
) -> Dataset:
"""Generate torch `Dataset` object from list of file paths or image Tensors.
This provides clear tensor input representations for compatible models.
Returns a Dataset
* **dir_path** - String path to directory of images you'd like to process
"""
dataset = ImageDataset(root=dir_path, transforms=transforms)
return dataset
def process_batch(
self,
dir_path: str,
transforms: Optional[Callable] = ConvertImageDtype(torch.float),
mini_batch_size: int = 8,
use_gpu: bool = False,
) -> DataLoader:
"""Generate torch `Dataloader` object from data directory path.
This provides clear tensor input representations for compatible models.
Returns a `Dataloader`
* **dir_path** - String path to directory of images you'd like to process
* **mini_batch_size** - Batch size for inference
* **use_gpu** - Bool for using gpu or cpu. If set True but no gpu devices available, model will default to using cpu
"""
if use_gpu:
if torch.cuda.is_available():
device = torch.device("cuda")
else:
logger.info("GPU not available")
device = torch.device("cpu")
else:
device = torch.device("cpu")
dataset = self.process(dir_path=dir_path, transforms=transforms)
# Instead of a tensor batch, the lambda collate_fn will provide a list batch
dataloader = DataLoader(
dataset,
batch_size=mini_batch_size,
collate_fn=lambda x: [[t.to(device) for t in self._od_collate_fn(x)]],
)
return dataloader
def process_output_batch(
self, outputs: List[List[torch.Tensor]], dataset: Dataset
) -> List[List[Tuple[torch.Tensor, np.array]]]:
"""Process output of object detection model into human legible results.
Outputs from `FasterRCNNModule`
Returns batched results of list of list of tuples containing boxed images in tensor and numpy format
* **outputs** - List of batch output tensors from a model's forward pass
* **dataset** - Corresponding dataset with originial images matched with model outputs
"""
# Labeled Images
results = []
for idx, out in enumerate(outputs):
labeled_images = []
for label_idx in range(1, len(out), 3):
labels = [self.label_strings[o] for o in out[label_idx]]
unique_labels = set(labels)
label_colors_map = {}
for label in unique_labels:
label_colors_map[label] = tuple(
np.random.choice(range(256), size=3)
)
label_colors = [label_colors_map[label] for label in labels]
output_tensor, output_numpy = self.draw_bounding_boxes(
ConvertImageDtype(torch.uint8)(
dataset[idx * (len(out) // 3) + label_idx // 3]
),
out[label_idx - 1],
labels=labels,
colors=label_colors,
)
labeled_images.append((output_tensor, output_numpy))
results.append(labeled_images)
return results
def _od_collate_fn(self, data):
"""Custom collate fn to output dynamic image batches without same-dim requirements via. `stack`.
This is not technically a "correct" collate_fn for most of torch's vision models. Should be wrapped as a list
in the lambda collate fn.
"""
data = [img for img in data]
return data
@torch.no_grad()
def draw_bounding_boxes(
self,
image: torch.Tensor,
boxes: torch.Tensor,
labels: Optional[List[str]] = None,
colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
width: int = 1,
font: Optional[str] = "arial.ttf",
font_size: int = 10,
) -> Tuple[torch.Tensor, np.array]:
"""
Added and modified from TorchVision utils.
Draws bounding boxes on given image.
The values of the input image should be uint8 between 0 and 255.
Args:
image (Tensor): Tensor of shape (C x H x W)
bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
`0 <= ymin < ymax < H`.
labels (List[str]): List containing the labels of bounding boxes.
colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can
be represented as `str` or `Tuple[int, int, int]`.
width (int): Width of bounding box.
font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
`/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
font_size (int): The requested font size in points.
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Tensor expected, got {type(image)}")
elif image.dtype != torch.uint8:
raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
elif image.dim() != 3:
raise ValueError("Pass individual images, not batches")
ndarr = image.permute(1, 2, 0).numpy()
img_to_draw = Image.fromarray(ndarr)
img_boxes = boxes.to(torch.int64).tolist()
draw = ImageDraw.Draw(img_to_draw)
pixel_ratio = max(1, (max(ndarr.shape[0], ndarr.shape[1]) // 1000))
for i, bbox in enumerate(img_boxes):
color = None if colors is None else colors[i]
draw.rectangle(bbox, width=width * pixel_ratio, outline=color)
if labels is not None:
txt_font = (
ImageFont.load_default()
if font is None
else ImageFont.truetype(font=font, size=font_size * pixel_ratio)
)
draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)
return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1), np.array(
img_to_draw
)
| 36.892523 | 129 | 0.610133 | import logging
from typing import Optional, List, Union, Tuple, Callable
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import ConvertImageDtype
from PIL import Image, ImageDraw
from PIL import ImageFont
import numpy as np
from fastnn.utils.cv import ImageDataset
from fastnn.processors.base_processing import Processor
logger = logging.getLogger(__name__)
class ObjectDetectionProcessor(Processor):
"""Object Detection processor dealing with image files or 3xHxW formatted images and boxes, scores, labels out processing.
Since most resizing and padding transforms are done by the object detection models in PyTorch, datasets and dataloaders willl
generate batches of images as lists.
Usage:
```python
>>> processor = ObjectDetectionProcessor()
>>> processor.process(file_paths=["file_path.png"])
**Parameters:**
* **label_strings** - List of strings that specify label strings with index as key for this specific processor
```
"""
def __init__(self, label_strings: List[str]):
self.label_strings = label_strings
def process(
self,
dir_path: str,
transforms: Optional[Callable] = ConvertImageDtype(torch.float),
) -> Dataset:
"""Generate torch `Dataset` object from list of file paths or image Tensors.
This provides clear tensor input representations for compatible models.
Returns a Dataset
* **dir_path** - String path to directory of images you'd like to process
"""
dataset = ImageDataset(root=dir_path, transforms=transforms)
return dataset
def process_batch(
self,
dir_path: str,
transforms: Optional[Callable] = ConvertImageDtype(torch.float),
mini_batch_size: int = 8,
use_gpu: bool = False,
) -> DataLoader:
"""Generate torch `Dataloader` object from data directory path.
This provides clear tensor input representations for compatible models.
Returns a `Dataloader`
* **dir_path** - String path to directory of images you'd like to process
* **mini_batch_size** - Batch size for inference
* **use_gpu** - Bool for using gpu or cpu. If set True but no gpu devices available, model will default to using cpu
"""
if use_gpu:
if torch.cuda.is_available():
device = torch.device("cuda")
else:
logger.info("GPU not available")
device = torch.device("cpu")
else:
device = torch.device("cpu")
dataset = self.process(dir_path=dir_path, transforms=transforms)
# Instead of a tensor batch, the lambda collate_fn will provide a list batch
dataloader = DataLoader(
dataset,
batch_size=mini_batch_size,
collate_fn=lambda x: [[t.to(device) for t in self._od_collate_fn(x)]],
)
return dataloader
def process_output(
self,
):
pass
def process_output_batch(
self, outputs: List[List[torch.Tensor]], dataset: Dataset
) -> List[List[Tuple[torch.Tensor, np.array]]]:
"""Process output of object detection model into human legible results.
Outputs from `FasterRCNNModule`
Returns batched results of list of list of tuples containing boxed images in tensor and numpy format
* **outputs** - List of batch output tensors from a model's forward pass
* **dataset** - Corresponding dataset with originial images matched with model outputs
"""
# Labeled Images
results = []
for idx, out in enumerate(outputs):
labeled_images = []
for label_idx in range(1, len(out), 3):
labels = [self.label_strings[o] for o in out[label_idx]]
unique_labels = set(labels)
label_colors_map = {}
for label in unique_labels:
label_colors_map[label] = tuple(
np.random.choice(range(256), size=3)
)
label_colors = [label_colors_map[label] for label in labels]
output_tensor, output_numpy = self.draw_bounding_boxes(
ConvertImageDtype(torch.uint8)(
dataset[idx * (len(out) // 3) + label_idx // 3]
),
out[label_idx - 1],
labels=labels,
colors=label_colors,
)
labeled_images.append((output_tensor, output_numpy))
results.append(labeled_images)
return results
def _od_collate_fn(self, data):
"""Custom collate fn to output dynamic image batches without same-dim requirements via. `stack`.
This is not technically a "correct" collate_fn for most of torch's vision models. Should be wrapped as a list
in the lambda collate fn.
"""
data = [img for img in data]
return data
@torch.no_grad()
def draw_bounding_boxes(
self,
image: torch.Tensor,
boxes: torch.Tensor,
labels: Optional[List[str]] = None,
colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
width: int = 1,
font: Optional[str] = "arial.ttf",
font_size: int = 10,
) -> Tuple[torch.Tensor, np.array]:
"""
Added and modified from TorchVision utils.
Draws bounding boxes on given image.
The values of the input image should be uint8 between 0 and 255.
Args:
image (Tensor): Tensor of shape (C x H x W)
bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
`0 <= ymin < ymax < H`.
labels (List[str]): List containing the labels of bounding boxes.
colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can
be represented as `str` or `Tuple[int, int, int]`.
width (int): Width of bounding box.
font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
`/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
font_size (int): The requested font size in points.
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Tensor expected, got {type(image)}")
elif image.dtype != torch.uint8:
raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
elif image.dim() != 3:
raise ValueError("Pass individual images, not batches")
ndarr = image.permute(1, 2, 0).numpy()
img_to_draw = Image.fromarray(ndarr)
img_boxes = boxes.to(torch.int64).tolist()
draw = ImageDraw.Draw(img_to_draw)
pixel_ratio = max(1, (max(ndarr.shape[0], ndarr.shape[1]) // 1000))
for i, bbox in enumerate(img_boxes):
color = None if colors is None else colors[i]
draw.rectangle(bbox, width=width * pixel_ratio, outline=color)
if labels is not None:
txt_font = (
ImageFont.load_default()
if font is None
else ImageFont.truetype(font=font, size=font_size * pixel_ratio)
)
draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)
return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1), np.array(
img_to_draw
)
| 99 | 0 | 54 |
e762b4876e49dc54cb1de3681ceb2f8ccaffd784 | 2,170 | py | Python | src/structs/linked_list.py | zelaznymarek/data_structures | e85e9b48a6fe9adace914d887568e692c4e2344b | [
"MIT"
] | null | null | null | src/structs/linked_list.py | zelaznymarek/data_structures | e85e9b48a6fe9adace914d887568e692c4e2344b | [
"MIT"
] | null | null | null | src/structs/linked_list.py | zelaznymarek/data_structures | e85e9b48a6fe9adace914d887568e692c4e2344b | [
"MIT"
] | null | null | null |
linked_list = LinkedList()
print(linked_list.size_of() == 0)
linked_list.insert_at_start(10)
linked_list.insert_at_start(13)
linked_list.insert_at_end(15)
linked_list.insert_at_end(27)
print(linked_list.head.data == 13)
print(linked_list.size_of() == 4)
linked_list.remove(100)
print(linked_list.size_of() == 4)
linked_list.remove(10)
print(linked_list.head.data == 13)
print(linked_list.size_of() == 3)
linked_list.remove(13)
print(linked_list.head.data == 15)
print(linked_list.size_of() == 2)
linked_list.remove(27)
print(linked_list.head.data == 15)
print(linked_list.size_of() == 1)
linked_list.remove(15)
print(linked_list.head is None)
print(linked_list.size_of() == 0)
class Node:
    """Single element of a singly linked list."""

    def __init__(self, data):
        self.data = data
        self.next_node = None


class LinkedList:
    """Singly linked list that tracks its head node and element count."""

    def __init__(self):
        self.head = None
        self.size = 0

    def insert_at_start(self, data):
        """Prepend *data* to the list in O(1)."""
        new_node = Node(data)
        self.size += 1
        if not self.head:
            self.head = new_node
        else:
            new_node.next_node = self.head
            self.head = new_node

    def insert_at_end(self, data):
        """Append *data* to the list in O(n).

        Fixed: previously dereferenced ``self.head.next_node``
        unconditionally and so raised AttributeError on an empty list.
        """
        new_node = Node(data)
        self.size += 1
        if self.head is None:
            self.head = new_node
            return
        actual_node = self.head
        while actual_node.next_node is not None:
            actual_node = actual_node.next_node
        actual_node.next_node = new_node

    def traverse(self):
        """Print every element on one line, separated by spaces."""
        actual_node = self.head
        while actual_node is not None:
            print(actual_node.data, end=' ')
            actual_node = actual_node.next_node

    def remove(self, data):
        """Remove the first node holding *data*; print a notice if absent."""
        if self.head is None:
            return
        actual_node = self.head
        previous_node = None
        while actual_node.data != data:
            if not actual_node.next_node:
                # Reached the tail without a match; size is unchanged.
                print(f'Value {data} not found.')
                return
            previous_node = actual_node
            actual_node = actual_node.next_node
        self.size -= 1
        if previous_node:
            previous_node.next_node = actual_node.next_node
        else:
            # Matched the head node itself.
            self.head = actual_node.next_node

    def size_of(self):
        """Return the number of elements currently in the list."""
        return self.size
# Smoke test of the LinkedList API; every print below is expected to emit True.
linked_list = LinkedList()
print(linked_list.size_of() == 0)
# Build the list 13 -> 10 -> 15 -> 27.
linked_list.insert_at_start(10)
linked_list.insert_at_start(13)
linked_list.insert_at_end(15)
linked_list.insert_at_end(27)
print(linked_list.head.data == 13)
print(linked_list.size_of() == 4)
# Removing a value that is absent leaves the size unchanged (prints a notice).
linked_list.remove(100)
print(linked_list.size_of() == 4)
linked_list.remove(10)
print(linked_list.head.data == 13)
print(linked_list.size_of() == 3)
# Removing the head promotes the following node.
linked_list.remove(13)
print(linked_list.head.data == 15)
print(linked_list.size_of() == 2)
linked_list.remove(27)
print(linked_list.head.data == 15)
print(linked_list.size_of() == 1)
# Removing the final element empties the list again.
linked_list.remove(15)
print(linked_list.head is None)
print(linked_list.size_of() == 0)
| 1,266 | -14 | 232 |
b37d8685d1dc9ffb644ec26eec8530c5ed0f4b5f | 5,435 | py | Python | dfa/dfa.py | ameesh-shah/dfa | 00043c2c4e2762ef55f5c484e08eca7925cf0f65 | [
"MIT"
] | null | null | null | dfa/dfa.py | ameesh-shah/dfa | 00043c2c4e2762ef55f5c484e08eca7925cf0f65 | [
"MIT"
] | null | null | null | dfa/dfa.py | ameesh-shah/dfa | 00043c2c4e2762ef55f5c484e08eca7925cf0f65 | [
"MIT"
] | null | null | null | from __future__ import annotations
import operator
from functools import wraps
from typing import Hashable, FrozenSet, Callable, Optional, Sequence
import attr
import funcy as fn
State = Hashable
Letter = Hashable
Alphabet = FrozenSet[Letter]
@attr.frozen(auto_detect=True)
| 32.939394 | 79 | 0.598344 | from __future__ import annotations
import operator
from functools import wraps
from typing import Hashable, FrozenSet, Callable, Optional, Sequence
import attr
import funcy as fn
State = Hashable
Letter = Hashable
Alphabet = FrozenSet[Letter]
def boolean_only(method):
    """Decorator restricting *method* to DFAs whose outputs are {True, False}."""
    @wraps(method)
    def guarded(self, *args, **kwargs):
        # Only Boolean-output automata support this operation.
        if self.outputs == {True, False}:
            return method(self, *args, **kwargs)
        raise ValueError(f'{method} only defined for Boolean output DFAs.')
    return guarded
@attr.frozen(auto_detect=True)
class DFA:
    """Deterministic finite automaton with memoized label/transition maps.

    ``outputs`` generalizes accepting states to arbitrary state labels;
    Boolean-output DFAs additionally support the logical operators below.
    """

    start: State
    _label: Callable[[State], Letter] = attr.ib(
        converter=fn.memoize
    )
    _transition: Callable[[State, Letter], State] = attr.ib(
        converter=fn.memoize
    )
    inputs: Optional[Alphabet] = attr.ib(
        converter=lambda x: x if x is None else frozenset(x), default=None
    )
    outputs: Alphabet = attr.ib(converter=frozenset, default={True, False})
    _states: Optional[Sequence[State]] = None  # Lazily computed state cache.
    _hash: Optional[int] = None                # Lazily computed hash cache.

    def __repr__(self) -> str:  # Fixed: was mis-annotated as ``-> int``.
        from dfa.utils import dfa2dict
        import pprint
        if self.inputs is not None:
            return pprint.pformat(dfa2dict(self))
        else:
            # Without a declared alphabet the DFA cannot be serialized.
            start, inputs, outputs = self.start, self.inputs, self.outputs
            return f'DFA({start=},{inputs=},{outputs=})'

    def normalize(self) -> DFA:
        """Normalizes the state indexing and memoizes transitions/labels."""
        from dfa.utils import dfa2dict
        from dfa.utils import dict2dfa
        return dict2dfa(*dfa2dict(self, reindex=True))

    def __hash__(self) -> int:
        if self._hash is None:
            # Hash the normalized repr so equivalent indexings agree.
            _hash = hash(repr(self.normalize()))
            object.__setattr__(self, "_hash", _hash)  # Cache hash.
        return self._hash

    def __eq__(self, other: DFA) -> bool:
        from dfa.utils import find_equiv_counterexample as test_equiv
        from dfa.utils import dfa2dict
        if not isinstance(other, DFA):
            return False
        bool_ = {True, False}
        if (self.outputs <= bool_) and (other.outputs <= bool_):
            # Boolean DFAs: semantic language equivalence.
            return test_equiv(self, other) is None
        else:
            # Otherwise fall back to structural equality after reindexing.
            return dfa2dict(self, reindex=True) \
                == dfa2dict(other, reindex=True)

    def run(self, *, start=None, label=False):
        """Co-routine interface for simulating runs of the automaton.

        - Users can send system actions (elements of self.inputs).
        - Co-routine yields the current state.

        If label is True, then state labels are returned instead
        of states.
        """
        # Fixed: previously read ``self.dfa._label`` — DFA declares no
        # ``dfa`` attribute, so label=True raised AttributeError.
        labeler = self._label if label else lambda x: x
        state = self.start if start is None else start

        while True:
            letter = yield labeler(state)
            state = self.transition((letter,), start=state)

    def trace(self, word, *, start=None):
        """Yield the state sequence visited while consuming *word*."""
        state = self.start if start is None else start
        yield state

        for char in word:
            assert (self.inputs is None) or (char in self.inputs)
            state = self._transition(state, char)
            yield state

    def transition(self, word, *, start=None):
        """Return the state reached after consuming *word*."""
        return fn.last(self.trace(word, start=start))

    def label(self, word, *, start=None):
        """Return the label of the state reached after consuming *word*."""
        output = self._label(self.transition(word, start=start))
        assert (self.outputs is None) or (output in self.outputs)
        return output

    def transduce(self, word, *, start=None):
        """Return labels of all states visited, excluding the final one."""
        return tuple(map(self._label, self.trace(word, start=start)))[:-1]

    def states(self):
        """Return the (cached) set of states reachable from ``start``."""
        if self._states is None:
            assert self.inputs is not None, "Need to specify inputs field!"

            # Make search deterministic.
            try:
                inputs = sorted(self.inputs)  # Try to respect inherent order.
            except TypeError:
                inputs = sorted(self.inputs, key=id)  # Fall by to object ids.

            # Depth-first search from the start state.
            visited, order = set(), []
            stack = [self.start]
            while stack:
                curr = stack.pop()
                if curr in visited:
                    continue
                else:
                    order.append(curr)
                    visited.add(curr)

                successors = [self._transition(curr, a) for a in inputs]
                stack.extend(successors)
            object.__setattr__(self, "_states", tuple(order))  # Cache states.
        return frozenset(self._states)

    @boolean_only
    def __invert__(self):
        """Complement: negate the label of every state."""
        return attr.evolve(self, label=lambda s: not self._label(s))

    def _bin_op(self, other, op):
        """Product construction combining labels with binary operator *op*."""
        if (self.outputs != other.outputs) or (self.inputs != other.inputs):
            raise ValueError(f"{op} requires common i/o interface.")
        return DFA(
            start=(self.start, other.start),
            inputs=self.inputs,  # Assumed shared alphabet
            transition=lambda s, c: (
                self._transition(s[0], c),
                other._transition(s[1], c)
            ),
            label=lambda s: op(self._label(s[0]), other._label(s[1])))

    @boolean_only
    def __xor__(self, other: DFA) -> DFA:
        return self._bin_op(other, operator.xor)

    @boolean_only
    def __or__(self, other: DFA) -> DFA:
        return self._bin_op(other, operator.or_)

    @boolean_only
    def __and__(self, other: DFA) -> DFA:
        return self._bin_op(other, operator.and_)
30c2f87795348253a7bc93585e22ecd969330ca7 | 1,775 | py | Python | bot/exts/fun/ciphers.py | gurkult/gurkbot | fe42628eb01ce42539ad3a6781eb8810950b1402 | [
"MIT"
] | 24 | 2020-12-18T07:26:14.000Z | 2022-03-30T22:56:49.000Z | bot/exts/fun/ciphers.py | gurkult/gurkbot | fe42628eb01ce42539ad3a6781eb8810950b1402 | [
"MIT"
] | 143 | 2020-12-18T09:13:51.000Z | 2022-03-02T19:27:44.000Z | bot/exts/fun/ciphers.py | gurkult/gurkbot | fe42628eb01ce42539ad3a6781eb8810950b1402 | [
"MIT"
] | 44 | 2020-12-18T09:05:29.000Z | 2022-03-02T20:06:23.000Z | import hashlib
import logging
from bot.bot import Bot
from bot.constants import Colours
from discord import Embed
from discord.ext.commands import BadArgument, Cog, Context, group
logger = logging.getLogger(__name__)
class Ciphers(Cog):
"""Commands for working with ciphers, hashes and encryptions."""
@group(name="hash", invoke_without_command=True)
async def hash(
self,
ctx: Context,
algorithm: str,
*,
original: str,
) -> None:
"""Hashes the passed string and returns the result."""
if algorithm not in hashlib.algorithms_guaranteed:
raise BadArgument(
f"The algorithm `{algorithm}` is not supported. \
Run `{ctx.prefix}hash algorithms` for a list of supported algorithms."
)
func = getattr(hashlib, algorithm)
hashed = func(original.encode("utf-8")).hexdigest()
embed = Embed(
title=f"Hash ({algorithm})",
description=hashed,
colour=Colours.green,
)
await ctx.send(embed=embed)
@hash.command(
name="algorithms", aliases=("algorithm", "algos", "algo", "list", "l")
)
async def algorithms(self, ctx: Context) -> None:
"""Sends a list of all supported hashing algorithms."""
embed = Embed(
title="Supported algorithms",
description="\n".join(
f"• {algo}" for algo in hashlib.algorithms_guaranteed
), # Shouldn't need pagination
colour=Colours.green,
)
await ctx.send(embed=embed)
def setup(bot: Bot) -> None:
"""Loading the Ciphers cog."""
bot.add_cog(Ciphers(bot))
| 29.098361 | 88 | 0.59493 | import hashlib
import logging
from bot.bot import Bot
from bot.constants import Colours
from discord import Embed
from discord.ext.commands import BadArgument, Cog, Context, group
logger = logging.getLogger(__name__)
class Ciphers(Cog):
    """Commands for working with ciphers, hashes and encryptions."""

    def __init__(self, bot: Bot) -> None:
        self.bot = bot

    @group(name="hash", invoke_without_command=True)
    async def hash(
        self,
        ctx: Context,
        algorithm: str,
        *,
        original: str,
    ) -> None:
        """Hashes the passed string and returns the result."""
        if algorithm not in hashlib.algorithms_guaranteed:
            # Implicit f-string concatenation: the previous backslash line
            # continuation embedded a run of indentation spaces into the
            # user-facing error message.
            raise BadArgument(
                f"The algorithm `{algorithm}` is not supported. "
                f"Run `{ctx.prefix}hash algorithms` for a list of supported algorithms."
            )

        func = getattr(hashlib, algorithm)
        hashed = func(original.encode("utf-8")).hexdigest()

        embed = Embed(
            title=f"Hash ({algorithm})",
            description=hashed,
            colour=Colours.green,
        )
        await ctx.send(embed=embed)

    @hash.command(
        name="algorithms", aliases=("algorithm", "algos", "algo", "list", "l")
    )
    async def algorithms(self, ctx: Context) -> None:
        """Sends a list of all supported hashing algorithms."""
        embed = Embed(
            title="Supported algorithms",
            description="\n".join(
                f"• {algo}" for algo in hashlib.algorithms_guaranteed
            ),  # Shouldn't need pagination
            colour=Colours.green,
        )
        await ctx.send(embed=embed)
def setup(bot: Bot) -> None:
    """Loading the Ciphers cog."""
    # discord.py extension entry point, invoked by bot.load_extension().
    bot.add_cog(Ciphers(bot))
| 39 | 0 | 27 |
2c8540dc90544d6a1d6cbfedfddc2ffd1d030ec1 | 234 | py | Python | exercise4-3.py | raygomez/python-exercise-4 | 5f4fdb23767f1cc04dc133497b866dfa9feeb7f9 | [
"MIT"
] | null | null | null | exercise4-3.py | raygomez/python-exercise-4 | 5f4fdb23767f1cc04dc133497b866dfa9feeb7f9 | [
"MIT"
] | null | null | null | exercise4-3.py | raygomez/python-exercise-4 | 5f4fdb23767f1cc04dc133497b866dfa9feeb7f9 | [
"MIT"
] | null | null | null | from __future__ import print_function
__author__ = 'ragomez'
number = int(raw_input('Enter a number:'))
for num in f(number):
print(num, end=',') | 18 | 42 | 0.645299 | from __future__ import print_function
__author__ = 'ragomez'
def f(data):
    """Yield the even numbers in [0, data), in increasing order.

    Uses ``range`` instead of ``xrange`` so the generator works on both
    Python 2 and Python 3 (``xrange`` does not exist on Python 3).
    """
    for i in range(0, data, 2):
        yield i
number = int(raw_input('Enter a number:'))
for num in f(number):
print(num, end=',') | 59 | 0 | 23 |
5d81ca03df78fb0cbc1864bcfafddccac859040d | 8,661 | py | Python | python/Power Bi.nuixscript/site-packages/adal/wstrust_request.py | Nuix/Power-BI-Integration | 5204f51c497b4de4881a44ae9b1f421023d54842 | [
"Apache-2.0"
] | 1 | 2021-10-16T19:33:56.000Z | 2021-10-16T19:33:56.000Z | python/Power Bi.nuixscript/site-packages/adal/wstrust_request.py | Nuix/Power-BI-Integration | 5204f51c497b4de4881a44ae9b1f421023d54842 | [
"Apache-2.0"
] | 1 | 2021-04-30T20:41:19.000Z | 2021-04-30T20:41:19.000Z | python/Power Bi.nuixscript/site-packages/adal/wstrust_request.py | Nuix/Power-BI-Integration | 5204f51c497b4de4881a44ae9b1f421023d54842 | [
"Apache-2.0"
] | 1 | 2019-07-25T15:09:05.000Z | 2019-07-25T15:09:05.000Z | #------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import uuid
from datetime import datetime, timedelta
import requests
from . import log
from . import util
from . import wstrust_response
from .adal_error import AdalError
from .constants import WSTrustVersion
_USERNAME_PLACEHOLDER = '{UsernamePlaceHolder}'
_PASSWORD_PLACEHOLDER = '{PasswordPlaceHolder}'
| 50.947059 | 178 | 0.607436 | #------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import uuid
from datetime import datetime, timedelta
import requests
from . import log
from . import util
from . import wstrust_response
from .adal_error import AdalError
from .constants import WSTrustVersion
_USERNAME_PLACEHOLDER = '{UsernamePlaceHolder}'
_PASSWORD_PLACEHOLDER = '{PasswordPlaceHolder}'
class WSTrustRequest(object):
    """Builds, sends and parses a WS-Trust RST (RequestSecurityToken).

    Used for federated username/password authentication: the RST is posted to
    the tenant's WS-Trust endpoint and the resulting assertion is parsed by
    :class:`wstrust_response.WSTrustResponse`.
    """

    def __init__(self, call_context, watrust_endpoint_url, applies_to, wstrust_endpoint_version):
        self._log = log.Logger('WSTrustRequest', call_context['log_context'])
        self._call_context = call_context
        self._wstrust_endpoint_url = watrust_endpoint_url
        self._applies_to = applies_to
        self._wstrust_endpoint_version = wstrust_endpoint_version

    @staticmethod
    def _build_security_header():
        """Return the WS-Security header XML with a 10-minute validity window.

        Username/password are left as placeholders and substituted later by
        :meth:`_populate_rst_username_password`.
        """
        time_now = datetime.utcnow()
        expire_time = time_now + timedelta(minutes=10)
        # Truncate microseconds to milliseconds and mark the stamps as UTC.
        time_now_str = time_now.isoformat()[:-3] + 'Z'
        expire_time_str = expire_time.isoformat()[:-3] + 'Z'
        security_header_xml = ("<wsse:Security s:mustUnderstand='1' xmlns:wsse='http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd'>"
                               "<wsu:Timestamp wsu:Id=\'_0\'>"
                               "<wsu:Created>" + time_now_str + "</wsu:Created>"
                               "<wsu:Expires>" + expire_time_str + "</wsu:Expires>"
                               "</wsu:Timestamp>"
                               "<wsse:UsernameToken wsu:Id='ADALUsernameToken'>"
                               "<wsse:Username>" + _USERNAME_PLACEHOLDER + "</wsse:Username>"
                               "<wsse:Password>" + _PASSWORD_PLACEHOLDER + "</wsse:Password>"
                               "</wsse:UsernameToken>"
                               "</wsse:Security>")
        return security_header_xml

    @staticmethod
    def _populate_rst_username_password(template, username, password):
        """Substitute the real credentials into the RST *template*."""
        password = WSTrustRequest._escape_password(password)
        return template.replace(_USERNAME_PLACEHOLDER, username).replace(_PASSWORD_PLACEHOLDER, password)

    @staticmethod
    def _escape_password(password):
        """XML-escape *password* so it can be embedded in the SOAP body.

        ``&`` must be replaced first so the other entities are not
        double-escaped.  (Restored: the replacement targets had been
        corrupted into no-op self-replacements by HTML entity decoding.)
        """
        return password.replace('&', '&amp;').replace('"', '&quot;').replace("'", '&apos;').replace('<', '&lt;').replace('>', '&gt;')

    def _build_rst(self, username, password):
        """Assemble the full SOAP envelope for the token request.

        Endpoint URIs differ between WS-Trust 1.3 (the defaults below) and
        WS-Trust 2005 (overridden in the ``if`` branch).
        """
        message_id = str(uuid.uuid4())
        schema_location = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd'
        soap_action = 'http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue'
        rst_trust_namespace = 'http://docs.oasis-open.org/ws-sx/ws-trust/200512'
        key_type = 'http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer'
        request_type = 'http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue'
        if self._wstrust_endpoint_version == WSTrustVersion.WSTRUST2005:
            soap_action = 'http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue'
            rst_trust_namespace = 'http://schemas.xmlsoap.org/ws/2005/02/trust'
            key_type = 'http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey'
            request_type = 'http://schemas.xmlsoap.org/ws/2005/02/trust/Issue'
        rst_template = ("<s:Envelope xmlns:s='http://www.w3.org/2003/05/soap-envelope' xmlns:wsa='http://www.w3.org/2005/08/addressing' xmlns:wsu='{}'>".format(schema_location) +
                        "<s:Header>" +
                        "<wsa:Action s:mustUnderstand='1'>{}</wsa:Action>".format(soap_action) +
                        "<wsa:messageID>urn:uuid:{}</wsa:messageID>".format(message_id) +
                        "<wsa:ReplyTo>" +
                        "<wsa:Address>http://www.w3.org/2005/08/addressing/anonymous</wsa:Address>" +
                        "</wsa:ReplyTo>" +
                        "<wsa:To s:mustUnderstand='1'>{}</wsa:To>".format(self._wstrust_endpoint_url) +
                        WSTrustRequest._build_security_header() +
                        "</s:Header>" +
                        "<s:Body>" +
                        "<wst:RequestSecurityToken xmlns:wst='{}'>".format(rst_trust_namespace) +
                        "<wsp:AppliesTo xmlns:wsp='http://schemas.xmlsoap.org/ws/2004/09/policy'>" +
                        "<wsa:EndpointReference>" +
                        "<wsa:Address>{}</wsa:Address>".format(self._applies_to) +
                        "</wsa:EndpointReference>" +
                        "</wsp:AppliesTo>" +
                        "<wst:KeyType>{}</wst:KeyType>".format(key_type) +
                        "<wst:RequestType>{}</wst:RequestType>".format(request_type) +
                        "</wst:RequestSecurityToken>" +
                        "</s:Body>" +
                        "</s:Envelope>")
        self._log.debug('Created RST: \n %(rst_template)s',
                        {"rst_template": rst_template})
        return WSTrustRequest._populate_rst_username_password(rst_template, username, password)

    def _handle_rstr(self, body):
        """Parse an RSTR (RequestSecurityTokenResponse) body into a response object."""
        wstrust_resp = wstrust_response.WSTrustResponse(self._call_context, body, self._wstrust_endpoint_version)
        wstrust_resp.parse()
        return wstrust_resp

    def acquire_token(self, username, password):
        """POST the RST to the WS-Trust endpoint and return the parsed response.

        Raises:
            AdalError: on an unsupported endpoint version or a non-success
                HTTP status (other than 429, which is re-raised as HTTPError
                so callers can honor throttling).
        """
        if self._wstrust_endpoint_version == WSTrustVersion.UNDEFINED:
            raise AdalError('Unsupported wstrust endpoint version. Current support version is wstrust2005 or wstrust13.')

        rst = self._build_rst(username, password)
        if self._wstrust_endpoint_version == WSTrustVersion.WSTRUST2005:
            soap_action = 'http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue'
        else:
            soap_action = 'http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue'
        headers = {'headers': {'Content-type':'application/soap+xml; charset=utf-8',
                               'SOAPAction': soap_action},
                   'body': rst}
        options = util.create_request_options(self, headers)
        self._log.debug("Sending RST to: %(wstrust_endpoint)s",
                        {"wstrust_endpoint": self._wstrust_endpoint_url})
        operation = "WS-Trust RST"
        resp = requests.post(self._wstrust_endpoint_url, headers=options['headers'], data=rst,
                             allow_redirects=True,
                             verify=self._call_context.get('verify_ssl', None),
                             proxies=self._call_context.get('proxies', None),
                             timeout=self._call_context.get('timeout', None))
        util.log_return_correlation_id(self._log, operation, resp)

        if resp.status_code == 429:
            resp.raise_for_status()  # Will raise requests.exceptions.HTTPError
        if not util.is_http_success(resp.status_code):
            return_error_string = u"{} request returned http error: {}".format(operation, resp.status_code)
            error_response = ""
            if resp.text:
                return_error_string = u"{} and server response: {}".format(return_error_string, resp.text)
                try:
                    error_response = resp.json()
                except ValueError:
                    pass
            raise AdalError(return_error_string, error_response)
        else:
            return self._handle_rstr(resp.text)
| 6,729 | 259 | 23 |
5dc06c7ecbbda4feb8e2fc0d03a5fa9a5fd01f7f | 845 | py | Python | telepythy/lib/__init__.py | dhagrow/telepythy | ed06510eb5ae8190387ab82e4acbfda63e2abf7f | [
"MIT"
] | 1 | 2022-01-07T16:06:31.000Z | 2022-01-07T16:06:31.000Z | telepythy/lib/__init__.py | dhagrow/telepythy | ed06510eb5ae8190387ab82e4acbfda63e2abf7f | [
"MIT"
] | 1 | 2021-08-23T21:08:14.000Z | 2021-08-23T21:08:14.000Z | telepythy/lib/__init__.py | dhagrow/telepythy | ed06510eb5ae8190387ab82e4acbfda63e2abf7f | [
"MIT"
] | null | null | null | from .service import Service
from . import utils
| 36.73913 | 61 | 0.75858 | from .service import Service
from . import utils
def serve(locs=None, address=None, embed_mode=True):
addr = utils.parse_address(address or utils.DEFAULT_ADDR)
Service(locs, embed_mode=embed_mode).serve(addr)
def connect(locs=None, address=None, embed_mode=True):
addr = utils.parse_address(address or utils.DEFAULT_ADDR)
Service(locs, embed_mode=embed_mode).connect(addr)
def serve_thread(locs=None, address=None, embed_mode=True):
addr = utils.parse_address(address or utils.DEFAULT_ADDR)
svc = Service(locs, embed_mode=embed_mode)
utils.start_thread(svc.serve, addr)
return svc
def connect_thread(locs=None, address=None, embed_mode=True):
addr = utils.parse_address(address or utils.DEFAULT_ADDR)
svc = Service(locs, embed_mode=embed_mode)
utils.start_thread(svc.connect, addr)
return svc
| 704 | 0 | 92 |
2179c15243606c1502b113f52f9ca3779a8786e1 | 2,079 | py | Python | lambda_functions/process/count_feature/utilities.py | pierrealixt/MapCampaigner | 7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad | [
"BSD-3-Clause"
] | null | null | null | lambda_functions/process/count_feature/utilities.py | pierrealixt/MapCampaigner | 7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad | [
"BSD-3-Clause"
] | 1 | 2018-07-24T13:57:03.000Z | 2018-07-24T13:57:03.000Z | lambda_functions/process/count_feature/utilities.py | pierrealixt/MapCampaigner | 7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad | [
"BSD-3-Clause"
] | null | null | null | import os
import json
import boto3
from aws import S3Data
| 24.174419 | 68 | 0.608947 | import os
import json
import boto3
from aws import S3Data
def download_overpass_file(uuid, type_id):
    """Download the raw overpass XML for *type_id* of campaign *uuid* into /tmp."""
    overpass_key = build_raw_data_overpass_path(
        campaign_path=campaign_path(uuid),
        type_id=type_id)
    s3 = S3Data()
    s3.download_file(
        key=overpass_key,
        type_id=type_id,
        destination='/tmp')
def build_raw_data_overpass_path(campaign_path, type_id):
    """Return the S3 key of the raw overpass XML for a campaign type."""
    return '{campaign_path}/overpass/{type_id}.xml'.format(
        campaign_path=campaign_path,
        type_id=type_id)
def to_piechart(data):
    """Convert a ``{label: value}`` mapping into a Chart.js pie-chart payload."""
    labels = list(data.keys())
    values = list(data.values())
    return {
        'labels': labels,
        'datasets': [{
            'data': values,
            'backgroundColor': ['#4286f4']
        }]
    }
def campaign_path(uuid):
    """Return the S3 prefix under which campaign *uuid* is stored."""
    return 'campaigns/{uuid}'.format(uuid=uuid)
def fetch_campaign(campaign_path):
    """Fetch and return the campaign.json stored under *campaign_path* on S3."""
    key = '{campaign_path}/campaign.json'.format(campaign_path=campaign_path)
    return S3Data().fetch(key)
def fetch_type(seeked_feature, functions):
    """Return the 'type' of the first CountFeature entry matching *seeked_feature*.

    Raises IndexError when no function in *functions* matches.
    """
    matching_types = [
        details['type']
        for details in functions.values()
        if is_function_and_feature(
            function_name=details['function'],
            feature=details['feature'],
            seeked_feature=seeked_feature)
    ]
    return matching_types[0]
def is_function_and_feature(function_name, feature, seeked_feature):
    """True when *function_name* is CountFeature and its feature matches."""
    if function_name != 'CountFeature':
        return False
    return feature == seeked_feature
def save_data(uuid, type_id, data):
    """Serialize *data* to /tmp/data.json and upload it to the campaign's render path."""
    local_path = '/tmp/data.json'
    with open(local_path, 'w') as out_file:
        json.dump(data, out_file)
    render_key = build_render_data_path(
        campaign_path=campaign_path(uuid),
        type_id=type_id)
    with open(local_path, 'rb') as payload:
        S3Data().upload_file(
            key=render_key,
            body=payload)
def build_render_data_path(campaign_path, type_id):
    """Return the S3 key of the rendered count_feature.json for a campaign type."""
    return '{campaign_path}/render/{type_id}/count_feature.json'.format(
        campaign_path=campaign_path,
        type_id=type_id)
| 1,805 | 0 | 207 |
d5fe1334d39320c9da1b9c09dd4224c055897193 | 8,511 | py | Python | utils.py | zikang12138/Learning-Action-Completeness-from-Points | 0dcb2b70a218e975a96e269646912649d30759ba | [
"MIT"
] | 52 | 2021-08-06T07:48:52.000Z | 2022-03-30T01:53:08.000Z | utils.py | zikang12138/Learning-Action-Completeness-from-Points | 0dcb2b70a218e975a96e269646912649d30759ba | [
"MIT"
] | 7 | 2021-09-12T03:48:04.000Z | 2022-03-24T08:44:30.000Z | utils.py | zikang12138/Learning-Action-Completeness-from-Points | 0dcb2b70a218e975a96e269646912649d30759ba | [
"MIT"
] | 10 | 2021-08-14T06:29:16.000Z | 2022-03-06T14:58:56.000Z | import torch
import torch.nn as nn
import numpy as np
from scipy.interpolate import interp1d
import os
import sys
import random
import config
| 33.507874 | 146 | 0.561039 | import torch
import torch.nn as nn
import numpy as np
from scipy.interpolate import interp1d
import os
import sys
import random
import config
def upgrade_resolution(arr, scale):
    """Linearly interpolate *arr* along axis 0 to *scale* times its resolution."""
    coarse_x = np.arange(0, arr.shape[0])
    interpolator = interp1d(coarse_x, arr, kind='linear', axis=0, fill_value='extrapolate')
    fine_x = np.arange(0, arr.shape[0], 1 / scale)
    return interpolator(fine_x)
def get_proposal_oic(tList, wtcam, final_score, c_pred, scale, v_len, sampling_frames, num_segments, _lambda=0.25, gamma=0.2):
    """Turn thresholded activation regions into scored temporal proposals.

    For each predicted class, consecutive above-threshold snippet indices in
    ``tList`` are grouped into candidate segments and scored with an
    outer-inner-contrast score: mean inner activation minus mean activation of
    the inflated outer boundary, plus ``gamma`` times the video-level class
    score.  Returns one list per class of
    ``[class_index, score, t_start_seconds, t_end_seconds]`` proposals.
    """
    # Seconds per upscaled snippet position; the constant 16 presumably is the
    # number of frames per snippet used at feature extraction -- TODO confirm.
    t_factor = float(16 * v_len) / (scale * num_segments * sampling_frames)
    temp = []
    for i in range(len(tList)):
        c_temp = []
        temp_list = np.array(tList[i])[0]
        if temp_list.any():
            # Split surviving indices into runs of consecutive snippets.
            grouped_temp_list = grouping(temp_list)
            for j in range(len(grouped_temp_list)):
                inner_score = np.mean(wtcam[grouped_temp_list[j], i, 0])
                # Outer region: _lambda * proposal length on each side,
                # clipped to the valid temporal range of wtcam.
                len_proposal = len(grouped_temp_list[j])
                outer_s = max(0, int(grouped_temp_list[j][0] - _lambda * len_proposal))
                outer_e = min(int(wtcam.shape[0] - 1), int(grouped_temp_list[j][-1] + _lambda * len_proposal))
                outer_temp_list = list(range(outer_s, int(grouped_temp_list[j][0]))) + list(range(int(grouped_temp_list[j][-1] + 1), outer_e + 1))
                if len(outer_temp_list) == 0:
                    # Proposal spans the whole timeline: no outer context.
                    outer_score = 0
                else:
                    outer_score = np.mean(wtcam[outer_temp_list, i, 0])
                c_score = inner_score - outer_score + gamma * final_score[c_pred[i]]
                t_start = grouped_temp_list[j][0] * t_factor
                t_end = (grouped_temp_list[j][-1] + 1) * t_factor
                c_temp.append([c_pred[i], c_score, t_start, t_end])
        temp.append(c_temp)
    return temp
def result2json(result):
    """Convert ``[[class_idx, score, t_start, t_end], ...]`` proposals to detection dicts."""
    detections = []
    for proposal in result:
        detections.append({
            'label': config.class_dict[proposal[0]],
            'score': proposal[1],
            'segment': [proposal[2], proposal[3]],
        })
    return detections
def grouping(arr):
    """Split a sorted index array into runs of consecutive integers."""
    break_points = np.where(np.diff(arr) != 1)[0] + 1
    return np.split(arr, break_points)
def save_best_record_thumos(test_info, file_path):
    """Write the latest test metrics to *file_path* as plain text.

    *test_info* maps metric names to lists of values; only the most recent
    value (index -1) of each metric is written.  A ``with`` block replaces the
    previous manual open/close so the handle is released even on an exception.
    """
    with open(file_path, "w") as fo:
        fo.write("Step: {}\n".format(test_info["step"][-1]))
        fo.write("Test_acc: {:.4f}\n".format(test_info["test_acc"][-1]))
        fo.write("average_mAP[0.1:0.7]: {:.4f}\n".format(test_info["average_mAP[0.1:0.7]"][-1]))
        fo.write("average_mAP[0.1:0.5]: {:.4f}\n".format(test_info["average_mAP[0.1:0.5]"][-1]))
        fo.write("average_mAP[0.3:0.7]: {:.4f}\n".format(test_info["average_mAP[0.3:0.7]"][-1]))

        # One line per tIoU threshold from 0.1 to 0.7.
        tIoU_thresh = np.linspace(0.1, 0.7, 7)
        for i in range(len(tIoU_thresh)):
            fo.write("mAP@{:.1f}: {:.4f}\n".format(tIoU_thresh[i], test_info["mAP@{:.1f}".format(tIoU_thresh[i])][-1]))
def minmax_norm(act_map, min_val=None, max_val=None):
    """Rescale *act_map* to [0, 1] using per-sample min/max along dim 1.

    When *min_val*/*max_val* are not supplied they are derived from *act_map*
    itself (ReLU-clipped so negative extrema become 0).  The result is
    clamped to [0, 1].
    """
    if min_val is None or max_val is None:
        relu = nn.ReLU()
        max_val = relu(torch.max(act_map, dim=1)[0])
        min_val = relu(torch.min(act_map, dim=1)[0])

    span = max_val - min_val
    # Avoid division by zero for constant (or inverted) score tracks.
    span[span <= 0] = 1
    normalized = (act_map - min_val) / span.detach()

    return torch.clamp(normalized, min=0, max=1)
def nms(proposals, thresh):
    """Greedy temporal non-maximum suppression.

    *proposals* rows are ``[class, score, t_start, t_end]``.  Proposals are
    taken in descending score order; any remaining proposal whose temporal
    IoU with the kept one is >= *thresh* is discarded.  Returns the kept
    proposals as plain lists.
    """
    props = np.array(proposals)

    starts = props[:, 2]
    ends = props[:, 3]
    scores = props[:, 1]

    lengths = ends - starts + 1
    ranked = scores.argsort()[::-1]

    kept = []
    while ranked.size > 0:
        best = ranked[0]
        kept.append(props[best].tolist())

        overlap_start = np.maximum(starts[best], starts[ranked[1:]])
        overlap_end = np.minimum(ends[best], ends[ranked[1:]])
        overlap = np.maximum(0.0, overlap_end - overlap_start + 1)

        iou = overlap / (lengths[best] + lengths[ranked[1:]] - overlap)
        survivors = np.where(iou < thresh)[0]
        ranked = ranked[survivors + 1]

    return kept
def set_seed(seed):
    """Seed all RNGs (python, numpy, torch CPU/CUDA) and force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic convolution algorithms, no auto-tuner.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def save_config(config, file_path):
    """Write the experiment configuration to *file_path* as plain text.

    Fixes: uses a ``with`` block (the handle previously leaked on an
    exception) and corrects the "Configurtaions" typo in the header line.
    """
    with open(file_path, "w") as fo:
        fo.write("Configurations:\n")
        fo.write(str(config))
def feature_sampling(features, start, end, num_divide):
    """Average *num_divide* randomly sampled feature vectors from [start, end).

    The span is split into *num_divide* equal sub-segments and one random
    index is drawn from each; returns the mean of the sampled rows.
    Note: allocates the buffer on the GPU (``.cuda()``), so a CUDA device is
    required, as in the original.
    """
    step = (end - start) / num_divide

    sampled = torch.zeros((num_divide, features.shape[1])).cuda()
    for seg in range(num_divide):
        seg_start = int(start + step * seg)
        seg_end = int(start + step * (seg + 1))
        if seg_start >= seg_end:
            # Degenerate (sub-snippet) segment: widen it to one index.
            seg_end += 1
        pick = np.random.randint(seg_start, seg_end)
        sampled[seg] = features[pick]

    return sampled.mean(dim=0)
def get_oic_score(cas_sigmoid_fuse, start, end, delta=0.25):
    """Outer-inner-contrast score of segment [start, end] on a 1-D score track.

    Inner mean over the segment minus the mean over an outer boundary of
    ``delta`` * segment-length snippets on each side (clipped to the track).
    """
    seg_len = end - start + 1
    inner_score = torch.mean(cas_sigmoid_fuse[start:end + 1])

    left = max(0, int(start - delta * seg_len))
    right = min(int(cas_sigmoid_fuse.shape[0] - 1), int(end + delta * seg_len))

    outer_idx = list(range(left, start)) + list(range(end + 1, right + 1))
    if not outer_idx:
        # Segment touches both ends of the track: no outer context.
        outer_score = 0
    else:
        outer_score = torch.mean(cas_sigmoid_fuse[outer_idx])

    return inner_score - outer_score
def select_seed(cas_sigmoid_fuse, point_anno):
    """Expand labeled points into pseudo action/background seed frames.

    Args:
        cas_sigmoid_fuse: per-snippet class scores whose last channel
            (index -1) is the background score, shape (batch, time, C+1).
        point_anno: one-hot point-level action annotations,
            shape (batch, time, C).

    Returns:
        (act_seed, bkg_seed): ``act_seed`` is ``point_anno`` with labels
        propagated to neighboring low-background frames; ``bkg_seed`` marks
        high-confidence background frames (1.0) per snippet.

    Note: ``idx_tmp`` below is always a *view* into ``bkg_seed``, so in-place
    masked assignments update ``bkg_seed`` directly -- statement order matters.
    """
    # Class-agnostic point mask: 1 where any class is annotated.
    point_anno_agnostic = point_anno.max(dim=2)[0]
    bkg_seed = torch.zeros_like(point_anno_agnostic)
    act_seed = point_anno.clone().detach()

    # Frames with background score <= act_thresh may inherit an action label;
    # frames with background score >= bkg_thresh become background seeds.
    act_thresh = 0.1
    bkg_thresh = 0.95

    bkg_score = cas_sigmoid_fuse[:,:,-1]

    for b in range(point_anno.shape[0]):
        act_idx = torch.nonzero(point_anno_agnostic[b]).squeeze(1)

        """ most left """
        # Region before the first annotated point.
        if act_idx[0] > 0:
            bkg_score_tmp = bkg_score[b,:act_idx[0]]
            idx_tmp = bkg_seed[b,:act_idx[0]]
            idx_tmp[bkg_score_tmp >= bkg_thresh] = 1
            if idx_tmp.sum() >= 1:
                # Fill everything left of the last confident background frame.
                start_index = idx_tmp.nonzero().squeeze(1)[-1]
                idx_tmp[:start_index] = 1
            else:
                # No confident frame: fall back to the highest background score.
                max_index = bkg_score_tmp.argmax(dim=0)

                idx_tmp[:max_index+1] = 1

            """ pseudo action point selection """
            # Grow the first point's label leftwards over low-background frames.
            for j in range(act_idx[0] - 1, -1, -1):
                if bkg_score[b][j] <= act_thresh and bkg_seed[b][j] < 1:
                    act_seed[b, j] = act_seed[b, act_idx[0]]
                else:
                    break

        """ most right """
        # Region after the last annotated point (mirror of the left case).
        if act_idx[-1] < (point_anno.shape[1] - 1):
            bkg_score_tmp = bkg_score[b,act_idx[-1]+1:]
            idx_tmp = bkg_seed[b,act_idx[-1]+1:]
            idx_tmp[bkg_score_tmp >= bkg_thresh] = 1
            if idx_tmp.sum() >= 1:
                start_index = idx_tmp.nonzero().squeeze(1)[0]
                idx_tmp[start_index:] = 1
            else:
                max_index = bkg_score_tmp.argmax(dim=0)

                idx_tmp[max_index:] = 1

            """ pseudo action point selection """
            # Grow the last point's label rightwards over low-background frames.
            for j in range(act_idx[-1] + 1, point_anno.shape[1]):
                if bkg_score[b][j] <= act_thresh and bkg_seed[b][j] < 1:
                    act_seed[b, j] = act_seed[b, act_idx[-1]]
                else:
                    break

        """ between two instances """
        # Gaps between consecutive annotated points.
        for i in range(len(act_idx) - 1):
            if act_idx[i+1] - act_idx[i] <= 1:
                # Adjacent points: no gap to fill.
                continue

            bkg_score_tmp = bkg_score[b,act_idx[i]+1:act_idx[i+1]]
            idx_tmp = bkg_seed[b,act_idx[i]+1:act_idx[i+1]]
            idx_tmp[bkg_score_tmp >= bkg_thresh] = 1

            if idx_tmp.sum() >= 2:
                # Fill strictly between the first and last confident frames.
                start_index = idx_tmp.nonzero().squeeze(1)[0]
                end_index = idx_tmp.nonzero().squeeze(1)[-1]
                idx_tmp[start_index+1:end_index] = 1
            else:
                # Mark only the single most background-like frame.
                max_index = bkg_score_tmp.argmax(dim=0)

                idx_tmp[max_index] = 1

            """ pseudo action point selection """
            # Grow the left point's label rightwards into the gap ...
            for j in range(act_idx[i] + 1, act_idx[i+1]):
                if bkg_score[b][j] <= act_thresh and bkg_seed[b][j] < 1:
                    act_seed[b, j] = act_seed[b, act_idx[i]]
                else:
                    break
            # ... and the right point's label leftwards into the gap.
            for j in range(act_idx[i+1] - 1, act_idx[i], -1):
                if bkg_score[b][j] <= act_thresh and bkg_seed[b][j] < 1:
                    act_seed[b, j] = act_seed[b, act_idx[i+1]]
                else:
                    break

    return act_seed, bkg_seed
2e8c1b0d110b26e611f1e48f251a997f89d7502b | 11,020 | py | Python | PYTHON/SelectSlice.py | Kate-Willett/Climate_Explorer | d49e65a6caa9beb1a859e16d3827022442bad324 | [
"CC0-1.0"
] | null | null | null | PYTHON/SelectSlice.py | Kate-Willett/Climate_Explorer | d49e65a6caa9beb1a859e16d3827022442bad324 | [
"CC0-1.0"
] | null | null | null | PYTHON/SelectSlice.py | Kate-Willett/Climate_Explorer | d49e65a6caa9beb1a859e16d3827022442bad324 | [
"CC0-1.0"
] | 1 | 2021-06-29T12:05:46.000Z | 2021-06-29T12:05:46.000Z | #!/usr/local/sci/bin/python
# PYTHON3
#
# Author: Kate Willett
# Created: 16 October 2015
# Last update: 20 July 2020
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/tree/master/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code can make a lat, lon gridded field of:
# a single month,
# an average of months within a year (or adjacent for DJF) up to annual - set minimum data presence
# an average of single months across a period of years (climatology) - set minimum data presence
# an average of several months across a period of years (climatology) up to annual - set minimum data presence
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt: (may not all be required actually)
# import numpy as np
# import scipy.stats
# import pdb # pdb.set_trace() or c
#
# Kate's:
#
# -----------------------
# DATA
# -----------------------
# The code requires a 3D monthly resolution gridded dataset as time, lat, lon (anomalies or monthly means)
# It also needs to know about the years/months contained
# It assumes data from January to December for each year
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# INPUTS:
# TheData: A 3D numpy array (months, latitude, longitude) of monthly means or anomalies
# TheStYr = 1973 The start year of the provided data
# TheEdYr = 2014 The end year of the provided data
# TheChosenMonth = 1981 The start year of the new climatology period
# TheChosenYear = 2010 The end year of the new climatology period
# TheTheMDI = -1e30 The missing data indicator
# TheTheMDI=-1e30 # DEFAULT
# TheTheMDITol = 0.6 The proportion of data required for a gridbox climatology to be calculated from 0 to 1
# TheTheMDITol=0.6 # DEFAULT
#
# python3
# from SelectSlice import SelectSlice
# TmpData = SelectSlice(TheData,TheStYr,TheEdYr,TheChosenMonth,TheChosenYear,TheTheMDI,TheTheMDITol)
#
# -----------------------
# OUTPUT
# -----------------------
# OUTPUTS:
# TmpData: a 3D array identical in lat, long shape to TheData for the output, utilises missing data indicator
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (20th July 2020)
# ---------
#
# Enhancements
# Now python 3 was 2.7
#
# Changes
#
# Bug fixes
#
# Version 1 (16th October 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# Functions:
#########################################################################
# SelectSlice
def SelectSlice(TheData,
TheStYr,
TheEdYr,
TheChosenMonth,
TheChosenYear,
TheMDI=-1e30,
TheMDITol=0.6):
''' This code takes in a 3D gridded field of monthly mean/anomalies (time,lat,lon)
and anomalises/renormalises (climate anomalies) to a new climatology period
which is supplied along side start and end years of the data.
It assumes all data go from Jan to Dec
INPUTS:
TheData: A 3D numpy array (months, latitude, longitude) of monthly means or anomalies
TheTheStYr: The start year of the provided data
TheTheEdYr: The end year of the provided data
TheChosenMonth: a LIST of month or months to select or average over
Select your month of choice, or a range for an average
0...11 represent Jan..Dec, [2,4] for Mar-Apr-May average, [0,11] for annual average, [11,1] for Dec-Jan-Feb average
For month ranges that span 11 to 0, December will be taken from the first year of ChooseYr - will NOT work for last year!
TheChosenMonth = [11] # [11,1]
TheChosenYear: a LIST of year or years to select or average over
Select your year of choice, or a range for an average
1973...2014 for individual years, [1973,1982] for decadal average etc
TheChosenYear = [2014] # [1981, 2010]
TheTheMDI: The missing data indicator
TheTheMDI=-1e30 # DEFAULT
TheTheMDITol: The proportion of data required for a gridbox climatology to be calculated from 0 to 1
TheTheMDITol=0.6 # DEFAULT
OUTPUTS:
TheField: a 3D array identical in shape to TheData for the output, utilises missing data indicator '''
# Set up python imports
import numpy as np
import scipy.stats
import pdb # pdb.set_trace() or c
# Make empty array for the derived field filled with missing data
TheField = np.reshape(np.repeat(TheMDI,len(TheData[0,:,0])*len(TheData[0,0,:])),(len(TheData[0,:,0]),len(TheData[0,0,:])))
# Set up time pointers
NYrs = (TheEdYr - TheStYr) + 1
if (len(TheChosenMonth) > 1) | (len(TheChosenYear) > 1): # Only need to point to relevant years, this works if BOTH are > 1
StTimePointer = (TheChosenYear[0]-TheStYr)
if (len(TheChosenYear) > 1):
EdTimePointer = (TheChosenYear[1]-TheStYr)
else:
EdTimePointer = StTimePointer
else:
StTimePointer = (TheChosenYear[0]-TheStYr)*12+TheChosenMonth[0]
EdTimePointer = StTimePointer
# Extract chosen month/year average
# Easy case of single month from single year
if (len(TheChosenMonth) == 1) & (len(TheChosenYear) == 1):
print("One Month One Year")
TheField = TheData[StTimePointer,:,:]
# Easy-ish case of single month from multiple years - requires ?% presence for each gridbox
elif (len(TheChosenMonth) == 1) & (len(TheChosenYear) > 1):
print("One Month X Year")
for lnn in range(len(TheData[0,0,:])):
for ltt in range(len(TheData[0,:,0])):
subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12))) # fills columns first
subsubarr = np.copy(subarr[StTimePointer:EdTimePointer+1,TheChosenMonth[0]]) # need +1 for ranges
subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
if (np.float(len(subsubarr[np.isfinite(subsubarr)]))/np.float(len(subsubarr)) >= TheMDITol):
TheField[ltt,lnn] = np.nanmean(subsubarr)
# Slightly harder: multiple months from a single year (unless crossing DEC-JAN)
elif (len(TheChosenMonth) > 1) & (len(TheChosenYear) == 1):
print("X Month One Year")
if (TheChosenMonth[1] > TheChosenMonth[0]): # simple run of months within a year
for lnn in range(len(TheData[0,0,:])):
for ltt in range(len(TheData[0,:,0])):
subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12)))
subsubarr = np.copy(subarr[StTimePointer,TheChosenMonth[0]:TheChosenMonth[1]+1]) # need +1 for ranges
subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
if (np.float(len(subsubarr[np.isfinite(subsubarr)]))/np.float(len(subsubarr)) >= TheMDITol):
TheField[ltt,lnn] = np.nanmean(subsubarr)
else: # more complex as need to pull out from two years
for lnn in range(len(TheData[0,0,:])):
for ltt in range(len(TheData[0,:,0])):
subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12)))
subsubarr = np.copy(subarr[StTimePointer,TheChosenMonth[0]:12]) # need +1 for ranges
subsubarr = np.append(subsubarr,subarr[StTimePointer+1,0:TheChosenMonth[1]+1]) # need +1 for ranges
subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
if (np.float(len(subsubarr[np.isfinite(subsubarr)]))/np.float(len(subsubarr)) >= TheMDITol):
TheField[ltt,lnn] = np.nanmean(subsubarr)
# Hardest: multiple months and multiple years
else: # now we're dealing with seasonal/annual average climatology
print("X Month X Year")
if (TheChosenMonth[1] > TheChosenMonth[0]): # simple run of months and run of years
for lnn in range(len(TheData[0,0,:])):
for ltt in range(len(TheData[0,:,0])):
subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12)))
subsubarr = np.copy(subarr[StTimePointer:EdTimePointer+1,TheChosenMonth[0]:TheChosenMonth[1]+1]) # need +1 for ranges
subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
if (np.float(len(subsubarr[np.isfinite(subsubarr)]))/np.float(len(subsubarr)) >= TheMDITol):
TheField[ltt,lnn] = np.nanmean(subsubarr)
else: # more complex as need to pull out month runs across years
if (EdTimePointer < TheEdYr): # then we can go to the next year to get the extra months
ExtraPointer=EdTimePointer+1
else:
ExtraPointer=EdTimePointer
for lnn in range(len(TheData[0,0,:])):
for ltt in range(len(TheData[0,:,0])):
subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12)))
subsubarr = np.copy(subarr[StTimePointer:EdTimePointer+1,TheChosenMonth[0]:12,]) # need +1 for ranges
subsubarr = np.append(subsubarr,subarr[StTimePointer+1:ExtraPointer,0:TheChosenMonth[1]+1])
subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
if (np.float(len(subsubarr[np.isfinite(subsubarr)]))/np.float(len(subsubarr)) >= TheMDITol):
TheField[ltt,lnn] = np.nanmean(subsubarr)
return TheField # SelectSlice
##########################################################################
## TESTING CODE ##########################################################
##########################################################################
## Check if SelectSlice works
## create a data array with an identical field for each month within year but increments annually
#TmpCandFields = np.reshape(np.array(np.repeat(range(NYrs),12*3*7),dtype=float),(NMons,3,7))
#
## Check the selection output works on actual values - all should be ltt,lnn arrays of identical numbers
# SelectSlice(TmpCandFields,1973,2014,[6],[1980],-1e30,0.6)
## One month, one year: tested for June, 1980 = 7
## This works!
## One month, multiple years: tested October, 2000-2010 = mean of 27:37 = 32
## This works!
## Multiple months, one year: tested MAM, 1991 = mean of [18,18,18] = 18, tested DJF, 1992 = mean of [19,20,20] = 19.66666.
## This works for both!
## Multiple months, multiple years: tested SON, 1973-1982 = mean of 0:9,0:9,0:9 = 4.5, tested JAN-DEC, 1981-2010 mean of 8:37, 30 times = 22.5
## This works for both!
##########################################################################
#
## GetAnomalies works!
##########################################################################
##################################################################################################
| 47.705628 | 142 | 0.591379 | #!/usr/local/sci/bin/python
# PYTHON3
#
# Author: Kate Willett
# Created: 16 October 2015
# Last update: 20 July 2020
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/tree/master/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code can make a lat, lon gridded field of:
# a single month,
# an average of months within a year (or adjacent for DJF) up to annual - set minimum data presence
# an average of single months across a period of years (climatology) - set minimum data presence
# an average of several months across a period of years (climatology) up to annual - set minimum data presence
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt: (may not all be required actually)
# import numpy as np
# import scipy.stats
# import pdb # pdb.set_trace() or c
#
# Kate's:
#
# -----------------------
# DATA
# -----------------------
# The code requires a 3D monthly resolution gridded dataset as time, lat, lon (anomalies or monthly means)
# It also needs to know about the years/months contained
# It assumes data from January to December for each year
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# INPUTS:
# TheData: A 3D numpy array (months, latitude, longitude) of monthly means or anomalies
# TheStYr = 1973 The start year of the provided data
# TheEdYr = 2014 The end year of the provided data
# TheChosenMonth = [11] or [0,11] A LIST giving the month (0..11) or [first,last] month range to average over
# TheChosenYear = [2014] or [1981,2010] A LIST giving the year or [first,last] year range to average over
# TheMDI = -1e30 The missing data indicator
# TheMDI=-1e30 # DEFAULT
# TheMDITol = 0.6 The proportion of data required for a gridbox climatology to be calculated from 0 to 1
# TheMDITol=0.6 # DEFAULT
#
# python3
# from SelectSlice import SelectSlice
# TmpData = SelectSlice(TheData,TheStYr,TheEdYr,TheChosenMonth,TheChosenYear,TheTheMDI,TheTheMDITol)
#
# -----------------------
# OUTPUT
# -----------------------
# OUTPUTS:
# TmpData: a 3D array identical in lat, long shape to TheData for the output, utilises missing data indicator
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (20th July 2020)
# ---------
#
# Enhancements
# Now python 3 was 2.7
#
# Changes
#
# Bug fixes
#
# Version 1 (16th October 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# Functions:
#########################################################################
# SelectSlice
def SelectSlice(TheData,
                TheStYr,
                TheEdYr,
                TheChosenMonth,
                TheChosenYear,
                TheMDI=-1e30,
                TheMDITol=0.6):
    ''' Average a 3D gridded field of monthly means/anomalies (time,lat,lon)
    over a chosen month (or month range) and year (or year range).
    Data are assumed to run from January of TheStYr to December of TheEdYr.
    INPUTS:
    TheData: A 3D numpy array (months, latitude, longitude) of monthly means or anomalies
    TheStYr: The start year of the provided data
    TheEdYr: The end year of the provided data
    TheChosenMonth: a LIST holding one month [m] or a month range [first,last], 0..11 = Jan..Dec
        e.g. [2,4] for a Mar-Apr-May average, [0,11] for an annual average, [11,1] for Dec-Jan-Feb
        For ranges that wrap past December, December is taken from the first year of the range -
        this will NOT work for the last year of the data
    TheChosenYear: a LIST holding one year [y] or a year range [first,last]
        e.g. [2014] for a single year, [1981,2010] for a climatological average
    TheMDI: The missing data indicator
        TheMDI=-1e30 # DEFAULT
    TheMDITol: The proportion of data required for a gridbox average to be calculated, 0 to 1
        TheMDITol=0.6 # DEFAULT
    OUTPUTS:
    TheField: a 2D (lat, lon) numpy array of the averaged field; gridboxes with
        insufficient valid data retain TheMDI '''
    # Set up python imports (scipy.stats and pdb were imported but never used - removed)
    import numpy as np
    # Make an empty (lat, lon) array for the derived field, filled with missing data
    TheField = np.reshape(np.repeat(TheMDI,len(TheData[0,:,0])*len(TheData[0,0,:])),(len(TheData[0,:,0]),len(TheData[0,0,:])))
    # Set up time pointers
    NYrs = (TheEdYr - TheStYr) + 1
    if (len(TheChosenMonth) > 1) | (len(TheChosenYear) > 1):
        # Multi-month and/or multi-year: pointers index YEAR rows of the
        # (NYrs, 12) reshaped gridbox series
        StTimePointer = (TheChosenYear[0]-TheStYr)
        if (len(TheChosenYear) > 1):
            EdTimePointer = (TheChosenYear[1]-TheStYr)
        else:
            EdTimePointer = StTimePointer
    else:
        # Single month of a single year: pointer indexes the raw month axis directly
        StTimePointer = (TheChosenYear[0]-TheStYr)*12+TheChosenMonth[0]
        EdTimePointer = StTimePointer
    # Extract chosen month/year average
    # Easy case of single month from single year
    if (len(TheChosenMonth) == 1) & (len(TheChosenYear) == 1):
        print("One Month One Year")
        TheField = TheData[StTimePointer,:,:]
    # Easy-ish case of single month from multiple years - requires TheMDITol presence for each gridbox
    elif (len(TheChosenMonth) == 1) & (len(TheChosenYear) > 1):
        print("One Month X Year")
        for lnn in range(len(TheData[0,0,:])):
            for ltt in range(len(TheData[0,:,0])):
                subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12))) # fills columns first
                subsubarr = np.copy(subarr[StTimePointer:EdTimePointer+1,TheChosenMonth[0]]) # need +1 for ranges
                subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
                # np.float was removed in NumPy 1.24; builtin float is the direct replacement
                if (float(len(subsubarr[np.isfinite(subsubarr)]))/float(len(subsubarr)) >= TheMDITol):
                    TheField[ltt,lnn] = np.nanmean(subsubarr)
    # Slightly harder: multiple months from a single year (unless crossing DEC-JAN)
    elif (len(TheChosenMonth) > 1) & (len(TheChosenYear) == 1):
        print("X Month One Year")
        if (TheChosenMonth[1] > TheChosenMonth[0]): # simple run of months within a year
            for lnn in range(len(TheData[0,0,:])):
                for ltt in range(len(TheData[0,:,0])):
                    subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12)))
                    subsubarr = np.copy(subarr[StTimePointer,TheChosenMonth[0]:TheChosenMonth[1]+1]) # need +1 for ranges
                    subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
                    if (float(len(subsubarr[np.isfinite(subsubarr)]))/float(len(subsubarr)) >= TheMDITol):
                        TheField[ltt,lnn] = np.nanmean(subsubarr)
        else: # more complex as the month run wraps Dec->Jan, so pull from two year rows
            for lnn in range(len(TheData[0,0,:])):
                for ltt in range(len(TheData[0,:,0])):
                    subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12)))
                    subsubarr = np.copy(subarr[StTimePointer,TheChosenMonth[0]:12]) # months up to December of the chosen year
                    subsubarr = np.append(subsubarr,subarr[StTimePointer+1,0:TheChosenMonth[1]+1]) # need +1 for ranges
                    subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
                    if (float(len(subsubarr[np.isfinite(subsubarr)]))/float(len(subsubarr)) >= TheMDITol):
                        TheField[ltt,lnn] = np.nanmean(subsubarr)
    # Hardest: multiple months and multiple years
    else: # now we're dealing with seasonal/annual average climatology
        print("X Month X Year")
        if (TheChosenMonth[1] > TheChosenMonth[0]): # simple run of months and run of years
            for lnn in range(len(TheData[0,0,:])):
                for ltt in range(len(TheData[0,:,0])):
                    subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12)))
                    subsubarr = np.copy(subarr[StTimePointer:EdTimePointer+1,TheChosenMonth[0]:TheChosenMonth[1]+1]) # need +1 for ranges
                    subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
                    if (float(len(subsubarr[np.isfinite(subsubarr)]))/float(len(subsubarr)) >= TheMDITol):
                        TheField[ltt,lnn] = np.nanmean(subsubarr)
        else: # more complex as need to pull out month runs across years
            # NOTE(review): this compares a year INDEX (0..NYrs-1) with a calendar
            # year, so it is effectively always True; the intent looks like
            # 'EdTimePointer < NYrs - 1'. Behaviour preserved pending confirmation.
            if (EdTimePointer < TheEdYr): # then we can go to the next year to get the extra months
                ExtraPointer=EdTimePointer+1
            else:
                ExtraPointer=EdTimePointer
            for lnn in range(len(TheData[0,0,:])):
                for ltt in range(len(TheData[0,:,0])):
                    subarr = np.copy(np.reshape(TheData[:,ltt,lnn],(NYrs,12)))
                    subsubarr = np.copy(subarr[StTimePointer:EdTimePointer+1,TheChosenMonth[0]:12]) # need +1 for ranges
                    subsubarr = np.append(subsubarr,subarr[StTimePointer+1:ExtraPointer,0:TheChosenMonth[1]+1])
                    subsubarr[subsubarr == TheMDI] = np.nan # set TheMDI to NaN
                    if (float(len(subsubarr[np.isfinite(subsubarr)]))/float(len(subsubarr)) >= TheMDITol):
                        TheField[ltt,lnn] = np.nanmean(subsubarr)
    return TheField # SelectSlice
##########################################################################
## TESTING CODE ##########################################################
##########################################################################
## Check if SelectSlice works
## create a data array with an identical field for each month within year but increments annually
#TmpCandFields = np.reshape(np.array(np.repeat(range(NYrs),12*3*7),dtype=float),(NMons,3,7))
#
## Check the selection output works on actual values - all should be ltt,lnn arrays of identical numbers
# SelectSlice(TmpCandFields,1973,2014,[6],[1980],-1e30,0.6)
## One month, one year: tested for June, 1980 = 7
## This works!
## One month, multiple years: tested October, 2000-2010 = mean of 27:37 = 32
## This works!
## Multiple months, one year: tested MAM, 1991 = mean of [18,18,18] = 18, tested DJF, 1992 = mean of [19,20,20] = 19.66666.
## This works for both!
## Multiple months, multiple years: tested SON, 1973-1982 = mean of 0:9,0:9,0:9 = 4.5, tested JAN-DEC, 1981-2010 mean of 8:37, 30 times = 22.5
## This works for both!
##########################################################################
#
## GetAnomalies works!
##########################################################################
##################################################################################################
| 0 | 0 | 0 |
8bed170dd471e305451ac6d6a926ef6b53e8a597 | 517 | py | Python | src/ocr-test-plate.py | ferauche/snct2021 | 510502ae61f76183f532f189332c8a7889048988 | [
"MIT"
] | null | null | null | src/ocr-test-plate.py | ferauche/snct2021 | 510502ae61f76183f532f189332c8a7889048988 | [
"MIT"
] | null | null | null | src/ocr-test-plate.py | ferauche/snct2021 | 510502ae61f76183f532f189332c8a7889048988 | [
"MIT"
] | null | null | null | import cv2
import pytesseract
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import re
# Grab a single frame from the Raspberry Pi camera into a raw BGR buffer.
camera = PiCamera()
rawCapture = PiRGBArray(camera)
time.sleep(1.2)  # let the sensor warm up / auto-adjust before capturing
camera.capture(rawCapture, format="bgr")
# Tesseract works on a single channel, so convert BGR -> grayscale first.
image = cv2.cvtColor(rawCapture.array, cv2.COLOR_BGR2GRAY)
print("Lendo...")
# Restrict recognition to upper-case letters and digits (plate-style characters
# presumably - confirm against use case); --oem 3 = default engine,
# --psm 6 = assume a single uniform block of text.
opt = "-c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
opt += " --oem 3 --psm 6"
text = pytesseract.image_to_string(image, config = opt)
print(text)
# Echo the raw OCR result, then the same text stripped to alphanumerics only.
print(re.sub(r'[^a-zA-Z0-9]','',text))
| 22.478261 | 71 | 0.758221 | import cv2
import pytesseract
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import re
camera = PiCamera()
rawCapture = PiRGBArray(camera)
time.sleep(1.2)
camera.capture(rawCapture, format="bgr")
image = cv2.cvtColor(rawCapture.array, cv2.COLOR_BGR2GRAY)
print("Lendo...")
opt = "-c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
opt += " --oem 3 --psm 6"
text = pytesseract.image_to_string(image, config = opt)
print(text)
print(re.sub(r'[^a-zA-Z0-9]','',text))
| 0 | 0 | 0 |
5ffaa237fc75904bf7161655b597df86749a2756 | 1,070 | py | Python | convert_PALS_to_subjects.py | MadsJensen/RP_scripts | b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf | [
"BSD-3-Clause"
] | null | null | null | convert_PALS_to_subjects.py | MadsJensen/RP_scripts | b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf | [
"BSD-3-Clause"
] | null | null | null | convert_PALS_to_subjects.py | MadsJensen/RP_scripts | b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import subprocess
# path to submit_to_isis (cluster submission wrapper); each mri_surf2surf
# command below is handed to it as one job string.
cmd = "/usr/local/common/meeg-cfin/configurations/bin/submit_to_isis"
# FreeSurfer subject IDs to process (must exist under $SUBJECTS_DIR).
subjects = ["0008", "0009", "0010", "0011", "0012", "0013",
            "0014", "0015", "0016", "0017", "0018", "0019", "0020",
            "0021", "0022"]
for subject in subjects:
    # Resample the fsaverage PALS_B12_Brodmann annotation onto each subject's
    # surface, one mri_surf2surf call per hemisphere.
    convert_cmd_lh = "mri_surf2surf --srcsubject fsaverage " + \
                     "--trgsubject %s --hemi lh " % subject + \
                     "--sval-annot $SUBJECTS_DIR/fsaverage/label/lh.PALS_B12_Brodmann.annot " + \
                     "--tval $SUBJECTS_DIR/%s/label/lh.PALS_B12_Brodmann.annot" % subject
    convert_cmd_rh = "mri_surf2surf --srcsubject fsaverage " + \
                     "--trgsubject %s --hemi rh " % subject + \
                     "--sval-annot $SUBJECTS_DIR/fsaverage/label/rh.PALS_B12_Brodmann.annot " + \
                     "--tval $SUBJECTS_DIR/%s/label/rh.PALS_B12_Brodmann.annot" % subject
    # NOTE(review): "1" appears to be a submit_to_isis argument (e.g. job count
    # or queue slot) - confirm against the wrapper's usage.
    subprocess.call([cmd, "1", convert_cmd_lh])
    subprocess.call([cmd, "1", convert_cmd_rh])
| 44.583333 | 97 | 0.6 | from __future__ import print_function
import subprocess
# path to submit_to_isis
cmd = "/usr/local/common/meeg-cfin/configurations/bin/submit_to_isis"
subjects = ["0008", "0009", "0010", "0011", "0012", "0013",
"0014", "0015", "0016", "0017", "0018", "0019", "0020",
"0021", "0022"]
for subject in subjects:
convert_cmd_lh = "mri_surf2surf --srcsubject fsaverage " + \
"--trgsubject %s --hemi lh " % subject + \
"--sval-annot $SUBJECTS_DIR/fsaverage/label/lh.PALS_B12_Brodmann.annot " + \
"--tval $SUBJECTS_DIR/%s/label/lh.PALS_B12_Brodmann.annot" % subject
convert_cmd_rh = "mri_surf2surf --srcsubject fsaverage " + \
"--trgsubject %s --hemi rh " % subject + \
"--sval-annot $SUBJECTS_DIR/fsaverage/label/rh.PALS_B12_Brodmann.annot " + \
"--tval $SUBJECTS_DIR/%s/label/rh.PALS_B12_Brodmann.annot" % subject
subprocess.call([cmd, "1", convert_cmd_lh])
subprocess.call([cmd, "1", convert_cmd_rh])
| 0 | 0 | 0 |
1d90aeb3f1fa4de97feff2b72f58ae3b8de84f44 | 1,255 | py | Python | Serving/LinearRegression_streaming.py | IntelligentSensor/PHMRepository | 8684c7851970293d607d18c580cec7edbf72ad17 | [
"MIT"
] | 6 | 2021-09-27T00:23:41.000Z | 2022-03-16T00:21:27.000Z | Serving/LinearRegression_streaming.py | intelligentph/PhRepository | 8684c7851970293d607d18c580cec7edbf72ad17 | [
"MIT"
] | 4 | 2020-08-21T03:56:16.000Z | 2022-02-10T02:17:08.000Z | Serving/LinearRegression_streaming.py | IntelligentSensor/Sensor-PHM | 8684c7851970293d607d18c580cec7edbf72ad17 | [
"MIT"
] | 5 | 2020-11-25T11:46:12.000Z | 2022-02-14T02:37:06.000Z | """
Streaming Linear Regression Example.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.regression import StreamingLinearRegressionWithSGD
def parse(lp):
    """Parse a '(label,[f1,f2,...])' text line into a LabeledPoint.

    Restored: this copy of the script called ``parse`` without defining it,
    so both stream transformations raised NameError at runtime. The body
    matches the duplicate definition appearing later in this file.
    """
    label = float(lp[lp.find('(') + 1: lp.find(',')])
    vec = Vectors.dense(lp[lp.find('[') + 1: lp.find(']')].split(','))
    return LabeledPoint(label, vec)


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: streaming_linear_regression_example.py <trainingDir> <testDir>",
              file=sys.stderr)
        sys.exit(-1)
    sc = SparkContext(appName="PythonLogisticRegressionWithLBFGSExample")
    ssc = StreamingContext(sc, 1)
    # Train continuously on text files appearing in trainingDir; predict on testDir.
    trainingData = ssc.textFileStream(sys.argv[1]).map(parse).cache()
    testData = ssc.textFileStream(sys.argv[2]).map(parse)
    numFeatures = 3
    model = StreamingLinearRegressionWithSGD()
    model.setInitialWeights([0.0, 0.0, 0.0])
    model.trainOn(trainingData)
    print(model.predictOnValues(testData.map(lambda lp: (lp.label, lp.features))))
    ssc.start()
    ssc.awaitTermination()
    # $example off$
| 30.609756 | 86 | 0.697211 | """
Streaming Linear Regression Example.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.regression import StreamingLinearRegressionWithSGD
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: streaming_linear_regression_example.py <trainingDir> <testDir>",
file=sys.stderr)
sys.exit(-1)
sc = SparkContext(appName="PythonLogisticRegressionWithLBFGSExample")
ssc = StreamingContext(sc, 1)
def parse(lp):
label = float(lp[lp.find('(') + 1: lp.find(',')])
vec = Vectors.dense(lp[lp.find('[') + 1: lp.find(']')].split(','))
return LabeledPoint(label, vec)
trainingData = ssc.textFileStream(sys.argv[1]).map(parse).cache()
testData = ssc.textFileStream(sys.argv[2]).map(parse)
numFeatures = 3
model = StreamingLinearRegressionWithSGD()
model.setInitialWeights([0.0, 0.0, 0.0])
model.trainOn(trainingData)
print(model.predictOnValues(testData.map(lambda lp: (lp.label, lp.features))))
ssc.start()
ssc.awaitTermination()
# $example off$
| 166 | 0 | 27 |
f1035aa58b20da1ecffd7f32eaaa3447c0cff2c9 | 249 | py | Python | models/test.py | Mingzheng01/pointnet | 401692e08441ff459b63786b9c65c11f78ea599e | [
"MIT"
] | null | null | null | models/test.py | Mingzheng01/pointnet | 401692e08441ff459b63786b9c65c11f78ea599e | [
"MIT"
] | null | null | null | models/test.py | Mingzheng01/pointnet | 401692e08441ff459b63786b9c65c11f78ea599e | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
n = [5, 6]
print(id(n)) # 140312184155336
updateList(n)
print(n) # [5, 6, 10]
print(id(n)) # 140312184155336
| 22.636364 | 47 | 0.522088 | import tensorflow as tf
import numpy as np
def updateList(list1):
    """Append the value 10 to *list1* in place; the caller's list is mutated."""
    list1.append(10)
n = [5, 6]
print(id(n)) # 140312184155336
updateList(n)
print(n) # [5, 6, 10]
print(id(n)) # 140312184155336
| 19 | 0 | 23 |
a0bb841d8a48c14ffe59ef91f3c70edf038b0872 | 761 | py | Python | problems/0001_two_sum.py | SouravDutta91/LeetCode | 7756d320cc5477a23011cea03a350c022a699a2e | [
"MIT"
] | null | null | null | problems/0001_two_sum.py | SouravDutta91/LeetCode | 7756d320cc5477a23011cea03a350c022a699a2e | [
"MIT"
] | null | null | null | problems/0001_two_sum.py | SouravDutta91/LeetCode | 7756d320cc5477a23011cea03a350c022a699a2e | [
"MIT"
] | null | null | null | '''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
solution = Solution()
nums = [2, 7, 11, 15]
target = 9
print(solution.twoSum(nums, target)) | 27.178571 | 107 | 0.595269 | '''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
class Solution:
    """LeetCode #1 - Two Sum."""

    def twoSum(self, nums: 'List[int]', target: 'int') -> 'List[int]':
        """Return indices [i, j] (i < j) such that nums[i] + nums[j] == target.

        Single pass with a value -> index hash map: O(n) time / O(n) space.
        (The original re-scanned the remaining slice and called .index for
        each element - accidental O(n^2) - and kept appending indices past
        the first solution.) Returns [] if no pair exists; the problem
        guarantees exactly one solution.
        """
        seen = {}  # value -> index of its earliest occurrence
        for i, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                return [seen[complement], i]
            seen[num] = i
        return []
solution = Solution()
nums = [2, 7, 11, 15]
target = 9
print(solution.twoSum(nums, target)) | 295 | -6 | 49 |
605dd4808c430ac6c7164fe621e70d43c923a1fb | 1,813 | py | Python | example-generator.py | sanus-solutions/druid-superset | 9a972802b74112b627e9b12f55fabbfdce5f4d44 | [
"Apache-2.0"
] | null | null | null | example-generator.py | sanus-solutions/druid-superset | 9a972802b74112b627e9b12f55fabbfdce5f4d44 | [
"Apache-2.0"
] | null | null | null | example-generator.py | sanus-solutions/druid-superset | 9a972802b74112b627e9b12f55fabbfdce5f4d44 | [
"Apache-2.0"
] | null | null | null | import random, requests
from datetime import *
from faker import Faker
from random import randrange, randint
units = [
"Neonatal intensive care", "Pediatric intensive care", "Coronary care and cardiothoracic",
"Surgical intensive care", "Medical intensive care", "Long term intensive care"
]
event_types = [
["entry", "clean", "not clean"],
["dispenser", "face", "no face"],
["alert"]
]
names = ['Steven Macdonald',
'Bonnie Petty',
'Allison Daniel',
'Jennifer Beck',
'Elizabeth Newman',
'Daniel Stevenson',
'Rachael White',
'Joshua Haney',
'Katherine Cline',
'Hector Knight',
'Amanda Green',
'Brandon Martinez',
'Allison Vance',
'Jacqueline Mercado',
'Rhonda White',
'Tricia Harrison',
'Mary Murphy',
'Deborah Humphrey',
'Rachel Bates DDS',
'Diane Arnold',
'Daniel Johnson',
'Wendy Smith',
'Emily Cohen',
'Megan Garcia',
'Katherine Long',
]
if __name__ == "__main__":
headers = {'Content-Type' : 'application/json'}
url = 'http://localhost:8200/v1/post/hospital'
for i in range(200):
payload = {
'time' : datetime.utcnow().isoformat(),
'unit': random.choice(units), 'type': random.choice(event_types)[0],
'staff_name': random.choice(names), 'response': None,
'nodeID': nodeID_generator()
}
print payload
result = requests.post(url, json=payload, headers=headers).json()
print result
| 25.535211 | 92 | 0.679537 | import random, requests
from datetime import *
from faker import Faker
from random import randrange, randint
units = [
"Neonatal intensive care", "Pediatric intensive care", "Coronary care and cardiothoracic",
"Surgical intensive care", "Medical intensive care", "Long term intensive care"
]
event_types = [
["entry", "clean", "not clean"],
["dispenser", "face", "no face"],
["alert"]
]
names = ['Steven Macdonald',
'Bonnie Petty',
'Allison Daniel',
'Jennifer Beck',
'Elizabeth Newman',
'Daniel Stevenson',
'Rachael White',
'Joshua Haney',
'Katherine Cline',
'Hector Knight',
'Amanda Green',
'Brandon Martinez',
'Allison Vance',
'Jacqueline Mercado',
'Rhonda White',
'Tricia Harrison',
'Mary Murphy',
'Deborah Humphrey',
'Rachel Bates DDS',
'Diane Arnold',
'Daniel Johnson',
'Wendy Smith',
'Emily Cohen',
'Megan Garcia',
'Katherine Long',
]
def nodeID_generator():
    """Pick a pseudo-random sensor node ID in the inclusive range 1-25."""
    node_id = randint(1, 25)
    return node_id
def random_date():
    """Return an ISO-8601 timestamp at a random offset within one hour from now (UTC).

    Bug fix: the original added ``delta.microseconds`` to the offset range,
    but ``delta`` was never defined (it belonged to the commented-out
    start/end-range version above), so every call raised NameError.
    """
    start = datetime.utcnow()
    one_hour_us = 60 * 60 * 1000000  # one hour expressed in microseconds
    random_offset = randrange(one_hour_us)
    return (start + timedelta(microseconds=random_offset)).isoformat()
if __name__ == "__main__":
headers = {'Content-Type' : 'application/json'}
url = 'http://localhost:8200/v1/post/hospital'
for i in range(200):
payload = {
'time' : datetime.utcnow().isoformat(),
'unit': random.choice(units), 'type': random.choice(event_types)[0],
'staff_name': random.choice(names), 'response': None,
'nodeID': nodeID_generator()
}
print payload
result = requests.post(url, json=payload, headers=headers).json()
print result
| 413 | 0 | 46 |
daa4d4d5d2ba2e43db27fe3cbc2fefa6e42806b3 | 6,690 | py | Python | DeepNetwork.py | bathonSpidey/DeepNetwork | 9913d102dbe617a79b0b9cf522086b7ff0cfd8b3 | [
"MIT"
] | null | null | null | DeepNetwork.py | bathonSpidey/DeepNetwork | 9913d102dbe617a79b0b9cf522086b7ff0cfd8b3 | [
"MIT"
] | null | null | null | DeepNetwork.py | bathonSpidey/DeepNetwork | 9913d102dbe617a79b0b9cf522086b7ff0cfd8b3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 23:01:33 2021
@author: batho
"""
import numpy as np
| 43.72549 | 107 | 0.631091 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 23:01:33 2021
@author: batho
"""
import numpy as np
class Network:
    """A small fully-connected feed-forward neural network built on NumPy.

    Hidden layers use ReLU activations and the output layer a sigmoid;
    training minimises mean binary cross-entropy. Parameters live in a dict
    keyed 'Weights<l>' / 'bias<l>' for layers l = 1..L; gradients are keyed
    'WeightsGradient<l>', 'BiasGradient<l>' and 'ActivatedGradient<l>'.
    """

    def dataReshape(self, dataset):
        """Flatten each sample and transpose to shape (features, n_samples)."""
        return dataset.reshape(dataset.shape[0], -1).T

    def sigmoid(self, linearResult):
        """Element-wise logistic sigmoid 1 / (1 + exp(-z))."""
        activatedResult = 1 / (1 + np.exp(-linearResult))
        return activatedResult

    def sigmoidDifferential(self, activatedGradient, linearResult):
        """Backprop through the sigmoid: dZ = dA * s(Z) * (1 - s(Z))."""
        sigmoid = 1 / (1 + np.exp(-linearResult))
        linearGradient = activatedGradient * sigmoid * (1 - sigmoid)
        return linearGradient

    def relu(self, linearResult):
        """Element-wise rectified linear unit, max(0, z)."""
        activatedResult = np.maximum(0, linearResult)
        return activatedResult

    def reluDifferential(self, activatedGradient, linearResult):
        """Backprop through ReLU: pass dA where Z > 0, zero elsewhere."""
        linearGradient = np.array(activatedGradient, copy=True)
        linearGradient[linearResult <= 0] = 0
        return linearGradient

    def InitializeNetwork(self, layerDimensions):
        """Create small random weights and zero biases for each layer.

        layerDimensions: list of layer sizes [n_input, n_hidden..., n_output].
        Raises Exception for an empty list, a zero-sized layer, or a non-list.
        """
        parameters = {}
        if (len(layerDimensions) == 0 or
                0 in layerDimensions or type(layerDimensions) != list):
            raise Exception("Can't initialize network with zero parameters")
        for layer in range(1, len(layerDimensions)):
            # Small random init (x0.01) keeps units in their responsive range.
            parameters["Weights" + str(layer)] = np.random.randn(
                layerDimensions[layer], layerDimensions[layer - 1]) * 0.01
            parameters["bias" + str(layer)] = np.zeros((layerDimensions[layer], 1))
        return parameters

    def GetLinearResult(self, previousActivation, weights, bias):
        """Affine step Z = W . A_prev + b; cache the inputs for backprop."""
        linearResult = np.dot(weights, previousActivation) + bias
        cache = (previousActivation, weights, bias)
        return linearResult, cache

    def ActivateLinearResult(self, previousActivation, weight, bias, activation):
        """Affine step followed by the named activation ('sigmoid' or 'relu').

        Returns the activated output and a cache ((A_prev, W, b), Z).
        """
        linearResult, linearCache = self.GetLinearResult(previousActivation,
                                                         weight, bias)
        if activation == "sigmoid":
            activatedResult = self.sigmoid(linearResult)
        elif activation == "relu":
            activatedResult = self.relu(linearResult)
        cache = (linearCache, linearResult)
        return activatedResult, cache

    def ForwardPropagate(self, trainData, parameters):
        """Full forward pass: ReLU hidden layers, sigmoid output layer.

        Returns the output activations and the per-layer caches that
        BackPropagate needs.
        """
        caches = []
        activatedResult = trainData
        totalLayers = len(parameters) // 2  # two entries (W, b) per layer
        function = "relu"
        for layer in range(1, totalLayers + 1):
            previousActivation = activatedResult
            if layer == totalLayers:
                function = "sigmoid"  # output layer
            activatedResult, cache = self.ActivateLinearResult(previousActivation,
                                                               parameters['Weights' + str(layer)],
                                                               parameters['bias' + str(layer)],
                                                               function)
            caches.append(cache)
        lastLayerActivation = activatedResult
        return lastLayerActivation, caches

    def ComputeCost(self, finalActivation, targetSet):
        """Mean binary cross-entropy between predictions and 0/1 targets."""
        totalEntries = targetSet.shape[1]
        logProduct = np.dot(targetSet, np.log(finalActivation).T) + np.dot((1 - targetSet),
                                                                           np.log(1 - finalActivation).T)
        cost = -(1 / totalEntries) * np.sum(logProduct)
        cost = np.squeeze(cost)  # collapse the (1,1) result to a scalar
        return cost

    def LinearBackward(self, linearGradient, linearCache):
        """Backprop through the affine step: dA_prev, dW, db from dZ."""
        previousActivation, weights, bias = linearCache
        totalEntries = previousActivation.shape[1]
        weightsGradient = (1 / totalEntries) * np.dot(linearGradient, previousActivation.T)
        biasGradient = (1 / totalEntries) * np.sum(linearGradient, axis=1, keepdims=True)
        previousActivationGradient = np.dot(weights.T, linearGradient)
        return previousActivationGradient, weightsGradient, biasGradient

    def LinearBackwardActivation(self, activatedGradient, cache, activation):
        """Backprop through the activation ('relu'/'sigmoid'), then the affine step."""
        linearCache, linearResult = cache
        if activation == "relu":
            linearGradient = self.reluDifferential(activatedGradient, linearResult)
        elif activation == "sigmoid":
            linearGradient = self.sigmoidDifferential(activatedGradient, linearResult)
        previousActivationGradient, weightsGradient, biasGradient = \
            self.LinearBackward(linearGradient, linearCache)
        return previousActivationGradient, weightsGradient, biasGradient

    def BackPropagate(self, lastLayerActivation, targetSet, caches):
        """Full backward pass; returns the gradients dict used by UpdateWeights.

        NOTE(review): the 1/m factor appears both in the dA_L seed here and in
        LinearBackward, so gradients carry an extra 1/m relative to the
        textbook formulation - behaviour preserved, confirm intent.
        """
        gradients = {}
        totalLayers = len(caches)
        totalSamples = lastLayerActivation.shape[1]
        targetSet.reshape(lastLayerActivation.shape)
        lastLayerActivatedGradient = -(1 / totalSamples) * (np.divide(targetSet, lastLayerActivation)
                                                            - np.divide(1 - targetSet, 1 - lastLayerActivation))
        currentCache = caches[totalLayers - 1]
        gradients["ActivatedGradient" + str(totalLayers - 1)], gradients["WeightsGradient" + str(totalLayers)], \
            gradients["BiasGradient" + str(totalLayers)] = \
            self.LinearBackwardActivation(lastLayerActivatedGradient, currentCache, "sigmoid")
        for layer in reversed(range(totalLayers - 1)):
            currentCache = caches[layer]
            previousActivatedGradient, weightsGradient, biasGradient = \
                self.LinearBackwardActivation(gradients["ActivatedGradient" + str(layer + 1)],
                                              currentCache, "relu")
            gradients["ActivatedGradient" + str(layer)] = previousActivatedGradient
            gradients["WeightsGradient" + str(layer + 1)] = weightsGradient
            gradients["BiasGradient" + str(layer + 1)] = biasGradient
        return gradients

    def UpdateWeights(self, parameters, gradients, learningRate=0.1):
        """One gradient-descent step in place: p := p - learningRate * gradient.

        Bug fix: the original weight line read ``parameters[...] =- lr*grad``
        (note ``=-``, not ``-=``), which REPLACED the weights with the negated
        gradient on every step; the bias line used the correct subtract form.
        """
        totalLayer = len(parameters) // 2
        for layer in range(1, totalLayer + 1):
            parameters["Weights" + str(layer)] = parameters["Weights" + str(layer)] - \
                learningRate * gradients["WeightsGradient" + str(layer)]
            parameters["bias" + str(layer)] = parameters["bias" + str(layer)] - \
                learningRate * gradients["BiasGradient" + str(layer)]
        return parameters

    def Predict(self, data, target, parameters):
        """Forward-propagate, threshold probabilities at 0.5, print accuracy."""
        totalSamples = data.shape[1]
        predictions = np.zeros((1, totalSamples))
        probabilities, caches = self.ForwardPropagate(data, parameters)
        for i in range(0, probabilities.shape[1]):
            if probabilities[0, i] > .5:
                predictions[0, i] = 1
            else:
                predictions[0, i] = 0
        print("Accuracy: " + str(np.sum((predictions == target) / totalSamples)))
        return predictions
| 6,036 | -7 | 492 |
e27ecf3ebcb33b74a44fed1170118875536cf623 | 976 | py | Python | silex_client/utils/log.py | ArtFXDev/silex_client | 657d594dcfec79e7c8f4053df9d4a5dbc0c9ac50 | [
"MIT"
] | 10 | 2021-09-21T03:26:45.000Z | 2022-03-19T00:30:03.000Z | silex_client/utils/log.py | ArtFXDev/silex_dcc | 657d594dcfec79e7c8f4053df9d4a5dbc0c9ac50 | [
"MIT"
] | 66 | 2021-09-17T09:54:23.000Z | 2022-03-29T23:31:17.000Z | silex_client/utils/log.py | ArtFXDev/silex_dcc | 657d594dcfec79e7c8f4053df9d4a5dbc0c9ac50 | [
"MIT"
] | null | null | null | """
@author: michael.haussmann
retake by le TD gang
A simple logger shortcut / wrapper.
Uses
https://logzero.readthedocs.io/
"""
import logging
import os
import sys
import logzero
from logzero import logger
# Formatting of the output log to look like
__LOG_FORMAT__ = "[SILEX]\
[%(asctime)s] %(color)s%(levelname)-10s%(end_color)s|\
[%(module)s.%(funcName)s] %(color)s%(message)-50s%(end_color)s (%(lineno)d)"
handler = logging.StreamHandler(sys.stdout) # stream to stdout for pycharm
formatter = logzero.LogFormatter(fmt=__LOG_FORMAT__)
handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(handler)
env_log_level = os.getenv("SILEX_LOG_LEVEL", "DEBUG")
env_log_level = env_log_level.upper()
if env_log_level not in logging._nameToLevel:
env_log_level = "DEBUG"
logger.error("Invalid log level (%s): Setting DEBUG as value", env_log_level)
log_level = getattr(logging, env_log_level)
logger.setLevel(log_level) # set default level
| 25.684211 | 81 | 0.748975 | """
@author: michael.haussmann
retake by le TD gang
A simple logger shortcut / wrapper.
Uses
https://logzero.readthedocs.io/
"""
import logging
import os
import sys
import logzero
from logzero import logger
# Formatting of the output log to look like
__LOG_FORMAT__ = "[SILEX]\
[%(asctime)s] %(color)s%(levelname)-10s%(end_color)s|\
[%(module)s.%(funcName)s] %(color)s%(message)-50s%(end_color)s (%(lineno)d)"
handler = logging.StreamHandler(sys.stdout) # stream to stdout for pycharm
formatter = logzero.LogFormatter(fmt=__LOG_FORMAT__)
handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(handler)
env_log_level = os.getenv("SILEX_LOG_LEVEL", "DEBUG")
env_log_level = env_log_level.upper()
if env_log_level not in logging._nameToLevel:
env_log_level = "DEBUG"
logger.error("Invalid log level (%s): Setting DEBUG as value", env_log_level)
log_level = getattr(logging, env_log_level)
logger.setLevel(log_level) # set default level
| 0 | 0 | 0 |
403ccb3bb0baedaed9f44b1c1334c3e916bd1402 | 1,891 | py | Python | python/shopping/content/workflows.py | akgarchi/googleads-shopping-samples | 053bc5500405b751c671b169748b963fc142e8cc | [
"Apache-2.0"
] | 149 | 2015-01-11T12:23:41.000Z | 2022-03-28T03:42:20.000Z | python/shopping/content/workflows.py | akgarchi/googleads-shopping-samples | 053bc5500405b751c671b169748b963fc142e8cc | [
"Apache-2.0"
] | 28 | 2015-07-14T20:42:10.000Z | 2021-12-27T01:06:12.000Z | python/shopping/content/workflows.py | akgarchi/googleads-shopping-samples | 053bc5500405b751c671b169748b963fc142e8cc | [
"Apache-2.0"
] | 250 | 2015-01-20T14:30:17.000Z | 2022-03-28T06:26:51.000Z | #!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run all example workflows (except for the Orders service)."""
from __future__ import absolute_import
from __future__ import print_function
import sys
from shopping.content import accounts
from shopping.content import accountstatuses
from shopping.content import accounttax
from shopping.content import common
from shopping.content import datafeeds
from shopping.content import products
from shopping.content import productstatuses
from shopping.content import shippingsettings
if __name__ == '__main__':
main(sys.argv)
| 33.767857 | 74 | 0.662612 | #!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run all example workflows (except for the Orders service)."""
from __future__ import absolute_import
from __future__ import print_function
import sys
from shopping.content import accounts
from shopping.content import accountstatuses
from shopping.content import accounttax
from shopping.content import common
from shopping.content import datafeeds
from shopping.content import products
from shopping.content import productstatuses
from shopping.content import shippingsettings
def main(argv):
  """Authenticate against the Content API, then run every example workflow.

  A separator line is printed before the first workflow and after each one.
  """
  service, config, _ = common.init(argv, __doc__)
  separator = '--------------------------------'
  print(separator)
  # Each module exposes a workflow() demonstrating one Content API service.
  for module in (accounts, accountstatuses, accounttax, datafeeds,
                 products, productstatuses, shippingsettings):
    module.workflow(service, config)
    print(separator)


if __name__ == '__main__':
  main(sys.argv)
| 721 | 0 | 23 |
81917554f945b6226b57948fc8368df61c83d0db | 7,065 | py | Python | cscs-checks/microbenchmarks/mpi/halo_exchange/halo_cell_exchange.py | jacwah/reframe | d650bbbb2f87c6ae5f354e50b50bcfd98fafe77b | [
"BSD-3-Clause"
] | null | null | null | cscs-checks/microbenchmarks/mpi/halo_exchange/halo_cell_exchange.py | jacwah/reframe | d650bbbb2f87c6ae5f354e50b50bcfd98fafe77b | [
"BSD-3-Clause"
] | 3 | 2022-03-11T09:51:33.000Z | 2022-03-31T08:20:19.000Z | cscs-checks/microbenchmarks/mpi/halo_exchange/halo_cell_exchange.py | jacwah/reframe | d650bbbb2f87c6ae5f354e50b50bcfd98fafe77b | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
| 46.788079 | 78 | 0.479264 | # Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
class HaloCellExchangeTest(rfm.RegressionTest):
    """MPI halo-cell exchange microbenchmark.

    Builds ``halo_cell_exchange.c`` and runs it on 6 MPI ranks.  Sanity
    requires exactly 9 result lines in stdout; each line's trailing
    ``time_mpi`` column is extracted as a performance metric.  The numbers in
    each matched line (e.g. ``6 2 1 1 10 10 10``) presumably encode the rank
    decomposition and cell counts -- confirm against the benchmark source.
    """
    def __init__(self):
        self.sourcepath = 'halo_cell_exchange.c'
        self.build_system = 'SingleSource'
        self.build_system.cflags = ['-O2']
        self.valid_systems = ['daint:gpu', 'dom:gpu', 'daint:mc', 'dom:mc',
                              'arolla:cn', 'tsa:cn', 'eiger:mc', 'pilatus:mc']
        self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-pgi',
                                    'PrgEnv-nvidia']
        self.num_tasks = 6
        self.num_tasks_per_node = 1
        self.num_gpus_per_node = 0
        self.executable_opts = ['input.txt']
        # One 'halo_cell_exchange ...' output line per benchmark configuration.
        self.sanity_patterns = sn.assert_eq(
            sn.count(sn.findall(r'halo_cell_exchange', self.stdout)), 9)
        # Metric names follow 'time_<config>_<size>'; each extracts the final
        # time_mpi field of the matching output line.
        self.perf_patterns = {
            'time_2_10': sn.extractsingle(
                r'halo_cell_exchange 6 2 1 1 10 10 10'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float),
            'time_2_10000': sn.extractsingle(
                r'halo_cell_exchange 6 2 1 1 10000 10000 10000'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float),
            'time_2_1000000': sn.extractsingle(
                r'halo_cell_exchange 6 2 1 1 1000000 1000000 1000000'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float),
            'time_4_10': sn.extractsingle(
                r'halo_cell_exchange 6 2 2 1 10 10 10'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float),
            'time_4_10000': sn.extractsingle(
                r'halo_cell_exchange 6 2 2 1 10000 10000 10000'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float),
            'time_4_1000000': sn.extractsingle(
                r'halo_cell_exchange 6 2 2 1 1000000 1000000 1000000'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float),
            'time_6_10': sn.extractsingle(
                r'halo_cell_exchange 6 3 2 1 10 10 10'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float),
            'time_6_10000': sn.extractsingle(
                r'halo_cell_exchange 6 3 2 1 10000 10000 10000'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float),
            'time_6_1000000': sn.extractsingle(
                r'halo_cell_exchange 6 3 2 1 1000000 1000000 1000000'
                r' \S+ (?P<time_mpi>\S+)', self.stdout,
                'time_mpi', float)
        }
        # Per-system references: (value, lower_thres, upper_thres, unit).
        # Only an upper tolerance of 50% is enforced (lower bound is None).
        self.reference = {
            'dom:mc': {
                'time_2_10': (3.925395e-06, None, 0.50, 's'),
                'time_2_10000': (9.721279e-06, None, 0.50, 's'),
                'time_2_1000000': (4.934530e-04, None, 0.50, 's'),
                'time_4_10': (5.878997e-06, None, 0.50, 's'),
                'time_4_10000': (1.495080e-05, None, 0.50, 's'),
                'time_4_1000000': (6.791397e-04, None, 0.50, 's'),
                'time_6_10': (5.428815e-06, None, 0.50, 's'),
                'time_6_10000': (1.540580e-05, None, 0.50, 's'),
                'time_6_1000000': (9.179296e-04, None, 0.50, 's')
            },
            'daint:mc': {
                'time_2_10': (1.5e-05, None, 0.50, 's'),
                'time_2_10000': (9.1e-05, None, 0.50, 's'),
                'time_2_1000000': (7.9e-04, None, 0.50, 's'),
                'time_4_10': (3e-05, None, 0.50, 's'),
                'time_4_10000': (1.3e-04, None, 0.50, 's'),
                'time_4_1000000': (6.791397e-04, None, 0.50, 's'),
                'time_6_10': (3.5e-05, None, 0.50, 's'),
                'time_6_10000': (1.2e-04, None, 0.50, 's'),
                'time_6_1000000': (9.179296e-04, None, 0.50, 's')
            },
            'dom:gpu': {
                'time_2_10': (3.925395e-06, None, 0.50, 's'),
                'time_2_10000': (9.721279e-06, None, 0.50, 's'),
                'time_2_1000000': (4.934530e-04, None, 0.50, 's'),
                'time_4_10': (5.878997e-06, None, 0.50, 's'),
                'time_4_10000': (1.495080e-05, None, 0.50, 's'),
                'time_4_1000000': (6.791397e-04, None, 0.50, 's'),
                'time_6_10': (5.428815e-06, None, 0.50, 's'),
                'time_6_10000': (1.540580e-05, None, 0.50, 's'),
                'time_6_1000000': (9.179296e-04, None, 0.50, 's')
            },
            'daint:gpu': {
                'time_2_10': (1.5e-05, None, 0.50, 's'),
                'time_2_10000': (9.1e-05, None, 0.50, 's'),
                'time_2_1000000': (7.9e-04, None, 0.50, 's'),
                'time_4_10': (3e-05, None, 0.50, 's'),
                'time_4_10000': (1.3e-04, None, 0.50, 's'),
                'time_4_1000000': (6.791397e-04, None, 0.50, 's'),
                'time_6_10': (3.5e-05, None, 0.50, 's'),
                'time_6_10000': (1.2e-04, None, 0.50, 's'),
                'time_6_1000000': (9.179296e-04, None, 0.50, 's')
            },
            'eiger:mc': {
                'time_2_10': (3.46e-06, None, 0.50, 's'),
                'time_2_10000': (8.51e-06, None, 0.50, 's'),
                'time_2_1000000': (2.07e-04, None, 0.50, 's'),
                'time_4_10': (4.46e-06, None, 0.50, 's'),
                'time_4_10000': (1.08e-05, None, 0.50, 's'),
                'time_4_1000000': (3.55e-04, None, 0.50, 's'),
                'time_6_10': (4.53e-06, None, 0.50, 's'),
                'time_6_10000': (1.04e-05, None, 0.50, 's'),
                'time_6_1000000': (3.55e-04, None, 0.50, 's')
            },
            'pilatus:mc': {
                'time_2_10': (3.46e-06, None, 0.50, 's'),
                'time_2_10000': (8.51e-06, None, 0.50, 's'),
                'time_2_1000000': (2.07e-04, None, 0.50, 's'),
                'time_4_10': (4.46e-06, None, 0.50, 's'),
                'time_4_10000': (1.08e-05, None, 0.50, 's'),
                'time_4_1000000': (3.55e-04, None, 0.50, 's'),
                'time_6_10': (4.53e-06, None, 0.50, 's'),
                'time_6_10000': (1.04e-05, None, 0.50, 's'),
                'time_6_1000000': (3.55e-04, None, 0.50, 's')
            },
        }
        self.maintainers = ['AJ']
        self.strict_check = False
        self.tags = {'benchmark'}
    @run_before('compile')
    def pgi_workaround(self):
        """Point PGI at the CUDA toolkit; skip PrgEnv-nvidia on eiger/pilatus."""
        if self.current_system.name in ['daint', 'dom']:
            if self.current_environ.name == 'PrgEnv-pgi':
                self.variables = {
                    'CUDA_HOME': '$CUDATOOLKIT_HOME',
                }
        if self.current_environ.name == 'PrgEnv-nvidia':
            self.skip_if(self.current_system.name == 'eiger')
            self.skip_if(self.current_system.name == 'pilatus')
| 6,666 | 106 | 22 |
9791879d1c1f35de52f9cb7e5249578c2580c89b | 5,398 | py | Python | ojm.py | evuez/ojm | 382452836779211b574e7958258ca17105d5654e | [
"MIT"
] | null | null | null | ojm.py | evuez/ojm | 382452836779211b574e7958258ca17105d5654e | [
"MIT"
] | null | null | null | ojm.py | evuez/ojm | 382452836779211b574e7958258ca17105d5654e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from time import strftime
from os import path, remove
from glob import glob
from uuid import uuid4
STORAGE = 'ojm.data'
_models = {}
def register(model):
"""
Register a class to allow object loading from JSON.
"""
_models[model.__name__] = model
def storable(obj):
"""
Remove fields that can't / shouldn't be stored.
"""
return {
a:get_data(a) for a in dir(obj)
if not a.startswith('_')
and not hasattr(
getattr(obj, a),
'__call__'
)
}
| 27.968912 | 77 | 0.475732 | # -*- coding: utf-8 -*-
import json
from time import strftime
from os import path, remove
from glob import glob
from uuid import uuid4
STORAGE = 'ojm.data'
class ModelNotFound(Exception):
    """Raised when a stored model cannot be located on disk."""
_models = {}
def register(model):
    """Record *model* in the registry so load() can resolve it by class name."""
    key = model.__name__
    _models[key] = model
def storable(obj):
    """Build a dict of the object's persistable attributes.

    Underscore-prefixed names and callables are skipped.  Attributes whose
    name starts with ``linked_`` are collapsed to the linked object's
    ``uuid`` (or ``None`` when absent).
    """
    persisted = {}
    for name in dir(obj):
        if name.startswith('_'):
            continue
        value = getattr(obj, name)
        if hasattr(value, '__call__'):
            continue
        if name.startswith('linked_'):
            value = getattr(value, 'uuid', None)
        persisted[name] = value
    return persisted
class Model(object):
    """Base class for models persisted as one JSON file per instance.

    Files live in ``STORAGE`` and are named ``<classname>_<uuid>``.
    Attribute-name prefixes drive (de)serialization (see ``storable``):

    * ``embedded_<name>``  -- nested JSON, re-hydrated via the registered
      model class ``<Name>``.
    * ``linked_<name>``    -- stored as the linked object's uuid and loaded
      from its own file.
    * A trailing ``s`` on either prefix marks a list of such objects.
    """
    def __init__(self):
        # Fresh identity; created/updated timestamps are set by save()/update().
        self.uuid = uuid4().hex
        self.created = None
        self.updated = None
    def save(self):
        """
        Save current instance to a JSON file.
        """
        fname = path.join(
            STORAGE,
            '{0}_{1}'.format(
                self.__class__.__name__.lower(),
                self.uuid
            )
        )
        if path.isfile(fname):
            raise IOError("Cannot overwrite existing model!")
        self.created = strftime('%Y-%m-%d %H:%M:%S')
        data = storable(self)
        with open(fname, 'w') as out:
            # default=storable lets json serialize embedded model objects.
            json.dump(
                obj=data,
                fp=out,
                separators=(',',':'),
                default=storable
            )
    def update(self):
        """
        Update JSON file with current instance.
        """
        fname = path.join(
            STORAGE,
            '{0}_{1}'.format(
                self.__class__.__name__.lower(),
                self.uuid
            )
        )
        if not path.isfile(fname):
            raise IOError("Cannot update unsaved model!")
        self.updated = strftime('%Y-%m-%d %H:%M:%S')
        data = storable(self)
        with open(fname, 'w') as out:
            json.dump(
                obj=data,
                fp=out,
                separators=(',',':'),
                default=storable
            )
    def delete(self):
        """
        Delete saved instance.

        Raises ModelNotFound if no file exists for this instance.
        """
        fname = path.join(
            STORAGE,
            '{0}_{1}'.format(
                self.__class__.__name__.lower(),
                self.uuid
            )
        )
        try:
            remove(fname)
        except OSError:
            raise ModelNotFound
    @classmethod
    def load(cls, uuid=None, str_=None):
        """
        Load an object.
        If `uuid` is provided, will try to load using this uuid.
        `str_` must be a JSON string. If `uuid` isn't provided but `str_` is,
        it will try to load the object using this JSON string.
        Returns None when neither argument is given; raises ModelNotFound
        when the uuid's file does not exist.
        """
        if not any((uuid, str_)):
            return None
        if uuid is None:
            data = json.loads(str_)
        else:
            fname = path.join(
                STORAGE,
                '{0}_{1}'.format(
                    cls.__name__.lower(),
                    uuid
                )
            )
            try:
                with open(fname, 'r') as out:
                    data = json.load(out)
            except IOError:
                raise ModelNotFound("Model {0} not found".format(fname))
        # A blank instance provides the set of persistable attribute names.
        obj = cls()
        for attr in storable(obj).keys():
            element = None
            if attr not in data or attr.startswith('_'):
                continue
            if attr.startswith('embedded_') and attr.endswith('s'):
                # List of embedded models: attr[9:-1] strips the prefix and
                # the plural 's'; .title() yields the registered class name.
                element = []
                for embedded in data[attr]:
                    element.append(_models[
                        attr[9:-1].title()
                    ].loads(json.dumps(embedded)))
            elif attr.startswith('embedded_'):
                # Single embedded model, re-hydrated from its nested JSON.
                element = _models[
                    attr[9:].title()
                ].loads(json.dumps(data[attr]))
            elif attr.startswith('linked_') and attr.endswith('s'):
                # List of linked models: each entry is a uuid to load.
                element = []
                for linked in data[attr]:
                    element.append(_models[attr[7:-1].title()].load(linked))
            elif attr.startswith('linked_'):
                element = _models[attr[7:].title()].load(data[attr])
            if element is None:
                # Plain value (no prefix matched): copy it over verbatim.
                element = data[attr]
            if not attr.startswith('_'):
                setattr(obj, attr, element)
        return obj
    @classmethod
    def loads(cls, str_):
        """
        Short for `Model.load(str_=json)`.
        """
        return cls.load(str_=str_)
    @classmethod
    def load_all(cls, start=0, stop=None):
        """
        Load every object that is an instance of the current class.
        `start` and `stop` can be provided to limit output.
        """
        # Filenames are '<classname>_<uuid>'; partition('_')[2] recovers the uuid.
        return [
            cls.load(path.basename(f).partition('_')[2])
            for f in glob(path.join(
                STORAGE,
                '{0}_*'.format(cls.__name__.lower())
            ))[start:stop]
        ]
    def duplicate(self):
        """
        When loading an existing object, to create a copy of this object and
        be able to save it, a new UUID must be generated.
        This method simply generates a new UUID and returns self.
        """
        self.uuid = uuid4().hex
        return self
| 224 | 4,509 | 72 |
1bf08b75140963ba2e0adf3ecf31287e7cfd92ef | 5,308 | py | Python | indico/modules/events/timetable/controllers/display.py | bkmgit/indico | d77ee121e35880a416b9b05e6098ea912d870b5c | [
"MIT"
] | 1 | 2021-06-11T20:02:10.000Z | 2021-06-11T20:02:10.000Z | indico/modules/events/timetable/controllers/display.py | bkmgit/indico | d77ee121e35880a416b9b05e6098ea912d870b5c | [
"MIT"
] | null | null | null | indico/modules/events/timetable/controllers/display.py | bkmgit/indico | d77ee121e35880a416b9b05e6098ea912d870b5c | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from io import BytesIO
from flask import jsonify, request, session
from werkzeug.exceptions import Forbidden, NotFound
from indico.legacy.pdfinterface.conference import SimplifiedTimeTablePlain, TimetablePDFFormat, TimeTablePlain
from indico.modules.events.contributions import contribution_settings
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.layout import layout_settings
from indico.modules.events.timetable.forms import TimetablePDFExportForm
from indico.modules.events.timetable.legacy import TimetableSerializer
from indico.modules.events.timetable.util import (get_timetable_offline_pdf_generator, render_entry_info_balloon,
serialize_event_info)
from indico.modules.events.timetable.views import WPDisplayTimetable
from indico.modules.events.util import get_theme
from indico.modules.events.views import WPSimpleEventDisplay
from indico.util.i18n import _
from indico.web.flask.util import send_file, url_for
from indico.web.util import jsonify_data, jsonify_template
class RHTimetableEntryInfo(RHTimetableProtectionBase):
"""Display timetable entry info balloon."""
| 49.607477 | 120 | 0.696873 | # This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from io import BytesIO
from flask import jsonify, request, session
from werkzeug.exceptions import Forbidden, NotFound
from indico.legacy.pdfinterface.conference import SimplifiedTimeTablePlain, TimetablePDFFormat, TimeTablePlain
from indico.modules.events.contributions import contribution_settings
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.layout import layout_settings
from indico.modules.events.timetable.forms import TimetablePDFExportForm
from indico.modules.events.timetable.legacy import TimetableSerializer
from indico.modules.events.timetable.util import (get_timetable_offline_pdf_generator, render_entry_info_balloon,
serialize_event_info)
from indico.modules.events.timetable.views import WPDisplayTimetable
from indico.modules.events.util import get_theme
from indico.modules.events.views import WPSimpleEventDisplay
from indico.util.i18n import _
from indico.web.flask.util import send_file, url_for
from indico.web.util import jsonify_data, jsonify_template
class RHTimetableProtectionBase(RHDisplayEventBase):
    """Base handler hiding the timetable until contributions are published.

    Event managers may still access it before publication.
    """
    def _check_access(self):
        RHDisplayEventBase._check_access(self)
        published = contribution_settings.get(self.event, 'published')
        if not published and not self.event.can_manage(session.user):
            raise NotFound(_('The contributions of this event have not been published yet'))
class RHTimetable(RHTimetableProtectionBase):
    """Display the event timetable, either interactively or via a theme."""
    view_class = WPDisplayTimetable
    view_class_simple = WPSimpleEventDisplay
    def _process_args(self):
        RHTimetableProtectionBase._process_args(self)
        # 'ttLyt' is the legacy query-arg name for the layout.
        self.timetable_layout = request.args.get('layout') or request.args.get('ttLyt')
        self.theme, self.theme_override = get_theme(self.event, request.args.get('view'))
    def _process(self):
        self.event.preload_all_acl_entries()
        if self.theme is None:
            # No theme requested: render the interactive timetable page.
            event_info = serialize_event_info(self.event)
            timetable_data = TimetableSerializer(self.event).serialize_timetable(strip_empty_days=True)
            timetable_settings = layout_settings.get(self.event, 'timetable_theme_settings')
            return self.view_class.render_template('display.html', self.event, event_info=event_info,
                                                   timetable_data=timetable_data, timetable_settings=timetable_settings,
                                                   timetable_layout=self.timetable_layout,
                                                   published=contribution_settings.get(self.event, 'published'))
        else:
            # Themed view: delegate to the simple event display page.
            return self.view_class_simple(self, self.event, self.theme, self.theme_override).display()
class RHTimetableEntryInfo(RHTimetableProtectionBase):
    """Display timetable entry info balloon."""

    def _process_args(self):
        RHTimetableProtectionBase._process_args(self)
        entry_id = request.view_args['entry_id']
        entry_query = self.event.timetable_entries.filter_by(id=entry_id)
        self.entry = entry_query.first_or_404()

    def _check_access(self):
        if not self.entry.can_view(session.user):
            raise Forbidden

    def _process(self):
        return jsonify(html=render_entry_info_balloon(self.entry))
class RHTimetableExportPDF(RHTimetableProtectionBase):
    """Show the timetable PDF export form and serve the generated PDF."""
    def _process(self):
        # The form round-trips through query args (GET), so CSRF is disabled.
        form = TimetablePDFExportForm(formdata=request.args, csrf_enabled=False)
        if form.validate_on_submit():
            form_data = form.data_for_format
            pdf_format = TimetablePDFFormat(form_data)
            if not form.advanced.data:
                # Basic export: force conference-level contribs/breaks and use
                # the simplified generator.
                pdf_format.contribsAtConfLevel = True
                pdf_format.breaksAtConfLevel = True
                pdf_class = SimplifiedTimeTablePlain
                additional_params = {}
            else:
                pdf_class = TimeTablePlain
                additional_params = {'firstPageNumber': form.firstPageNumber.data,
                                     'showSpeakerAffiliation': form_data['showSpeakerAffiliation'],
                                     'showSessionDescription': form_data['showSessionDescription']}
            if request.args.get('download') == '1':
                pdf = pdf_class(self.event, session.user, sortingCrit=None, ttPDFFormat=pdf_format,
                                pagesize=form.pagesize.data, **additional_params)
                return send_file('timetable.pdf', BytesIO(pdf.getPDFBin()), 'application/pdf')
            else:
                # First submit: redirect back to this endpoint with download=1
                # so the browser performs the actual file download.
                url = url_for(request.endpoint, **dict(request.view_args, download='1', **request.args.to_dict(False)))
                return jsonify_data(flash=False, redirect=url, redirect_no_loading=True)
        return jsonify_template('events/timetable/timetable_pdf_export.html', form=form,
                                back_url=url_for('.timetable', self.event))
class RHTimetableExportDefaultPDF(RHTimetableProtectionBase):
    """Serve the default offline timetable PDF for the event."""

    def _process(self):
        generator = get_timetable_offline_pdf_generator(self.event)
        payload = BytesIO(generator.getPDFBin())
        return send_file('timetable.pdf', payload, 'application/pdf')
| 3,390 | 263 | 251 |
90a8ac88c134de85ef0fff569cb91c072a5406f2 | 9,413 | py | Python | sphinx_autodoc_typehints.py | bpeake-illuscio/sphinx-autodoc-typehints | cd822aacb96bb250e8d533cd7131e986f1938f08 | [
"MIT"
] | null | null | null | sphinx_autodoc_typehints.py | bpeake-illuscio/sphinx-autodoc-typehints | cd822aacb96bb250e8d533cd7131e986f1938f08 | [
"MIT"
] | null | null | null | sphinx_autodoc_typehints.py | bpeake-illuscio/sphinx-autodoc-typehints | cd822aacb96bb250e8d533cd7131e986f1938f08 | [
"MIT"
] | null | null | null | import inspect
import typing
from typing import get_type_hints, TypeVar, Any, AnyStr, Generic, Union
from sphinx.util import logging
from sphinx.util.inspect import Signature
try:
from inspect import unwrap
except ImportError:
def unwrap(func, *, stop=None):
"""This is the inspect.unwrap() method copied from Python 3.5's standard library."""
if stop is None:
else:
f = func # remember the original func for error reporting
memo = {id(f)} # Memoise by id to tolerate non-hashable objects
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func
logger = logging.getLogger(__name__)
| 39.384937 | 97 | 0.576756 | import inspect
import typing
from typing import get_type_hints, TypeVar, Any, AnyStr, Generic, Union
from sphinx.util import logging
from sphinx.util.inspect import Signature
# Compatibility shim: older Pythons lack inspect.unwrap, so fall back to a
# vendored copy of the 3.5 stdlib implementation.
try:
    from inspect import unwrap
except ImportError:
    def unwrap(func, *, stop=None):
        """This is the inspect.unwrap() method copied from Python 3.5's standard library."""
        if stop is None:
            def _is_wrapper(f):
                return hasattr(f, '__wrapped__')
        else:
            def _is_wrapper(f):
                return hasattr(f, '__wrapped__') and not stop(f)
        f = func  # remember the original func for error reporting
        memo = {id(f)}  # Memoise by id to tolerate non-hashable objects
        while _is_wrapper(func):
            func = func.__wrapped__
            id_func = id(func)
            if id_func in memo:
                raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
            memo.add(id_func)
        return func
logger = logging.getLogger(__name__)
def format_annotation(annotation):
    """Return a reST cross-reference string for a type annotation.

    Handles builtins, ``typing`` constructs (Any/AnyStr, TypeVar,
    Union/Optional, Tuple, Callable, type aliases, parametrized generics)
    and plain classes, recursing into type parameters.  The ``hasattr``
    probing of dunder attributes accommodates the differing internals of
    several ``typing`` module releases.
    """
    if inspect.isclass(annotation) and annotation.__module__ == 'builtins':
        if annotation.__qualname__ == 'NoneType':
            return '``None``'
        else:
            return ':py:class:`{}`'.format(annotation.__qualname__)
    annotation_cls = annotation if inspect.isclass(annotation) else type(annotation)
    class_name = None
    if annotation_cls.__module__ == 'typing':
        params = None
        prefix = ':py:class:'
        module = 'typing'
        extra = ''
        # Parametrized generics expose the real class via __origin__.
        if inspect.isclass(getattr(annotation, '__origin__', None)):
            annotation_cls = annotation.__origin__
            try:
                if Generic in annotation_cls.mro():
                    module = annotation_cls.__module__
            except TypeError:
                pass  # annotation_cls was either the "type" object or typing.Type
        if annotation is Any:
            return ':py:data:`~typing.Any`'
        elif annotation is AnyStr:
            return ':py:data:`~typing.AnyStr`'
        elif isinstance(annotation, TypeVar):
            return '\\%r' % annotation
        elif (annotation is Union or getattr(annotation, '__origin__', None) is Union or
              hasattr(annotation, '__union_params__')):
            prefix = ':py:data:'
            class_name = 'Union'
            # __union_params__ is used by older typing releases, __args__ by
            # newer ones.
            if hasattr(annotation, '__union_params__'):
                params = annotation.__union_params__
            elif hasattr(annotation, '__args__'):
                params = annotation.__args__
            # Union[X, None] renders as Optional[X].
            if params and len(params) == 2 and (hasattr(params[1], '__qualname__') and
                                                params[1].__qualname__ == 'NoneType'):
                class_name = 'Optional'
                params = (params[0],)
        elif annotation_cls.__qualname__ == 'Tuple' and hasattr(annotation, '__tuple_params__'):
            params = annotation.__tuple_params__
            if annotation.__tuple_use_ellipsis__:
                params += (Ellipsis,)
        elif annotation_cls.__qualname__ == 'Callable':
            prefix = ':py:data:'
            arg_annotations = result_annotation = None
            # Older typing keeps the return type in __result__; newer typing
            # appends it as the last element of __args__.
            if hasattr(annotation, '__result__'):
                arg_annotations = annotation.__args__
                result_annotation = annotation.__result__
            elif getattr(annotation, '__args__', None):
                arg_annotations = annotation.__args__[:-1]
                result_annotation = annotation.__args__[-1]
            if arg_annotations in (Ellipsis, (Ellipsis,)):
                params = [Ellipsis, result_annotation]
            elif arg_annotations is not None:
                params = [
                    '\\[{}]'.format(
                        ', '.join(format_annotation(param) for param in arg_annotations)),
                    result_annotation
                ]
        elif hasattr(annotation, 'type_var'):
            # Type alias
            class_name = annotation.name
            params = (annotation.type_var,)
        elif getattr(annotation, '__args__', None) is not None:
            params = annotation.__args__
        elif hasattr(annotation, '__parameters__'):
            params = annotation.__parameters__
        if params:
            extra = '\\[{}]'.format(', '.join(format_annotation(param) for param in params))
        if not class_name:
            class_name = annotation_cls.__qualname__.title()
        return '{}`~{}.{}`{}'.format(prefix, module, class_name, extra)
    elif annotation is Ellipsis:
        return '...'
    elif inspect.isclass(annotation) or inspect.isclass(getattr(annotation, '__origin__', None)):
        if not inspect.isclass(annotation):
            annotation_cls = annotation.__origin__
        extra = ''
        if Generic in annotation_cls.mro():
            params = (getattr(annotation, '__parameters__', None) or
                      getattr(annotation, '__args__', None))
            extra = '\\[{}]'.format(', '.join(format_annotation(param) for param in params))
        return ':py:class:`~{}.{}`{}'.format(annotation.__module__, annotation_cls.__qualname__,
                                             extra)
    # Fallback for anything unrecognised (e.g. string forward references).
    return str(annotation)
def process_signature(app, what: str, name: str, obj, options, signature, return_annotation):
    """``autodoc-process-signature`` handler.

    Strips all annotations out of the rendered signature (they are injected
    into the description by :func:`process_docstring` instead) and drops the
    implicit first parameter (self/cls) for classes and bound methods.
    """
    if not callable(obj):
        return
    if what in ('class', 'exception'):
        # Document the constructor's signature for classes.
        obj = getattr(obj, '__init__', getattr(obj, '__new__', None))
    if not getattr(obj, '__annotations__', None):
        return
    obj = unwrap(obj)
    signature = Signature(obj)
    # Remove per-parameter annotations; they go into the docstring instead.
    parameters = [
        param.replace(annotation=inspect.Parameter.empty)
        for param in signature.signature.parameters.values()
    ]
    if parameters:
        if what in ('class', 'exception'):
            del parameters[0]
        elif what == 'method':
            # Walk from the module down the qualname to the defining class so
            # we can inspect the raw class-dict entry.
            outer = inspect.getmodule(obj)
            for clsname in obj.__qualname__.split('.')[:-1]:
                outer = getattr(outer, clsname)
            method_name = obj.__name__
            if method_name.startswith("__") and not method_name.endswith("__"):
                # If the method starts with double underscore (dunder)
                # Python applies mangling so we need to prepend the class name.
                # This doesn't happen if it always ends with double underscore.
                class_name = obj.__qualname__.split('.')[-2]
                method_name = "_{c}{m}".format(c=class_name, m=method_name)
            method_object = outer.__dict__[method_name]
            # Only instance methods take an implicit first argument.
            if not isinstance(method_object, (classmethod, staticmethod)):
                del parameters[0]
    signature.signature = signature.signature.replace(
        parameters=parameters,
        return_annotation=inspect.Signature.empty)
    # Escape backslashes so reST does not swallow them.
    return signature.format_args().replace('\\', '\\\\'), None
def process_docstring(app, what, name, obj, options, lines):
    """``autodoc-process-docstring`` handler.

    Injects ``:type <arg>:`` and ``:rtype:`` fields (derived from the
    object's type hints) into the docstring lines, in place.
    """
    if isinstance(obj, property):
        obj = obj.fget
    if callable(obj):
        if what in ('class', 'exception'):
            obj = getattr(obj, '__init__')
        obj = unwrap(obj)
        try:
            type_hints = get_type_hints(obj)
        except (AttributeError, TypeError):
            # Introspecting a slot wrapper will raise TypeError
            return
        except NameError as exc:
            # Unresolvable forward reference: warn and fall back to the raw
            # (possibly string-valued) annotations.
            logger.warning('Cannot resolve forward reference in type annotations of "%s": %s',
                           name, exc)
            type_hints = obj.__annotations__
        for argname, annotation in type_hints.items():
            if argname.endswith('_'):
                # Escape a trailing underscore so reST does not treat the
                # argument name as a link target.
                argname = '{}\\_'.format(argname[:-1])
            formatted_annotation = format_annotation(annotation)
            if argname == 'return':
                if what in ('class', 'exception'):
                    # Don't add return type None from __init__()
                    continue
                insert_index = len(lines)
                for i, line in enumerate(lines):
                    if line.startswith(':rtype:'):
                        # An explicit :rtype: already exists; don't duplicate.
                        insert_index = None
                        break
                    elif line.startswith(':return:') or line.startswith(':returns:'):
                        insert_index = i
                if insert_index is not None:
                    if insert_index == len(lines):
                        # Ensure that :rtype: doesn't get joined with a paragraph of text, which
                        # prevents it being interpreted.
                        lines.append('')
                        insert_index += 1
                    lines.insert(insert_index, ':rtype: {}'.format(formatted_annotation))
            else:
                searchfor = ':param {}:'.format(argname)
                for i, line in enumerate(lines):
                    if line.startswith(searchfor):
                        lines.insert(i, ':type {}: {}'.format(argname, formatted_annotation))
                        break
def builder_ready(app):
    """Enable ``typing.TYPE_CHECKING`` when the user set the config flag."""
    if not app.config.set_type_checking_flag:
        return
    typing.TYPE_CHECKING = True
def setup(app):
    """Sphinx extension entry point: register the config value and hooks."""
    app.add_config_value('set_type_checking_flag', False, 'html')
    handlers = (
        ('builder-inited', builder_ready),
        ('autodoc-process-signature', process_signature),
        ('autodoc-process-docstring', process_docstring),
    )
    for event, callback in handlers:
        app.connect(event, callback)
    return {'parallel_read_safe': True}
| 8,384 | 0 | 183 |
40d6394520b75b6986167f25296dfc637da5f4e2 | 4,492 | py | Python | DungeonCrawl/Model/ActionResolvers/MoveActionResolver.py | BoogyWinterfell/friday-fun | 686e8c53415e748db437e74c3e2b02afcd9695bf | [
"MIT"
] | null | null | null | DungeonCrawl/Model/ActionResolvers/MoveActionResolver.py | BoogyWinterfell/friday-fun | 686e8c53415e748db437e74c3e2b02afcd9695bf | [
"MIT"
] | null | null | null | DungeonCrawl/Model/ActionResolvers/MoveActionResolver.py | BoogyWinterfell/friday-fun | 686e8c53415e748db437e74c3e2b02afcd9695bf | [
"MIT"
] | null | null | null | from typing import List
from Abstract.ActionResolver import ActionResolver
from Abstract.GameAction import GameAction
from Abstract.EngineGameInfo import EngineGameInfo
from DungeonCrawl.DungeonCrawlEngineGameInfo import DungeonCrawlEngineGameInfo
from DungeonCrawl.DungeonCrawlUtils import count_dungeoneer_weapons, get_dungeoneer_items_by_type
from DungeonCrawl.Model.Actions.MoveAction import MoveAction
from DungeonCrawl.Model.GameObjects.Abstract.DungeonCrawlGameObject import DungeonCrawlGameObject
from DungeonCrawl.Model.GameObjects.Abstract.Item import Item
from DungeonCrawl.Model.GameObjects.Abstract.Weapon import Weapon
from DungeonCrawl.Model.GameObjects.Dungeoneer import Dungeoneer
# TODO: Consider moving these functions to utilities, Reconsider entire Pure Data Design Decision.
| 52.232558 | 135 | 0.69301 | from typing import List
from Abstract.ActionResolver import ActionResolver
from Abstract.GameAction import GameAction
from Abstract.EngineGameInfo import EngineGameInfo
from DungeonCrawl.DungeonCrawlEngineGameInfo import DungeonCrawlEngineGameInfo
from DungeonCrawl.DungeonCrawlUtils import count_dungeoneer_weapons, get_dungeoneer_items_by_type
from DungeonCrawl.Model.Actions.MoveAction import MoveAction
from DungeonCrawl.Model.GameObjects.Abstract.DungeonCrawlGameObject import DungeonCrawlGameObject
from DungeonCrawl.Model.GameObjects.Abstract.Item import Item
from DungeonCrawl.Model.GameObjects.Abstract.Weapon import Weapon
from DungeonCrawl.Model.GameObjects.Dungeoneer import Dungeoneer
class MoveActionResolver(ActionResolver):
def __init__(self):
self.resolve_dict = {1: (0, 1), 2: (1, 0), 3: (0, -1), 4: (-1, 0)}
def resolve_action(self, actions: List[GameAction], game_state: EngineGameInfo) -> EngineGameInfo:
info = DungeonCrawlEngineGameInfo(**game_state.__dict__)
for action in [action for action in actions if type(action) == MoveAction]:
move = MoveAction(**action.__dict__)
self.move_entity(move, info)
new_game_state = self.resolve_collisions(info)
return new_game_state
def move_entity(self, action: MoveAction, game_state: DungeonCrawlEngineGameInfo):
player = game_state.players[action['caller_name']]
moving_items = [item for item in player.items if item['name'] == action['moved_object_name']]
if len(moving_items) > 1:
raise Exception("There were at least two items with the same name.")
moving_item = moving_items[0]
if moving_item:
x_move, y_move = self.resolve_dict[action['direction']]
current_x = moving_item['x_tile']
current_y = moving_item['y_tile']
self.move_to_tile(game_state, moving_item, current_x, current_y, current_x + x_move,
current_y + y_move)
def resolve_collisions(self, game_state: DungeonCrawlEngineGameInfo) -> EngineGameInfo:
for row in game_state.grid:
for tile in row:
for tile_object in tile:
dungeoneers = [x for x in tile.objects_on_tile if isinstance(x, Dungeoneer)]
weapons_count = [count_dungeoneer_weapons(d) for d in dungeoneers]
max_weapons = max(weapons_count)
fight_winners = [d for d in dungeoneers if count_dungeoneer_weapons(d) == max_weapons]
fight_losers = [d for d in dungeoneers if count_dungeoneer_weapons(d) < max_weapons]
self.update_losers(fight_losers, game_state)
max_weapon_losers = max([count_dungeoneer_weapons(d) for d in fight_losers])
self.update_winners(fight_winners, game_state, max_weapon_losers)
return game_state
def update_winners(self, fight_winners, game_state, max_weapon_losers):
for winner in fight_winners:
weapons = get_dungeoneer_items_by_type(winner, Weapon)
weapons_count = count_dungeoneer_weapons(winner)
for i in range(0, min([max_weapon_losers, weapons_count])):
weapon = winner['items'].pop(weapons[len(weapons - 1)])
self.move_item_back(game_state, weapon, weapon['initial_x'], weapon['initial_y'])
def update_losers(self, fight_losers, game_state):
    """Send every fight loser, and each of the loser's items, back to
    their recorded initial positions on the grid."""
    for loser in fight_losers:
        for item in loser['items']:
            self.move_item_back(game_state, item, item['initial_x'], item['initial_y'])
        # Fix: the destination arguments were swapped — initial_y was
        # passed as x and initial_x as y (compare the item call above).
        self.move_to_tile(game_state, loser, loser['x_tile'],
                          loser['y_tile'], loser['initial_x'], loser['initial_y'])
# TODO: Consider moving these functions to utilities, Reconsider entire Pure Data Design Decision.
def move_to_tile(self, game_state: "DungeonCrawlEngineGameInfo", object_to_move: "DungeonCrawlGameObject", current_x, current_y, x, y):
    """Detach *object_to_move* from tile (current_x, current_y), record the
    new coordinates on the object, and attach it to tile (x, y)."""
    # Fix: objects_on_tile is used as a set (see .add below), and
    # set.pop() takes no argument — .pop(object_to_move) was a TypeError.
    # .remove() keeps the "must currently be on that tile" contract.
    game_state.grid[current_x][current_y].objects_on_tile.remove(object_to_move)
    object_to_move['x_tile'] = x
    object_to_move['y_tile'] = y
    game_state.grid[x][y].objects_on_tile.add(object_to_move)
def move_item_back(self, game_state: DungeonCrawlEngineGameInfo, object_to_move: Item, x, y):
    """Place *object_to_move* on grid cell (x, y) and record that position
    on the object itself (does not detach it from any previous tile)."""
    object_to_move['x_tile'], object_to_move['y_tile'] = x, y
    game_state.grid[x][y].objects_on_tile.add(object_to_move)
| 3,431 | 20 | 237 |
f9a60e9bba65feb39ffcf676f32b2f49d5004405 | 1,512 | py | Python | tests/test_loops.py | nathfroech/flake8_pylint_comparison | 1f6d5063b3055687e880b5b436346ce4b5ae95da | [
"MIT"
] | null | null | null | tests/test_loops.py | nathfroech/flake8_pylint_comparison | 1f6d5063b3055687e880b5b436346ce4b5ae95da | [
"MIT"
] | null | null | null | tests/test_loops.py | nathfroech/flake8_pylint_comparison | 1f6d5063b3055687e880b5b436346ce4b5ae95da | [
"MIT"
] | null | null | null | import pytest
from hamcrest import assert_that, contains_inanyorder
from tests.testing_utils import param_wrapper, run_flake8, run_pylint
params = [
# code, flake8 rules, pylint rules
param_wrapper((
'values = []',
'for i in range(10):',
' values.append(10)',
), {'B007'}, set(), id='simple_loop'),
param_wrapper((
'values = []',
'for i in range(10):',
' for j in range(10):',
' for k in range(10):',
' values.append(i + j)',
), {'B007'}, set(), id='nested_loop'),
param_wrapper((
'def strange_generator():',
' for x in range(10):',
' for y in range(10):',
' for z in range(10):',
' for w in range(10):',
' yield x, (y, (z, w))',
'',
'',
'values = []',
'for i, (j, (k, l)) in strange_generator():',
' values.append(j, l)',
), {'B007', 'WPS405', 'WPS414'}, set(), id='unpacking'),
]
@pytest.mark.parametrize('content,flake8_errors,pylint_errors', params)
| 33.6 | 92 | 0.571429 | import pytest
from hamcrest import assert_that, contains_inanyorder
from tests.testing_utils import param_wrapper, run_flake8, run_pylint
params = [
# code, flake8 rules, pylint rules
param_wrapper((
'values = []',
'for i in range(10):',
' values.append(10)',
), {'B007'}, set(), id='simple_loop'),
param_wrapper((
'values = []',
'for i in range(10):',
' for j in range(10):',
' for k in range(10):',
' values.append(i + j)',
), {'B007'}, set(), id='nested_loop'),
param_wrapper((
'def strange_generator():',
' for x in range(10):',
' for y in range(10):',
' for z in range(10):',
' for w in range(10):',
' yield x, (y, (z, w))',
'',
'',
'values = []',
'for i, (j, (k, l)) in strange_generator():',
' values.append(j, l)',
), {'B007', 'WPS405', 'WPS414'}, set(), id='unpacking'),
]
@pytest.mark.parametrize('content,flake8_errors,pylint_errors', params)
def test_detects_unused_loop_variables(content, flake8_errors, pylint_errors, file_to_lint):
    """Lint the snippet with flake8 and pylint and check the reported rule
    codes against the expected sets (order-independent)."""
    file_to_lint.write_text(content)
    actual_flake8 = set(run_flake8(file_to_lint))
    assert_that(actual_flake8, contains_inanyorder(*flake8_errors))
    actual_pylint = set(run_pylint(file_to_lint))
    assert_that(actual_pylint, contains_inanyorder(*pylint_errors))
| 370 | 0 | 22 |
639501802fb9488bea5ca438c95f4015af14ca5b | 1,444 | py | Python | Scrapy/Scrapy/spiders/basco.py | silveriogabriel/Scraping-Scrapy | 341554ffa7f055c91235816abcb799f9ac4b4428 | [
"MIT"
] | null | null | null | Scrapy/Scrapy/spiders/basco.py | silveriogabriel/Scraping-Scrapy | 341554ffa7f055c91235816abcb799f9ac4b4428 | [
"MIT"
] | null | null | null | Scrapy/Scrapy/spiders/basco.py | silveriogabriel/Scraping-Scrapy | 341554ffa7f055c91235816abcb799f9ac4b4428 | [
"MIT"
] | null | null | null | import scrapy
| 36.1 | 94 | 0.548476 | import scrapy
class BascoSpider(scrapy.Spider):
    """Crawl basco.com.br product pages, collecting each product's title,
    main image URL and description; emits a single item holding the full
    list once all expected pages have been scraped."""

    name = 'basco'
    start_urls = ['http://www.basco.com.br/produtos/']

    def parse(self, response):
        """Seed the crawl: reset the accumulators and queue the products
        landing page plus every secondary-menu category link."""
        self.produtos = []
        self.titulos = []
        self.imagems = []
        self.descricoes = []
        links = ['http://www.basco.com.br/produtos']
        links.extend(response.css('#menu_secundario div ::attr(href)').getall())
        for link in links:
            yield scrapy.Request(link, callback=self.parse_produtos)

    def parse_produtos(self, response):
        """Queue every product link found on the page, then scrape the
        current page's title, image and description, falling back to
        'NULL' for any field that cannot be extracted."""
        self.products = response.css('#menu_produtos li ::attr(href)').getall()
        for produto in self.products:
            yield scrapy.Request(produto)
        try:
            self.titulos.append(response.css('h1 ::text').get())
        except Exception:  # narrowed from a bare except
            self.titulos.append('NULL')
        try:
            self.imagems.append(response.css('#grande ::attr(src)').get())
        except Exception:
            self.imagems.append('NULL')
        try:
            # The sixth text node of the left-hand content column holds
            # the product description.
            self.descricoes.append(response.css('.esquerda .conteudo ::text').getall()[5])
        except Exception:
            self.descricoes.append('NULL')
        # NOTE(review): 27 appears to be the expected total number of
        # product pages for this catalogue — confirm if the site changes.
        if len(self.descricoes) == 27:
            for i in range(len(self.titulos)):
                self.produtos.append([self.titulos[i], self.imagems[i], self.descricoes[i]])
            yield {'basco': self.produtos}
e904ea4282882301d69b1dd575369bf096878386 | 6,379 | py | Python | tests/test_plPlug.py | sdss/coordio | 61f5c962b8e3f335259168c9f8e872b4d3fe25d8 | [
"BSD-3-Clause"
] | null | null | null | tests/test_plPlug.py | sdss/coordio | 61f5c962b8e3f335259168c9f8e872b4d3fe25d8 | [
"BSD-3-Clause"
] | 9 | 2021-02-04T00:01:30.000Z | 2021-11-14T22:42:49.000Z | tests/test_plPlug.py | sdss/coordio | 61f5c962b8e3f335259168c9f8e872b4d3fe25d8 | [
"BSD-3-Clause"
] | null | null | null | from coordio.utils import radec2wokxy, wokxy2radec
import time
import matplotlib.pyplot as plt
import numpy
import coordio.fitData as fitData
import os
from astropy.coordinates import SkyCoord
from astropy import units as u
filedir = os.path.dirname(os.path.abspath(__file__))
# apo plate 15017
apo = {}
apo["utcJD"] = 2459249.6184
apo["alt"] = 54 # at the JD supplied...
apo["file"] = os.path.join(filedir, "plPlugMapP-15017.par")
# lco plate 12377
lco = {}
lco["utcJD"] = 2459249.8428
lco["alt"] = 45.18 # at the JD supplied
lco["file"] = os.path.join(filedir, "plPlugMapP-12377.par")
if __name__ == "__main__":
print("APO")
print("-----------")
run_field("APO", plot=True)
print("\n\n")
print("LCO")
print("-----------")
run_field("LCO", plot=True)
plt.show()
# print("\n\n")
| 28.605381 | 79 | 0.557297 | from coordio.utils import radec2wokxy, wokxy2radec
import time
import matplotlib.pyplot as plt
import numpy
import coordio.fitData as fitData
import os
from astropy.coordinates import SkyCoord
from astropy import units as u
filedir = os.path.dirname(os.path.abspath(__file__))
# apo plate 15017
apo = {}
apo["utcJD"] = 2459249.6184
apo["alt"] = 54 # at the JD supplied...
apo["file"] = os.path.join(filedir, "plPlugMapP-15017.par")
# lco plate 12377
lco = {}
lco["utcJD"] = 2459249.8428
lco["alt"] = 45.18 # at the JD supplied
lco["file"] = os.path.join(filedir, "plPlugMapP-12377.par")
def parsePlugmap(plPlugFile):
    """Parse a plPlugMapP ``.par`` file.

    Returns a dict with per-plate scalars (``ha``, ``temp``, ``raCen``,
    ``decCen`` — only when present in the file) and parallel per-fiber
    lists ``xFocal``, ``yFocal``, ``ra``, ``dec`` and ``fiberType``
    ('Boss' for QSO fibers, 'Apogee' for STAR_BHB fibers).
    """
    with open(plPlugFile) as fh:
        lines = fh.read().splitlines()
    info = {
        "xFocal": [],
        "yFocal": [],
        "fiberType": [],
        "ra": [],
        "dec": [],
    }
    for line in lines:
        if line.startswith("ha "):
            # design hour angle is the second whitespace-separated token
            info["ha"] = float(line.split()[1])
        elif line.startswith("temp "):
            info["temp"] = float(line.split()[-1])
        elif line.startswith("raCen "):
            info["raCen"] = float(line.split()[-1])
        elif line.startswith("decCen "):
            info["decCen"] = float(line.split()[-1])
        elif line.startswith("PLUGMAPOBJ "):
            if " QSO " in line:
                # boss fiber: focal x/y follow the QSO token
                focal = line.split(" QSO ")[-1].split()
                fiber = "Boss"
            elif " STAR_BHB " in line:
                focal = line.split(" STAR_BHB ")[-1].split()
                fiber = "Apogee"
            else:
                continue
            radec = line.split(" OBJECT ")[-1].split()
            info["xFocal"].append(float(focal[0]))
            info["yFocal"].append(float(focal[1]))
            info["ra"].append(float(radec[0]))
            info["dec"].append(float(radec[1]))
            info["fiberType"].append(fiber)
    return info
def run_field(siteName, plot=False):
    """End-to-end plate check for one observatory ("APO" or "LCO").

    Converts the plate's target ra/dec to wok x/y at the plate's design
    hour angle, compares against the drilled focal positions, fits a
    translation/rotation/scale model to the residuals, then round-trips
    the wok x/y back to ra/dec. Asserts sub-micron fit RMS (LCO only)
    and < 0.5 arcsec round-trip error. With plot=True, diagnostic
    matplotlib figures are drawn and saved as <site>Errs.png and
    <site>fitErrs.png.
    """
    if siteName == "LCO":
        dd = lco
    else:
        dd = apo
    plateData = parsePlugmap(dd["file"])
    # First pass: find the hour angle implied by the supplied UTC JD.
    xWok, yWok, fieldWarn, ha, pa = radec2wokxy(
        plateData["ra"], plateData["dec"], dd["utcJD"], plateData["fiberType"],
        plateData["raCen"], plateData["decCen"], 0, siteName, dd["utcJD"]
    )
    # Hour-angle offset between the observed and designed configurations.
    dHA = ha - plateData["ha"]
    # convert to hours
    dHA = 24/360.*dHA
    # convert to days
    dHA = dHA/24
    # update time of observation to be at designed hour angle
    timeObs = dd["utcJD"] - dHA
    # Second pass: recompute wok x/y at the plate's design hour angle.
    xWok, yWok, fieldWarn, ha, pa = radec2wokxy(
        plateData["ra"], plateData["dec"], timeObs, plateData["fiberType"],
        plateData["raCen"], plateData["decCen"], 0, siteName, timeObs
    )
    print("obs ha", ha)
    print("design ha", plateData["ha"])
    xFocal = numpy.array(plateData["xFocal"])
    yFocal = numpy.array(plateData["yFocal"])
    # lco xy is backwards
    if siteName == "LCO":
        xFocal = xFocal*-1
        yFocal = yFocal*-1
    # if plot:
    #     plt.figure(figsize=(8,8))
    #     plt.plot(xFocal, yFocal, 'x')
    #     plt.axis("equal")
    #     plt.title("focal")
    #     plt.figure(figsize=(8,8))
    #     plt.plot(xWok, yWok, 'x')
    #     plt.axis("equal")
    #     plt.title("wok")
    # Raw residuals between drilled focal positions and computed wok positions.
    dx = xFocal - xWok
    dy = yFocal - yWok
    # RMS in microns (positions are in mm).
    rmsErr = numpy.sqrt(numpy.mean(dx**2+dy**2))*1000
    if plot:
        plt.figure()
        plt.title("%s\nRaw RMS error %.2f microns"%(siteName, rmsErr))
        plt.hist(numpy.sqrt(dx**2+dy**2)*1000)
        plt.xlabel("err (micron)")
        plt.figure(figsize=(8,8))
        plt.title(
            "%s\nRaw Residuals\nRMS error %.2f microns"%(siteName, rmsErr)
        )
        plt.quiver(xWok,yWok,dx,dy, angles="xy")
        plt.xlabel("x wok (mm)")
        plt.ylabel("y wok (mm)")
        plt.axis("equal")
        plt.savefig("%sErrs.png"%siteName, dpi=150)
    # fit translation, rotation, scale
    fitTransRotScale = fitData.ModelFit(
        model=fitData.TransRotScaleModel(),
        measPos=numpy.array([xWok, yWok]).T,
        nomPos=numpy.array([xFocal, yFocal]).T,
        doRaise=True,
    )
    xyOff, rotAngle, scale = fitTransRotScale.model.getTransRotScale()
    print("xy translation (micron)", xyOff * 1000)
    print("rot (deg)", rotAngle)
    print("scale", scale)
    # Residuals left after removing the fitted trans/rot/scale.
    posErr = fitTransRotScale.getPosError()
    print("posErr shape", posErr.shape)
    rmsErr = numpy.sqrt(numpy.mean(posErr[:,0]**2 + posErr[:,1]**2))*1000
    print("fit rms error (micron)", rmsErr)
    if siteName == "LCO":
        assert rmsErr < 1
    if plot:
        # plt.figure()
        # plt.hist(posErr[:,0]*1000)
        # plt.xlabel("fit x err (micron)")
        # plt.figure()
        # plt.hist(posErr[:,1]*1000)
        # plt.xlabel("fit y err (micron)")
        plt.figure()
        plt.title("%s Fit\nRMS error %.2f microns"%(siteName, rmsErr))
        plt.hist(numpy.sqrt(posErr[:,0]**2+posErr[:,1]**2)*1000)
        plt.xlabel("fit err (micron)")
        plt.figure(figsize=(8,8))
        plt.title(
            "%s Fit Residuals\nRMS error %.2f microns"%(siteName, rmsErr)
        )
        plt.quiver(xWok,yWok,posErr[:,0], posErr[:,1], angles="xy")
        plt.xlabel("x wok (mm)")
        plt.ylabel("y wok (mm)")
        plt.axis("equal")
        plt.savefig("%sfitErrs.png"%siteName, dpi=150)
    # run the reverse
    ra, dec, fieldWarn = wokxy2radec(
        xWok, yWok, plateData["fiberType"], plateData["raCen"],
        plateData["decCen"], 0, siteName, timeObs
    )
    # Compare round-tripped coordinates against the catalogue positions.
    sk1 = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)
    sk2 = SkyCoord(ra=plateData["ra"] * u.deg, dec=plateData["dec"] * u.deg)
    angSep = sk1.separation(sk2)
    asec = numpy.array(angSep)*3600
    assert numpy.max(asec) < 0.5  # less than 0.5 arcsecs round trip
    if plot:
        plt.figure()
        plt.hist(asec)
        plt.title("angular sep (arcsec)")
def test_utils():
    """Round-trip both observatories through run_field (plots disabled)."""
    for site in ("APO", "LCO"):
        run_field(site)
# Manual diagnostic entry point: run both observatories with plotting
# enabled and display the figures interactively.
if __name__ == "__main__":
    print("APO")
    print("-----------")
    run_field("APO", plot=True)
    print("\n\n")
    print("LCO")
    print("-----------")
    run_field("LCO", plot=True)
    plt.show()
    # print("\n\n")
| 5,481 | 0 | 69 |
961b1b6516caff18447da65c62981e65debf064f | 16,322 | py | Python | ver1_0/openassembly/pirate_reputation/templatetags/reputationtags.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | 1 | 2015-11-05T08:22:19.000Z | 2015-11-05T08:22:19.000Z | ver1_0/openassembly/pirate_reputation/templatetags/reputationtags.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | null | null | null | ver1_0/openassembly/pirate_reputation/templatetags/reputationtags.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | 1 | 2018-02-03T18:25:41.000Z | 2018-02-03T18:25:41.000Z | from django import template
from django import forms
import datetime
import sys
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from pirate_reputation.models import Reputation, ReputationDimension, ReputationEvent, AbuseTicket, FeedbackTicket
from pirate_consensus.models import Consensus
from pirate_forum.models import get_rangelist
from pirate_core import namespace_get
import settings
from notification import models as notification
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from customtags.decorators import block_decorator
register = template.Library()
block = block_decorator(register)
get_namespace = namespace_get('pp_reputation')
@block
def pp_get_reputation_events_graph(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation_events_graph user=request.object x=8 y=100 %}
Do stuff with {{ pp_reputation.graph_html }}
{% endpp_get_reputation %}
This template tag dynamically generates the html required for a (x,y) graph
where x is the activity rate and y is the time dimension. This graph shows
users a basic idea of the user's activity rate.
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
x = kwargs.get('x', None)
y = kwargs.get('y', None)
#must be divisible equally by days
min_days = kwargs.get('min_days', None)
#must be divisible equally by days
graph_type = kwargs.get('type', None)
if graph_type == None:
raise ValueError("pp_get_reputation_events_graph requires type argument of 'rating','spectrum', or 'activity'")
elif graph_type == 'activity':
today = datetime.datetime.now()
DD = datetime.timedelta(days=x)
earlier = today - DD
reps = ReputationEvent.objects.filter(initiator=user).order_by('-created_dt').filter(created_dt__gte=earlier)
try:
daylength = (reps[0].created_dt - reps[len(reps) - 1].created_dt).days + 2
except:
daylength = 1
days = min(x, daylength)
#if days == 2:
# days = 1 #side case for first day activity
# x=24
# min_days = 1
#elif days > min_days:
# days = min_days
# x = 1
#else: x = x * days
html, rate_list, mtrx, min_rate, max_rate, mean = grab_graph(reps, x, y, days, min_days)
namespace['days'] = daylength
elif graph_type == 'spectrum' or 'rating':
rate_list, min_rate, max_rate, mean = dist_graph(x, y, user, graph_type)
namespace['x'] = x
namespace['rate_list'] = rate_list
namespace['min'] = min_rate
namespace['max'] = max_rate
namespace['mean'] = int(round(mean))
output = nodelist.render(context)
context.pop()
return output
@block
def pp_get_reputation_events(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation_events user=request.object %}
Do stuff with {{ pp_reputation.reputation_events }}.
{% endpp_get_reputation %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
page = kwargs.get('page', 1)
if page is None:
page = 1
if user is not None and isinstance(user, User):
#get argument score
rep = ReputationEvent.objects.filter(initiator=user).order_by('-created_dt')
cnt = rep.count()
else:
rep = []
cnt = 0
namespace['count'] = cnt
paginator = Paginator(rep, 10)
try:
rep = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
rep = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
rep = paginator.page(paginator.num_pages)
except:
raise
namespace['reputation_events'] = rep
output = nodelist.render(context)
context.pop()
return output
@block
def abuse_ticket_form(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms either to create or to modify arguments.
Usage is as follows:
{% pp_profile_form POST=request.POST object=request.object %}
Do stuff with {{ pp_profile.form }}.
{% endpp_profile_form %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
POST = kwargs.get('POST', None)
if POST and POST.get("form_id") == "report_abuse":
form = ReportAbuseForm(POST)
#new_arg = form.save(commit=False)
if form.is_valid():
report_abuse_new = form.save(commit=False)
report_abuse_new.user = user
report_abuse_new.save()
namespace['complete'] = True
notification.send([settings.DEFAULT_FROM_EMAIL], "abuse_feedback", {
"notice_message": "New Abuse Ticket Received Check out the Admin"})
else:
namespace['errors'] = form.errors
else:
form = ReportAbuseForm()
namespace['form'] = form
output = nodelist.render(context)
context.pop()
return output
@block
def feedback_form(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms either to create or to modify arguments.
Usage is as follows:
{% pp_profile_form POST=request.POST object=request.object %}
Do stuff with {{ pp_profile.form }}.
{% endpp_profile_form %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
POST = kwargs.get('POST', None)
if POST and POST.get("form_id") == "feedback":
form = FeedbackForm(POST)
#new_arg = form.save(commit=False)
if form.is_valid():
feedback_new = form.save(commit=False)
feedback_new.user = user
feedback_new.save()
namespace['complete'] = True
notification.send([settings.DEFAULT_FROM_EMAIL], "abuse_feedback", {
"notice_message": "New Feedback Received Check out the Admin"})
else:
namespace['errors'] = form.errors
else:
form = FeedbackForm()
namespace['form'] = form
output = nodelist.render(context)
context.pop()
return output
@block
def pp_get_reputation(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation user=request.object %}
Do stuff with {{ pp_reputation.reputation }}.
{% endpp_get_reputation %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
if user is not None and isinstance(user, User):
#get argument score
scores = {}
tot_score = 0
for dim in ReputationDimension.objects.all():
rep = Reputation.objects.get_user_score(user, dim)
try:
scores[str(dim)] = rep.score
tot_score += rep.score
except:
pass
#rep does not yet exist
else:
scores = {}
tot_score = 0
namespace['reputation_keys'] = scores.items()
namespace['reputation'] = tot_score
output = nodelist.render(context)
context.pop()
return output
#returns a graph of the distribution of votes for this user, based on dtype
#argument which is equal to 'spectrum' or 'rating' based on the opinion/quality
#grabs an activity graph for the list of reputation events
#generates dynamic html using pixels to create a graph
"""
x: x length
y: y lenght
dayslots: pixels per day
mcheck: if we need to check the matrix, False for empty graphs
numcheck: if x vector determines pixel color, i.e. activity versus opinion graph
"""
#shows distribution of votes on this user
# activity graph designed when length of time is greater than x and we
#must only take a chunk of the events
#returns graph from past activity, when less than x
| 33.863071 | 158 | 0.612241 | from django import template
from django import forms
import datetime
import sys
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from pirate_reputation.models import Reputation, ReputationDimension, ReputationEvent, AbuseTicket, FeedbackTicket
from pirate_consensus.models import Consensus
from pirate_forum.models import get_rangelist
from pirate_core import namespace_get
import settings
from notification import models as notification
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from customtags.decorators import block_decorator
register = template.Library()
block = block_decorator(register)
get_namespace = namespace_get('pp_reputation')
@block
def pp_get_reputation_events_graph(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation_events_graph user=request.object x=8 y=100 %}
Do stuff with {{ pp_reputation.graph_html }}
{% endpp_get_reputation %}
This template tag dynamically generates the html required for a (x,y) graph
where x is the activity rate and y is the time dimension. This graph shows
users a basic idea of the user's activity rate.
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
x = kwargs.get('x', None)
y = kwargs.get('y', None)
#must be divisible equally by days
min_days = kwargs.get('min_days', None)
#must be divisible equally by days
graph_type = kwargs.get('type', None)
if graph_type == None:
raise ValueError("pp_get_reputation_events_graph requires type argument of 'rating','spectrum', or 'activity'")
elif graph_type == 'activity':
today = datetime.datetime.now()
DD = datetime.timedelta(days=x)
earlier = today - DD
reps = ReputationEvent.objects.filter(initiator=user).order_by('-created_dt').filter(created_dt__gte=earlier)
try:
daylength = (reps[0].created_dt - reps[len(reps) - 1].created_dt).days + 2
except:
daylength = 1
days = min(x, daylength)
#if days == 2:
# days = 1 #side case for first day activity
# x=24
# min_days = 1
#elif days > min_days:
# days = min_days
# x = 1
#else: x = x * days
html, rate_list, mtrx, min_rate, max_rate, mean = grab_graph(reps, x, y, days, min_days)
namespace['days'] = daylength
elif graph_type == 'spectrum' or 'rating':
rate_list, min_rate, max_rate, mean = dist_graph(x, y, user, graph_type)
namespace['x'] = x
namespace['rate_list'] = rate_list
namespace['min'] = min_rate
namespace['max'] = max_rate
namespace['mean'] = int(round(mean))
output = nodelist.render(context)
context.pop()
return output
@block
def pp_get_reputation_events(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation_events user=request.object %}
Do stuff with {{ pp_reputation.reputation_events }}.
{% endpp_get_reputation %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
page = kwargs.get('page', 1)
if page is None:
page = 1
if user is not None and isinstance(user, User):
#get argument score
rep = ReputationEvent.objects.filter(initiator=user).order_by('-created_dt')
cnt = rep.count()
else:
rep = []
cnt = 0
namespace['count'] = cnt
paginator = Paginator(rep, 10)
try:
rep = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
rep = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
rep = paginator.page(paginator.num_pages)
except:
raise
namespace['reputation_events'] = rep
output = nodelist.render(context)
context.pop()
return output
class FeedbackForm(forms.ModelForm):
def save(self, commit=True):
new_prof = super(FeedbackForm, self).save(commit=commit)
return new_prof
class Meta:
model = FeedbackTicket
exclude = ('user', 'created_dt')
form_id = forms.CharField(widget=forms.HiddenInput(), initial="feedback")
feedback = forms.CharField(max_length=500, widget=forms.Textarea)
class ReportAbuseForm(forms.ModelForm):
def save(self, commit=True):
new_prof = super(ReportAbuseForm, self).save(commit=commit)
return new_prof
class Meta:
model = AbuseTicket
exclude = ('user', 'created_dt', 'fixed')
form_id = forms.CharField(widget=forms.HiddenInput(), initial="report_abuse")
description_of_abuse = forms.CharField(max_length=500, widget=forms.Textarea)
@block
def abuse_ticket_form(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms either to create or to modify arguments.
Usage is as follows:
{% pp_profile_form POST=request.POST object=request.object %}
Do stuff with {{ pp_profile.form }}.
{% endpp_profile_form %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
POST = kwargs.get('POST', None)
if POST and POST.get("form_id") == "report_abuse":
form = ReportAbuseForm(POST)
#new_arg = form.save(commit=False)
if form.is_valid():
report_abuse_new = form.save(commit=False)
report_abuse_new.user = user
report_abuse_new.save()
namespace['complete'] = True
notification.send([settings.DEFAULT_FROM_EMAIL], "abuse_feedback", {
"notice_message": "New Abuse Ticket Received Check out the Admin"})
else:
namespace['errors'] = form.errors
else:
form = ReportAbuseForm()
namespace['form'] = form
output = nodelist.render(context)
context.pop()
return output
@block
def feedback_form(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms either to create or to modify arguments.
Usage is as follows:
{% pp_profile_form POST=request.POST object=request.object %}
Do stuff with {{ pp_profile.form }}.
{% endpp_profile_form %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
POST = kwargs.get('POST', None)
if POST and POST.get("form_id") == "feedback":
form = FeedbackForm(POST)
#new_arg = form.save(commit=False)
if form.is_valid():
feedback_new = form.save(commit=False)
feedback_new.user = user
feedback_new.save()
namespace['complete'] = True
notification.send([settings.DEFAULT_FROM_EMAIL], "abuse_feedback", {
"notice_message": "New Feedback Received Check out the Admin"})
else:
namespace['errors'] = form.errors
else:
form = FeedbackForm()
namespace['form'] = form
output = nodelist.render(context)
context.pop()
return output
@block
def pp_get_reputation(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation user=request.object %}
Do stuff with {{ pp_reputation.reputation }}.
{% endpp_get_reputation %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
if user is not None and isinstance(user, User):
#get argument score
scores = {}
tot_score = 0
for dim in ReputationDimension.objects.all():
rep = Reputation.objects.get_user_score(user, dim)
try:
scores[str(dim)] = rep.score
tot_score += rep.score
except:
pass
#rep does not yet exist
else:
scores = {}
tot_score = 0
namespace['reputation_keys'] = scores.items()
namespace['reputation'] = tot_score
output = nodelist.render(context)
context.pop()
return output
#returns a graph of the distribution of votes for this user, based on dtype
#argument which is equal to 'spectrum' or 'rating' based on the opinion/quality
def dist_graph(x,y,user,dtype):
rate_list, max_rate, min_rate, mean = distribution_graph(x,user,dtype=dtype)
#mtrx = build_graph(rate_list, max_rate, y)
#num = x/11
#px = '1px'
#dayslots=1;mcheck=True
#html = generate_graph_html(x,y,dayslots,mtrx,mcheck,True,'1px',dtype)
return rate_list, min_rate, max_rate, mean
#grabs an activity graph for the list of reputation events
def grab_graph(reps, x, y, days, min_days):
dayslots = max(min(x/days,24),1) #must be at least one slot per day or less than equal to 24
padding = min(0,x - (days*24))
#iterate through rates and build matrix of boolean values
if len(reps) != 0:
if days < x: rate_list, max_rate, min_rate, mean = activity_graph_greater(reps, x, y, days, dayslots)
else: rate_list, max_rate, min_rate, mean = activity_graph_greater(reps, x, y, days, dayslots)
mtrx = build_graph(rate_list, max_rate, y)
mtrx = [[0 for i in range(y)] for j in range(padding)] + mtrx
mcheck = True
else: #there are no reputation events as of yet
rate_list = []
mtrx = [i for i in range(x)]
min_rate = 0
max_rate = 0
mean = 0
#generate html
mcheck = False
html = generate_graph_html(x,y,dayslots,mtrx,mcheck,False,'1px', None)
return html, rate_list, mtrx, min_rate, max_rate, mean
#generates dynamic html using pixels to create a graph
"""
x: x length
y: y lenght
dayslots: pixels per day
mcheck: if we need to check the matrix, False for empty graphs
numcheck: if x vector determines pixel color, i.e. activity versus opinion graph
"""
def generate_graph_html(x,y,dayslots,mtrx,mcheck, numcheck, px, dtype):
if dtype == 'spectrum': num = x/11
elif dtype == 'rating': num = x/5
html = "<div class='master_graph'><div class='graph'>"
for i in range(len(mtrx)+2):html+='<img style="border:0;margin:' + px + ';" src="/static/border_pixel.gif">'
html+='</div>'
for i in range(y):
html += "<div class='graph'>" +'<img style="border:0;margin:' + px + ';" src="/static/border_pixel.gif">'
for j in range(len(mtrx)):
if mcheck:
if mtrx[j][i] == 1:
if numcheck:
html += '<img style="border:0;margin:'+px+';" src="/static/pixel_' + str((j/num)) + '.gif">'
else: html += '<img style="border:0;margin:' + px + ';" src="/static/pixel_0.gif">'
else: html += '<img style="border:0;margin:' + px + ';" src="/static/trans_pixel.gif">'
else: html += '<img style="border:0;margin:' + px + ';" src="/static/trans_pixel.gif">'
html += '<img style="border:0;margin:' + px + ';" src="/static/border_pixel.gif">'+ "</div>"
html += "<div class='graph'>"
for i in range(len(mtrx)+2):html+='<img style="border:0;margin:' + px + ';" src="/static/border_pixel.gif">'
html+='</div></div>'
return html
def build_graph(rate_list, max_rate, y):
#iterate through matrix and build html/css graph
mtrx = []
for rate in rate_list:
try: rt = float(rate)/max_rate * y
except: rt = 0
col = []
for j in range(y):
if rt > j: col.append(1)
else: col.append(0)
col.reverse()
mtrx.append(col)
return mtrx
#shows distribution of votes on this user
def distribution_graph(x,user,dtype='spectrum'):
contype = ContentType.objects.get_for_model(user)
cons, is_new = Consensus.objects.get_or_create(content_type=contype,
object_pk=user.pk,
vote_type= contype,
parent_pk=user.pk)
if is_new: cons.intiate_vote_distributions()
if dtype == 'spectrum':
l = cons.spectrum.get_list()
idx = 1
elif dtype == 'rating':
l = cons.rating.get_list()
idx = 2
m_ax = 0
ret_list = []
num = x/len(l)
m_in = sys.maxint
tot = 0
for spec in l:
if spec[idx] > m_ax:
m_ax = spec[idx]
if spec[idx] < m_in:
m_in = spec[idx]
tot+=spec[idx]
ret_list.extend([spec[idx] for i in range(num)])
mean = float(tot)/len(l)
return ret_list, m_ax, m_in, mean
# activity graph designed when length of time is greater than x and we
#must only take a chunk of the events
def activity_graph_greater(reps,x,y,days,dayslots):
today = datetime.datetime.now()
DD = datetime.timedelta(days=x)
rate_list = []
earlier = today - DD
itr = 0
min_rate = sys.maxint
max_rate = 0
rate = 0
currep = reps[itr]
for i in range(x,0,-1):
while currep.created_dt.day == (earlier + datetime.timedelta(days=i)).day and currep.created_dt.month == (earlier + datetime.timedelta(days=i)).month:
itr+=1 # next rep event
rate+=1
try: currep = reps[itr]
except: break
rate_list.append(rate)
if rate > max_rate: max_rate = rate
if rate < min_rate: min_rate = rate
rate = 0
mean = sum(rate_list)/float(len(rate_list))
rate_list.reverse()
return rate_list, max_rate, min_rate, mean
#returns graph from past activity, when less than x
def activity_graph(reps,x,y,days,dayslots):
    """Bucket reputation events into per-day hour slots.

    ``reps`` is iterated once; each event's ``created_dt`` is placed into
    one of ``dayslots`` hour ranges of its day.  Returns
    ``(rate_list, max_rate, min_rate, mean)``.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; several lines below look buggy (flagged inline) -- verify
    against project history before relying on exact output.
    """
    tday = reps[0].created_dt.day
    rate = 0
    min_rate = sys.maxint  # Python 2 only (sys.maxint was removed in 3.x)
    max_rate = 0
    num_days = 0
    rate_list = []
    tot_rate = 0.0
    range_list = []
    r = 0
    # Build the (start_hour, end_hour) slot boundaries for one day.
    for i in range(dayslots):
        range_list.append((r,r+24/dayslots))
        r = r + 24/dayslots
    slots_list = [0 for i in range_list]
    for rep in reps: #iterate through each reputation event and segment into days and slots per day
        tot_rate+=1
        tmp_idx = 0
        if rep.created_dt.day == tday:
            for i in range_list: #check which slot in the day this event belongs to, iterate
                if rep.created_dt.hour in range(i[0],i[1]):
                    slot_idx = tmp_idx
                tmp_idx+=1
            # NOTE(review): ``slot_idx`` may still hold a previous event's
            # index when no slot matches; the bare except hides that.
            try:slots_list[slot_idx] += 1
            except:pass #no data yet
        if rep.created_dt.day != tday:
            # Day changed: fold the finished day's slots into min/max.
            for rate in slots_list:
                if rate < min_rate: #check min rate
                    min_rate = rate
            for rate in slots_list:
                if rate > max_rate: #check max rate
                    max_rate = rate
            # NOTE(review): ``len(range_list[0])`` is always 2 (a pair), and
            # ``tday < rep.created_dt.day`` makes the product negative --
            # this gap calculation looks wrong; confirm intended behaviour.
            if tday < rep.created_dt.day: diff = (tday - rep.created_dt.day) * len(range_list[0])
            else: diff = len(range_list[0])
            rate_list = slots_list + rate_list
            for i in range(diff-1):
                rate_list = [0 for i in range(dayslots)] + rate_list
            tday = rep.created_dt.day
            num_days+=diff
            slots_list = [0 for i in range_list]
            # NOTE(review): ``tmp_idx`` is NOT reset before this second slot
            # scan, so ``slot_idx`` can index past the list end -- likely bug.
            for i in range_list: #check which slot in the day this event belongs to, iterate
                if rep.created_dt.hour in range(i[0],i[1]):
                    slot_idx = tmp_idx
                tmp_idx+=1
            slots_list[slot_idx] += 1
    # Flush the final (still open) day after the loop; ``rep`` is the last event.
    for rate in slots_list:
        if rate < min_rate: #check min rate
            min_rate = rate
    for rate in slots_list:
        if rate > max_rate: #check max rate
            max_rate = rate
    if tday < rep.created_dt.day: diff = (tday - rep.created_dt.day) * len(range_list[0])
    else: diff = len(range_list[0])
    rate_list = slots_list + rate_list
    for i in range(diff-1):
        rate_list = [0 for i in range(dayslots)] + rate_list
    tday = rep.created_dt.day
    num_days+=diff
    mean = tot_rate/days
    return rate_list, max_rate, min_rate, mean
| 7,189 | 587 | 204 |
48845d95b4825fc29c5c786687f87174fa2bb38b | 27 | py | Python | urbanoctowaddle/__init__.py | TaiSakuma/urbanoctowaddle | 0d297f0c47c97cc34d8816c78121b555efd79e7c | [
"BSD-3-Clause"
] | null | null | null | urbanoctowaddle/__init__.py | TaiSakuma/urbanoctowaddle | 0d297f0c47c97cc34d8816c78121b555efd79e7c | [
"BSD-3-Clause"
] | null | null | null | urbanoctowaddle/__init__.py | TaiSakuma/urbanoctowaddle | 0d297f0c47c97cc34d8816c78121b555efd79e7c | [
"BSD-3-Clause"
] | null | null | null | from .waddle import Waddle
| 13.5 | 26 | 0.814815 | from .waddle import Waddle
| 0 | 0 | 0 |
cc16e90f54c4fe075e7a60b9f940edd68be3e727 | 52 | py | Python | white_matter/utils/__init__.py | alex4200/Long-range-micro-connectome | 833aad78bc71e49a5059b276e65d3fef21686f9d | [
"BSD-3-Clause"
] | 9 | 2019-05-01T13:12:17.000Z | 2021-11-23T10:34:56.000Z | white_matter/utils/__init__.py | alex4200/Long-range-micro-connectome | 833aad78bc71e49a5059b276e65d3fef21686f9d | [
"BSD-3-Clause"
] | 2 | 2022-02-03T13:56:22.000Z | 2022-02-04T07:16:37.000Z | white_matter/utils/__init__.py | alex4200/Long-range-micro-connectome | 833aad78bc71e49a5059b276e65d3fef21686f9d | [
"BSD-3-Clause"
] | 1 | 2022-02-03T12:05:12.000Z | 2022-02-03T12:05:12.000Z | from .query_streamlines import StreamlineDownloader
| 26 | 51 | 0.903846 | from .query_streamlines import StreamlineDownloader
| 0 | 0 | 0 |
2c3dd1949bb2955c67ca4bee97e74cf524bc7b22 | 5,089 | py | Python | gaea/gaea/log/main.py | Yo-main/akingbee.com | 144940df99900226073eb4bf721a6ab407a3911d | [
"MIT"
] | null | null | null | gaea/gaea/log/main.py | Yo-main/akingbee.com | 144940df99900226073eb4bf721a6ab407a3911d | [
"MIT"
] | 20 | 2019-10-06T20:24:49.000Z | 2022-02-28T01:55:49.000Z | gaea/gaea/log/main.py | yo-main/akingbee | a8b4b307e2262f98eb93459c6dd4207e707cee1e | [
"MIT"
] | null | null | null | import os
import time
import json
import datetime
import logging
from logging.handlers import RotatingFileHandler
import pprint
from gaea.config import CONFIG
# class ContextFilter(logging.Filter):
# def filter(self, record):
# if flask.has_request_context():
# # when logging out, the user_id is already set
# if not hasattr(record, "user_id"):
# record.user_id = flask.session.get("user_id") or 0
# record.request_form = {
# key: item
# for key, item in flask.request.form.items()
# if key not in ("password", "pwd")
# }
# record.request_id = flask.g.request_uuid
# record.request_path = flask.request.path
# record.request_method = flask.request.method
# record.request_user_agent = flask.request.user_agent
# record.request_ip_address = flask.request.remote_addr
# return True
class CustomLogger(logging.Logger):
"""Custom logger"""
# pylint: disable=arguments-differ
EXCLUDED_FIELDS = (
"msg",
"asctime",
"args",
"filename",
"module",
"created",
"msecs",
"relativeCreated",
"thread",
"threadName",
"processName",
"process",
"levelno",
)
LOG_LEVEL = CONFIG.get("LOG_LEVEL", logging.INFO)
logger = CustomLogger(CONFIG.SERVICE_NAME)
logger.setLevel(LOG_LEVEL)
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
json_formatter = JSONFormatter(datefmt=DATE_FORMAT)
pretty_json_formatter = PrettyJSONFormatter(datefmt=DATE_FORMAT)
normal_formatter = logging.Formatter(
fmt="{asctime} | {levelname:8s} | {message}",
datefmt=DATE_FORMAT,
style="{",
)
if CONFIG.get("LOG_TO_FILE"):
log_file_name = f"{CONFIG.SERVICE_NAME}.log"
log_path = os.path.join(CONFIG.LOGS_FOLDER_NAME, log_file_name)
file_handler = RotatingFileHandler(log_path, "a", 1_000_000, 100)
file_handler.setLevel(LOG_LEVEL)
file_handler.setFormatter(json_formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(LOG_LEVEL)
if CONFIG.get("LOG_FORMAT") == "minimize":
stream_log_format = normal_formatter
elif CONFIG.get("LOG_FORMAT") == "json":
stream_log_format = json_formatter
else:
stream_log_format = pretty_json_formatter
stream_handler.setFormatter(stream_log_format)
logger.addHandler(stream_handler)
# add context within flask.request context
# def init_logging_filter():
# context_filter = ContextFilter()
# logger.addFilter(context_filter)
| 26.925926 | 79 | 0.612694 | import os
import time
import json
import datetime
import logging
from logging.handlers import RotatingFileHandler
import pprint
from gaea.config import CONFIG
# class ContextFilter(logging.Filter):
# def filter(self, record):
# if flask.has_request_context():
# # when logging out, the user_id is already set
# if not hasattr(record, "user_id"):
# record.user_id = flask.session.get("user_id") or 0
# record.request_form = {
# key: item
# for key, item in flask.request.form.items()
# if key not in ("password", "pwd")
# }
# record.request_id = flask.g.request_uuid
# record.request_path = flask.request.path
# record.request_method = flask.request.method
# record.request_user_agent = flask.request.user_agent
# record.request_ip_address = flask.request.remote_addr
# return True
class CustomLogger(logging.Logger):
    """Logger whose level methods accept arbitrary keyword arguments and
    attach them to the record as a structured ``extra`` payload."""

    # pylint: disable=arguments-differ

    def debug(self, msg, *args, exc_info=False, stack_info=False, **kwargs):
        payload = {"extra": kwargs}
        self._log(logging.DEBUG, msg, args,
                  exc_info=exc_info, stack_info=stack_info, extra=payload)

    def info(self, msg, *args, exc_info=False, stack_info=False, **kwargs):
        payload = {"extra": kwargs}
        self._log(logging.INFO, msg, args,
                  exc_info=exc_info, stack_info=stack_info, extra=payload)

    def warning(self, msg, *args, exc_info=False, stack_info=False, **kwargs):
        payload = {"extra": kwargs}
        self._log(logging.WARNING, msg, args,
                  exc_info=exc_info, stack_info=stack_info, extra=payload)

    def critical(self, msg, *args, exc_info=False, stack_info=False, **kwargs):
        payload = {"extra": kwargs}
        self._log(logging.CRITICAL, msg, args,
                  exc_info=exc_info, stack_info=stack_info, extra=payload)

    def error(self, msg, *args, exc_info=False, stack_info=False, **kwargs):
        payload = {"extra": kwargs}
        self._log(logging.ERROR, msg, args,
                  exc_info=exc_info, stack_info=stack_info, extra=payload)

    def exception(self, msg, *args, stack_info=False, exc_info=True, **kwargs):
        # Same as error(), but exc_info defaults to True for except blocks.
        payload = {"extra": kwargs}
        self._log(logging.ERROR, msg, args,
                  exc_info=exc_info, stack_info=stack_info, extra=payload)
# LogRecord attributes that are dropped from the structured payload before
# serialization (internal/duplicated bookkeeping fields).  Consumed by the
# formatters below via membership tests.
EXCLUDED_FIELDS = (
    "msg",
    "asctime",
    "args",
    "filename",
    "module",
    "created",
    "msecs",
    "relativeCreated",
    "thread",
    "threadName",
    "processName",
    "process",
    "levelno",
)
class BaseFormatter(logging.Formatter):
    """Shared formatter that turns a LogRecord into a plain dict.

    NOTE(review): ``formatTime`` ignores both ``datefmt`` and the
    ``converter`` attribute and always emits an ISO-8601 UTC timestamp --
    presumably intentional; confirm before relying on ``datefmt``.
    """
    converter = time.gmtime

    def formatTime(self, record, datefmt):
        # Always ISO-8601 in UTC, regardless of ``datefmt``.
        return datetime.datetime.fromtimestamp(
            record.created, tz=datetime.timezone.utc
        ).isoformat()

    def format(self, record):
        # Resolve %-style message args, stamp the record, then strip
        # None values and the internal bookkeeping fields.
        record.message = record.getMessage()
        setattr(record, "timestamp", self.formatTime(record, self.datefmt))
        return {
            key: item
            for key, item in record.__dict__.items()
            if item is not None and key not in EXCLUDED_FIELDS
        }
class JSONFormatter(BaseFormatter):
    """Render the structured record dict as a single-line JSON string."""

    def format(self, record):
        structured = super().format(record)
        # default=str keeps non-JSON-native values (datetimes, ...) serializable.
        return json.dumps(structured, default=str)
class PrettyJSONFormatter(BaseFormatter):
    """Render the structured record dict in human-readable pprint form."""

    def format(self, record):
        structured = super().format(record)
        return pprint.pformat(structured)
# Minimum level comes from config, defaulting to INFO.
LOG_LEVEL = CONFIG.get("LOG_LEVEL", logging.INFO)
# Single application-wide logger instance, named after the service.
logger = CustomLogger(CONFIG.SERVICE_NAME)
logger.setLevel(LOG_LEVEL)
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
json_formatter = JSONFormatter(datefmt=DATE_FORMAT)
pretty_json_formatter = PrettyJSONFormatter(datefmt=DATE_FORMAT)
# Plain-text formatter used for the "minimize" console format.
normal_formatter = logging.Formatter(
    fmt="{asctime} | {levelname:8s} | {message}",
    datefmt=DATE_FORMAT,
    style="{",
)
# Optional rotating file output (always JSON): up to 100 backups of ~1 MB each.
if CONFIG.get("LOG_TO_FILE"):
    log_file_name = f"{CONFIG.SERVICE_NAME}.log"
    log_path = os.path.join(CONFIG.LOGS_FOLDER_NAME, log_file_name)
    file_handler = RotatingFileHandler(log_path, "a", 1_000_000, 100)
    file_handler.setLevel(LOG_LEVEL)
    file_handler.setFormatter(json_formatter)
    logger.addHandler(file_handler)
# Console output; format chosen by the LOG_FORMAT setting
# ("minimize" -> plain text, "json" -> compact JSON, anything else -> pretty).
stream_handler = logging.StreamHandler()
stream_handler.setLevel(LOG_LEVEL)
if CONFIG.get("LOG_FORMAT") == "minimize":
    stream_log_format = normal_formatter
elif CONFIG.get("LOG_FORMAT") == "json":
    stream_log_format = json_formatter
else:
    stream_log_format = pretty_json_formatter
stream_handler.setFormatter(stream_log_format)
logger.addHandler(stream_handler)
# add context within flask.request context
# def init_logging_filter():
# context_filter = ContextFilter()
# logger.addFilter(context_filter)
| 2,093 | 134 | 283 |
70b89eb4aa7c16b4b0f366374cd0ae72f88c24de | 524 | py | Python | Tutorials/Graph/ChangeBackgroundColor/ChangeBackgroundColour.py | BlueTurtle01/PydotTutorials | d1d1cebd41069c9f93a8490dc5dff0ab06ea3101 | [
"MIT"
] | null | null | null | Tutorials/Graph/ChangeBackgroundColor/ChangeBackgroundColour.py | BlueTurtle01/PydotTutorials | d1d1cebd41069c9f93a8490dc5dff0ab06ea3101 | [
"MIT"
] | null | null | null | Tutorials/Graph/ChangeBackgroundColor/ChangeBackgroundColour.py | BlueTurtle01/PydotTutorials | d1d1cebd41069c9f93a8490dc5dff0ab06ea3101 | [
"MIT"
] | null | null | null | import pydot
# Create the graph
# include the "bgcolor" argument with a string value.
graph = pydot.Dot("my_graph", graph_type="graph", bgcolor="yellow")
# I have added a node so we can better see that our graph creation has worked. This is naturally a trivial
# graph as it has no edges, but as a minimum working example it suffices.
# Add the node - replace "Node Name" with your desired Node Title in string form
graph.add_node(pydot.Node("Node Name"))
# Save the output
graph.write_png("ChangeBackgroundColour.png")
| 34.933333 | 106 | 0.757634 | import pydot
# Create the graph
# include the "bgcolor" argument with a string value.
graph = pydot.Dot("my_graph", graph_type="graph", bgcolor="yellow")
# I have added a node so we can better see that our graph creation has worked. This is naturally a trivial
# graph as it has no edges, but as a minimum working example it suffices.
# Add the node - replace "Node Name" with your desired Node Title in string form
graph.add_node(pydot.Node("Node Name"))
# Save the output
graph.write_png("ChangeBackgroundColour.png")
| 0 | 0 | 0 |
cae2f1c9fb4552a8433e08a5926dbec94066575a | 5,024 | py | Python | src/restfx/middleware/middlewares/session.py | mgbin088/restfx | 86a499a9a4396829e2c40428feb8b2ee13406d52 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-01-28T00:54:10.000Z | 2021-01-28T00:54:10.000Z | src/restfx/middleware/middlewares/session.py | mgbin088/restfx | 86a499a9a4396829e2c40428feb8b2ee13406d52 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | src/restfx/middleware/middlewares/session.py | mgbin088/restfx | 86a499a9a4396829e2c40428feb8b2ee13406d52 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | import string
import time
from ...app_context import AppContext
from ...middleware.interface import MiddlewareBase
from ...session.interfaces import ISessionProvider
from ...util import md5, b64
"""
session id 算法:
其中包含 useragent 和 remote_addr 用于标识一个远程客户端
其值组合成 remote_addr#useragent,计算得到 md5 (length=32)
在得到的 md5 后添加随机字符(length=32),得到一个 64 位长的串
使用 app id 分别对 md5 和随机串进行 xor 计算,得到的即是 密文 (session id)
"""
_salt = 'hyjiacan'
_salt_chars = [ord(ch) for ch in list(_salt)]
_padding_len = 24
_padding_chars = string.ascii_letters + string.digits
| 32.623377 | 89 | 0.591959 | import string
import time
from ...app_context import AppContext
from ...middleware.interface import MiddlewareBase
from ...session.interfaces import ISessionProvider
from ...util import md5, b64
"""
session id 算法:
其中包含 useragent 和 remote_addr 用于标识一个远程客户端
其值组合成 remote_addr#useragent,计算得到 md5 (length=32)
在得到的 md5 后添加随机字符(length=32),得到一个 64 位长的串
使用 app id 分别对 md5 和随机串进行 xor 计算,得到的即是 密文 (session id)
"""
_salt = 'hyjiacan'
_salt_chars = [ord(ch) for ch in list(_salt)]
_padding_len = 24
_padding_chars = string.ascii_letters + string.digits
def _get_padding_chars():
    """Return the 32 padding character codes: salt codes, then random codes."""
    import random
    sampled = random.sample(_padding_chars, _padding_len)
    return _salt_chars + [ord(ch) for ch in sampled]
def _encrypt_session_id(key, app_id):
    """XOR-obfuscate ``key`` (a 32-char digest) with the app id.

    ``app_id`` is a list of character codes.  The payload is the key's
    codes followed by 32 padding codes (salt + random), each XOR-ed with
    the corresponding app-id code, then base64-encoded.

    NOTE(review): XOR with a static app id is obfuscation, not encryption;
    do not treat the resulting cookie value as cryptographically protected.
    """
    a = [ord(ch) for ch in key]
    b = _get_padding_chars()
    for i in range(32):
        a[i] = a[i] ^ app_id[i]
        b[i] = b[i] ^ app_id[i]
    return b64.enc_str(bytes(a + b))
def _decrypt_session_id(session_id, app_id):
    """Reverse :func:`_encrypt_session_id`.

    Returns the original 32-char key, or ``None`` when the decoded padding
    does not start with the expected salt (wrong app id or tampered value).
    """
    temp = list(b64.dec_bytes(session_id))
    a = temp[0:32]  # XOR-ed key codes
    b = temp[32:]   # XOR-ed padding codes (salt + random)
    key = []
    padding = []
    for i in range(32):
        key.append(chr(a[i] ^ app_id[i]))
        padding.append(chr(b[i] ^ app_id[i]))
    # The salt prefix acts as the integrity/authenticity check.
    if not ''.join(padding).startswith(_salt):
        return None
    return ''.join(key)
class SessionMiddleware(MiddlewareBase):
    """Middleware that binds a session object to every request.

    The session id cookie is derived from the client address, user agent
    and app id (see the module docstring), so a cookie cannot be replayed
    from a different client or application.
    """

    def __init__(self, session_provider: ISessionProvider,
                 session_name='sessionid',
                 cookie_max_age=None,
                 cookie_expires=None,
                 cookie_path="/",
                 cookie_domain=None,
                 cookie_secure=False,
                 cookie_samesite=None,
                 cookie_httponly=True
                 ):
        """Store the session provider and the cookie attributes applied
        when the session cookie is (re-)written on each response."""
        self.session_provider = session_provider
        self.session_name = session_name
        self.cookie_max_age = cookie_max_age
        self.cookie_expires = cookie_expires
        self.cookie_path = cookie_path
        self.cookie_domain = cookie_domain
        self.cookie_secure = cookie_secure
        self.cookie_samesite = cookie_samesite
        self.cookie_httponly = cookie_httponly

    def process_request(self, request, meta, **kwargs):
        """Attach ``request.session``, creating a fresh session whenever the
        cookie is absent, undecodable, mismatched, expired or purged."""
        context = AppContext.get(request.app_id)
        if context is None or self.session_provider is None:
            return
        """
        :type: HttpSession
        """
        # Client fingerprint: addr + user agent + app id, base64-ed then md5-ed.
        key = md5.hash_str(b64.enc_bytes('%s#%s#%s' % (
            request.remote_addr, str(request.user_agent), context.app_id
        )))
        app_id = [ord(ch) for ch in list(context.app_id)]
        new_session_id = _encrypt_session_id(key, app_id)
        # No session id in the cookie: start a brand-new session.
        if self.session_name not in request.cookies:
            request.session = self.session_provider.create(new_session_id)
            return
        # noinspection PyBroadException
        try:
            old_session_id = request.cookies[self.session_name]
            cookie_key = _decrypt_session_id(old_session_id, app_id)
        except Exception as e:
            context.logger.warning('Cannot decode session id from cookie: %s' % repr(e))
            request.session = self.session_provider.create(new_session_id)
            return
        # Validate the decoded session id against this client's fingerprint.
        if key != cookie_key:
            context.logger.warning('Invalid session key: expected "%s", got "%s"' % (
                new_session_id, cookie_key))
            request.session = self.session_provider.create(new_session_id)
            return
        request.session = self.session_provider.get(old_session_id)
        # Session already expired, or was purged from the store.
        if request.session is None:
            request.session = self.session_provider.create(new_session_id)
            return
        now = time.time()
        # Session expired according to the provider's policy.
        if self.session_provider.is_expired(request.session):
            request.session = self.session_provider.create(new_session_id)
            return
        request.session.last_access_time = now

    def process_invoke(self, request, meta, **kwargs):
        """No-op: sessions need no work at invoke time."""
        pass

    def process_return(self, request, meta, data, **kwargs):
        """No-op: sessions need no work at return time."""
        pass

    def process_response(self, request, meta, response, **kwargs):
        """Flush the session and rewrite its cookie on the response."""
        # Write only once the response completes, to reduce I/O.
        if not request.session:
            return
        request.session.flush()
        response.set_cookie(self.session_name,
                            request.session.id,
                            max_age=self.cookie_max_age,
                            expires=self.cookie_expires,
                            path=self.cookie_path,
                            domain=self.cookie_domain,
                            secure=self.cookie_secure,
                            httponly=self.cookie_httponly,
                            samesite=self.cookie_samesite,
                            )

    def dispose(self):
        """Release the underlying session provider's resources."""
        self.session_provider.dispose()
b95f7a54169f5382aa3a3dee0c6801b497a47e32 | 479 | py | Python | assignements/simple_queue_publish.py | Antoine01100/BachelorDIM-Lectures-Algorithms-2020 | 78b02c75127666618ae3724e231be2c0b936a1b7 | [
"MIT"
] | null | null | null | assignements/simple_queue_publish.py | Antoine01100/BachelorDIM-Lectures-Algorithms-2020 | 78b02c75127666618ae3724e231be2c0b936a1b7 | [
"MIT"
] | null | null | null | assignements/simple_queue_publish.py | Antoine01100/BachelorDIM-Lectures-Algorithms-2020 | 78b02c75127666618ae3724e231be2c0b936a1b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 13:55:28 2020
@author: bouvaran
"""
import mykeys
import pika
AMQP_URL = mykeys.cloudamplink
connection = pika.BlockingConnection(pika.URLParameters(AMQP_URL))
channel = connection.channel()
channel.queue_declare(queue='presentation’')
channel.basic_publish(exchange='',
routing_key='presentation’',
body='Hello World!')
print("[Antoine_le_bg] salut la pleb")
connection.close()
| 19.16 | 66 | 0.680585 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 13:55:28 2020
@author: bouvaran
"""
import mykeys
import pika
AMQP_URL = mykeys.cloudamplink
connection = pika.BlockingConnection(pika.URLParameters(AMQP_URL))
channel = connection.channel()
channel.queue_declare(queue='presentation’')
channel.basic_publish(exchange='',
routing_key='presentation’',
body='Hello World!')
print("[Antoine_le_bg] salut la pleb")
connection.close()
| 0 | 0 | 0 |
b206e2339dcf609d1ed6ee6f2a50da09a2762c08 | 7,140 | py | Python | csv2sql/core/type_inference.py | ymoch/csv2sql | 22e20c1ccb7a5b21bacec6bd94b72d3c2e06bb4a | [
"MIT"
] | 7 | 2017-03-07T03:05:12.000Z | 2021-03-19T17:12:46.000Z | csv2sql/core/type_inference.py | ymoch/csv2sql | 22e20c1ccb7a5b21bacec6bd94b72d3c2e06bb4a | [
"MIT"
] | 15 | 2017-02-06T17:11:01.000Z | 2018-08-18T02:55:17.000Z | csv2sql/core/type_inference.py | ymoch/csv2sql | 22e20c1ccb7a5b21bacec6bd94b72d3c2e06bb4a | [
"MIT"
] | 5 | 2017-02-05T18:20:00.000Z | 2021-11-14T20:20:42.000Z | """Type pattern."""
import re
import decimal
import operator
import itertools
import functools
from csv2sql.core.error import InterpretationError, TypeInferenceError
_COMPATIBLE_PREDICATES = {
'int': functools.partial(_compatible, int),
'float': functools.partial(_compatible, float),
}
_DEFAULT_NULL_VALUE = ''
_PREDICATE_GENERATORS = {
'compatible': _create_compatible_predicate,
'less-than': functools.partial(_create_compare_predicate, operator.lt),
'less-than-or-equal-to': functools.partial(
_create_compare_predicate, operator.le),
'greater-than': functools.partial(_create_compare_predicate, operator.gt),
'greater-than-or-equal-to': functools.partial(
_create_compare_predicate, operator.ge),
'shorter-than': _create_shorter_than_predicate,
'match': _create_match_predicate,
'all-of': _create_all_of_predicate,
'any-of': _create_any_of_predicate,
'any': _create_any_predicate,
'not': _create_not_predicate,
}
def interpret_predicate(obj):
"""Interpret a predicate."""
try:
predicate_type = obj['type']
except:
raise InterpretationError('Predicate type must be specified.')
try:
predicate_generator = _PREDICATE_GENERATORS[predicate_type]
except:
raise InterpretationError(
'Predicate type`{0}` is invalid'.format(predicate_type))
args = obj.get('args', []) # `args` is an optional value.
if isinstance(args, (str, bytes)) or not hasattr(args, '__iter__'):
args = [args]
predicate = predicate_generator(args) # Can raise InterpretationError.
return predicate
def interpret_patterns(obj):
"""Interpret the type-pattern object."""
return [_interpret_one_type_pattern(item) for item in obj]
class TypeInferrer:
"""Infers the type while reading items."""
def __init__(self, patterns, null_value=_DEFAULT_NULL_VALUE):
"""Initialize."""
self._iterator = iter(patterns)
self._null_value = null_value
try:
self._current = next(self._iterator)
except StopIteration:
raise TypeInferenceError('Type pattern is empty.')
def read_item(self, item):
"""Read `item` and consume type patterns
while their predicates are not satisfied.
When the value is NULL, not consume any pattern.
"""
if item == self._null_value:
return
try:
while not self._current[1](item):
self._current = next(self._iterator)
except StopIteration:
raise TypeInferenceError(
'Matching pattern is not found for: {0}'.format(item))
@property
def type_name(self):
"""Return the current type pattern."""
return self._current[0]
def decide_types(patterns, reader, column_names, **kwargs):
"""Decide the types and returns the list of types.
Given `null_value`, it is treated as NULL and type inference skips it.
Given `index_types` as a list of (index, typename),
the types of the specified columns will not be calculated
and will be set the pre-defined type names.
"""
null_value = kwargs.get('null_value', _DEFAULT_NULL_VALUE)
index_types = kwargs.get('index_types', [])
typename_maps = dict(
(int(index), typename) for (index, typename) in index_types)
inferences = [
_Inference(index, patterns, null_value)
for index in range(len(column_names))
if index not in typename_maps.keys()]
for row, inference in itertools.product(reader, inferences):
inference.read_row(row)
typename_maps.update(
dict((item.index, item.type_name) for item in inferences)
)
type_names = [typename_maps[index] for index in range(len(column_names))]
return type_names
| 29.262295 | 78 | 0.658123 | """Type pattern."""
import re
import decimal
import operator
import itertools
import functools
from csv2sql.core.error import InterpretationError, TypeInferenceError
def _compatible(cast_type, value):
try:
cast_type(value)
except ValueError:
return False
return True
# Maps a type name (as written in a pattern file) to a predicate that
# reports whether a CSV field can be cast to that Python type.
_COMPATIBLE_PREDICATES = {
    'int': functools.partial(_compatible, int),
    'float': functools.partial(_compatible, float),
}

# Field value treated as SQL NULL by default.
_DEFAULT_NULL_VALUE = ''
def _create_compatible_predicate(args):
    """Return the registered compatibility predicate named by ``args[0]``.

    :raises InterpretationError: on wrong arity or an unknown type name.
    """
    if len(args) != 1:
        raise InterpretationError(
            'Compatible predicate takes only 1 argument, '
            'given {0}.'.format(len(args)))
    cast_type_name = args[0]
    try:
        return _COMPATIBLE_PREDICATES[cast_type_name]
    except (KeyError, TypeError):
        # Narrowed from a bare ``except``; TypeError covers unhashable names.
        raise InterpretationError(
            'Compatible predicate takes one of ({0}), '
            'given {1}'.format(
                '|'.join(list(_COMPATIBLE_PREDICATES)),
                cast_type_name
            )
        )
def _create_compare_predicate(operator_, args):
if len(args) != 1:
raise InterpretationError(
'Compare predicate takes only 1 argument, '
'given {0}.'.format(len(args)))
try:
comp_value = decimal.Decimal(args[0])
except:
raise InterpretationError(
'Compare predicate takes only a decimal argument, '
'given {0}.'.format(args[0]))
return lambda value: operator_(decimal.Decimal(value), comp_value)
def _create_shorter_than_predicate(args):
if len(args) != 1:
raise InterpretationError(
'Shorter-than predicate takes only 1 argument, '
'given {0}.'.format(len(args)))
try:
max_length = int(args[0])
except:
raise InterpretationError(
'Shorter-than predicate takes only an integer argument, '
'given {0}.'.format(args[0]))
return lambda value: len(value) < max_length
def _create_match_predicate(args):
if len(args) != 1:
raise InterpretationError(
'Match predicate takes only 1 argument, '
'given {0}.'.format(len(args)))
pattern = re.compile(args[0])
return lambda value: bool(pattern.search(value))
def _create_all_of_predicate(args):
    """Conjunction: true only when every sub-predicate accepts the value."""
    sub_predicates = tuple(interpret_predicate(obj) for obj in args)
    return lambda value: all(p(value) for p in sub_predicates)
def _create_any_of_predicate(args):
    """Disjunction: true when at least one sub-predicate accepts the value."""
    sub_predicates = tuple(interpret_predicate(obj) for obj in args)
    return lambda value: any(p(value) for p in sub_predicates)
def _create_not_predicate(args):
    """Negation of the first sub-predicate in ``args``."""
    inner = interpret_predicate(args[0])

    def _negated(value):
        return not inner(value)

    return _negated
def _always_true(_):
return True
def _create_any_predicate(args):
if args:
raise InterpretationError('Match predicate takes no argument.')
return _always_true
# Registry mapping the pattern-file predicate ``type`` string to the
# factory that builds the actual predicate callable from its ``args``.
_PREDICATE_GENERATORS = {
    'compatible': _create_compatible_predicate,
    'less-than': functools.partial(_create_compare_predicate, operator.lt),
    'less-than-or-equal-to': functools.partial(
        _create_compare_predicate, operator.le),
    'greater-than': functools.partial(_create_compare_predicate, operator.gt),
    'greater-than-or-equal-to': functools.partial(
        _create_compare_predicate, operator.ge),
    'shorter-than': _create_shorter_than_predicate,
    'match': _create_match_predicate,
    'all-of': _create_all_of_predicate,
    'any-of': _create_any_of_predicate,
    'any': _create_any_predicate,
    'not': _create_not_predicate,
}
def interpret_predicate(obj):
    """Interpret a predicate mapping of the form ``{'type': ..., 'args': ...}``.

    :raises InterpretationError: when ``type`` is missing or unknown, or
        when the chosen factory rejects ``args``.
    """
    try:
        predicate_type = obj['type']
    except (KeyError, TypeError):
        # Narrowed from a bare ``except``; TypeError covers non-mapping input.
        raise InterpretationError('Predicate type must be specified.')
    try:
        predicate_generator = _PREDICATE_GENERATORS[predicate_type]
    except (KeyError, TypeError):
        raise InterpretationError(
            'Predicate type`{0}` is invalid'.format(predicate_type))
    args = obj.get('args', [])  # `args` is an optional value.
    # A scalar or string argument is promoted to a single-element list.
    if isinstance(args, (str, bytes)) or not hasattr(args, '__iter__'):
        args = [args]
    predicate = predicate_generator(args)  # Can raise InterpretationError.
    return predicate
def _interpret_one_type_pattern(obj):
    """Translate one pattern mapping into a ``(typename, predicate)`` pair."""
    return obj['typename'], interpret_predicate(obj['predicate'])
def interpret_patterns(obj):
    """Interpret the whole type-pattern object into (typename, predicate) pairs."""
    return list(map(_interpret_one_type_pattern, obj))
class TypeInferrer:
    """Infers a column's type while reading items.

    Patterns are consumed front-to-back: once an item rejects a pattern's
    predicate, that pattern can never be selected again.
    """

    def __init__(self, patterns, null_value=_DEFAULT_NULL_VALUE):
        """Initialize with an ordered iterable of (typename, predicate)."""
        self._iterator = iter(patterns)
        self._null_value = null_value
        try:
            self._current = next(self._iterator)
        except StopIteration:
            raise TypeInferenceError('Type pattern is empty.')

    def read_item(self, item):
        """Read ``item`` and advance past patterns whose predicates reject it.

        A NULL value (equal to ``null_value``) consumes no pattern at all.
        """
        if item == self._null_value:
            return
        try:
            while not self._current[1](item):
                self._current = next(self._iterator)
        except StopIteration:
            raise TypeInferenceError(
                'Matching pattern is not found for: {0}'.format(item))

    @property
    def type_name(self):
        """Name of the currently selected type pattern."""
        return self._current[0]
class _Inference:
    """Drives a :class:`TypeInferrer` for one CSV column."""

    def __init__(self, index, patterns, null_value):
        """Bind column ``index`` to a fresh inferrer."""
        self._index = int(index)
        self._inferrer = TypeInferrer(patterns, null_value)

    def read_row(self, row):
        """Feed this column's cell of ``row`` into the inferrer."""
        self._inferrer.read_item(row[self._index])

    @property
    def index(self):
        """Column index this inference is bound to."""
        return self._index

    @property
    def type_name(self):
        """Currently inferred type name for the column."""
        return self._inferrer.type_name
def decide_types(patterns, reader, column_names, **kwargs):
    """Decide the types and return the list of type names, one per column.

    Given `null_value`, it is treated as NULL and type inference skips it.
    Given `index_types` as a list of (index, typename),
    the types of the specified columns will not be calculated
    and will be set the pre-defined type names.
    """
    null_value = kwargs.get('null_value', _DEFAULT_NULL_VALUE)
    index_types = kwargs.get('index_types', [])
    typename_maps = dict(
        (int(index), typename) for (index, typename) in index_types)
    # Columns with a pre-assigned type are excluded from inference.
    inferences = [
        _Inference(index, patterns, null_value)
        for index in range(len(column_names))
        if index not in typename_maps.keys()]
    # product() pairs every row with every per-column inferrer; note that
    # it materializes the whole ``reader`` in memory before iterating.
    for row, inference in itertools.product(reader, inferences):
        inference.read_row(row)
    typename_maps.update(
        dict((item.index, item.type_name) for item in inferences)
    )
    type_names = [typename_maps[index] for index in range(len(column_names))]
    return type_names
| 2,467 | 557 | 276 |
6844e5481b83263b1031e6308182eb8b247ee480 | 712 | py | Python | scout/server/blueprints/diagnoses/views.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | scout/server/blueprints/diagnoses/views.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | scout/server/blueprints/diagnoses/views.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | from flask import Blueprint
from scout.server.extensions import store
from scout.server.utils import templated, public_endpoint
from . import controllers
omim_bp = Blueprint("diagnoses", __name__, template_folder="templates")
@omim_bp.route("/diagnoses/<omim_nr>", methods=["GET"])
@templated("diagnoses/omim_term.html")
def omim_diagnosis(omim_nr):
"""Display information specific to one OMIM diagnosis"""
data = controllers.omim_entry(store, omim_nr)
return data
@omim_bp.route("/diagnoses", methods=["GET"])
@templated("diagnoses/omim_terms.html")
def omim_diagnoses():
"""Display all OMIM diagnoses available in database"""
data = {"terms": store.disease_terms()}
return data
| 27.384615 | 71 | 0.745787 | from flask import Blueprint
from scout.server.extensions import store
from scout.server.utils import templated, public_endpoint
from . import controllers
omim_bp = Blueprint("diagnoses", __name__, template_folder="templates")
@omim_bp.route("/diagnoses/<omim_nr>", methods=["GET"])
@templated("diagnoses/omim_term.html")
def omim_diagnosis(omim_nr):
    """Render the detail view for a single OMIM diagnosis term."""
    return controllers.omim_entry(store, omim_nr)
@omim_bp.route("/diagnoses", methods=["GET"])
@templated("diagnoses/omim_terms.html")
def omim_diagnoses():
    """Render the list view of every OMIM diagnosis in the database."""
    return {"terms": store.disease_terms()}
| 0 | 0 | 0 |
9bfb08978f09c1ff3df136b1dd1eaf9e25cc2385 | 11,470 | py | Python | sccc_contestbot/__init__.py | queragion2726/sccc_contestbot | 91d59196aee11b5859bb8504288ba317523b47fd | [
"Apache-2.0"
] | 2 | 2020-01-04T10:00:35.000Z | 2020-02-25T05:08:24.000Z | sccc_contestbot/__init__.py | queragion2726/sccc_contestbot | 91d59196aee11b5859bb8504288ba317523b47fd | [
"Apache-2.0"
] | 1 | 2020-07-22T18:25:13.000Z | 2020-07-22T18:25:38.000Z | sccc_contestbot/__init__.py | queragion2726/sccc_contestbot | 91d59196aee11b5859bb8504288ba317523b47fd | [
"Apache-2.0"
] | 1 | 2021-03-23T13:48:47.000Z | 2021-03-23T13:48:47.000Z | import asyncio
import logging
import threading
import time
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from typing import List
import slack
import sqlalchemy
from sqlalchemy.orm import sessionmaker, scoped_session
import settings
from .models import Base, Contest, Subscriber, ContestData
from .sub_manager import SubManager, AleadyExistsEception, NoSuchUserException
from .contest_manager import ContestManager, RenewalFlag
from .time_strategy import TimeStrategy
from sccc_contestbot.collectors import CollectManager
from sccc_contestbot.logger import init_logger
init_logger(__name__)
logger = logging.getLogger(__name__)
| 32.128852 | 88 | 0.564778 | import asyncio
import logging
import threading
import time
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from typing import List
import slack
import sqlalchemy
from sqlalchemy.orm import sessionmaker, scoped_session
import settings
from .models import Base, Contest, Subscriber, ContestData
from .sub_manager import SubManager, AleadyExistsEception, NoSuchUserException
from .contest_manager import ContestManager, RenewalFlag
from .time_strategy import TimeStrategy
from sccc_contestbot.collectors import CollectManager
from sccc_contestbot.logger import init_logger
init_logger(__name__)
logger = logging.getLogger(__name__)
class ContestBot:
    """Slack bot that announces programming-contest schedules.

    Wires together the Slack RTM/Web clients, a SQLAlchemy-backed database,
    the subscriber/contest managers and the contest collectors, and drives
    them all from a single asyncio event loop.
    """
    def __init__(self, **kwargs):
        """
        Initialize the bot.
        Args:
            BOT_SLACK_TOKEN : a Slack bot API token is required.
            BOT_DB_HOST :
            BOT_DB_PORT :
            BOT_DB_NAME :
            BOT_DB_USERNAME :
            BOT_DB_PASSWORD :
            DB_ENGINE : if you created a SQLAlchemy engine yourself it can
                be passed in directly; mainly used for testing.
        """
        # TODO: flesh out the parameter descriptions
        logger.info("------콘테스트 봇 초기화------")
        # Initialize the Slack clients (messages are routed to message_listener)
        token = kwargs["BOT_SLACK_TOKEN"]
        self.event_loop = asyncio.get_event_loop()
        self.rtm_client = slack.RTMClient(
            token=token, run_async=True, loop=self.event_loop
        )
        self.web_client = slack.WebClient(
            token=token, run_async=True, loop=self.event_loop
        )
        slack.RTMClient.run_on(event="message")(self.message_listener)
        logger.info("슬랙 클라이언트 초기화 완료")
        # Initialize the DB engine
        if "DB_ENGINE" in kwargs:
            self.engine = kwargs["DB_ENGINE"]
        else:
            self.engine = sqlalchemy.create_engine(
                "postgresql://{}:{}@{}:{}/{}".format(
                    kwargs["BOT_DB_USERNAME"],
                    kwargs["BOT_DB_PASSWORD"],
                    kwargs["BOT_DB_HOST"],
                    kwargs["BOT_DB_PORT"],
                    kwargs["BOT_DB_NAME"],
                )
            )
        if not self.test_db(self.engine):
            Base.metadata.create_all(self.engine)
            logger.info("테이블 생성을 완료했습니다.")
        logger.info("DB 엔진 생성 완료")
        # Create the bot's own ThreadPoolExecutor for DB API work.
        # The initializer below creates a session per worker thread,
        # so each thread gets its own scoped_session.
        self.thread_local_data = threading.local()
        def thread_session_maker(thread_local_data, engine):
            thread_local_data.Session = scoped_session(sessionmaker(bind=engine))
        self.thread_pool_executor = ThreadPoolExecutor(
            initializer=thread_session_maker,
            initargs=(self.thread_local_data, self.engine),
        )
        # Create the subscriber manager
        self.sub_manager = SubManager(self.event_loop, self.thread_local_data)
        # Create the contest manager
        self.contest_manager = ContestManager(
            self.event_loop, self.thread_local_data, self.renewal_call_back
        )
        # Create the collector manager
        self.collect_manager = CollectManager(
            self.event_loop, self.contest_update_call_back
        )
        # NOTE(review): imported locally — presumably to avoid a circular
        # import at module load time; confirm.
        from .collectors.boj_collector import BOJCollector
        from .collectors.cf_collector import CFCollector
        self.collect_manager.register(BOJCollector)
        self.collect_manager.register(CFCollector)
    def test_db(self, engine, connect_try_count=5) -> bool:
        """
        Test the DB connection; return False if the ``contests`` table has
        not been created yet, True otherwise.
        Retries the connection with exponential backoff and raises
        RuntimeError after ``connect_try_count`` failed attempts.
        """
        # NOTE(review): the ``engine`` argument is ignored; the method always
        # uses ``self.engine`` — confirm whether that is intentional.
        # Wait for the DB connection
        logger.info("DB 커넥트 대기 중")
        try_count = 0
        while True:
            try:
                self.engine.execute("SELECT 1")
                break
            except sqlalchemy.exc.DatabaseError:
                logger.info("연결 실패...")
                # Exponential backoff, capped at 10 seconds
                time.sleep(min(10, 0.001 * (2 ** (try_count + 7))))
                try_count += 1
                if try_count == connect_try_count:
                    raise RuntimeError("DB 연결 실패")
        logger.info("DB 커넥션 성공")
        logger.info("contests 테이블이 존재하는지 확인합니다.")
        try:
            self.engine.execute("SELECT * FROM contests")
            logger.info("정상적으로 존재합니다.")
        except sqlalchemy.exc.ProgrammingError:
            logger.info("존재하지 않습니다. 테이블을 생성합니다.")
            return False
        return True
    def run(self):
        """
        Run the bot (blocks until the event loop is stopped).
        """
        loop = self.event_loop
        try:
            with self.thread_pool_executor as pool:
                # Make the pool the loop's default executor
                loop.set_default_executor(pool)
                # Schedule the crawling to start
                self.collect_manager.run()
                self.rtm_client.start()
                loop.run_forever()
        finally:
            loop.close()
    def renewal_call_back(self, contest: ContestData, flag: RenewalFlag):
        """
        Called when a contest received by the ContestManager differs from
        the previously stored one.
        Posts a different message depending on the flag and re-registers
        the notifications using the changed data.
        """
        format_dict = {
            "name": contest.contest_name,
            "datetime": str(contest.start_date.astimezone(settings.LOCAL_TIMEZONE)),
            "URL": contest.URL,
        }
        if flag == RenewalFlag.CREATED:
            txt = settings.NEW_NOTICE_TXT % format_dict
            msg = settings.NEW_NOTICE_MESSAGE % format_dict
        elif flag == RenewalFlag.CHANGED:
            txt = settings.MODIFIED_NOTICE_TXT % format_dict
            msg = settings.MODIFIED_NOTICE_MESSAGE % format_dict
        # Register the notifications
        for time_strategy in settings.NOTI_STRATEGIES:
            time_strategy = time_strategy.value
            delay = (
                contest.start_date
                - datetime.now(tz=settings.LOCAL_TIMEZONE)
                - time_strategy.delta
            ).total_seconds()
            if delay > 0:
                self.event_loop.call_later(
                    delay, self.noti_call_back, contest, time_strategy
                )
        self.web_client.chat_postMessage(
            channel=settings.POST_CHANNEL, text=txt, blocks=msg,
        )
    def noti_call_back(self, contest: ContestData, time_strategy: TimeStrategy):
        """
        Notification callback: posts how much time remains until the
        contest, and requests deletion once the contest has started.
        """
        async def _impl_noti():
            if not (await self.contest_manager.is_latest(contest)):
                # If the contest changed after this notification was
                # scheduled, the notification is ignored.
                return
            if time_strategy == settings.NOTI_STRATEGIES.END:
                # Runs at the moment the contest starts:
                # delete the contest.
                await self.contest_manager.delete_contest(contest)
                return
            format_dict = {
                "name": contest.contest_name,
                "datetime": str(contest.start_date.astimezone(settings.LOCAL_TIMEZONE)),
                "URL": contest.URL,
                "remain": time_strategy.displayText,
            }
            await self.web_client.chat_postMessage(
                channel=settings.POST_CHANNEL,
                text=settings.NOTI_NOTICE_TXT,
                blocks=settings.NOTI_NOTICE_MESSAGE % format_dict,
            )
        self.event_loop.create_task(_impl_noti())
    def contest_update_call_back(self, contests: List[ContestData]):
        """
        Called whenever a collector finishes crawling successfully.
        """
        for contest in contests:
            self.event_loop.create_task(self.contest_manager.renewal_contest(contest))
    async def message_listener(self, **payload):
        """
        Classify and handle messages received from the Slack channel.
        """
        data = payload["data"]
        if "user" in data:
            # A message typed by a user
            if settings.SUBSCRIBE_KEYWORD == data["text"]:
                # Register a subscriber
                await self.add_subscriber(**payload)
            elif settings.UNSUBSCRIBE_KEYWORD == data["text"]:
                # Remove a subscriber
                await self.delete_subscriber(**payload)
            elif settings.HELP_KEYWORD == data["text"]:
                # Help message
                await self.post_help_message(**payload)
            elif "!TEST" == data["text"]:
                # For testing
                await self.post_test_message(**payload)
        if (
            "subtype" in data
            and data["subtype"] == "bot_message"
            and "thread_ts" not in data
            and "blocks" in data
        ):
            # Decide whether subscribers should be notified of this message.
            if data["text"] == settings.HELP_DISPLAY_TXT:
                return
            await self.post_subscriber(**payload)
    async def add_subscriber(self, **payload):
        """
        Add a subscriber and post the result.
        """
        logger.info("add_subscriber 호출")
        data = payload["data"]
        web_client = payload["web_client"]
        channel_id = data["channel"]
        try:
            await self.sub_manager.add_subscriber(data["user"])
            await web_client.chat_postMessage(
                channel=channel_id, text=settings.APPEND_SUCCESS
            )
        except AleadyExistsEception:
            await web_client.chat_postMessage(
                channel=channel_id, text=settings.ALREADY_EXISTS,
            )
    async def delete_subscriber(self, **payload):
        """
        Delete a subscriber and post the result.
        """
        logger.info("delete_subscriber 호출")
        data = payload["data"]
        web_client = payload["web_client"]
        channel_id = data["channel"]
        try:
            await self.sub_manager.delete_subscriber(data["user"])
            await web_client.chat_postMessage(
                channel=channel_id, text=settings.DELETE_SUCCESS
            )
        except NoSuchUserException:
            await web_client.chat_postMessage(
                channel=channel_id, text=settings.NO_SUCH_USER
            )
    async def post_help_message(self, **payload):
        """
        Post the help message.
        """
        logger.info("post_help_message 호출")
        data = payload["data"]
        web_client = payload["web_client"]
        channel_id = data["channel"]
        await web_client.chat_postMessage(
            channel=channel_id,
            text=settings.HELP_DISPLAY_TXT,
            blocks=[
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": settings.HELP_MESSAGE},
                }
            ],
        )
    async def post_test_message(self, **payload):
        """
        Post a fixed test message to the configured channel ("!TEST").
        """
        logger.info("post_test_message 호출")
        await payload["web_client"].chat_postMessage(
            channel=settings.POST_CHANNEL,
            text="test!",
            blocks=[{"type": "section", "text": {"type": "mrkdwn", "text": "Test"}}],
        )
    async def post_subscriber(self, **payload):
        """
        Mention every subscriber in a thread under a bot notification.
        """
        data = payload["data"]
        web_client = payload["web_client"]
        thread_ts = data["ts"]
        subscribers = await self.sub_manager.get_subscriber()
        sub_text = " ".join(f"<@{user}>" for user in subscribers)
        if sub_text == "":
            return
        # For the notification text, reuse the original text of the thread.
        display_noti_text = data["blocks"][0]["text"]["text"]
        await web_client.chat_postMessage(
            channel=settings.POST_CHANNEL,
            text=display_noti_text,
            blocks=[
                {"type": "context", "elements": [{"type": "mrkdwn", "text": sub_text}]}
            ],
            thread_ts=thread_ts,
        )
| 2,123 | 9,889 | 23 |
985b5ef5d5021bc83c0b152f4da2f8ee2e62e77c | 2,000 | py | Python | src/stateful_examples/river_crossing.py | FRYoussef/property-based-testing-poker | b5890d8d6934b9f7b17b0173c492a8ae68410e51 | [
"MIT"
] | null | null | null | src/stateful_examples/river_crossing.py | FRYoussef/property-based-testing-poker | b5890d8d6934b9f7b17b0173c492a8ae68410e51 | [
"MIT"
] | null | null | null | src/stateful_examples/river_crossing.py | FRYoussef/property-based-testing-poker | b5890d8d6934b9f7b17b0173c492a8ae68410e51 | [
"MIT"
] | null | null | null | import unittest
from enum import Enum
from hypothesis import settings, note
from hypothesis.stateful import RuleBasedStateMachine, rule, invariant, precondition
if __name__ == "__main__":
RiverCrossing.TestCase.settings = settings(max_examples=100, stateful_step_count=50)
RiverCrossingTest = RiverCrossing.TestCase
unittest.main() | 32.258065 | 105 | 0.654 | import unittest
from enum import Enum
from hypothesis import settings, note
from hypothesis.stateful import RuleBasedStateMachine, rule, invariant, precondition
class Side(Enum):
    """Which bank of the river an actor is currently on."""
    Left = 0
    Right = 1
    def __str__(self) -> str:
        """Render as a single letter: 'L' for Left, 'R' for Right."""
        return 'L' if self.value == Side.Left.value else 'R'
    def switch(self):
        """Return the opposite bank.

        Bug fix: the original returned ``self.Right`` — accessing one enum
        member through another member is deprecated since Python 3.11 and
        raises AttributeError in 3.12, so reference it through the class.
        """
        return Side.Left if self.value == Side.Right.value else Side.Right
class RiverCrossing(RuleBasedStateMachine):
    """Hypothesis state machine encoding the wolf/goat/cabbage puzzle.

    Each rule ferries the shepherd (optionally with one passenger) across
    the river.  The invariant asserts the goal state is never reached, so
    a "failing" example found by Hypothesis is a solution to the puzzle.
    """
    def __init__(self) -> None:
        super(RiverCrossing, self).__init__()
        # Everyone starts on the left bank.
        self.shepherd = Side.Left
        self.wolf = Side.Left
        self.goat = Side.Left
        self.cabbage = Side.Left
    def is_disaster(self):
        # Something gets eaten when the shepherd is away from the goat while
        # the goat shares a bank with the wolf or the cabbage.
        # NOTE(review): defined but not referenced by any rule or invariant here.
        return (self.shepherd != self.goat) and ((self.wolf == self.goat) or (self.cabbage == self.goat))
    def is_final_state(self):
        # Goal: everything has crossed to the right bank.
        r = Side.Right
        return self.shepherd == r and self.wolf == r and self.goat == r and self.cabbage == r
    @rule()
    def cross_shepherd(self) -> None:
        # The shepherd crosses alone.
        self.shepherd = self.shepherd.switch()
    @precondition(lambda self: self.shepherd == self.wolf)
    @rule()
    def cross_shepherd_wolf(self) -> None:
        # The shepherd ferries the wolf (only when on the same bank).
        self.shepherd = self.shepherd.switch()
        self.wolf = self.wolf.switch()
    @precondition(lambda self: self.shepherd == self.goat)
    @rule()
    def cross_shepherd_goat(self) -> None:
        # The shepherd ferries the goat.
        self.shepherd = self.shepherd.switch()
        self.goat = self.goat.switch()
    @precondition(lambda self: self.shepherd == self.cabbage)
    @rule()
    def cross_shepherd_cabbage(self) -> None:
        # The shepherd ferries the cabbage.
        self.shepherd = self.shepherd.switch()
        self.cabbage = self.cabbage.switch()
    @invariant()
    def solve(self) -> None:
        """Log the current state; fail (i.e. "solve") when the goal is reached."""
        note(f"S({self.shepherd}) W({self.wolf}) G({self.goat}) C({self.cabbage})")
        assert not self.is_final_state()
if __name__ == "__main__":
    # Run the state machine as a unittest case; a "failure" reported by
    # Hypothesis is actually a solution to the river-crossing puzzle.
    RiverCrossing.TestCase.settings = settings(max_examples=100, stateful_step_count=50)
    RiverCrossingTest = RiverCrossing.TestCase
    unittest.main()
0e32a0b20377d21cfdbd17fa5c41d4ac67f6c84c | 634 | py | Python | apps/users/urls.py | vuonghv/brs | 9cdf9431ac69fd7a33d8bf4240a7d49a49ae4a80 | [
"MIT"
] | 1 | 2021-01-13T23:42:54.000Z | 2021-01-13T23:42:54.000Z | apps/users/urls.py | vuonghv/brs | 9cdf9431ac69fd7a33d8bf4240a7d49a49ae4a80 | [
"MIT"
] | 1 | 2015-10-09T06:19:29.000Z | 2015-10-09T06:19:29.000Z | apps/users/urls.py | vuonghv/brs | 9cdf9431ac69fd7a33d8bf4240a7d49a49ae4a80 | [
"MIT"
] | 8 | 2015-10-09T02:00:34.000Z | 2016-07-08T15:00:37.000Z | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^signup/$', views.SignupUserView.as_view(), name='signup'),
url(r'^login/$', views.LoginUserView.as_view(), name='login'),
url(r'^logout/$', views.logout_user, name='logout'),
url(r'^follow/(?P<pk>[0-9]+)/$', views.follow_user, name='follow'),
url(r'^unfollow/(?P<pk>[0-9]+)/$', views.unfollow_user, name='unfollow'),
url(r'^(?P<pk>[0-9]+)/followers/$', views.ListFollowersView.as_view(), name='followers'),
url(r'^(?P<pk>[0-9]+)/following/$', views.ListFollowingView.as_view(), name='following'),
]
| 42.266667 | 97 | 0.608833 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^signup/$', views.SignupUserView.as_view(), name='signup'),
url(r'^login/$', views.LoginUserView.as_view(), name='login'),
url(r'^logout/$', views.logout_user, name='logout'),
url(r'^follow/(?P<pk>[0-9]+)/$', views.follow_user, name='follow'),
url(r'^unfollow/(?P<pk>[0-9]+)/$', views.unfollow_user, name='unfollow'),
url(r'^(?P<pk>[0-9]+)/followers/$', views.ListFollowersView.as_view(), name='followers'),
url(r'^(?P<pk>[0-9]+)/following/$', views.ListFollowingView.as_view(), name='following'),
]
| 0 | 0 | 0 |
e033764b8f85a9224d591af351a901f2c66c2958 | 5,244 | py | Python | ccbb_pyutils/parallel_process_fastqs.py | ucsd-ccbb/ccbb-ucsd-pyutils | 944ff2230c66ab70016bcac393e05a7e12f1cfd5 | [
"MIT"
] | null | null | null | ccbb_pyutils/parallel_process_fastqs.py | ucsd-ccbb/ccbb-ucsd-pyutils | 944ff2230c66ab70016bcac393e05a7e12f1cfd5 | [
"MIT"
] | null | null | null | ccbb_pyutils/parallel_process_fastqs.py | ucsd-ccbb/ccbb-ucsd-pyutils | 944ff2230c66ab70016bcac393e05a7e12f1cfd5 | [
"MIT"
] | null | null | null | # standard libraries
import datetime
import logging
import multiprocessing
import timeit
import traceback
from ccbb_pyutils.bio_seq_utilities import pair_hiseq_read_files
from ccbb_pyutils.files_and_paths import get_basename_fps_tuples, get_file_name_pieces, \
get_filepaths_from_wildcard
__author__ = 'Amanda Birmingham'
__maintainer__ = "Amanda Birmingham"
__email__ = "abirmingham@ucsd.edu"
__status__ = "prototype"
| 38.558824 | 115 | 0.703852 | # standard libraries
import datetime
import logging
import multiprocessing
import timeit
import traceback
from ccbb_pyutils.bio_seq_utilities import pair_hiseq_read_files
from ccbb_pyutils.files_and_paths import get_basename_fps_tuples, get_file_name_pieces, \
get_filepaths_from_wildcard
__author__ = 'Amanda Birmingham'
__maintainer__ = "Amanda Birmingham"
__email__ = "abirmingham@ucsd.edu"
__status__ = "prototype"
def get_elapsed_time_to_now(start_time, process_name=None):
    """Format the wall-clock time elapsed since ``start_time``.

    Args:
        start_time: a timestamp previously taken with ``timeit.default_timer()``.
        process_name: optional label prepended to the message.

    Returns:
        A string such as ``"myproc elapsed time: 0:01:02"``.
    """
    seconds_passed = timeit.default_timer() - start_time
    minutes, secs = divmod(seconds_passed, 60)
    hours, minutes = divmod(minutes, 60)
    message = "elapsed time: %d:%02d:%02d" % (hours, minutes, secs)
    if process_name is None:
        return message
    return "{0} ".format(process_name) + message
def time_function(process_name, func_name, pass_process_name_to_func, *func_args):
    """Run a callable with start/end logging and wall-clock timing.

    Args:
        process_name: label used in log messages; when
            ``pass_process_name_to_func`` is True it is also passed to the
            callable as its first positional argument.
        func_name: the callable to execute.
        pass_process_name_to_func: whether to prepend ``process_name`` to
            the callable's arguments.
        *func_args: positional arguments forwarded to the callable.

    Returns:
        A ``(process_name, result)`` tuple.

    Raises:
        Whatever the callable raises; the traceback is logged first.
    """
    logging.info("Starting {0} at {1}".format(process_name, datetime.datetime.now()))
    start_time = timeit.default_timer()
    # Build the final argument tuple once instead of duplicating the
    # try/except block in both branches (as the original did).
    if pass_process_name_to_func:
        call_args = (process_name,) + func_args
    else:
        call_args = func_args
    try:
        func_result = func_name(*call_args)
    except Exception:
        logging.info(traceback.format_exc())
        raise  # bare raise preserves the original traceback
    logging.info(get_elapsed_time_to_now(start_time, process_name))
    return process_name, func_result
def parallel_process_files(file_dir, file_suffix, num_processes, func_for_one_file, func_fixed_inputs_list,
                           pass_process_name_to_func=False):
    """Apply ``func_for_one_file`` to every matching file using a process pool.

    Each worker invocation is routed through ``time_function``, receiving the
    file's basename as the process name, then the fixed inputs, and finally
    the file path itself.

    Returns:
        A list of ``(process_name, result)`` tuples, one per input file.
    """
    logging.info("Starting parallel processing at {0}".format(datetime.datetime.now()))
    start_time = timeit.default_timer()
    def build_starmap_args(filepath):
        # One argument tuple per input file, in time_function's expected order.
        _, basename, _ = get_file_name_pieces(filepath)
        arg_list = [basename, func_for_one_file, pass_process_name_to_func]
        arg_list.extend(func_fixed_inputs_list)
        arg_list.append(filepath)
        return tuple(arg_list)
    process_arguments = [build_starmap_args(fp)
                         for fp in get_filepaths_from_wildcard(file_dir, file_suffix)]
    with multiprocessing.Pool(processes=num_processes) as pool:
        results = pool.starmap(time_function, process_arguments)
    logging.info(get_elapsed_time_to_now(start_time, "parallel processing"))
    return results
def serial_process_files(file_dir, file_suffix, func_for_one_file, func_fixed_inputs_list,
                         pass_process_name_to_func=False,
                         prefix_asterisk=True, subdirs_are_basename=False):
    """Apply ``func_for_one_file`` to every matching file, one at a time.

    Serial counterpart of ``parallel_process_files``; each call is routed
    through ``time_function`` so it is individually logged and timed.

    Args:
        file_dir: directory to search for input files.
        file_suffix: wildcard suffix used to select files.
        func_for_one_file: callable applied to each file.
        func_fixed_inputs_list: extra positional inputs passed to the callable
            before the file path.
        pass_process_name_to_func: whether the basename is also passed to the
            callable as its first argument (see ``time_function``).
        prefix_asterisk / subdirs_are_basename: forwarded to
            ``get_basename_fps_tuples`` to control file discovery.

    Returns:
        A list of ``(process_name, result)`` tuples, one per input file.
    """
    logging.info("Starting serial processing at {0}".format(datetime.datetime.now()))
    start_time = timeit.default_timer()
    results = []
    relevant_basename_fp_tuples_list = get_basename_fps_tuples(file_dir, file_suffix,
                                                               prefix_asterisk=prefix_asterisk,
                                                               subdirs_are_basename=subdirs_are_basename)
    # (Dead commented-out scaffolding from the parallel variant removed.)
    for curr_basename_fp_tuple in relevant_basename_fp_tuples_list:
        curr_base = curr_basename_fp_tuple[0]
        curr_fp = curr_basename_fp_tuple[1]
        # Assemble time_function's positional arguments:
        # name, callable, flag, fixed inputs, then the file path.
        curr_args_list = [curr_base, func_for_one_file, pass_process_name_to_func]
        curr_args_list.extend(func_fixed_inputs_list)
        curr_args_list.append(curr_fp)
        curr_result = time_function(*curr_args_list)
        results.append(curr_result)
    logging.info(get_elapsed_time_to_now(start_time, "serial processing"))
    return results
def parallel_process_paired_reads(fastq_dir, file_suffix, num_processes, func_for_one_pair, func_fixed_inputs_list,
                                  pass_process_name_to_func=False):
    """Apply ``func_for_one_pair`` to every paired HiSeq read-file set in parallel.

    Fastq files are paired with ``pair_hiseq_read_files``; if pairing fails,
    the failure messages are logged and an empty result list is returned.
    Each worker invocation goes through ``time_function`` with the pair's
    basename, the fixed inputs, and then both file paths.

    Returns:
        A list of ``(process_name, result)`` tuples, one per read pair
        (empty when pairing failed).
    """
    logging.info("Starting parallel processing at {0}".format(datetime.datetime.now()))
    start_time = timeit.default_timer()
    results = []
    fastq_filepaths = get_filepaths_from_wildcard(fastq_dir, file_suffix)
    paired_fastqs_by_base, failure_msgs = pair_hiseq_read_files(fastq_filepaths)
    if failure_msgs is not None:
        logging.info(failure_msgs)
    else:
        process_arguments = []
        # Sort for a deterministic processing order.
        sorted_bases = sorted(paired_fastqs_by_base.keys())
        for curr_base in sorted_bases:
            fp_list = paired_fastqs_by_base[curr_base]
            curr_args_list = [curr_base, func_for_one_pair, pass_process_name_to_func]
            curr_args_list.extend(func_fixed_inputs_list)
            curr_args_list.extend(fp_list)
            process_arguments.append(tuple(curr_args_list))
        with multiprocessing.Pool(processes=num_processes) as pool:
            results = pool.starmap(time_function, process_arguments)
    logging.info(get_elapsed_time_to_now(start_time, "parallel processing"))
    return results
def concatenate_parallel_results(results_tuples):
    """Render ``(name, result)`` tuples as one line each.

    A ``None`` result is reported as the word "finished".

    Returns:
        The concatenated report string, one newline-terminated line per entry.
    """
    pieces = []
    for entry in results_tuples:
        outcome = entry[1] if entry[1] is not None else "finished"
        pieces.append("{0}: {1}\n".format(entry[0], outcome))
    return "".join(pieces)
| 4,674 | 0 | 138 |
bf904e02c66e86111f4b19123d219550390b8449 | 9,963 | py | Python | tests/test_student.py | chrispyles/pybryt | 23f8bfce3179638ec5b4efe3555d3bb4e7321dc0 | [
"MIT"
] | null | null | null | tests/test_student.py | chrispyles/pybryt | 23f8bfce3179638ec5b4efe3555d3bb4e7321dc0 | [
"MIT"
] | null | null | null | tests/test_student.py | chrispyles/pybryt | 23f8bfce3179638ec5b4efe3555d3bb4e7321dc0 | [
"MIT"
] | null | null | null | """"""
import os
import nbformat
import pkg_resources
import pytest
import tempfile
from copy import deepcopy
from functools import lru_cache
from textwrap import dedent
from unittest import mock
from pybryt import (
check, generate_student_impls, ReferenceImplementation, ReferenceResult, StudentImplementation)
from pybryt.execution.memory_footprint import MemoryFootprint
from .test_reference import generate_reference_notebook
__PYBRYT_TRACING__ = False
def generate_student_notebook():
"""
"""
nb = nbformat.v4.new_notebook()
nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
import pybryt
""")))
nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
def median(S):
sorted_S = sorted(S)
size_of_set = len(S)
middle = size_of_set // 2
is_set_size_even = (size_of_set % 2) == 0
if is_set_size_even:
return (sorted_S[middle-1] + sorted_S[middle]) / 2
else:
return sorted_S[middle]
""")))
nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
import numpy as np
np.random.seed(42)
for _ in range(10):
vals = [np.random.randint(-1000, 1000) for _ in range(np.random.randint(1, 1000))]
val = median(vals)
""")))
return nb
@lru_cache(1)
def test_constructor():
"""
"""
nb, stu = generate_impl()
assert stu.nb is nb
assert isinstance(stu.footprint, MemoryFootprint)
assert len(stu.footprint.values) == 993
with mock.patch("pybryt.student.execute_notebook") as mocked_exec:
mocked_exec.return_value = MemoryFootprint()
mocked_exec.return_value.set_executed_notebook(nb)
with tempfile.NamedTemporaryFile(mode="w+", suffix=".ipynb") as ntf:
nbformat.write(nb, ntf.name)
stu = StudentImplementation(ntf.name)
assert stu.footprint.num_steps == -1
assert stu.footprint.values == []
assert stu.footprint.calls == []
assert stu.nb == nb
with tempfile.NamedTemporaryFile(mode="w+", suffix=".ipynb") as output_ntf:
stu = StudentImplementation(ntf.name, output=output_ntf.name)
assert nbformat.read(output_ntf.name, as_version=nbformat.NO_CONVERT) == nb
with pytest.raises(TypeError, match="path_or_nb is of unsupported type <class 'int'>"):
StudentImplementation(1)
def test_load_and_dump():
"""
"""
_, stu = generate_impl()
with tempfile.NamedTemporaryFile() as ntf:
stu.dump(ntf.name)
stu2 = StudentImplementation.load(ntf.name)
assert len(stu.footprint.values) == len(stu2.footprint.values)
assert stu.footprint.num_steps == stu2.footprint.num_steps
enc_stu = stu.dumps()
stu2 = StudentImplementation.loads(enc_stu)
assert len(stu.footprint.values) == len(stu2.footprint.values)
assert stu.footprint.num_steps == stu2.footprint.num_steps
def test_check():
"""
"""
ref = ReferenceImplementation.compile(generate_reference_notebook(), name="foo")
nb, stu = generate_impl()
res = stu.check(ref)
assert isinstance(res, ReferenceResult)
res = stu.check([ref])
assert isinstance(res, list) and len(res) == 1 and isinstance(res[0], ReferenceResult)
with pytest.raises(TypeError, match="check cannot take values of type <class 'int'>"):
stu.check(1)
def test_check_cm(capsys):
"""
"""
ref = ReferenceImplementation.compile(generate_reference_notebook(), name="foo")
_, stu = generate_impl()
with mock.patch.object(check, "_cache_check") as mocked_cache:
with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
mocked_frame_tracer.return_value.get_footprint.return_value = stu.footprint
check_cm = check(ref, cache=False)
with check_cm:
pass
mocked_cache.assert_not_called()
mocked_frame_tracer.return_value.start_trace.assert_called()
mocked_frame_tracer.return_value.end_trace.assert_called()
captured = capsys.readouterr()
expected = dedent("""\
REFERENCE: foo
SATISFIED: True
MESSAGES:
- SUCCESS: Sorted the sample correctly
- SUCCESS: Computed the size of the sample
- SUCCESS: computed the correct median
""")
assert captured.out == expected
with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
mocked_frame_tracer.return_value.get_footprint.return_value = stu.footprint
ref_filename = pkg_resources.resource_filename(__name__, os.path.join("files", "expected_ref.pkl"))
check_cm = check(ref_filename)
with check_cm:
pass
mocked_cache.assert_called()
check_cm2 = check([ref_filename])
assert check_cm._ref == check_cm2._ref
captured = capsys.readouterr()
expected = dedent("""\
REFERENCE: foo
SATISFIED: True
MESSAGES:
- SUCCESS: Sorted the sample correctly
- SUCCESS: Computed the size of the sample
- SUCCESS: computed the correct median
""")
assert captured.out == expected
# test errors
with pytest.raises(ValueError, match="Cannot check against an empty list of references"):
check([])
with pytest.raises(TypeError, match="Invalid values in the reference list"):
check([ref, "path", 1])
# check by annotation group
with mock.patch.object(StudentImplementation, "from_footprint") as mocked_ff, \
mock.patch("pybryt.student.FrameTracer"), \
mock.patch("pybryt.student.generate_report"):
ref = ReferenceImplementation("groups", [])
for run_group in ["1", "2", None]:
with check(ref, group=run_group):
pass
mocked_ff.return_value.check.assert_called_with([ref], group=run_group)
# check caching
with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
with mock.patch("pybryt.student.StudentImplementation") as mocked_stu, \
mock.patch("pybryt.student.generate_report") as mocked_generate, \
mock.patch("pybryt.student.os.makedirs") as mocked_makedirs:
mocked_stu.from_footprint.return_value.check.return_value = [mock.MagicMock()]
mocked_stu.from_footprint.return_value.check.return_value[0].name = "foo"
check_cm = check(ref)
with check_cm:
check_cm._footprint = stu.footprint
mocked_makedirs.assert_called_with(".pybryt_cache", exist_ok=True)
mocked_stu.from_footprint.return_value.dump.assert_called()
mocked_stu.from_footprint.return_value.check.return_value[0].dump.assert_called_with(".pybryt_cache/foo_results.pkl")
def test_from_cache():
"""
"""
with mock.patch("pybryt.student.glob") as mocked_glob, \
mock.patch.object(StudentImplementation, "load") as mocked_load, \
mock.patch.object(StudentImplementation, "combine") as mocked_combine:
mocked_glob.return_value = [".pybryt_cache/student_impl_foo.pkl", ".pybryt_cache/student_impl_bar.pkl"]
StudentImplementation.from_cache(combine=False)
mocked_load.assert_has_calls([mock.call(fp) for fp in mocked_glob.return_value])
mocked_combine.assert_not_called()
StudentImplementation.from_cache()
mocked_combine.assert_called()
def test_combine():
"""
"""
_, stu = generate_impl()
stu2 = deepcopy(stu)
stu2.footprint.add_value([1, 2, 3, 4], stu2.footprint.num_steps + 1)
comb = StudentImplementation.combine([stu, stu2])
assert len(comb.footprint.values) == len(stu.footprint.values) + 1
assert comb.footprint.num_steps == stu.footprint.num_steps + stu2.footprint.num_steps
assert comb.footprint.get_timestamp(-1) == stu.footprint.num_steps + stu2.footprint.num_steps
def test_generate_student_impls():
"""
"""
num_notebooks = 6
nb, stu = generate_impl()
nbs = [nb] * num_notebooks
with mock.patch("pybryt.student.execute_notebook") as mocked_execute:
mocked_execute.return_value = deepcopy(stu.footprint)
stus = generate_student_impls(nbs)
assert all(s == stu for s in stus)
with mock.patch("pybryt.student.Process") as mocked_process:
with mock.patch("pybryt.student.Queue") as mocked_queue:
mocked_queue.return_value = mock.MagicMock(wraps=MockedQueue())
stus = generate_student_impls(nbs, parallel=True)
assert all(s == stu for s in stus)
| 34.714286 | 129 | 0.643882 | """"""
import os
import nbformat
import pkg_resources
import pytest
import tempfile
from copy import deepcopy
from functools import lru_cache
from textwrap import dedent
from unittest import mock
from pybryt import (
check, generate_student_impls, ReferenceImplementation, ReferenceResult, StudentImplementation)
from pybryt.execution.memory_footprint import MemoryFootprint
from .test_reference import generate_reference_notebook
__PYBRYT_TRACING__ = False
def generate_student_notebook():
    """Build the sample "student" notebook used throughout these tests.

    The notebook imports pybryt, defines a ``median`` function, and runs it
    on several seeded random samples so that executing the notebook leaves a
    deterministic memory footprint.
    """
    nb = nbformat.v4.new_notebook()
    nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
        import pybryt
    """)))
    nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
        def median(S):
            sorted_S = sorted(S)
            size_of_set = len(S)
            middle = size_of_set // 2
            is_set_size_even = (size_of_set % 2) == 0
            if is_set_size_even:
                return (sorted_S[middle-1] + sorted_S[middle]) / 2
            else:
                return sorted_S[middle]
    """)))
    nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
        import numpy as np
        np.random.seed(42)
        for _ in range(10):
            vals = [np.random.randint(-1000, 1000) for _ in range(np.random.randint(1, 1000))]
            val = median(vals)
    """)))
    return nb
@lru_cache(1)
def _generate_impl_cached():
    # Executing the notebook is expensive, so do it once and cache the
    # (notebook, StudentImplementation) pair for the whole test session.
    nb = generate_student_notebook()
    return nb, StudentImplementation(nb)
def generate_impl():
    # Hand each test its own deep copy so mutations cannot leak between tests.
    return deepcopy(_generate_impl_cached())
def test_constructor():
    """
    Tests the ``StudentImplementation`` constructor: from an in-memory
    notebook, from a notebook file path (with execution mocked out), with an
    ``output`` path, and with an unsupported argument type.
    """
    nb, stu = generate_impl()
    assert stu.nb is nb
    assert isinstance(stu.footprint, MemoryFootprint)
    assert len(stu.footprint.values) == 993
    with mock.patch("pybryt.student.execute_notebook") as mocked_exec:
        mocked_exec.return_value = MemoryFootprint()
        mocked_exec.return_value.set_executed_notebook(nb)
        with tempfile.NamedTemporaryFile(mode="w+", suffix=".ipynb") as ntf:
            nbformat.write(nb, ntf.name)
            stu = StudentImplementation(ntf.name)
            assert stu.footprint.num_steps == -1
            assert stu.footprint.values == []
            assert stu.footprint.calls == []
            assert stu.nb == nb
            with tempfile.NamedTemporaryFile(mode="w+", suffix=".ipynb") as output_ntf:
                stu = StudentImplementation(ntf.name, output=output_ntf.name)
                assert nbformat.read(output_ntf.name, as_version=nbformat.NO_CONVERT) == nb
    with pytest.raises(TypeError, match="path_or_nb is of unsupported type <class 'int'>"):
        StudentImplementation(1)
def test_load_and_dump():
    """
    Tests round-tripping a ``StudentImplementation`` through ``dump``/``load``
    (file-based) and ``dumps``/``loads`` (string-based).
    """
    _, stu = generate_impl()
    with tempfile.NamedTemporaryFile() as ntf:
        stu.dump(ntf.name)
        stu2 = StudentImplementation.load(ntf.name)
        assert len(stu.footprint.values) == len(stu2.footprint.values)
        assert stu.footprint.num_steps == stu2.footprint.num_steps
    enc_stu = stu.dumps()
    stu2 = StudentImplementation.loads(enc_stu)
    assert len(stu.footprint.values) == len(stu2.footprint.values)
    assert stu.footprint.num_steps == stu2.footprint.num_steps
def test_check():
    """
    Tests ``StudentImplementation.check`` with a single reference, a list of
    references, and an invalid argument type.
    """
    ref = ReferenceImplementation.compile(generate_reference_notebook(), name="foo")
    nb, stu = generate_impl()
    res = stu.check(ref)
    assert isinstance(res, ReferenceResult)
    res = stu.check([ref])
    assert isinstance(res, list) and len(res) == 1 and isinstance(res[0], ReferenceResult)
    with pytest.raises(TypeError, match="check cannot take values of type <class 'int'>"):
        stu.check(1)
def test_errors():
    """
    Tests that a notebook raising an exception produces a warning and that
    the error is recorded on ``StudentImplementation.errors``.
    """
    nb = nbformat.v4.new_notebook()
    nb.cells.append(nbformat.v4.new_code_cell("raise Exception()"))
    with pytest.warns(UserWarning, match="Executing student notebook produced errors in the notebook"):
        stu = StudentImplementation(nb)
    assert len(stu.errors) == 1
    assert stu.errors[0]["ename"] == "Exception"
    assert stu.errors[0]["evalue"] == ""
    assert stu.errors[0]["output_type"] == "error"
    assert isinstance(stu.errors[0]["traceback"], list)
    assert len(stu.errors[0]) == 4
def test_check_cm(capsys):
    """
    Tests the ``check`` context manager: report output with caching disabled,
    checking against a reference file path, error cases, per-annotation-group
    checking, and the result-caching path.
    """
    ref = ReferenceImplementation.compile(generate_reference_notebook(), name="foo")
    _, stu = generate_impl()
    with mock.patch.object(check, "_cache_check") as mocked_cache:
        # cache=False: no cache write, but tracing starts/stops and the
        # report is printed.
        with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
            mocked_frame_tracer.return_value.get_footprint.return_value = stu.footprint
            check_cm = check(ref, cache=False)
            with check_cm:
                pass
            mocked_cache.assert_not_called()
            mocked_frame_tracer.return_value.start_trace.assert_called()
            mocked_frame_tracer.return_value.end_trace.assert_called()
            captured = capsys.readouterr()
            expected = dedent("""\
            REFERENCE: foo
            SATISFIED: True
            MESSAGES:
            - SUCCESS: Sorted the sample correctly
            - SUCCESS: Computed the size of the sample
            - SUCCESS: computed the correct median
            """)
            assert captured.out == expected
        # Checking against a reference loaded from a file path (and a list
        # containing a path) caches and produces the same report.
        with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
            mocked_frame_tracer.return_value.get_footprint.return_value = stu.footprint
            ref_filename = pkg_resources.resource_filename(__name__, os.path.join("files", "expected_ref.pkl"))
            check_cm = check(ref_filename)
            with check_cm:
                pass
            mocked_cache.assert_called()
            check_cm2 = check([ref_filename])
            assert check_cm._ref == check_cm2._ref
            captured = capsys.readouterr()
            expected = dedent("""\
            REFERENCE: foo
            SATISFIED: True
            MESSAGES:
            - SUCCESS: Sorted the sample correctly
            - SUCCESS: Computed the size of the sample
            - SUCCESS: computed the correct median
            """)
            assert captured.out == expected
    # test errors
    with pytest.raises(ValueError, match="Cannot check against an empty list of references"):
        check([])
    with pytest.raises(TypeError, match="Invalid values in the reference list"):
        check([ref, "path", 1])
    # check by annotation group
    with mock.patch.object(StudentImplementation, "from_footprint") as mocked_ff, \
            mock.patch("pybryt.student.FrameTracer"), \
            mock.patch("pybryt.student.generate_report"):
        ref = ReferenceImplementation("groups", [])
        for run_group in ["1", "2", None]:
            with check(ref, group=run_group):
                pass
            mocked_ff.return_value.check.assert_called_with([ref], group=run_group)
    # check caching
    with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
        with mock.patch("pybryt.student.StudentImplementation") as mocked_stu, \
                mock.patch("pybryt.student.generate_report") as mocked_generate, \
                mock.patch("pybryt.student.os.makedirs") as mocked_makedirs:
            mocked_stu.from_footprint.return_value.check.return_value = [mock.MagicMock()]
            mocked_stu.from_footprint.return_value.check.return_value[0].name = "foo"
            check_cm = check(ref)
            with check_cm:
                check_cm._footprint = stu.footprint
            mocked_makedirs.assert_called_with(".pybryt_cache", exist_ok=True)
            mocked_stu.from_footprint.return_value.dump.assert_called()
            mocked_stu.from_footprint.return_value.check.return_value[0].dump.assert_called_with(".pybryt_cache/foo_results.pkl")
def test_from_cache():
    """
    Tests ``StudentImplementation.from_cache``: with ``combine=False`` every
    cached file is loaded individually, and with the default the loaded
    implementations are combined.
    """
    with mock.patch("pybryt.student.glob") as mocked_glob, \
            mock.patch.object(StudentImplementation, "load") as mocked_load, \
            mock.patch.object(StudentImplementation, "combine") as mocked_combine:
        mocked_glob.return_value = [".pybryt_cache/student_impl_foo.pkl", ".pybryt_cache/student_impl_bar.pkl"]
        StudentImplementation.from_cache(combine=False)
        mocked_load.assert_has_calls([mock.call(fp) for fp in mocked_glob.return_value])
        mocked_combine.assert_not_called()
        StudentImplementation.from_cache()
        mocked_combine.assert_called()
def test_combine():
    """Checks ``StudentImplementation.combine`` merges footprints end-to-end."""
    _, first = generate_impl()
    second = deepcopy(first)
    # record one extra value in the copy so the merged footprint differs
    second.footprint.add_value([1, 2, 3, 4], second.footprint.num_steps + 1)
    merged = StudentImplementation.combine([first, second])
    assert len(merged.footprint.values) == len(first.footprint.values) + 1
    total_steps = first.footprint.num_steps + second.footprint.num_steps
    assert merged.footprint.num_steps == total_steps
    assert merged.footprint.get_timestamp(-1) == total_steps
def test_generate_student_impls():
    """
    Checks ``generate_student_impls`` in both serial and parallel modes: every
    returned implementation should equal the one produced by ``generate_impl``.
    """
    num_notebooks = 6
    nb, stu = generate_impl()
    # same notebook repeated, so every resulting implementation should be equal
    nbs = [nb] * num_notebooks
    # serial path: execute_notebook is mocked to return a copy of the footprint
    with mock.patch("pybryt.student.execute_notebook") as mocked_execute:
        mocked_execute.return_value = deepcopy(stu.footprint)
        stus = generate_student_impls(nbs)
        assert all(s == stu for s in stus)
    # parallel path: Process is mocked out, and a fake Queue hands back
    # (notebook, implementation) pairs exactly num_notebooks times
    with mock.patch("pybryt.student.Process") as mocked_process:
        class MockedQueue:
            # counts empty() calls so the consumer loop terminates after
            # num_notebooks iterations
            def __init__(self, *args, **kwargs):
                self.calls = 0
            def empty(self, *args, **kwargs):
                if self.calls >= num_notebooks:
                    return True
                self.calls += 1
                return False
            def get(self, *args, **kwargs):
                return (nb, stu)
        with mock.patch("pybryt.student.Queue") as mocked_queue:
            mocked_queue.return_value = mock.MagicMock(wraps=MockedQueue())
            stus = generate_student_impls(nbs, parallel=True)
            assert all(s == stu for s in stus)
| 882 | -3 | 203 |
b4e10b947ebfeca07132cef83cf18dbccada5bc5 | 3,122 | py | Python | model.py | guocheng2018/ner_bilstm_crf | 603f2995f6a73ebf63bc4416e98ea2656dbe9eaa | [
"MIT"
] | 15 | 2019-08-06T06:35:11.000Z | 2021-11-01T08:41:37.000Z | model.py | guocheng2018/ner_bilstm_crf | 603f2995f6a73ebf63bc4416e98ea2656dbe9eaa | [
"MIT"
] | null | null | null | model.py | guocheng2018/ner_bilstm_crf | 603f2995f6a73ebf63bc4416e98ea2656dbe9eaa | [
"MIT"
] | 2 | 2020-12-22T08:08:29.000Z | 2021-01-06T04:35:24.000Z | # Author: GC
from typing import List
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchcrf import CRF
class BiLSTM_CRF(nn.Module):
"""
Args:
vocab_size: size of word vocabulary
num_tags: total tags
embed_dim: word embedding dimension
hidden_dim: output dimension of BiLSTM at each step
dropout: dropout rate (apply on embeddings)
Attributes:
vocab_size: size of word vocabulary
num_tags: total tags
"""
def _get_emissions(
self, seqs: torch.LongTensor, masks: torch.ByteTensor
) -> torch.Tensor:
"""Get emission scores from BiLSTM
Args:
seqs: (seq_len, batch_size), sorted by length in descending order
masks: (seq_len, batch_size), sorted by length in descending order
Returns:
emission scores (seq_len, batch_size, num_tags)
"""
embeds = self.embeds(seqs) # (seq_len, batch_size, embed_dim)
embeds = self.dropout(embeds)
packed = pack_padded_sequence(embeds, masks.sum(0))
lstm_out, _ = self.lstm(packed)
lstm_out, _ = pad_packed_sequence(lstm_out) # (seq_len, batch_size, hidden_dim)
# Space Transform (seq_len, batch_size, num_tags)
emissions = self.hidden2tag(lstm_out)
return emissions
def loss(
self, seqs: torch.LongTensor, tags: torch.LongTensor, masks: torch.ByteTensor
) -> torch.Tensor:
"""Negative log likelihood loss
Args:
seqs: (seq_len, batch_size), sorted by length in descending order
tags: (seq_len, batch_size), sorted by length in descending order
masks: (seq_len, batch_size), sorted by length in descending order
Returns:
loss
"""
emissions = self._get_emissions(seqs, masks)
loss = -self.crf(emissions, tags, mask=masks, reduction="mean")
return loss
def decode(
self, seqs: torch.LongTensor, masks: torch.ByteTensor
) -> List[List[int]]:
"""Viterbi decode
Args:
seqs: (seq_len, batch_size), sorted by length in descending order
masks: (seq_len, batch_size), sorted by length in descending order
Returns:
List of list containing the best tag sequence for each batch
"""
emissions = self._get_emissions(seqs, masks)
best_tags = self.crf.decode(emissions, mask=masks)
return best_tags
| 32.520833 | 88 | 0.6246 | # Author: GC
from typing import List
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchcrf import CRF
class BiLSTM_CRF(nn.Module):
    """BiLSTM encoder topped with a CRF layer for sequence tagging.

    Args:
        vocab_size: size of word vocabulary
        num_tags: total tags
        embed_dim: word embedding dimension
        hidden_dim: output dimension of BiLSTM at each step
        dropout: dropout rate (apply on embeddings)

    Attributes:
        vocab_size: size of word vocabulary
        num_tags: total tags
    """

    def __init__(
        self,
        vocab_size: int,
        num_tags: int,
        embed_dim: int,
        hidden_dim: int,
        dropout: float,
    ) -> None:
        super(BiLSTM_CRF, self).__init__()
        self.vocab_size = vocab_size
        self.num_tags = num_tags
        # Layers (attribute names and construction order are part of the
        # checkpoint format — keep them stable)
        self.dropout = nn.Dropout(dropout)
        self.embeds = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim // 2, bidirectional=True)
        self.hidden2tag = nn.Linear(hidden_dim, num_tags)
        self.crf = CRF(num_tags)

    def _get_emissions(
        self, seqs: torch.LongTensor, masks: torch.ByteTensor
    ) -> torch.Tensor:
        """Compute per-step tag emission scores with the BiLSTM.

        Args:
            seqs: (seq_len, batch_size), sorted by length in descending order
            masks: (seq_len, batch_size), sorted by length in descending order

        Returns:
            emission scores (seq_len, batch_size, num_tags)
        """
        embedded = self.dropout(self.embeds(seqs))  # (seq_len, batch, embed_dim)
        lengths = masks.sum(0)  # true sequence lengths from the mask
        packed_output, _ = self.lstm(pack_padded_sequence(embedded, lengths))
        # back to a padded tensor: (seq_len, batch, hidden_dim)
        unpacked, _ = pad_packed_sequence(packed_output)
        # project into tag space: (seq_len, batch, num_tags)
        return self.hidden2tag(unpacked)

    def loss(
        self, seqs: torch.LongTensor, tags: torch.LongTensor, masks: torch.ByteTensor
    ) -> torch.Tensor:
        """Negative log likelihood loss.

        Args:
            seqs: (seq_len, batch_size), sorted by length in descending order
            tags: (seq_len, batch_size), sorted by length in descending order
            masks: (seq_len, batch_size), sorted by length in descending order

        Returns:
            loss
        """
        scores = self._get_emissions(seqs, masks)
        # CRF returns log-likelihood; negate for a minimizable loss
        return -self.crf(scores, tags, mask=masks, reduction="mean")

    def decode(
        self, seqs: torch.LongTensor, masks: torch.ByteTensor
    ) -> List[List[int]]:
        """Viterbi decode.

        Args:
            seqs: (seq_len, batch_size), sorted by length in descending order
            masks: (seq_len, batch_size), sorted by length in descending order

        Returns:
            List of list containing the best tag sequence for each batch
        """
        scores = self._get_emissions(seqs, masks)
        return self.crf.decode(scores, mask=masks)
| 540 | 0 | 27 |
334b5c96c0c80b3fbce0e0dc2a93bc9f82a0ef74 | 15,290 | py | Python | backend/api/admin/user.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 7 | 2018-05-20T08:56:08.000Z | 2022-03-11T15:50:54.000Z | backend/api/admin/user.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 2 | 2021-06-08T21:12:51.000Z | 2022-01-13T01:25:27.000Z | backend/api/admin/user.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 5 | 2016-10-09T14:52:09.000Z | 2020-12-25T01:04:35.000Z | # -*- coding: utf-8 -*-
import conf
import errors
import logbook
import bottle
import posixpath
from urllib.parse import urljoin
from memdb.token import UserToken
from model import User, autocommit, MessageTemplate
from model import display
from memdb.token import PasswordResetToken
from api import get, post, put, delete, AdminApi, local_properties, request_base_url, options, ADMIN_TOKEN_NAME, \
enable_cors
from api.check_params import check_params
from api.validator import Date, IndexSizeLimit, FilterMode, IntRange, Email, Visibility, String, \
TokenId, List, StringWithLimits, ValidateError, Bool, ModelId, SortFields
from api.admin.role import TokenAdmin, TokenManager, Roles
from model.account.role import Role
from utils.i18n import preferred_language
UserIdExpand = ModelId(User, errors.UserNotFound)
PasswordValidator = StringWithLimits(conf.user.min_password_length, conf.user.max_password_length)
| 35.311778 | 114 | 0.578875 | # -*- coding: utf-8 -*-
import conf
import errors
import logbook
import bottle
import posixpath
from urllib.parse import urljoin
from memdb.token import UserToken
from model import User, autocommit, MessageTemplate
from model import display
from memdb.token import PasswordResetToken
from api import get, post, put, delete, AdminApi, local_properties, request_base_url, options, ADMIN_TOKEN_NAME, \
enable_cors
from api.check_params import check_params
from api.validator import Date, IndexSizeLimit, FilterMode, IntRange, Email, Visibility, String, \
TokenId, List, StringWithLimits, ValidateError, Bool, ModelId, SortFields
from api.admin.role import TokenAdmin, TokenManager, Roles
from model.account.role import Role
from utils.i18n import preferred_language
UserIdExpand = ModelId(User, errors.UserNotFound)
class PasswordResetTokenValidator(object):
    """Parameter validator: resolves a raw token id into a PasswordResetToken."""
    def __call__(self, value):
        """Return the PasswordResetToken for *value*, or raise ValidateError."""
        try:
            return PasswordResetToken.get(value)
        except errors.PasswordResetTokenInvalid as e:
            # Re-raise as a ValidateError so the API layer reports the invalid
            # token as a bad request parameter rather than a server error.
            raise ValidateError(e.message)
PasswordValidator = StringWithLimits(conf.user.min_password_length, conf.user.max_password_length)
class UserApi(AdminApi):
    """REST endpoints for user management: registration, auth, profile updates,
    removal, password reset and listing."""

    # Path prefix of the admin SPA, used to build password-reset links.
    ADMIN_FRONTEND_PATH = "/admin/"

    @post("user/")
    @check_params(
        token=TokenManager,
        email=(Email, IndexSizeLimit),
        password=PasswordValidator,
        role=Roles,
        name=StringWithLimits(max_length=256),
    )
    @autocommit
    def new_user(self, token, email, role, password=None, name=None):
        """
        Registration of new user.

        :param Email email: User email (Email_);
        :param str password: User Password. If it is empty then the password
            recovery email will be sent to the email.
        :param str role: User role.
        :param str name: User display name [optional]
        :returns dict user_info: User info.

        **Example**::

            {
                "user_info": {
                    {"name": "Super Admin",
                     "deleted": null,
                     "email": "admin@test.ru",
                     "role": {'localized_name': {'en': 'Administrator', 'ru': 'Администратор'},
                              'role_id': 'admin'}
                     "created": "2015-04-24T11:14:22"}
                }
            }
        """
        if not Role.validate(token.role, role):
            raise errors.UserInvalidRole()
        if token.role != Role.ADMIN and token.role == role:
            # user can administrate only users with low priority
            raise errors.UserInvalidRole()
        user_info = User.new_user(email, password, role, name=name)
        if not password:
            self.send_password_reset_email(email, request_base_url(), for_new_user=True)
        return {"user_info": display(user_info)}

    @post('auth/')
    @enable_cors
    @check_params(email=(Email, IndexSizeLimit), password=PasswordValidator, return_user_info=Bool)
    def login(self, email, password, return_user_info=False):
        """
        Auth user by email and password. This method setup cookie which can be used in next requests.

        :param Email email: User Email_.
        :param str password: User password (flat text).
        :param Bool return_user_info: Return user info of logged user.
        :return dict user_info: User info

        **Example**::

            {
                "user_info": {
                    {"name": "Super Admin",
                     "deleted": null,
                     "email": "admin@test.ru",
                     "role": {'localized_name': {'en': 'Administrator', 'ru': 'Администратор'},
                              'role_id': 'admin'}
                     "created": "2015-04-24T11:14:22"}
                }
            }
        """
        new_token, user_info = User.login(email, password)
        setattr(local_properties, 'user_token', new_token)
        cookie_flags = {"httponly": True}
        if conf.api.secure_cookie and not conf.test:
            cookie_flags["secure"] = True
        bottle.response.set_cookie(ADMIN_TOKEN_NAME, new_token.id, path="/", **cookie_flags)
        user_info = display(user_info) if return_user_info else {}
        return {"user_info": user_info}

    @options('auth/')
    @enable_cors
    def login_options(self):
        """CORS preflight handler for the auth endpoint."""
        r = bottle.HTTPResponse("")
        r.content_type = "text/html"
        return r

    @post('logout/')
    @check_params(token=TokenId)
    def logout(self, token):
        """
        Stop user session.
        """
        UserToken.remove(token)
        # Fix: delete the same cookie name that login() sets (ADMIN_TOKEN_NAME)
        # instead of the hard-coded "token", so the session cookie is cleared.
        bottle.response.delete_cookie(ADMIN_TOKEN_NAME, path="/")
        return {}

    @get('user/me/')
    @check_params(token=TokenId)
    def get_info(self, token):
        """
        Return user info of current user.

        :return dict user_info: User info

        **Example**::

            {
                "user_info": {
                    {"name": "Super Admin",
                     "deleted": null,
                     "email": "admin@test.ru",
                     "role": {'localized_name': {'en': 'Administrator', 'ru': 'Администратор'},
                              'role_id': 'admin'}
                     "created": "2015-04-24T11:14:22"}
                }
            }
        """
        user = User.get_by_id(token.user_id)
        if user is None:
            logbook.debug("User not found by id {}", token.user_id)
            raise errors.UserInvalidToken()
        return {"user_info": display(user)}

    @get('user/<user_id>/')
    @check_params(
        token=TokenId,
        user_id=ModelId)
    def get_others_info(self, user_id):
        """
        returns user info

        :param user user_id: user id
        :return dict user_info: the dict has the same structure as result of
            method :obj:`get /0/user/me/ <view.get /0/user/me>`
        """
        user = User.get_by_id(user_id)
        if user is None:
            raise errors.UserNotFound()
        return {"user_info": display(user)}

    # noinspection PyUnusedLocal
    @put('user/me/')
    @check_params(
        all_parameters=True,
        token=TokenId,
        password=PasswordValidator,
        name=StringWithLimits(max_length=256),
        email=(Email, IndexSizeLimit)
    )
    @autocommit
    def update(self, token, all_parameters, password=None, name=None, email=None):
        """
        Update user self profile.

        :param str password: New password [optional]
        :param str name: New name [optional]
        :param str email: New email [optional]
        :return dict user_info: User info
        """
        return self.update_user_common(User.get_by_id(token.user_id), all_parameters)

    # noinspection PyUnusedLocal
    @put('user/<user>/')
    @check_params(
        all_parameters=True,
        token=TokenAdmin,
        user=UserIdExpand,
        password=PasswordValidator,
        name=StringWithLimits(max_length=256),
        email=(Email, IndexSizeLimit),
        role=Roles
    )
    @autocommit
    def update_other(self, token, user, all_parameters, password=None,
                     name=None, email=None, role=None):
        """
        Update user profile of other user.

        :param User user: User id
        :param str password: New password [optional]
        :param str name: New name [optional]
        :param str email: New email [optional]
        :return dict user_info: Dict with user info, as in :obj:`PUT /0/user/me/ <view.PUT /0/user/me>`
        """
        # "user" is the target, not an updatable field — drop it before update.
        all_parameters.pop("user", None)
        return self.update_user_common(user, all_parameters)

    @staticmethod
    def update_user_common(user, all_parameters):
        """Apply *all_parameters* to *user* and return the displayed info."""
        if not all_parameters:
            raise errors.NothingForUpdate()
        user.update(all_parameters)
        return {"user_info": display(user)}

    @delete('user/me/')
    @check_params(token=TokenId)
    @autocommit
    def remove(self, token):
        """
        Mark myself user as removed

        :return: None
        """
        user = User.get_by_id(token.user_id)
        if not user.mark_removed():
            raise errors.UserRemoved()
        return {}

    @delete('user/<user>/')
    @check_params(
        token=TokenAdmin,
        user=UserIdExpand
    )
    @autocommit
    def remove_other(self, token, user):
        """
        Mark user as removed

        :param: None
        """
        logbook.info("Deleting user {} by {}", user, token.user_id)
        # Admins cannot delete themselves through this endpoint.
        if int(token.user_id) == user.user_id:
            raise errors.HarakiriIsNotAllowed()
        if not user.mark_removed():
            raise errors.UserRemoved()
        return {}

    @delete('user/password_reset/')
    @check_params(email=(Email, IndexSizeLimit))
    def request_password_reset(self, email):
        """
        Sent email with link to reset password

        :param Email email: Email_ - user email
        :return: None.
        """
        self.send_password_reset_email(email, request_base_url())
        return {}

    @post('user/password_reset/<password_token>/')
    @check_params(password_token=PasswordResetTokenValidator, password=PasswordValidator)
    @autocommit
    def password_reset(self, password_token, password):
        """
        Reset user password

        :param PasswordResetToken password_token: Token which was returned by method
            :obj:`POST /0/user/request_password_reset/ <view.POST /0/user/request_password_reset>`;
        :param str password: New password.
        :return: None
        """
        # noinspection PyUnresolvedReferences
        user = User.get_by_id(password_token.user_id)
        user.password_reset(password)
        # Invalidate all active sessions and the one-time reset token.
        UserToken.remove_by(user.user_id)
        PasswordResetToken.remove(password_token)
        return {}

    # noinspection PyUnusedLocal
    @get('user/password_reset/<password_token>/')
    @check_params(password_token=PasswordResetTokenValidator)
    def validate_password_reset(self, password_token):
        """
        Checks that password reset token is valid.

        :param PasswordResetToken password_token: Token which was returned by method
            :obj:`POST /0/user/request_password_reset/ <view.POST /0/user/request_password_reset>`.
        :return: None.
        """
        return {}

    # noinspection PyUnusedLocal
    @get('user/')
    @check_params(
        token=TokenId,
        role=Roles,
        role_list=List(Roles),
        name=String,
        visibility=Visibility,
        deleted_before=Date(), deleted_after=Date(),
        email=String,
        created_before=Date(), created_after=Date(),
        page=IntRange(1),
        limit=IntRange(1, conf.api.pagination.limit),
        sort=List(SortFields(User)),  # Sort(User.Meta.sort_fields),
        all_parameters=True
    )
    def list(self, email=None, role=None,
             role_list=None, name=None,
             visibility=Visibility.DEFAULT,
             deleted_before=None, deleted_after=None,
             created_before=None, created_after=None,
             page=1, limit=conf.api.pagination.limit,
             sort=('email',), all_parameters=True):
        """
        Return filtered user list.

        :param str email: Mask for email
        :param Role role: Role_ - user role
        :param List role_list: user role list
        :param str visibility: Visibility options

            *visible* - Only active users, [by default]

            *deleted* - Only removed users.

            *all* - All users.
        :param Date deleted_before: Date_ - Filter users which were archived before this date.
        :param Date deleted_after: Date_ - Filter users which were archived after this date.
        :param Date created_before: Date_ - Filter users which were created before this date.
        :param Date created_after: Date_ - Filter users which were created after this date.
        :param int page: Page
        :param int limit: Number of elements per page
        :param str or List sort: Field name or list of field names which is used for sorting.
            Ascending sort is default. For descending sort use "-" before name.
            Default sorting field: ('email');
        :return List user_list: List of users for this query.

        **Example**::

            {
                "user_list": {
                    "total": 2,
                    "limit": 200,
                    "offset": 0
                    "items": [
                    {
                        "name": null,
                        "created": "2013-09-19T06:42:03.747000+00:00",
                        "deleted": null,
                        "role": {'localized_name': {'en': 'Administrator', 'ru': 'Администратор'},
                                 'role_id': 'admin'}
                        "user_id": "523a9cbb312f9120c41b96b5",
                        "email": "list_test0@test.ru"
                    },
                    {
                        "name": null,
                        "created": "2013-09-19T06:42:03.823000+00:00",
                        "deleted": null,
                        "role": {'localized_name': {'en': 'Administrator', 'ru': 'Администратор'},
                                 'role_id': 'admin'}
                        "user_id": "523a9cbb312f9120c41b96b6",
                        "email": "list_test1@test.ru"
                    }]}
            }
        """
        # noinspection PyUnresolvedReferences
        all_parameters.setdefault("limit", limit)
        # noinspection PyUnresolvedReferences
        all_parameters.setdefault("page", page)
        # noinspection PyUnresolvedReferences
        all_parameters.setdefault("sort", sort)
        query = None
        if role_list:
            query = User.query.filter(User.role.in_(role_list))
        query = User.api_filter(all_parameters, query=query, visibility=visibility)
        return {"user_list": self.paginated_list(query)}

    @staticmethod
    def send_password_reset_email(email, base_url, for_new_user=False):
        """Create a reset token for *email* and queue the reset/activation mail.

        :param email: target user's email; raises UserNotFound if unknown.
        :param base_url: base URL used to build the frontend reset link.
        :param for_new_user: selects the "new user" template and URL name.
        :return: the resolved User.
        """
        # Imported lazily to avoid a circular import at module load time.
        from task.mail import send_email
        user = User.get_by_email(email, include_deleted=False)
        if user is None:
            raise errors.UserNotFound()
        token = PasswordResetToken.create(user)
        url = urljoin(base_url, posixpath.join(UserApi.ADMIN_FRONTEND_PATH,
                                               "set-password/{}".format(token.id)))
        template_id = MessageTemplate.NEW_USER if for_new_user else MessageTemplate.USER_PASSWORD_RESET
        url_name = "activate_url" if for_new_user else "password_reset_url"
        params = {url_name: url}
        subject, body = MessageTemplate.get_rendered_message(template_id, language=preferred_language(),
                                                             user_name=user.name, **params)
        send_email.delay(email, subject, body)
        return user
| 1,338 | 13,018 | 72 |
345c69b0cd3d3611e0cbaa6da0053f9746981d6b | 317 | py | Python | nlp100/chapter-02/11.py | nwiizo/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | 1 | 2017-01-11T06:12:24.000Z | 2017-01-11T06:12:24.000Z | nlp100/chapter-02/11.py | ShuyaMotouchi/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | null | null | null | nlp100/chapter-02/11.py | ShuyaMotouchi/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#trコマンド
"""
python3 12.py 置換前 置換後
"""
import sys
if __name__=='__main__':
print (tra(sys.argv[1],sys.argv[2],sys.argv[3]))
| 12.68 | 49 | 0.615142 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#trコマンド
"""
python3 12.py 置換前 置換後
"""
import sys
def tra(file1, val1, val2):
    """tr-like replace: return file1's contents with val1 replaced by val2.

    The original printed the result inside ``finally`` (crashing with a
    NameError when the file could not be opened, since ``f`` was unbound)
    and returned None, so the ``__main__`` caller printed a spurious "None".
    Returning the text lets the caller's print emit it exactly once.

    Prints "miss" and returns None when the file cannot be read.
    """
    try:
        with open(file1) as f:
            text = f.read()
    except OSError:
        # narrow, explicit failure handling instead of a bare except
        print("miss")
        return None
    return text.replace(val1, val2)
if __name__=='__main__':
print (tra(sys.argv[1],sys.argv[2],sys.argv[3]))
| 116 | 0 | 23 |
a5b5842b55f82ce2bfb162bae3b7edfff3d1aba8 | 457 | py | Python | script.deluge/resources/lib/basictypes/registry.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | script.deluge/resources/lib/basictypes/registry.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | script.deluge/resources/lib/basictypes/registry.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | """Mapping from core types/classes to stand-in DataTypeDefinitions"""
REGISTRY = {
}
def registerDT(base, DT):
"""Register a DataTypeDefinition for a given base-class"""
REGISTRY[base] = DT
def getDT(base):
"""Return the appropriate DT for the given base-class
This looks up the base in the registry, returning
either a registered stand-alone data-type-definition
or the base itself.
"""
return REGISTRY.get(base, base)
| 24.052632 | 69 | 0.700219 | """Mapping from core types/classes to stand-in DataTypeDefinitions"""
# Registry mapping a base class to its stand-in DataTypeDefinition.
REGISTRY = {}


def registerDT(base, DT):
    """Register a DataTypeDefinition for a given base-class"""
    REGISTRY[base] = DT


def getDT(base):
    """Return the appropriate DT for the given base-class

    Looks *base* up in the registry and returns either the registered
    stand-alone data-type-definition or, when none was registered,
    *base* itself.
    """
    return REGISTRY.get(base, base)
| 0 | 0 | 0 |
e14ec41089318bb31e1accb2b3548a22ce24ef83 | 1,056 | py | Python | layered_settings/loaders/config_parser_loader.py | mathandpencil/layered-settings | 8c3a48cc5ac7436a4ac7c1a72e81dad0e58ac7d8 | [
"MIT"
] | 1 | 2021-03-21T01:43:22.000Z | 2021-03-21T01:43:22.000Z | layered_settings/loaders/config_parser_loader.py | mathandpencil/layered-settings | 8c3a48cc5ac7436a4ac7c1a72e81dad0e58ac7d8 | [
"MIT"
] | null | null | null | layered_settings/loaders/config_parser_loader.py | mathandpencil/layered-settings | 8c3a48cc5ac7436a4ac7c1a72e81dad0e58ac7d8 | [
"MIT"
] | null | null | null | import functools
import logging
import configparser
import os
from .base_loader import BaseLoader
logger = logging.getLogger(__name__)
| 29.333333 | 78 | 0.638258 | import functools
import logging
import configparser
import os
from .base_loader import BaseLoader
logger = logging.getLogger(__name__)
class ConfigParserLoader(BaseLoader):
    """Settings loader backed by an INI file parsed with ``configparser``."""
    def __init__(self, path, ignore_if_missing=False):
        # path: INI file to read; ignore_if_missing: when True, a missing file
        # is tolerated instead of raising FileNotFoundError.
        # NOTE(review): when the file is missing and ignore_if_missing is True,
        # self.config is never assigned — get_setting() would then raise
        # AttributeError; confirm that callers never query a missing loader.
        self.path = path
        if os.path.exists(self.path):
            self.config = configparser.ConfigParser()
            self.config.read(self.path)
            logger.debug(f"Registered .ini filename source from {self.path}")
        else:
            if not ignore_if_missing:
                raise FileNotFoundError(self.path)
    def get_setting(self, section, key):
        """Return the value for *key* in *section*; raises KeyError if absent."""
        return _get_from_config_parser(self.config, section, key)
    def __str__(self):
        return f"ConfigParser from {self.path}"
def _get_from_config_parser(cp, section, key):
# print(f"Checking {cp} for {section} {key}...")
try:
return cp.get(section, key)
except:
raise KeyError(section + "_" + key)
| 756 | 16 | 135 |
db52d0e6993745640c28ee65e947b48ae54fd623 | 6,935 | py | Python | music_player.py | PracticalMetal/Music-Player | 26b3b664ea6d4e10b5890b3f3d2c226a653e92a3 | [
"Apache-2.0"
] | null | null | null | music_player.py | PracticalMetal/Music-Player | 26b3b664ea6d4e10b5890b3f3d2c226a653e92a3 | [
"Apache-2.0"
] | null | null | null | music_player.py | PracticalMetal/Music-Player | 26b3b664ea6d4e10b5890b3f3d2c226a653e92a3 | [
"Apache-2.0"
] | null | null | null | import os
import pickle
import tkinter as tk
from tkinter import *
from tkinter import filedialog # to open songs file
from pygame import mixer # to control music play,pause
root = tk.Tk()
root.config(bg='#C35817')
root.geometry( '600x400' )
root.title( "MP3 MUSIC PLAYER 🔊 🎧" )
img = PhotoImage( file=r'images/music.png' )
img_size=img.subsample(5,5)
next = PhotoImage( file=r'images/next.png' )
prev = PhotoImage( file=r'images/previous.png' )
play = PhotoImage( file=r'images/play.png' )
pause = PhotoImage( file=r'images/pause.png' )
add=PhotoImage(file=r'images/songs.png')
add_size=add.subsample(1,1)
app = Player( master=root )
app.mainloop()
| 37.896175 | 178 | 0.589329 | import os
import pickle
import tkinter as tk
from tkinter import *
from tkinter import filedialog # to open songs file
from pygame import mixer # to control music play,pause
class Player( tk.Frame ):
    """Tkinter MP3 player frame: playlist management plus pygame.mixer playback."""

    def __init__(self, master):
        super().__init__( master )
        self.master = master
        self.pack()
        mixer.init()
        # Restore the previously saved playlist, if any.
        if os.path.exists( 'songs.pickle' ):
            with open( 'songs.pickle', 'rb' ) as f:
                self.playlist = pickle.load(f)
        else:
            self.playlist = []
        self.current = 0     # index of the active track
        self.paused = True   # True while playback is paused/stopped
        self.played = False  # True once a track has been started
        # Fix: the original unconditionally reset self.playlist = [] here,
        # discarding the playlist just loaded from songs.pickle.
        self.create_frame()
        self.track_widget()
        self.control_widget()
        self.tracklist_widget()

    def create_frame(self):
        """Build the three labeled frames: track display, playlist, controls."""
        self.track = tk.LabelFrame( self, text="SONGS TRACK", font=("Cornerstone", 15, "bold"), bg='#52595D',
                                    fg='white', bd=7, relief=tk.GROOVE )
        self.track.configure( width=410, height=300 )
        self.track.grid( row=0, column=0, padx=10 )
        self.tracklist = tk.LabelFrame( self, text=f"PlayList-{len( self.playlist )}",
                                        font=("Cornerstone", 15, "bold"), bg='#4863A0', fg='white',
                                        bd=7, relief=tk.GROOVE )
        self.tracklist.configure( width=190, height=400 )
        self.tracklist.grid( row=0, column=1, rowspan=3, pady=5 )
        self.controls = tk.LabelFrame( self, font=("times new roman", 15, "bold"), bg='#E6BF83', fg='white',
                                       bd=7, relief=tk.GROOVE )
        self.controls.configure( width=410, height=80 )
        self.controls.grid( row=2, column=0, pady=5, padx=10 )

    def track_widget(self):
        """Widgets of the track frame: artwork label and current-song label."""
        self.canvas = tk.Label( self.track, image=img_size)
        self.canvas.configure( width=400, height=240 )
        self.canvas.grid( row=0, column=0 )
        self.songtrack = tk.Label( self.track, font=("bookman old style", 15, "bold"), bg='#D8BFD8', fg='#2C3539')
        self.songtrack['text'] = 'MUSIC MP3 PLAYER'
        self.songtrack.configure( width=30, height=1 )
        self.songtrack.grid( row=1, column=0 )

    def control_widget(self):
        """Playback controls: load, previous, play/pause, next, volume slider."""
        self.loadSongs = tk.Button( self.controls,image=add_size, font=10,bg='black')
        self.loadSongs['text'] = "Load Songs"
        self.loadSongs['command'] = self.retrieve_songs
        self.loadSongs.grid( row=0, column=0, padx=10 )
        self.prev = tk.Button( self.controls, bg='#E42217', fg='#DCD0FF', font=10, image=prev )
        self.prev['command'] = self.pre_song
        self.prev.grid( row=0, column=1 )
        self.pause = tk.Button( self.controls, bg='#E42217', fg='#DCD0FF', font=10, image=pause )
        self.pause['command'] = self.pause_song
        self.pause.grid( row=0, column=2 )
        self.next = tk.Button( self.controls, bg='#E42217', fg='#DCD0FF', font=10, image=next )
        self.next['command'] = self.next_song
        self.next.grid( row=0, column=3 )
        # Volume slider: 0-10 mapped to mixer volume 0.0-1.0.
        self.volume = tk.DoubleVar()
        self.slider = tk.Scale( self.controls, from_=0, to=10, orient=tk.HORIZONTAL,bg='#E42217',fg='white',bd=3,font=('Incised901 BT',14,'bold'),highlightbackground = "#151B54")
        self.slider['variable'] = self.volume
        self.slider.set(5)
        mixer.music.set_volume(0.5)
        self.slider['command'] = self.change_volume
        self.slider.grid( row=0, column=4, padx=5 )

    def tracklist_widget(self):
        """Scrollable listbox showing the playlist; double-click plays a song."""
        self.scrollbar = tk.Scrollbar( self.tracklist, orient=tk.VERTICAL )
        self.scrollbar.grid( row=0, column=1, rowspan=5, sticky='ns' )
        self.list = tk.Listbox( self.tracklist, selectmode=tk.SINGLE, yscrollcommand=self.scrollbar.set,
                                selectbackground='sky blue' )
        self.enumerate_songs()
        self.list.config( height=22 )
        self.list.bind( '<Double-1>', self.play_song )
        self.scrollbar.config( command=self.list.yview )
        self.list.grid( row=0, column=0, rowspan=5 )

    def enumerate_songs(self):
        """Fill the listbox with the basenames of the playlist entries."""
        for index, song in enumerate( self.playlist ):
            self.list.insert( index, os.path.basename( song ) )

    def retrieve_songs(self):
        """Scan a user-chosen directory for .mp3 files and persist the playlist."""
        self.songlist = []
        directory = filedialog.askdirectory()
        for root__, dirs, files in os.walk( directory ):
            for file in files:
                if os.path.splitext( file )[1] == '.mp3':
                    path = (root__ + '/' + file).replace( '\\', '/' )
                    self.songlist.append( path )
        with open( 'songs.pickle', 'wb' ) as f:
            pickle.dump( self.songlist, f )
        self.playlist = self.songlist
        self.tracklist['text'] = f"PlayList-{str( len( self.playlist ) )}"
        self.list.delete( 0, tk.END )
        self.enumerate_songs()

    def play_song(self, event=None):
        """Load and play the current track; highlight it in the listbox."""
        if event is not None:
            # double-click: play the clicked entry
            self.current = self.list.curselection()[0]
            for i in range( len( self.playlist ) ):
                self.list.itemconfigure( i, bg='white' )
        mixer.music.load( self.playlist[self.current] )
        self.pause['image']=play
        self.paused=False
        self.played=True
        self.songtrack['anchor']='w'
        self.songtrack['text']=os.path.basename(self.playlist[self.current])
        self.list.activate(self.current)
        self.list.itemconfigure(self.current,bg='#C04000')
        mixer.music.play()

    def pause_song(self):
        """Toggle pause/resume; starts playback on first use."""
        if not self.paused:
            self.paused=True
            mixer.music.pause()
            self.pause['image']=pause
        else:
            if self.played==False:
                self.play_song()
            self.paused=False
            mixer.music.unpause()
            self.pause['image'] = play

    def pre_song(self):
        """Step to the previous track (clamped at the first one) and play it."""
        if self.current > 0:
            self.current-=1
        else:
            self.current=0
        self.list.itemconfigure(self.current+1,bg='white')
        self.play_song()

    def next_song(self):
        """Step to the next track (wrapping to the first) and play it."""
        if self.current < len(self.playlist)-1:
            self.current += 1
        else:
            self.current = 0
        self.list.itemconfigure( self.current-1, bg='white' )
        self.play_song()

    def change_volume(self, event=None):
        """Slider callback: map the 0-10 scale onto mixer volume 0.0-1.0."""
        self.v = self.volume.get()
        mixer.music.set_volume(self.v/10)
# --- application bootstrap: window, button artwork, and main loop ---
root = tk.Tk()
root.config(bg='#C35817')
root.geometry( '600x400' )
root.title( "MP3 MUSIC PLAYER 🔊 🎧" )
# Images must be created after Tk() and kept in module globals so they are
# not garbage-collected while widgets still reference them.
img = PhotoImage( file=r'images/music.png' )
img_size=img.subsample(5,5)
# NOTE(review): `next` shadows the builtin of the same name at module level.
next = PhotoImage( file=r'images/next.png' )
prev = PhotoImage( file=r'images/previous.png' )
play = PhotoImage( file=r'images/play.png' )
pause = PhotoImage( file=r'images/pause.png' )
add=PhotoImage(file=r'images/songs.png')
add_size=add.subsample(1,1)
app = Player( master=root )
app.mainloop()
| 5,928 | 4 | 345 |
6facc95e1a9d0c12f1abf8dcd9030878ab28b5c8 | 3,827 | py | Python | test/python/transpiler/test_pass_call.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | null | null | null | test/python/transpiler/test_pass_call.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | 12 | 2018-09-21T12:02:18.000Z | 2018-09-25T09:14:59.000Z | test/python/transpiler/test_pass_call.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test calling passes (passmanager-less)"""
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.test import QiskitTestCase
from qiskit.transpiler import PropertySet
from ._dummy_passes import PassD_TP_NR_NP, PassE_AP_NR_NP, PassN_AP_NR_NP
class TestPassCall(QiskitTestCase):
    """Test calling passes (passmanager-less)."""
    def assertMessageLog(self, context, messages):
        """Checks the log messages"""
        self.assertEqual([record.message for record in context.records], messages)
    def test_transformation_pass(self):
        """Call a transformation pass without a scheduler"""
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr, name='MyCircuit')
        pass_d = PassD_TP_NR_NP(argument1=[1, 2])
        with self.assertLogs('LocalLogger', level='INFO') as cm:
            result = pass_d(circuit)
        self.assertMessageLog(cm, ['run transformation pass PassD_TP_NR_NP', 'argument [1, 2]'])
        # a transformation pass returns the (possibly modified) circuit
        self.assertEqual(circuit, result)
    def test_analysis_pass_dict(self):
        """Call an analysis pass without a scheduler (property_set dict)"""
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr, name='MyCircuit')
        property_set = {'another_property': 'another_value'}
        pass_e = PassE_AP_NR_NP('value')
        with self.assertLogs('LocalLogger', level='INFO') as cm:
            result = pass_e(circuit, property_set)
        self.assertMessageLog(cm, ['run analysis pass PassE_AP_NR_NP', 'set property as value'])
        # the plain dict is updated in place and keeps its type
        self.assertEqual(property_set, {'another_property': 'another_value', 'property': 'value'})
        self.assertIsInstance(property_set, dict)
        self.assertEqual(circuit, result)
    def test_analysis_pass_property_set(self):
        """Call an analysis pass without a scheduler (PropertySet dict)"""
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr, name='MyCircuit')
        property_set = PropertySet({'another_property': 'another_value'})
        pass_e = PassE_AP_NR_NP('value')
        with self.assertLogs('LocalLogger', level='INFO') as cm:
            result = pass_e(circuit, property_set)
        self.assertMessageLog(cm, ['run analysis pass PassE_AP_NR_NP', 'set property as value'])
        # a PropertySet is likewise updated in place and keeps its type
        self.assertEqual(property_set,
                         PropertySet({'another_property': 'another_value', 'property': 'value'}))
        self.assertIsInstance(property_set, PropertySet)
        self.assertEqual(circuit, result)
    def test_analysis_pass_remove_property(self):
        """Call an analysis pass that removes a property without a scheduler"""
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr, name='MyCircuit')
        property_set = {'to remove': 'value to remove', 'to none': 'value to none'}
        pass_e = PassN_AP_NR_NP('to remove', 'to none')
        with self.assertLogs('LocalLogger', level='INFO') as cm:
            result = pass_e(circuit, property_set)
        self.assertMessageLog(cm, ['run analysis pass PassN_AP_NR_NP',
                                   'property to remove deleted',
                                   'property to none noned'])
        # deleted keys disappear; "noned" keys remain with value None
        self.assertEqual(property_set, PropertySet({'to none': None}))
        self.assertIsInstance(property_set, dict)
        self.assertEqual(circuit, result)
| 43.988506 | 98 | 0.674419 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test calling passes (passmanager-less)"""
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.test import QiskitTestCase
from qiskit.transpiler import PropertySet
from ._dummy_passes import PassD_TP_NR_NP, PassE_AP_NR_NP, PassN_AP_NR_NP
class TestPassCall(QiskitTestCase):
"""Test calling passes (passmanager-less)."""
def assertMessageLog(self, context, messages):
"""Checks the log messages"""
self.assertEqual([record.message for record in context.records], messages)
def test_transformation_pass(self):
"""Call a transformation pass without a scheduler"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
pass_d = PassD_TP_NR_NP(argument1=[1, 2])
with self.assertLogs('LocalLogger', level='INFO') as cm:
result = pass_d(circuit)
self.assertMessageLog(cm, ['run transformation pass PassD_TP_NR_NP', 'argument [1, 2]'])
self.assertEqual(circuit, result)
def test_analysis_pass_dict(self):
"""Call an analysis pass without a scheduler (property_set dict)"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
property_set = {'another_property': 'another_value'}
pass_e = PassE_AP_NR_NP('value')
with self.assertLogs('LocalLogger', level='INFO') as cm:
result = pass_e(circuit, property_set)
self.assertMessageLog(cm, ['run analysis pass PassE_AP_NR_NP', 'set property as value'])
self.assertEqual(property_set, {'another_property': 'another_value', 'property': 'value'})
self.assertIsInstance(property_set, dict)
self.assertEqual(circuit, result)
def test_analysis_pass_property_set(self):
"""Call an analysis pass without a scheduler (PropertySet dict)"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
property_set = PropertySet({'another_property': 'another_value'})
pass_e = PassE_AP_NR_NP('value')
with self.assertLogs('LocalLogger', level='INFO') as cm:
result = pass_e(circuit, property_set)
self.assertMessageLog(cm, ['run analysis pass PassE_AP_NR_NP', 'set property as value'])
self.assertEqual(property_set,
PropertySet({'another_property': 'another_value', 'property': 'value'}))
self.assertIsInstance(property_set, PropertySet)
self.assertEqual(circuit, result)
def test_analysis_pass_remove_property(self):
"""Call an analysis pass that removes a property without a scheduler"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
property_set = {'to remove': 'value to remove', 'to none': 'value to none'}
pass_e = PassN_AP_NR_NP('to remove', 'to none')
with self.assertLogs('LocalLogger', level='INFO') as cm:
result = pass_e(circuit, property_set)
self.assertMessageLog(cm, ['run analysis pass PassN_AP_NR_NP',
'property to remove deleted',
'property to none noned'])
self.assertEqual(property_set, PropertySet({'to none': None}))
self.assertIsInstance(property_set, dict)
self.assertEqual(circuit, result)
| 0 | 0 | 0 |
4053350b2cc870776918c441df73f7d514e34d89 | 139 | py | Python | test/conftest.py | jaimeHMol/airflow-kubernetes | f520216555c00dc87158bd7c169d3f36722acac3 | [
"MIT"
] | 6 | 2020-11-18T11:02:20.000Z | 2021-11-16T13:00:20.000Z | test/conftest.py | jaimeHMol/airflow-kubernetes | f520216555c00dc87158bd7c169d3f36722acac3 | [
"MIT"
] | null | null | null | test/conftest.py | jaimeHMol/airflow-kubernetes | f520216555c00dc87158bd7c169d3f36722acac3 | [
"MIT"
] | 2 | 2020-11-18T11:02:22.000Z | 2020-11-19T04:18:22.000Z | import pytest
from airflow.models import DagBag
@pytest.fixture(scope="session")
| 17.375 | 41 | 0.776978 | import pytest
from airflow.models import DagBag
@pytest.fixture(scope="session")
def dagbag():
return DagBag(include_examples=False)
| 34 | 0 | 22 |
1153cb6e2f89ac2d6c2be8ad3391a606c16659e4 | 4,180 | py | Python | gen_feas/gen_slide_feas.py | PingjunChen/ThyroidGeneralWSI | ee3adaa4c3aa7c56d3cc5bd7b44d99894578beee | [
"MIT"
] | 2 | 2020-05-02T16:37:13.000Z | 2020-05-04T20:44:17.000Z | gen_feas/gen_slide_feas.py | PingjunChen/frozen-thyroid-cls | ee3adaa4c3aa7c56d3cc5bd7b44d99894578beee | [
"MIT"
] | null | null | null | gen_feas/gen_slide_feas.py | PingjunChen/frozen-thyroid-cls | ee3adaa4c3aa7c56d3cc5bd7b44d99894578beee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os, sys
import numpy as np
import argparse, time
import torch
from pydaily import filesystem
from pyimg import combine
import openslide
import matplotlib.pyplot as plt
from skimage import io, transform
import deepdish as dd
import utils, patch_util
if __name__ == '__main__':
args = set_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device_id)
# load patch model
args.model_path = os.path.join(args.model_dir, args.model_type, args.model_name)
if not os.path.exists(args.model_path):
raise AssertionError("Model path does not exist")
ft_model = torch.load(args.model_path)
ft_model.cuda()
ft_model.eval()
# predict all patches
print("Prediction model is: {}".format(args.model_name))
predit_all_feas(model=ft_model, args=args)
| 41.8 | 104 | 0.67799 | # -*- coding: utf-8 -*-
import os, sys
import numpy as np
import argparse, time
import torch
from pydaily import filesystem
from pyimg import combine
import openslide
import matplotlib.pyplot as plt
from skimage import io, transform
import deepdish as dd
import utils, patch_util
def predict_slide_fea(slide_path, cls_model, save_dir, args):
file_fullname = os.path.basename(slide_path)
file_name = os.path.splitext(file_fullname)[0]
file_cat = os.path.basename(os.path.dirname(slide_path))
fea_save_dir = os.path.join(save_dir, file_cat)
if not os.path.exists(fea_save_dir):
os.makedirs(fea_save_dir)
fea_filepath = os.path.join(fea_save_dir, file_name + ".h5")
# print("Step 1: Split slide to patches")
split_arr, patch_list, wsi_dim, s_img, mask = utils.split_regions(
slide_path, args.img_level, args.cnt_level)
if len(split_arr) == 0:
return None
# # save mask overlay image to validate the accuracy of tissue localization
# mask_overlay = combine.blend_images(s_img, combine.graymask2rgb(mask), alpha=0.64)
# s_mask_overlay = transform.resize(mask_overlay, (int(mask.shape[0]*0.3), int(mask.shape[1]*0.3)))
# io.imsave(os.path.join(save_dir, file_name + ".png"), s_mask_overlay)
# print("Step 2: Generate features")
fea_dict = patch_util.gen_slide_feas(cls_model, split_arr, np.asarray(patch_list), wsi_dim, args)
# save features
dd.io.save(fea_filepath, fea_dict)
def predit_all_feas(model, args):
slide_path = os.path.join(args.slide_dir, args.dset)
slide_list = filesystem.find_ext_files(slide_path, "tiff")
print("There are {} slides in totoal.".format(len(slide_list)))
slide_list.sort()
fea_dir = os.path.join(args.fea_dir, args.model_type, args.dset)
print("Start processing...")
print("="*80)
slide_start = time.time()
for ind, slide_path in enumerate(slide_list):
slide_filename = os.path.splitext(os.path.basename(slide_path))[0]
slide_head = openslide.OpenSlide(slide_path)
print("Processing {}, width: {}, height: {}, {}/{}".format(
slide_filename, slide_head.dimensions[0], slide_head.dimensions[1], ind+1, len(slide_list)))
predict_slide_fea(slide_path, model, fea_dir, args)
print("="*80)
slide_elapsed = time.time() - slide_start
print("Time cost: " + time.strftime("%H:%M:%S", time.gmtime(slide_elapsed)))
print("Finish Prediction...")
def set_args():
parser = argparse.ArgumentParser(description="Settings for thyroid slide patch feature generation")
parser.add_argument('--device_id', type=str, default="5", help='which device')
parser.add_argument('--slide_dir', type=str, default="../data/CV04/Slides")
parser.add_argument('--fea_dir', type=str, default="../data/CV04/Feas")
parser.add_argument('--dset', type=str, default="val")
# patch model setting
parser.add_argument('--model_dir', type=str, default="../data/CV04/Models/PatchModels")
parser.add_argument('--model_type', type=str, default="resnet50")
parser.add_argument('--model_name', type=str, default="thyroid03-0.7670.pth")
parser.add_argument('--patch_size', type=int, default=224)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--img_level', type=int, default=2)
parser.add_argument('--cnt_level', type=int, default=3)
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = set_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device_id)
# load patch model
args.model_path = os.path.join(args.model_dir, args.model_type, args.model_name)
if not os.path.exists(args.model_path):
raise AssertionError("Model path does not exist")
ft_model = torch.load(args.model_path)
ft_model.cuda()
ft_model.eval()
# predict all patches
print("Prediction model is: {}".format(args.model_name))
predit_all_feas(model=ft_model, args=args)
| 3,286 | 0 | 69 |
ab3ebaea3f1a6d00029f5ca6400c93a7db38b8d6 | 602 | py | Python | tests/test_gdal.py | richardsheridan/imageio | f80f068329123fc3c164c522391969ac8eeb0dd4 | [
"BSD-2-Clause"
] | null | null | null | tests/test_gdal.py | richardsheridan/imageio | f80f068329123fc3c164c522391969ac8eeb0dd4 | [
"BSD-2-Clause"
] | null | null | null | tests/test_gdal.py | richardsheridan/imageio | f80f068329123fc3c164c522391969ac8eeb0dd4 | [
"BSD-2-Clause"
] | 1 | 2018-11-03T18:43:26.000Z | 2018-11-03T18:43:26.000Z | """ Test gdal plugin functionality.
"""
import pytest
import imageio
pytest.importorskip("osgeo", reason="gdal is not installed")
def test_gdal_reading(test_images):
"""Test reading gdal"""
filename = test_images / "geotiff.tif"
im = imageio.imread(filename, "gdal")
assert im.shape == (929, 699)
R = imageio.read(filename, "gdal")
assert R.format.name == "GDAL"
meta_data = R.get_meta_data()
assert "TIFFTAG_XRESOLUTION" in meta_data
# Fail
with pytest.raises(IndexError):
R.get_data(-1)
with pytest.raises(IndexError):
R.get_data(3)
| 22.296296 | 60 | 0.664452 | """ Test gdal plugin functionality.
"""
import pytest
import imageio
pytest.importorskip("osgeo", reason="gdal is not installed")
def test_gdal_reading(test_images):
"""Test reading gdal"""
filename = test_images / "geotiff.tif"
im = imageio.imread(filename, "gdal")
assert im.shape == (929, 699)
R = imageio.read(filename, "gdal")
assert R.format.name == "GDAL"
meta_data = R.get_meta_data()
assert "TIFFTAG_XRESOLUTION" in meta_data
# Fail
with pytest.raises(IndexError):
R.get_data(-1)
with pytest.raises(IndexError):
R.get_data(3)
| 0 | 0 | 0 |
d9ba8bca5b7327bbb7e6554d0a3849c186cc4ba9 | 1,623 | py | Python | inspiration/simplegallery/test/upload/variants/test_aws_uploader.py | Zenahr/simple-music-gallery | 2cf6e81208b721a91dcbf77e047c7f77182dd194 | [
"MIT"
] | 1 | 2020-07-03T17:21:01.000Z | 2020-07-03T17:21:01.000Z | simplegallery/test/upload/variants/test_aws_uploader.py | theemack/simple-photo-gallery | f5db98bca7a7443ea7a9172317811f446eff760c | [
"MIT"
] | 1 | 2020-06-20T12:13:00.000Z | 2020-06-20T15:32:03.000Z | inspiration/simplegallery/test/upload/variants/test_aws_uploader.py | Zenahr/simple-music-gallery | 2cf6e81208b721a91dcbf77e047c7f77182dd194 | [
"MIT"
] | null | null | null | import unittest
from unittest import mock
import os
import subprocess
from testfixtures import TempDirectory
from simplegallery.upload.uploader_factory import get_uploader
if __name__ == '__main__':
unittest.main()
| 37.744186 | 103 | 0.646334 | import unittest
from unittest import mock
import os
import subprocess
from testfixtures import TempDirectory
from simplegallery.upload.uploader_factory import get_uploader
class AWSUploaderTestCase(unittest.TestCase):
def test_no_location(self):
uploader = get_uploader('aws')
self.assertFalse(uploader.check_location(''))
@mock.patch('subprocess.run')
def test_upload_gallery(self, subprocess_run):
subprocess_run.return_value = subprocess.CompletedProcess([], returncode=0)
with TempDirectory() as tempdir:
# Setup mock file and uploader
tempdir.write('index.html', b'')
gallery_path = os.path.join(tempdir.path, 'index.html')
uploader = get_uploader('aws')
# Test upload to bucket
uploader.upload_gallery('s3://testbucket/path/', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
# Test upload to bucket without prefix
uploader.upload_gallery('testbucket/path/', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
# Test upload to bucket without trailing /
uploader.upload_gallery('s3://testbucket/path', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
if __name__ == '__main__':
unittest.main()
| 1,266 | 112 | 23 |
d3269fd3713d6d4b1c880425b840be5804eac931 | 154,090 | py | Python | discussion/happiness_dictionary.py | mm5110/PIC16A | e2dab91439c2627f6a47f4bf6d16de8ba5977fe8 | [
"MIT"
] | 10 | 2020-11-07T04:07:34.000Z | 2021-12-31T10:19:12.000Z | discussion/happiness_dictionary.py | mm5110/PIC16A | e2dab91439c2627f6a47f4bf6d16de8ba5977fe8 | [
"MIT"
] | 16 | 2021-02-03T22:35:01.000Z | 2021-05-24T21:28:56.000Z | discussion/happiness_dictionary.py | mm5110/PIC16A | e2dab91439c2627f6a47f4bf6d16de8ba5977fe8 | [
"MIT"
] | 19 | 2020-11-11T05:44:53.000Z | 2022-02-01T14:10:15.000Z |
happiness_dictionary={'laughter':8.5,
'happiness':8.44,
'love':8.42,
'happy':8.3,
'laughed':8.26,
'laugh':8.22,
'laughing':8.2,
'excellent':8.18,
'laughs':8.18,
'joy':8.16,
'successful':8.16,
'win':8.12,
'rainbow':8.1,
'smile':8.1,
'won':8.1,
'pleasure':8.08,
'smiled':8.08,
'rainbows':8.06,
'winning':8.04,
'celebration':8.02,
'enjoyed':8.02,
'healthy':8.02,
'music':8.02,
'celebrating':8,
'congratulations':8,
'weekend':8,
'celebrate':7.98,
'comedy':7.98,
'jokes':7.98,
'rich':7.98,
'victory':7.98,
'christmas':7.96,
'free':7.96,
'friendship':7.96,
'fun':7.96,
'holidays':7.96,
'loved':7.96,
'loves':7.96,
'loving':7.96,
'beach':7.94,
'hahaha':7.94,
'kissing':7.94,
'sunshine':7.94,
'beautiful':7.92,
'delicious':7.92,
'friends':7.92,
'funny':7.92,
'outstanding':7.92,
'paradise':7.92,
'sweetest':7.92,
'vacation':7.92,
'butterflies':7.92,
'freedom':7.9,
'flower':7.88,
'great':7.88,
'sunlight':7.88,
'sweetheart':7.88,
'sweetness':7.88,
'award':7.86,
'chocolate':7.86,
'hahahaha':7.86,
'heaven':7.86,
'peace':7.86,
'splendid':7.86,
'success':7.86,
'enjoying':7.84,
'kissed':7.84,
'attraction':7.82,
'celebrated':7.8,
'hero':7.8,
'hugs':7.8,
'positive':7.8,
'sun':7.8,
'birthday':7.78,
'blessed':7.78,
'fantastic':7.78,
'winner':7.78,
'delight':7.78,
'beauty':7.76,
'butterfly':7.76,
'entertainment':7.76,
'funniest':7.76,
'honesty':7.76,
'sky':7.76,
'smiles':7.76,
'succeed':7.76,
'wonderful':7.76,
'glorious':7.74,
'kisses':7.74,
'promotion':7.74,
'family':7.72,
'gift':7.72,
'humor':7.72,
'romantic':7.72,
'cupcakes':7.7,
'festival':7.7,
'hahahahaha':7.7,
'honour':7.7,
'relax':7.7,
'weekends':7.7,
'angel':7.68,
'b-day':7.68,
'bonus':7.68,
'brilliant':7.68,
'diamonds':7.68,
'holiday':7.68,
'lucky':7.68,
'mother':7.68,
'super':7.68,
'amazing':7.66,
'angels':7.66,
'enjoy':7.66,
'friend':7.66,
'friendly':7.66,
'mother\'s':7.66,
'profit':7.66,
'finest':7.66,
'bday':7.64,
'champion':7.64,
'grandmother':7.64,
'haha':7.64,
'kiss':7.64,
'kitten':7.64,
'miracle':7.64,
'mom':7.64,
'sweet':7.64,
'blessings':7.62,
'bright':7.62,
'cutest':7.62,
'entertaining':7.62,
'excited':7.62,
'excitement':7.62,
'joke':7.62,
'millionaire':7.62,
'prize':7.62,
'succeeded':7.62,
'successfully':7.62,
'winners':7.62,
'shines':7.6,
'awesome':7.6,
'genius':7.6,
'achievement':7.58,
'cake':7.58,
'cheers':7.58,
'exciting':7.58,
'goodness':7.58,
'hug':7.58,
'income':7.58,
'party':7.58,
'puppy':7.58,
'smiling':7.58,
'song':7.58,
'succeeding':7.58,
'tasty':7.58,
'victories':7.58,
'achieved':7.56,
'billion':7.56,
'cakes':7.56,
'easier':7.56,
'flowers':7.56,
'gifts':7.56,
'gold':7.56,
'merry':7.56,
'families':7.54,
'handsome':7.54,
'lovers':7.54,
'affection':7.53,
'candy':7.52,
'cute':7.52,
'diamond':7.52,
'earnings':7.52,
'interesting':7.52,
'peacefully':7.52,
'praise':7.52,
'relaxing':7.52,
'roses':7.52,
'saturdays':7.52,
'faithful':7.51,
'heavens':7.51,
'cherish':7.5,
'comfort':7.5,
'congrats':7.5,
'cupcake':7.5,
'earn':7.5,
'extraordinary':7.5,
'glory':7.5,
'hilarious':7.5,
'moonlight':7.5,
'optimistic':7.5,
'peaceful':7.5,
'romance':7.5,
'feast':7.49,
'attractive':7.48,
'glad':7.48,
'grandma':7.48,
'internet':7.48,
'pleasant':7.48,
'profits':7.48,
'smart':7.48,
'x-mas':7.48,
'babies':7.46,
'cheer':7.46,
'courage':7.46,
'enthusiasm':7.46,
'honest':7.46,
'loyal':7.46,
'opportunities':7.46,
'triumph':7.46,
'wow':7.46,
'jewels':7.46,
'forests':7.45,
'apple':7.44,
'dreams':7.44,
'fantasy':7.44,
'food':7.44,
'honey':7.44,
'miracles':7.44,
'sex':7.44,
'sing':7.44,
'starlight':7.44,
'thankful':7.44,
'wins':7.44,
'achieve':7.42,
'adored':7.42,
'cash':7.42,
'dances':7.42,
'gorgeous':7.42,
'grandchildren':7.42,
'incredible':7.42,
'lunch':7.42,
'mommy':7.42,
'parties':7.42,
'perfect':7.42,
'saturday':7.42,
'surprise':7.42,
'truth':7.42,
'blessing':7.4,
'creative':7.4,
'dinner':7.4,
'kindness':7.4,
'pleased':7.4,
'sexy':7.4,
'strength':7.4,
'thank':7.4,
'thanks':7.4,
'thanksgiving':7.4,
'treasure':7.4,
'valentine':7.4,
'riches':7.39,
'awarded':7.38,
'fabulous':7.38,
'grandfather':7.38,
'heavenly':7.38,
'hope':7.38,
'kids':7.38,
'magical':7.38,
'million':7.38,
'nice':7.38,
'sundays':7.38,
'wealth':7.38,
'fantasies':7.36,
'cares':7.36,
'dance':7.36,
'daughters':7.36,
'favorable':7.36,
'friend\'s':7.36,
'generosity':7.36,
'grateful':7.36,
'inspired':7.36,
'mothers':7.36,
'parents':7.36,
'valentine\'s':7.36,
'intelligent':7.35,
'liberation':7.35,
'melody':7.35,
'wonderland':7.35,
'beloved':7.34,
'caring':7.34,
'homemade':7.34,
'inspiring':7.34,
'movies':7.34,
'precious':7.34,
'respect':7.34,
'satisfaction':7.34,
'satisfy':7.34,
'wedding':7.34,
'accomplished':7.32,
'adorable':7.32,
'championship':7.32,
'comfortable':7.32,
'cuddle':7.32,
'games':7.32,
'grandson':7.32,
'life':7.32,
'lovely':7.32,
'pretty':7.32,
'proud':7.32,
'rose':7.32,
'united':7.32,
'fruits':7.31,
'adventure':7.3,
'couple':7.3,
'dollars':7.3,
'eating':7.3,
'fortune':7.3,
'generous':7.3,
'golden':7.3,
'hahah':7.3,
'hooray':7.3,
'intelligence':7.3,
'lover':7.3,
'luxury':7.3,
'money':7.3,
'passion':7.3,
'prosperity':7.3,
'remarkable':7.3,
'sweetie':7.3,
'valentines':7.3,
'educated':7.29,
'gently':7.29,
'baby':7.28,
'books':7.28,
'bride':7.28,
'cherished':7.28,
'cookies':7.28,
'dessert':7.28,
'employed':7.28,
'glow':7.28,
'god':7.28,
'great-grandchildren':7.28,
'helped':7.28,
'independence':7.28,
'likes':7.28,
'luckily':7.28,
'moon':7.28,
'perfectly':7.28,
'satisfied':7.28,
'sunday':7.28,
'juicy':7.27,
'championships':7.26,
'divine':7.26,
'dreaming':7.26,
'foods':7.26,
'fresh':7.26,
'gladly':7.26,
'greatest':7.26,
'hearts':7.26,
'luck':7.26,
'millions':7.26,
'musicians':7.26,
'play':7.26,
'progress':7.26,
'savings':7.26,
'appreciation':7.24,
'bliss':7.24,
'bloom':7.24,
'book':7.24,
'child':7.24,
'companion':7.24,
'computer':7.24,
'gardens':7.24,
'gentle':7.24,
'hahahah':7.24,
'helpful':7.24,
'impressed':7.24,
'kind':7.24,
'knowledge':7.24,
'liberty':7.24,
'mama':7.24,
'nature':7.24,
'pal':7.24,
'passionate':7.24,
'promoted':7.24,
'reward':7.24,
'warmth':7.24,
'xmas':7.24,
'danced':7.22,
'amazed':7.22,
'appreciate':7.22,
'brother':7.22,
'confidence':7.22,
'darling':7.22,
'encouraging':7.22,
'energy':7.22,
'films':7.22,
'garden':7.22,
'graduated':7.22,
'guitar':7.22,
'health':7.22,
'heart':7.22,
'honor':7.22,
'like':7.22,
'musical':7.22,
'pets':7.22,
'relaxed':7.22,
'salary':7.22,
'star':7.22,
'sweeter':7.22,
'trust':7.22,
'yummy':7.22,
'ecstasy':7.2,
'eternal':7.2,
'approved':7.2,
'benefits':7.2,
'cartoon':7.2,
'comforted':7.2,
'cool':7.2,
'discount':7.2,
'good':7.2,
'google':7.2,
'ladies':7.2,
'libraries':7.2,
'luv':7.2,
'perfection':7.2,
'presents':7.2,
'prizes':7.2,
'special':7.2,
'wishes':7.2,
'alive':7.18,
'awards':7.18,
'bed':7.18,
'best':7.18,
'coffee':7.18,
'comfy':7.18,
'fiesta':7.18,
'genuine':7.18,
'helping':7.18,
'imagine':7.18,
'leisure':7.18,
'meal':7.18,
'promise':7.18,
'respected':7.18,
'rest':7.18,
'travel':7.18,
'abundant':7.16,
'attracted':7.16,
'devoted':7.16,
'favourite':7.16,
'granddaughter':7.16,
'heroes':7.16,
'ideas':7.16,
'liked':7.16,
'oceans':7.16,
'pizza':7.16,
'skies':7.16,
'sleep':7.16,
'spring':7.16,
'sunset':7.16,
'welcome':7.16,
'1st':7.14,
'adoring':7.14,
'brighter':7.14,
'children\'s':7.14,
'cure':7.14,
'fireworks':7.14,
'home':7.14,
'honored':7.14,
'journey':7.14,
'lovin':7.14,
'opportunity':7.14,
'paid':7.14,
'parks':7.14,
'playing':7.14,
'shine':7.14,
'strawberry':7.14,
'summertime':7.14,
'wealthy':7.14,
'appreciated':7.12,
'artistic':7.12,
'birth':7.12,
'children':7.12,
'fruit':7.12,
'inspire':7.12,
'juice':7.12,
'laptop':7.12,
'partners':7.12,
'son':7.12,
'stronger':7.12,
'superman':7.12,
'tree':7.12,
'valuable':7.12,
'woman\'s':7.12,
'women':7.12,
'glowing':7.1,
'admiration':7.1,
'carnival':7.1,
'computers':7.1,
'confident':7.1,
'cookie':7.1,
'cutie':7.1,
'dearest':7.1,
'dream':7.1,
'freely':7.1,
'fridays':7.1,
'plants':7.1,
'quality':7.1,
'rabbit':7.1,
'resort':7.1,
'shopping':7.1,
'sincere':7.1,
'snack':7.1,
'stars':7.1,
'toys':7.1,
'useful':7.1,
'wise':7.1,
'yum':7.1,
'desirable':7.08,
'sparkle':7.08,
'bless':7.08,
'comic':7.08,
'cooking':7.08,
'dancing':7.08,
'earned':7.08,
'equality':7.08,
'faith':7.08,
'graduate':7.08,
'improvements':7.08,
'memories':7.08,
'park':7.08,
'pet':7.08,
'powerful':7.08,
'princess':7.08,
'qualities':7.08,
'thrill':7.08,
'TRUE':7.08,
'wonder':7.08,
'everlasting':7.06,
'mamma':7.06,
'caress':7.06,
'charm':7.06,
'clever':7.06,
'father':7.06,
'grand':7.06,
'hehehe':7.06,
'idea':7.06,
'pearl':7.06,
'pictures':7.06,
'restaurant':7.06,
'sandwich':7.06,
'sharing':7.06,
'strong':7.06,
'talent':7.06,
'talented':7.06,
'tenderness':7.06,
'weddings':7.06,
'dove':7.04,
'awsome':7.04,
'cherry':7.04,
'daughter':7.04,
'eat':7.04,
'favorite':7.04,
'girlfriend':7.04,
'hoping':7.04,
'impressive':7.04,
'loyalty':7.04,
'parent':7.04,
'relationship':7.04,
'safe':7.04,
'scholarship':7.04,
'shining':7.04,
'sunrise':7.04,
'yoga':7.04,
'respects':7.02,
'fairy':7.02,
'humanity':7.02,
'productivity':7.02,
'brave':7.02,
'colours':7.02,
'correct':7.02,
'dad':7.02,
'daddy':7.02,
'dollar':7.02,
'easily':7.02,
'fans':7.02,
'goal':7.02,
'hawaii':7.02,
'honestly':7.02,
'inspiration':7.02,
'olympics':7.02,
'saints':7.02,
'sleeping':7.02,
'wisdom':7.02,
'believed':7,
'better':7,
'color':7,
'colors':7,
'dad\'s':7,
'determination':7,
'discovered':7,
'gentlemen':7,
'girl':7,
'harmony':7,
'hello':7,
'hopes':7,
'noble':7,
'praised':7,
'reliable':7,
'trip':7,
'agreed':6.98,
'approval':6.98,
'brothers':6.98,
'concerts':6.98,
'cooperation':6.98,
'encouraged':6.98,
'giving':6.98,
'goals':6.98,
'ideal':6.98,
'intellectual':6.98,
'invitation':6.98,
'marry':6.98,
'musician':6.98,
'outdoors':6.98,
'photography':6.98,
'plenty':6.98,
'rome':6.98,
'trees':6.98,
'trips':6.98,
'unique':6.98,
'wildlife':6.98,
'lullaby':6.98,
'thrills':6.98,
'abroad':6.96,
'bath':6.96,
'benefit':6.96,
'birds':6.96,
'dads':6.96,
'elegant':6.96,
'eternally':6.96,
'fair':6.96,
'fancy':6.96,
'great-grandfather':6.96,
'imagination':6.96,
'improving':6.96,
'mountains':6.96,
'ocean':6.96,
'pancakes':6.96,
'photograph':6.96,
'praying':6.96,
'present':6.96,
'reunion':6.96,
'safely':6.96,
'saving':6.96,
'singing':6.96,
'songs':6.96,
'sunny':6.96,
'terrific':6.96,
'theater':6.96,
'vanilla':6.96,
'adore':6.96,
'gentleman':6.96,
'autumn':6.94,
'cinema':6.94,
'college':6.94,
'concert':6.94,
'correctly':6.94,
'cozy':6.94,
'dear':6.94,
'earning':6.94,
'earns':6.94,
'gardening':6.94,
'girls':6.94,
'massage':6.94,
'outdoor':6.94,
'photos':6.94,
'piano':6.94,
'sea':6.94,
'trusted':6.94,
'albums':6.92,
'dignity':6.92,
'favored':6.92,
'fitness':6.92,
'game':6.92,
'healing':6.92,
'learned':6.92,
'learning':6.92,
'prayers':6.92,
'promote':6.92,
'secure':6.92,
'spa':6.92,
'unity':6.92,
'wish':6.92,
'youtube':6.92,
'favour':6.92,
'clean':6.9,
'dynamic':6.9,
'encourage':6.9,
'infant':6.9,
'jewelry':6.9,
'necklace':6.9,
'paintings':6.9,
'stability':6.9,
'voyage':6.9,
'worthy':6.9,
'fulfill':6.9,
'eternity':6.9,
'accuracy':6.88,
'bookstores':6.88,
'breeze':6.88,
'bunny':6.88,
'cheese':6.88,
'comics':6.88,
'donated':6.88,
'easter':6.88,
'education':6.88,
'email':6.88,
'farmer':6.88,
'female':6.88,
'flavor':6.88,
'friday':6.88,
'moms':6.88,
'photo':6.88,
'pillow':6.88,
'pure':6.88,
'saved':6.88,
'shakespeare':6.88,
'survived':6.88,
'taste':6.88,
'valued':6.88,
'vitamin':6.88,
'infants':6.88,
'silk':6.88,
'dreamed':6.87,
'\#music':6.86,
'acceptance':6.86,
'banana':6.86,
'breakfast':6.86,
'cooperative':6.86,
'dancer':6.86,
'grace':6.86,
'greatly':6.86,
'guarantee':6.86,
'improved':6.86,
'improvement':6.86,
'independent':6.86,
'liking':6.86,
'paris':6.86,
'pasta':6.86,
'photographs':6.86,
'recipes':6.86,
'relationships':6.86,
'relief':6.86,
'sailing':6.86,
'science':6.86,
'seas':6.86,
'toast':6.86,
'truly':6.86,
'platinum':6.86,
'superstar':6.86,
'understands':6.86,
'accurately':6.84,
'advantage':6.84,
'belonging':6.84,
'buddy':6.84,
'childhood':6.84,
'daylight':6.84,
'discover':6.84,
'forgiveness':6.84,
'great-grandmother':6.84,
'hopefully':6.84,
'horses':6.84,
'interested':6.84,
'kid':6.84,
'live':6.84,
'lol':6.84,
'movie':6.84,
'popularity':6.84,
'solution':6.84,
'swim':6.84,
'toy':6.84,
'understanding':6.84,
'universe':6.84,
'woman':6.84,
'woohoo':6.84,
'rivers':6.84,
'sail':6.84,
'cared':6.83,
'active':6.82,
'artists':6.82,
'babe':6.82,
'believes':6.82,
'born':6.82,
'champagne':6.82,
'compassion':6.82,
'completed':6.82,
'create':6.82,
'dedicated':6.82,
'experienced':6.82,
'fathers':6.82,
'first':6.82,
'gains':6.82,
'heal':6.82,
'new':6.82,
'significant':6.82,
'singer':6.82,
'surprisingly':6.82,
'young':6.82,
'mansion':6.82,
'prevail':6.82,
'qualified':6.81,
'air':6.8,
'amazon':6.8,
'animal':6.8,
'bedroom':6.8,
'camera':6.8,
'cream':6.8,
'dreamer':6.8,
'forgiven':6.8,
'highest':6.8,
'horse':6.8,
'magic':6.8,
'manners':6.8,
'naturally':6.8,
'novels':6.8,
'performers':6.8,
'pies':6.8,
'protect':6.8,
'santa':6.8,
'shared':6.8,
'smooth':6.8,
'together':6.8,
'uncle':6.8,
'efficient':6.8,
'elevated':6.8,
'cafe':6.78,
'coke':6.78,
'completion':6.78,
'coolest':6.78,
'creation':6.78,
'dogs':6.78,
'effectiveness':6.78,
'esteemed':6.78,
'finished':6.78,
'glee':6.78,
'green':6.78,
'heartbeat':6.78,
'island':6.78,
'jukebox':6.78,
'medal':6.78,
'mom\'s':6.78,
'museums':6.78,
'painting':6.78,
'pie':6.78,
'pool':6.78,
'reading':6.78,
'real':6.78,
'ruby':6.78,
'share':6.78,
'sons':6.78,
'traveling':6.78,
'variety':6.78,
'wonders':6.78,
'worth':6.78,
'guaranteed':6.78,
'raindrops':6.78,
'visions':6.78,
'pearls':6.77,
'america':6.76,
'easy':6.76,
'effective':6.76,
'future':6.76,
'humans':6.76,
'intimate':6.76,
'married':6.76,
'muffin':6.76,
'papa':6.76,
'plus':6.76,
'popcorn':6.76,
'savior':6.76,
'seasons':6.76,
'shop':6.76,
'sister':6.76,
'style':6.76,
'supporter':6.76,
'switzerland':6.76,
'tenderly':6.76,
'top':6.76,
'oxygen':6.76,
'rhyme':6.76,
'allright':6.74,
'american':6.74,
'artist':6.74,
'capable':6.74,
'complete':6.74,
'convenient':6.74,
'courtesy':6.74,
'donate':6.74,
'drinks':6.74,
'father\'s':6.74,
'fine':6.74,
'focused':6.74,
'guitars':6.74,
'hi':6.74,
'integrity':6.74,
'justice':6.74,
'lake':6.74,
'mankind':6.74,
'mentor':6.74,
'merit':6.74,
'performance':6.74,
'plant':6.74,
'prepared':6.74,
'raise':6.74,
'romeo':6.74,
'shiny':6.74,
'sugar':6.74,
'surprising':6.74,
'technology':6.74,
'treat':6.74,
'university':6.74,
'wishing':6.74,
'yes':6.74,
'desires':6.73,
'wished':6.73,
'4-bedroom':6.72,
'attract':6.72,
'bike':6.72,
'car':6.72,
'civilization':6.72,
'classy':6.72,
'confirmed':6.72,
'costumes':6.72,
'creating':6.72,
'culture':6.72,
'finish':6.72,
'gallery':6.72,
'knowing':6.72,
'lifelong':6.72,
'momma':6.72,
'neat':6.72,
'niece':6.72,
'online':6.72,
'orchestra':6.72,
'plays':6.72,
'revenue':6.72,
'shower':6.72,
'spiritual':6.72,
'surprised':6.72,
'tremendous':6.72,
'values':6.72,
'villages':6.72,
'warm':6.72,
'doggy':6.71,
'hallelujah':6.71,
'candle':6.71,
'secured':6.71,
'valid':6.71,
'agree':6.7,
'anniversary':6.7,
'antiques':6.7,
'believe':6.7,
'bucks':6.7,
'cruise':6.7,
'dancers':6.7,
'dine':6.7,
'dog':6.7,
'florida':6.7,
'grandsons':6.7,
'grants':6.7,
'hired':6.7,
'learn':6.7,
'marriage':6.7,
'mum':6.7,
'partner':6.7,
'productive':6.7,
'rockin':6.7,
'teaches':6.7,
'treats':6.7,
'tv':6.7,
'water':6.7,
'grin':6.69,
'invention':6.69,
'virtues':6.69,
'brains':6.69,
'sensation':6.68,
'ability':6.68,
'ace':6.68,
'animals':6.68,
'bake':6.68,
'bridegroom':6.68,
'desire':6.68,
'famous':6.68,
'forest':6.68,
'fountain':6.68,
'goodmorning':6.68,
'greater':6.68,
'grow':6.68,
'heritage':6.68,
'landscape':6.68,
'liberties':6.68,
'living':6.68,
'lyrics':6.68,
'mercy':6.68,
'museum':6.68,
'novel':6.68,
'palace':6.68,
'pianist':6.68,
'potential':6.68,
'power':6.68,
'privilege':6.68,
'proceed':6.68,
'promised':6.68,
'river':6.68,
'scotland':6.68,
'shares':6.68,
'skating':6.68,
'thanx':6.68,
'theatre':6.68,
'tours':6.68,
'well':6.68,
'acceptable':6.67,
'possibilities':6.67,
'accurate':6.67,
'candles':6.67,
'approve':6.66,
'assets':6.66,
'aunt':6.66,
'career':6.66,
'charms':6.66,
'communicate':6.66,
'competent':6.66,
'currency':6.66,
'dedication':6.66,
'dvd':6.66,
'eligible':6.66,
'fan':6.66,
'firefighters':6.66,
'greet':6.66,
'motivation':6.66,
'nieces':6.66,
'personality':6.66,
'powers':6.66,
'raises':6.66,
'sculpture':6.66,
'survivors':6.66,
'tea':6.66,
'television':6.66,
'tour':6.66,
'pony':6.65,
'rhythm':6.65,
'bird':6.64,
'care':6.64,
'cat':6.64,
'cook':6.64,
'corn':6.64,
'deposits':6.64,
'expert':6.64,
'high':6.64,
'holy':6.64,
'invite':6.64,
'leading':6.64,
'photographer':6.64,
'picture':6.64,
'promising':6.64,
'recover':6.64,
'recovered':6.64,
'recovery':6.64,
'salad':6.64,
'shops':6.64,
'solutions':6.64,
'sparks':6.64,
'sport':6.64,
'supreme':6.64,
'theaters':6.64,
'tunes':6.64,
'unite':6.64,
'volunteers':6.64,
'simplicity':6.62,
'attained':6.62,
'book\'s':6.62,
'cameras':6.62,
'chatting':6.62,
'crown':6.62,
'disney':6.62,
'dresses':6.62,
'heartfelt':6.62,
'homes':6.62,
'husband':6.62,
'immortal':6.62,
'invest':6.62,
'kitty':6.62,
'offer':6.62,
'organized':6.62,
'performances':6.62,
'perfume':6.62,
'pray':6.62,
'rescue':6.62,
'restaurants':6.62,
'salaries':6.62,
'sisters':6.62,
'slept':6.62,
'steak':6.62,
'stories':6.62,
'varieties':6.62,
'vision':6.62,
'wife':6.62,
'youth':6.62,
'zoo':6.62,
'stimulation':6.61,
'touching':6.61,
'furnished':6.6,
'suitable':6.6,
'album':6.6,
'amour':6.6,
'art':6.6,
'beam':6.6,
'captain':6.6,
'certainty':6.6,
'child\'s':6.6,
'clothing':6.6,
'conservation':6.6,
'desired':6.6,
'dress':6.6,
'favorited':6.6,
'females':6.6,
'growth':6.6,
'helps':6.6,
'highly':6.6,
'ideals':6.6,
'lady':6.6,
'lime':6.6,
'popular':6.6,
'proposal':6.6,
'protected':6.6,
'relatives':6.6,
'rhymes':6.6,
'singers':6.6,
'specialty':6.6,
'spirit':6.6,
'starry':6.6,
'stroll':6.6,
'supported':6.6,
'therapeutic':6.6,
'unlimited':6.6,
'visiting':6.6,
'expressions':6.6,
'efficiency':6.59,
'sleeps':6.59,
'vocals':6.59,
'impress':6.58,
'sympathetic':6.58,
'advance':6.58,
'advanced':6.58,
'arts':6.58,
'available':6.58,
'baking':6.58,
'classic':6.58,
'classical':6.58,
'colour':6.58,
'drawing':6.58,
'english':6.58,
'exhibition':6.58,
'expecting':6.58,
'fish':6.58,
'goodnight':6.58,
'invented':6.58,
'islands':6.58,
'language':6.58,
'majesty':6.58,
'me':6.58,
'preferred':6.58,
'radio':6.58,
'ready':6.58,
'relative':6.58,
'sale':6.58,
'solve':6.58,
'springs':6.58,
'student':6.58,
'symphony':6.58,
'traditions':6.58,
'understood':6.58,
'upgrade':6.58,
'usa':6.58,
'saviour':6.57,
'skill':6.57,
'belonged':6.56,
'muscles':6.56,
'able':6.56,
'ahaha':6.56,
'butter':6.56,
'circus':6.56,
'cosmic':6.56,
'coupon':6.56,
'diploma':6.56,
'donations':6.56,
'e-mail':6.56,
'encore':6.56,
'film':6.56,
'guidance':6.56,
'illustration':6.56,
'increase':6.56,
'international':6.56,
'ipod':6.56,
'morning':6.56,
'natural':6.56,
'okay':6.56,
'preservation':6.56,
'progressive':6.56,
'protection':6.56,
'raised':6.56,
'showers':6.56,
'tacos':6.56,
'teach':6.56,
'traveler':6.56,
'understand':6.56,
'universities':6.56,
'worldwide':6.56,
'privileges':6.55,
'accepted':6.54,
'adoption':6.54,
'asset':6.54,
'blanket':6.54,
'cats':6.54,
'cleaned':6.54,
'coin':6.54,
'cooked':6.54,
'crystal':6.54,
'dawn':6.54,
'dearly':6.54,
'discovery':6.54,
'done':6.54,
'eager':6.54,
'emails':6.54,
'exercises':6.54,
'found':6.54,
'give':6.54,
'groovy':6.54,
'haven':6.54,
'invited':6.54,
'iphone':6.54,
'moral':6.54,
'nephew':6.54,
'orange':6.54,
'overcome':6.54,
'pays':6.54,
'potato':6.54,
'premiere':6.54,
'pride':6.54,
'receiving':6.54,
'recognition':6.54,
'reindeer':6.54,
'right':6.54,
'rising':6.54,
'save':6.54,
'scholars':6.54,
'shelter':6.54,
'solar':6.54,
'spontaneous':6.54,
'tasting':6.54,
'ultimate':6.54,
'visit':6.54,
'advantages':6.53,
'sailed':6.53,
'feather':6.52,
'ambitious':6.52,
'baker':6.52,
'brain':6.52,
'champ':6.52,
'communication':6.52,
'compensation':6.52,
'ease':6.52,
'ethics':6.52,
'extra':6.52,
'fries':6.52,
'growing':6.52,
'guest':6.52,
'incredibly':6.52,
'initiative':6.52,
'jesus':6.52,
'lips':6.52,
'literature':6.52,
'nights':6.52,
'phenomenon':6.52,
'planet':6.52,
'poem':6.52,
'poet':6.52,
'prefer':6.52,
'read':6.52,
'sang':6.52,
'soup':6.52,
'surf':6.52,
'swimming':6.52,
'videos':6.52,
'wings':6.52,
'world':6.52,
'amore':6.51,
'bounce':6.51,
'cultures':6.51,
'eden':6.51,
'interaction':6.51,
'mercedes':6.51,
'velvet':6.51,
'balanced':6.51,
'agriculture':6.5,
'allies':6.5,
'americans':6.5,
'bells':6.5,
'chips':6.5,
'contribute':6.5,
'couples':6.5,
'cousins':6.5,
'deals':6.5,
'determined':6.5,
'eaten':6.5,
'fame':6.5,
'gives':6.5,
'hire':6.5,
'innocence':6.5,
'ipad':6.5,
'leadership':6.5,
'legend':6.5,
'lounge':6.5,
'mature':6.5,
'newest':6.5,
'newly':6.5,
'performing':6.5,
'receive':6.5,
'recipe':6.5,
'roast':6.5,
'starting':6.5,
'stunning':6.5,
'tales':6.5,
'elder':6.49,
'grows':6.49,
'herb':6.49,
'illustrations':6.49,
'rays':6.49,
'relevant':6.49,
'sanity':6.49,
'acoustic':6.48,
'always':6.48,
'answers':6.48,
'bible':6.48,
'boost':6.48,
'clap':6.48,
'dining':6.48,
'electronics':6.48,
'exclusive':6.48,
'family\'s':6.48,
'gathering':6.48,
'hehe':6.48,
'humble':6.48,
'information':6.48,
'italian':6.48,
'library':6.48,
'mate':6.48,
'modern':6.48,
'offers':6.48,
'paperbacks':6.48,
'perform':6.48,
'poems':6.48,
'potatoes':6.48,
'prayer':6.48,
'pumpkin':6.48,
'restored':6.48,
'rights':6.48,
'scholar':6.48,
'screenplay':6.48,
'shopper':6.48,
'sings':6.48,
'soft':6.48,
'starbucks':6.48,
'story':6.48,
'supporting':6.48,
'video':6.48,
'instrumental':6.48,
'backyard':6.47,
'drums':6.47,
'virtue':6.47,
'activities':6.46,
'athletic':6.46,
'clothes':6.46,
'cultivated':6.46,
'forever':6.46,
'goods':6.46,
'grass':6.46,
'higher':6.46,
'literary':6.46,
'london':6.46,
'memory':6.46,
'mint':6.46,
'nephews':6.46,
'prime':6.46,
'prospect':6.46,
'reception':6.46,
'recommended':6.46,
'research':6.46,
'resource':6.46,
'resources':6.46,
'riverside':6.46,
'rocking':6.46,
'scored':6.46,
'talking':6.46,
'believer':6.46,
'functioning':6.46,
'poets':6.46,
'boats':6.45,
'remedy':6.45,
'tender':6.45,
'aaah':6.44,
'beatles':6.44,
'chance':6.44,
'coast':6.44,
'draw':6.44,
'earth':6.44,
'eats':6.44,
'effectively':6.44,
'familiar':6.44,
'fast':6.44,
'forgive':6.44,
'gained':6.44,
'graphics':6.44,
'improve':6.44,
'increases':6.44,
'infinite':6.44,
'languages':6.44,
'likely':6.44,
'nap':6.44,
'philosophy':6.44,
'phone':6.44,
'prince':6.44,
'princes':6.44,
'professional':6.44,
'revival':6.44,
'rice':6.44,
'rides':6.44,
'satisfactory':6.44,
'scientific':6.44,
'scoring':6.44,
'sis':6.44,
'soccer':6.44,
'supermarkets':6.44,
'support':6.44,
'teachers':6.44,
'teaching':6.44,
'wage':6.44,
'whale':6.44,
'wink':6.44,
'wit':6.44,
'accept':6.42,
'assist':6.42,
'band':6.42,
'chat':6.42,
'composer':6.42,
'contribution':6.42,
'cousin':6.42,
'curves':6.42,
'dates':6.42,
'delivered':6.42,
'environmental':6.42,
'evening':6.42,
'feed':6.42,
'fest':6.42,
'gaming':6.42,
'india':6.42,
'interests':6.42,
'jazz':6.42,
'novelist':6.42,
'panties':6.42,
'partnership':6.42,
'party\'s':6.42,
'portrait':6.42,
'remember':6.42,
'residence':6.42,
'shore':6.42,
'simply':6.42,
'stream':6.42,
'traveled':6.42,
'wine':6.42,
'wondered':6.42,
'farming':6.42,
'hats':6.41,
'hearted':6.41,
'1980s':6.4,
'actress':6.4,
'adopt':6.4,
'altogether':6.4,
'architecture':6.4,
'australia':6.4,
'baked':6.4,
'buying':6.4,
'ceremony':6.4,
'charity':6.4,
'chicken':6.4,
'chorus':6.4,
'consciousness':6.4,
'cultivation':6.4,
'dating':6.4,
'deserve':6.4,
'destination':6.4,
'documentary':6.4,
'drawings':6.4,
'educational':6.4,
'electronic':6.4,
'equally':6.4,
'europe':6.4,
'floating':6.4,
'futures':6.4,
'gain':6.4,
'generations':6.4,
'gmail':6.4,
'hills':6.4,
'increasing':6.4,
'kidding':6.4,
'launch':6.4,
'light':6.4,
'mountain':6.4,
'participate':6.4,
'pics':6.4,
'playin':6.4,
'poetry':6.4,
'possibility':6.4,
'provide':6.4,
'resolved':6.4,
'shores':6.4,
'studies':6.4,
'summer':6.4,
'tennis':6.4,
'touch':6.4,
'touched':6.4,
'tradition':6.4,
'twins':6.4,
'visits':6.4,
'wages':6.4,
'waves':6.4,
'willing':6.4,
'younger':6.4,
'exercised':6.39,
'enabled':6.39,
'greeks':6.39,
'purely':6.39,
'seeds':6.39,
'sixteen':6.39,
'softly':6.39,
'cradle':6.38,
'80\'s':6.38,
'americas':6.38,
'arose':6.38,
'bigger':6.38,
'boyfriend':6.38,
'breath':6.38,
'committed':6.38,
'contributing':6.38,
'craft':6.38,
'designers':6.38,
'development':6.38,
'distinction':6.38,
'faster':6.38,
'functional':6.38,
'giveaway':6.38,
'increased':6.38,
'lamb':6.38,
'leader':6.38,
'lottery':6.38,
'maximum':6.38,
'meet':6.38,
'neighborhood':6.38,
'ownership':6.38,
'painter':6.38,
'played':6.38,
'preserve':6.38,
'purchased':6.38,
'queens':6.38,
'reasonable':6.38,
'revenues':6.38,
'rocket':6.38,
'sails':6.38,
'saves':6.38,
'score':6.38,
'seeing':6.38,
'silver':6.38,
'skills':6.38,
'sung':6.38,
'tasted':6.38,
'tastes':6.38,
'thinks':6.38,
'thought':6.38,
'touches':6.38,
'we':6.38,
'agricultural':6.38,
'belle':6.37,
'explore':6.37,
'sketch':6.37,
'voluntary':6.37,
'acquire':6.36,
'april':6.36,
'architect':6.36,
'broadway':6.36,
'calm':6.36,
'climbed':6.36,
'colleagues':6.36,
'curious':6.36,
'definite':6.36,
'democracy':6.36,
'deposit':6.36,
'developed':6.36,
'distinguished':6.36,
'dressed':6.36,
'drink':6.36,
'employment':6.36,
'farms':6.36,
'fashion':6.36,
'gravy':6.36,
'guiding':6.36,
'imagined':6.36,
'innocent':6.36,
'instantly':6.36,
'interest':6.36,
'justified':6.36,
'logical':6.36,
'mail':6.36,
'maintained':6.36,
'mario':6.36,
'mobile':6.36,
'mp3':6.36,
'obtained':6.36,
'original':6.36,
'patience':6.36,
'performed':6.36,
'please':6.36,
'prayed':6.36,
'rain':6.36,
'rational':6.36,
'relation':6.36,
'rings':6.36,
'rise':6.36,
'rudolph':6.36,
'teacher':6.36,
'technologies':6.36,
'value':6.36,
'vegas':6.36,
'volunteer':6.36,
'wifi':6.36,
'revealed':6.35,
'branches':6.35,
'existed':6.35,
'spotlight':6.35,
'bread':6.34,
'castle':6.34,
'cheddar':6.34,
'clouds':6.34,
'clubs':6.34,
'colleges':6.34,
'completely':6.34,
'connected':6.34,
'december':6.34,
'dew':6.34,
'employ':6.34,
'exists':6.34,
'expedition':6.34,
'experience':6.34,
'farmers':6.34,
'firefox':6.34,
'football':6.34,
'grant':6.34,
'hiring':6.34,
'hollywood':6.34,
'house':6.34,
'illustrated':6.34,
'images':6.34,
'jeans':6.34,
'largest':6.34,
'linguistic':6.34,
'lord':6.34,
'purchase':6.34,
'received':6.34,
'released':6.34,
'saint':6.34,
'scientists':6.34,
'september':6.34,
'soon':6.34,
'soul':6.34,
'soundtrack':6.34,
'studio':6.34,
'tickets':6.34,
'wave':6.34,
'continuity':6.33,
'equilibrium':6.33,
'activity':6.32,
'agreement':6.32,
'amor':6.32,
'arrival':6.32,
'arrive':6.32,
'asian':6.32,
'bbq':6.32,
'bedtime':6.32,
'berry':6.32,
'brunch':6.32,
'commitment':6.32,
'date':6.32,
'deal':6.32,
'democratic':6.32,
'design':6.32,
'designer':6.32,
'devotion':6.32,
'experiences':6.32,
'fly':6.32,
'foxy':6.32,
'france':6.32,
'handy':6.32,
'importance':6.32,
'important':6.32,
'jamaica':6.32,
'jobs':6.32,
'june':6.32,
'kin':6.32,
'lights':6.32,
'mornings':6.32,
'newspaper':6.32,
'offering':6.32,
'organic':6.32,
'parade':6.32,
'pink':6.32,
'published':6.32,
'reader':6.32,
'remembered':6.32,
'resolve':6.32,
'ring':6.32,
'rofl':6.32,
'selected':6.32,
'snow':6.32,
'streams':6.32,
'sufficient':6.32,
'sufficiently':6.32,
'sure':6.32,
'universal':6.32,
'unlocked':6.32,
'visitors':6.32,
'waters':6.32,
'women\'s':6.32,
'worship':6.32,
'writers':6.32,
'assembled':6.31,
'chickens':6.31,
'wheat':6.31,
'connections':6.31,
'scent':6.31,
'volumes':6.31,
'whistle':6.31,
'absolutely':6.3,
'atmosphere':6.3,
'belongs':6.3,
'bought':6.3,
'chess':6.3,
'christian':6.3,
'clear':6.3,
'clearer':6.3,
'commonwealth':6.3,
'conversations':6.3,
'designed':6.3,
'downloaded':6.3,
'earrings':6.3,
'engineer':6.3,
'epic':6.3,
'exercise':6.3,
'expansion':6.3,
'feeding':6.3,
'flowing':6.3,
'headphones':6.3,
'indians':6.3,
'joined':6.3,
'lipstick':6.3,
'metropolitan':6.3,
'mine':6.3,
'myself':6.3,
'paint':6.3,
'painted':6.3,
'plane':6.3,
'produced':6.3,
'protecting':6.3,
'reasoning':6.3,
'relations':6.3,
'salvation':6.3,
'sciences':6.3,
'sense':6.3,
'software':6.3,
'suite':6.3,
'surplus':6.3,
'swing':6.3,
'visited':6.3,
'cheeks':6.29,
'observation':6.29,
'calcium':6.29,
'conceived':6.29,
'rum':6.29,
'amigo':6.28,
'babes':6.28,
'begin':6.28,
'breathe':6.28,
'bridegroom\'s':6.28,
'buy':6.28,
'community':6.28,
'cooler':6.28,
'country':6.28,
'disco':6.28,
'emerging':6.28,
'england':6.28,
'experts':6.28,
'fairly':6.28,
'fix':6.28,
'founded':6.28,
'globe':6.28,
'honorary':6.28,
'hoped':6.28,
'introduced':6.28,
'lead':6.28,
'listening':6.28,
'lots':6.28,
'market':6.28,
'monkey':6.28,
'olympic':6.28,
'pioneer':6.28,
'plaza':6.28,
'professionals':6.28,
'reflect':6.28,
'remembering':6.28,
'reputation':6.28,
'sentimental':6.28,
'skype':6.28,
'students':6.28,
'sweden':6.28,
'technological':6.28,
'themes':6.28,
'thinking':6.28,
'tips':6.28,
'vehicles':6.28,
'village':6.28,
'virginia':6.28,
'website':6.28,
'white':6.28,
'wines':6.28,
'reasonably':6.27,
'uptown':6.27,
'aims':6.27,
'observe':6.27,
'regards':6.27,
'allows':6.26,
'appropriate':6.26,
'australian':6.26,
'blackberry':6.26,
'breathing':6.26,
'camp':6.26,
'cars':6.26,
'considerable':6.26,
'costume':6.26,
'degree':6.26,
'develop':6.26,
'egypt':6.26,
'events':6.26,
'flag':6.26,
'gave':6.26,
'gods':6.26,
'gr8':6.26,
'hotels':6.26,
'human':6.26,
'indian':6.26,
'leap':6.26,
'lifetime':6.26,
'magnetic':6.26,
'mirror':6.26,
'mmmm':6.26,
'occasion':6.26,
'produce':6.26,
'prominent':6.26,
'promises':6.26,
'proved':6.26,
'raising':6.26,
'school':6.26,
'shirt':6.26,
'spark':6.26,
'surely':6.26,
'team':6.26,
'travelers':6.26,
'upcoming':6.26,
'us':6.26,
'valley':6.26,
'vintage':6.26,
'proteins':6.25,
'almighty':6.24,
'horizon':6.24,
'insight':6.24,
'ooooh':6.24,
'poetic':6.24,
'spirits':6.24,
'aboard':6.24,
'acknowledge':6.24,
'actors':6.24,
'advances':6.24,
'aid':6.24,
'answer':6.24,
'athletes':6.24,
'bowling':6.24,
'boy':6.24,
'built':6.24,
'choice':6.24,
'constitution':6.24,
'conversation':6.24,
'cowboy':6.24,
'day':6.24,
'deliver':6.24,
'developments':6.24,
'distinctive':6.24,
'dvds':6.24,
'edison':6.24,
'eighteen':6.24,
'enterprise':6.24,
'eyes':6.24,
'flying':6.24,
'grad':6.24,
'grammy':6.24,
'grill':6.24,
'halloween':6.24,
'holland':6.24,
'jelly':6.24,
'jingle':6.24,
'legitimate':6.24,
'making':6.24,
'more':6.24,
'options':6.24,
'possible':6.24,
'practical':6.24,
'proceeds':6.24,
'proposed':6.24,
'provides':6.24,
'queen':6.24,
'revolutionary':6.24,
'rises':6.24,
'samsung':6.24,
'self':6.24,
'show':6.24,
'sooner':6.24,
'speed':6.24,
'strategy':6.24,
'tale':6.24,
'tip':6.24,
'updating':6.24,
'vip':6.24,
'websites':6.24,
'worlds':6.24,
'writing':6.24,
'xbox':6.24,
'you':6.24,
'yours':6.24,
'yourself':6.24,
'collective':6.23,
'embrace':6.22,
'produces':6.22,
'meanings':6.22,
'accompanied':6.22,
'advice':6.22,
'all':6.22,
'answered':6.22,
'architectural':6.22,
'asia':6.22,
'authors':6.22,
'avid':6.22,
'batman':6.22,
'big':6.22,
'breast':6.22,
'bro':6.22,
'build':6.22,
'chef':6.22,
'clowns':6.22,
'contacts':6.22,
'contributions':6.22,
'cotton':6.22,
'cowboys':6.22,
'decent':6.22,
'designs':6.22,
'downloading':6.22,
'environment':6.22,
'evolution':6.22,
'farm':6.22,
'finishing':6.22,
'fit':6.22,
'foundations':6.22,
'full':6.22,
'guys':6.22,
'instrument':6.22,
'join':6.22,
'karma':6.22,
'knight':6.22,
'lives':6.22,
'logic':6.22,
'milk':6.22,
'most':6.22,
'neon':6.22,
'night':6.22,
'package':6.22,
'participation':6.22,
'penny':6.22,
'pregnant':6.22,
'properly':6.22,
'quest':6.22,
'restoration':6.22,
'seventeen':6.22,
'social':6.22,
'styles':6.22,
'supports':6.22,
'tech':6.22,
'thai':6.22,
'thoughts':6.22,
'today':6.22,
'transformation':6.22,
'treaty':6.22,
'tribute':6.22,
'aesthetic':6.21,
'upside':6.21,
'behold':6.2,
'dough':6.2,
'sands':6.2,
'3-bedroom':6.2,
'actor':6.2,
'agreements':6.2,
'arise':6.2,
'assured':6.2,
'bubble':6.2,
'cereal':6.2,
'definitely':6.2,
'dime':6.2,
'engage':6.2,
'erected':6.2,
'estate':6.2,
'ethical':6.2,
'everybody':6.2,
'faces':6.2,
'feeds':6.2,
'haircut':6.2,
'halo':6.2,
'jacket':6.2,
'joining':6.2,
'kingdom':6.2,
'lifted':6.2,
'listened':6.2,
'meat':6.2,
'menu':6.2,
'nurse':6.2,
'opening':6.2,
'pension':6.2,
'phd':6.2,
'phones':6.2,
'plans':6.2,
'premier':6.2,
'proposals':6.2,
'protein':6.2,
'providence':6.2,
'recommendations':6.2,
'sexual':6.2,
'soda':6.2,
'spain':6.2,
'stable':6.2,
'succession':6.2,
'supporters':6.2,
'taco':6.2,
'think':6.2,
'trading':6.2,
'upward':6.2,
'yields':6.2,
'sailor':6.19,
'dynamics':6.19,
'lyrical':6.19,
'copper':6.18,
'realise':6.18,
'righteous':6.18,
'transformed':6.18,
'venus':6.18,
'80s':6.18,
'advocates':6.18,
'aha':6.18,
'ate':6.18,
'atlantic':6.18,
'awareness':6.18,
'balance':6.18,
'blonde':6.18,
'burger':6.18,
'buyer':6.18,
'certificate':6.18,
'chances':6.18,
'chief':6.18,
'clearly':6.18,
'cultural':6.18,
'draws':6.18,
'driving':6.18,
'duck':6.18,
'eagle':6.18,
'emotions':6.18,
'established':6.18,
'experiments':6.18,
'expression':6.18,
'fishing':6.18,
'fri':6.18,
'fully':6.18,
'informed':6.18,
'initiated':6.18,
'italy':6.18,
'king':6.18,
'land':6.18,
'lion':6.18,
'miami':6.18,
'midnight':6.18,
'mineral':6.18,
'nomination':6.18,
'oak':6.18,
'occasions':6.18,
'philosophical':6.18,
'playlist':6.18,
'profound':6.18,
'provided':6.18,
'resolution':6.18,
'riding':6.18,
'safety':6.18,
'scientist':6.18,
'she':6.18,
'sight':6.18,
'spice':6.18,
'steady':6.18,
'survey':6.18,
'swiss':6.18,
't-shirt':6.18,
'tiger':6.18,
'tomorrow':6.18,
'tourist':6.18,
'tournament':6.18,
'trade':6.18,
'trains':6.18,
'tune':6.18,
'victor':6.18,
'walking':6.18,
'wireless':6.18,
'www':6.18,
'yea':6.18,
'beds':6.17,
'preference':6.17,
'applying':6.16,
'crop':6.16,
'enable':6.16,
'interactions':6.16,
'narrative':6.16,
'railway':6.16,
'afford':6.16,
'allowing':6.16,
'automobile':6.16,
'bands':6.16,
'boys':6.16,
'cds':6.16,
'christ':6.16,
'dictionary':6.16,
'downloads':6.16,
'eagles':6.16,
'engaged':6.16,
'especially':6.16,
'fiction':6.16,
'grocery':6.16,
'hotel':6.16,
'houses':6.16,
'hubby':6.16,
'included':6.16,
'lemon':6.16,
'mellow':6.16,
'minds':6.16,
'my':6.16,
'own':6.16,
'pacific':6.16,
'people':6.16,
'planning':6.16,
'polish':6.16,
'premium':6.16,
'providing':6.16,
'readers':6.16,
'rocked':6.16,
'sausage':6.16,
'south':6.16,
'transportation':6.16,
'turkey':6.16,
'wed':6.16,
'wheels':6.16,
'woods':6.16,
'yacht':6.16,
'livin':6.15,
'believing':6.14,
'chemistry':6.14,
'continuous':6.14,
'persons':6.14,
'seed':6.14,
'sheep':6.14,
'successive':6.14,
'adult':6.14,
'amsterdam':6.14,
'arises':6.14,
'arrived':6.14,
'asleep':6.14,
'aviation':6.14,
'basketball':6.14,
'browser':6.14,
'cathedral':6.14,
'cd':6.14,
'cheek':6.14,
'combination':6.14,
'conscious':6.14,
'cricket':6.14,
'debut':6.14,
'dividends':6.14,
'drinking':6.14,
'elizabeth':6.14,
'eye':6.14,
'generate':6.14,
'granted':6.14,
'guests':6.14,
'huge':6.14,
'jumping':6.14,
'kindle':6.14,
'launches':6.14,
'mend':6.14,
'models':6.14,
'mutual':6.14,
'offered':6.14,
'places':6.14,
'plan':6.14,
'principles':6.14,
'recovering':6.14,
'respectively':6.14,
'restore':6.14,
'ride':6.14,
'rock':6.14,
'shirts':6.14,
'sony':6.14,
'strategies':6.14,
'strongly':6.14,
'temple':6.14,
'thousands':6.14,
'tonight':6.14,
'trail':6.14,
'twin':6.14,
'up':6.14,
'updates':6.14,
'vagina':6.14,
'yahoo':6.14,
'receives':6.13,
'exclusively':6.12,
'writings':6.12,
'destiny':6.12,
'outcomes':6.12,
'quicker':6.12,
'boulevard':6.12,
'chapels':6.12,
'consideration':6.12,
'digital':6.12,
'dish':6.12,
'eat-in':6.12,
'ensure':6.12,
'event':6.12,
'everyone':6.12,
'face':6.12,
'focus':6.12,
'funds':6.12,
'garlic':6.12,
'investing':6.12,
'keyboard':6.12,
'knows':6.12,
'leaf':6.12,
'males':6.12,
'maps':6.12,
'masters':6.12,
'networking':6.12,
'nursing':6.12,
'patiently':6.12,
'proceeded':6.12,
'proceeding':6.12,
'profession':6.12,
'robot':6.12,
'snowing':6.12,
'studied':6.12,
'study':6.12,
'theme':6.12,
'toward':6.12,
'traditional':6.12,
'treasurer':6.12,
'university\'s':6.12,
'v-day':6.12,
'very':6.12,
'voted':6.12,
'wii':6.12,
'waving':6.11,
'extending':6.1,
'readily':6.1,
'mirrors':6.1,
'nearer':6.1,
'nurses':6.1,
'preserved':6.1,
'senses':6.1,
'aah':6.1,
'acknowledged':6.1,
'beers':6.1,
'bentley':6.1,
'brazil':6.1,
'cattle':6.1,
'challenging':6.1,
'check':6.1,
'chili':6.1,
'citizens':6.1,
'collection':6.1,
'comprehend':6.1,
'customers':6.1,
'elected':6.1,
'electricity':6.1,
'enters':6.1,
'essence':6.1,
'fab':6.1,
'forthcoming':6.1,
'forward':6.1,
'guide':6.1,
'herself':6.1,
'increasingly':6.1,
'info':6.1,
'investments':6.1,
'justification':6.1,
'karaoke':6.1,
'keeping':6.1,
'know':6.1,
'launched':6.1,
'life\'s':6.1,
'madame':6.1,
'markets':6.1,
'moments':6.1,
'nike':6.1,
'november':6.1,
'open':6.1,
'oscar':6.1,
'owner':6.1,
'practically':6.1,
'precise':6.1,
'release':6.1,
'romans':6.1,
'security':6.1,
'shade':6.1,
'shoulders':6.1,
'soap':6.1,
'springfield':6.1,
'start':6.1,
'telecommunications':6.1,
'tomorrow\'s':6.1,
'trinity':6.1,
'western':6.1,
'window':6.1,
'woof':6.1,
'yay':6.1,
'roam':6.09,
'dawning':6.08,
'choir':6.08,
'crops':6.08,
'elvis':6.08,
'significance':6.08,
'throne':6.08,
'velocity':6.08,
'acquainted':6.08,
'ahead':6.08,
'alright':6.08,
'audiences':6.08,
'ball':6.08,
'belief':6.08,
'bff':6.08,
'boat':6.08,
'boots':6.08,
'california':6.08,
'centuries':6.08,
'cheaper':6.08,
'clue':6.08,
'coat':6.08,
'consensus':6.08,
'contact':6.08,
'deserved':6.08,
'drive':6.08,
'facebook':6.08,
'freelance':6.08,
'greek':6.08,
'grown':6.08,
'help':6.08,
'housing':6.08,
'instant':6.08,
'integrated':6.08,
'introduction':6.08,
'legit':6.08,
'ma':6.08,
'message':6.08,
'negotiate':6.08,
'neighbor':6.08,
'neighborhoods':6.08,
'numerous':6.08,
'our':6.08,
'oven':6.08,
'picked':6.08,
'reached':6.08,
'recognize':6.08,
'recognized':6.08,
'rider':6.08,
'shows':6.08,
'significantly':6.08,
'specialist':6.08,
'suggestions':6.08,
'superior':6.08,
'tempo':6.08,
'tourists':6.08,
'ups':6.08,
'validity':6.08,
'vehicle':6.08,
'votes':6.08,
'theories':6.06,
'associations':6.06,
'attachment':6.06,
'fluid':6.06,
'shells':6.06,
'1970s':6.06,
'adults':6.06,
'advocacy':6.06,
'bella':6.06,
'brazilian':6.06,
'bueno':6.06,
'certain':6.06,
'certainly':6.06,
'combinations':6.06,
'composed':6.06,
'composition':6.06,
'couch':6.06,
'created':6.06,
'creek':6.06,
'dimes':6.06,
'distinct':6.06,
'equal':6.06,
'facts':6.06,
'flight':6.06,
'gaze':6.06,
'goodman':6.06,
'harbor':6.06,
'hey':6.06,
'historian':6.06,
'host':6.06,
'icon':6.06,
'influences':6.06,
'instruments':6.06,
'landmark':6.06,
'large':6.06,
'latest':6.06,
'leads':6.06,
'legs':6.06,
'liverpool':6.06,
'magazines':6.06,
'membership':6.06,
'muscle':6.06,
'nation':6.06,
'outlets':6.06,
'overseas':6.06,
'peanut':6.06,
'personal':6.06,
'photoshop':6.06,
'preparation':6.06,
'quantities':6.06,
'racing':6.06,
'reflection':6.06,
'representation':6.06,
'respective':6.06,
'see':6.06,
'servings':6.06,
'shoes':6.06,
'slim':6.06,
'sports':6.06,
'starring':6.06,
'straight':6.06,
'talk':6.06,
'towns':6.06,
'updated':6.06,
'wood':6.06,
'solving':6.04,
'bridges':6.04,
'climbing':6.04,
'geographical':6.04,
'skirt':6.04,
'1960s':6.04,
'academy':6.04,
'accompanying':6.04,
'acquired':6.04,
'acting':6.04,
'alumni':6.04,
'america\'s':6.04,
'approaches':6.04,
'bass':6.04,
'beginning':6.04,
'bringing':6.04,
'campus':6.04,
'casino':6.04,
'choices':6.04,
'contributed':6.04,
'exact':6.04,
'expand':6.04,
'express':6.04,
'fave':6.04,
'feliz':6.04,
'folks':6.04,
'fund':6.04,
'furniture':6.04,
'groove':6.04,
'hair':6.04,
'hint':6.04,
'installed':6.04,
'interactive':6.04,
'kitchen':6.04,
'melbourne':6.04,
'mind':6.04,
'numbers':6.04,
'perspective':6.04,
'points':6.04,
'prevention':6.04,
'professor':6.04,
'prospective':6.04,
'prospects':6.04,
'purple':6.04,
'purpose':6.04,
'replied':6.04,
'sauce':6.04,
'signing':6.04,
'sofa':6.04,
'supplies':6.04,
'tops':6.04,
'transport':6.04,
'union':6.04,
'visible':6.04,
'vocal':6.04,
'washington':6.04,
'words':6.04,
'xp':6.04,
'carriage':6.02,
'beings':6.02,
'colored':6.02,
'considerations':6.02,
'nearest':6.02,
'porch':6.02,
'relate':6.02,
'seventeenth':6.02,
'vibe':6.02,
'1980\'s':6.02,
'acres':6.02,
'aircraft':6.02,
'amen':6.02,
'basket':6.02,
'blog':6.02,
'cards':6.02,
'celebrity':6.02,
'christians':6.02,
'concepts':6.02,
'content':6.02,
'creates':6.02,
'delivery':6.02,
'developing':6.02,
'doll':6.02,
'download':6.02,
'eggs':6.02,
'engineers':6.02,
'essential':6.02,
'fixed':6.02,
'float':6.02,
'fridge':6.02,
'fund-raising':6.02,
'inn':6.02,
'jam':6.02,
'japanese':6.02,
'male':6.02,
'monetary':6.02,
'native':6.02,
'newspapers':6.02,
'objectives':6.02,
'pregnancy':6.02,
'presence':6.02,
'production':6.02,
'programs':6.02,
'pub':6.02,
'quick':6.02,
'rare':6.02,
'records':6.02,
'retire':6.02,
'simple':6.02,
'sophisticated':6.02,
'teams':6.02,
'totally':6.02,
'try':6.02,
'unwind':6.02,
'voting':6.02,
'walk':6.02,
'will':6.02,
'windows':6.02,
'wondering':6.02,
'writes':6.02,
'xoxo':6.02,
'rains':6.01,
'1990\'s':6,
'act':6,
'adapted':6,
'alliance':6,
'allow':6,
'applicable':6,
'archives':6,
'attend':6,
'attending':6,
'automatic':6,
'automatically':6,
'avatar':6,
'beans':6,
'beliefs':6,
'bien':6,
'biggest':6,
'brew':6,
'brook':6,
'cambridge':6,
'concentrations':6,
'conscience':6,
'continent':6,
'crimson':6,
'eighteenth':6,
'exactly':6,
'extend':6,
'favor':6,
'finale':6,
'find':6,
'fireplace':6,
'fixing':6,
'glance':6,
'global':6,
'ha':6,
'hands':6,
'heating':6,
'indeed':6,
'integral':6,
'itunes':6,
'japan':6,
'jenny':6,
'king\'s':6,
'lawn':6,
'lighting':6,
'likewise':6,
'lmfao':6,
'make':6,
'meaning':6,
'mega':6,
'metals':6,
'mucho':6,
'nations':6,
'network':6,
'olive':6,
'opened':6,
'oregon':6,
'owns':6,
'participants':6,
'pilot':6,
'principle':6,
'religion':6,
'result':6,
'service':6,
'sights':6,
'sites':6,
'sponsor':6,
'started':6,
'stereo':6,
'stores':6,
'successor':6,
'survive':6,
'surviving':6,
'today\'s':6,
'tuned':6,
'virgin':6,
'vista':6,
'walked':6,
'2-car':5.98,
'action':5.98,
'afternoon':5.98,
'anytime':5.98,
'attempting':5.98,
'audience':5.98,
'august':5.98,
'author':5.98,
'awww':5.98,
'bbc':5.98,
'began':5.98,
'biography':5.98,
'broadcast':5.98,
'canada':5.98,
'communities':5.98,
'contributor':5.98,
'creatures':5.98,
'declaration':5.98,
'dell':5.98,
'dialogue':5.98,
'drum':5.98,
'ebook':5.98,
'egg':5.98,
'explained':5.98,
'fabric':5.98,
'father-in-law':5.98,
'feature':5.98,
'ferry':5.98,
'fingertips':5.98,
'flash':5.98,
'flights':5.98,
'folk':5.98,
'gathered':5.98,
'grammys':5.98,
'heh':5.98,
'hill':5.98,
'http':5.98,
'identity':5.98,
'informal':5.98,
'ireland':5.98,
'java':5.98,
'july':5.98,
'keys':5.98,
'lego':5.98,
'lessons':5.98,
'looks':5.98,
'macbook':5.98,
'mcdonalds':5.98,
'meets':5.98,
'messages':5.98,
'national':5.98,
'netherlands':5.98,
'nintendo':5.98,
'normal':5.98,
'nyc':5.98,
'organization':5.98,
'originally':5.98,
'ours':5.98,
'ourselves':5.98,
'pairs':5.98,
'pic':5.98,
'planned':5.98,
'pop':5.98,
'prose':5.98,
'recordings':5.98,
'represented':5.98,
'robin':5.98,
'schools':5.98,
'singapore':5.98,
'sounds':5.98,
'specialized':5.98,
'store':5.98,
'sweater':5.98,
'tonight\'s':5.98,
'train':5.98,
'triple':5.98,
'wing':5.98,
'faire':5.98,
'lasts':5.98,
'nana':5.98,
'precisely':5.98,
'probable':5.98,
'refer':5.98,
'spoon':5.98,
'similarly':5.98,
'glimpse':5.98,
'souls':5.98,
'above':5.96,
'academic':5.96,
'allowed':5.96,
'assistance':5.96,
'authorized':5.96,
'bacon':5.96,
'bay':5.96,
'bf':5.96,
'body':5.96,
'collected':5.96,
'convinced':5.96,
'destined':5.96,
'discuss':5.96,
'driven':5.96,
'everyone\'s':5.96,
'everything':5.96,
'fav':5.96,
'features':5.96,
'flickr':5.96,
'french':5.96,
'gig':5.96,
'gracias':5.96,
'gym':5.96,
'head':5.96,
'heels':5.96,
'hundreds':5.96,
'including':5.96,
'islanders':5.96,
'jeep':5.96,
'job':5.96,
'largely':5.96,
'made':5.96,
'mambo':5.96,
'match':5.96,
'memoir':5.96,
'mighty':5.96,
'mmmmm':5.96,
'net':5.96,
'netflix':5.96,
'players':5.96,
'potentially':5.96,
'presently':5.96,
'proof':5.96,
'reaches':5.96,
'reflecting':5.96,
'related':5.96,
'releases':5.96,
'reveal':5.96,
'reveals':5.96,
'rocks':5.96,
'roommate':5.96,
'season':5.96,
'selection':5.96,
'ship':5.96,
'ships':5.96,
'similar':5.96,
'space':5.96,
'stadium':5.96,
'starts':5.96,
'taught':5.96,
'world\'s':5.96,
'writer':5.96,
'yep':5.96,
'justify':5.96,
'pupil':5.96,
'spreading':5.96,
'wales':5.96,
'whoo':5.96,
'deeds':5.96,
'exhibit':5.96,
'fiddle':5.96,
'exceed':5.96,
'3d':5.94,
'alternative':5.94,
'approach':5.94,
'awe':5.94,
'ballet':5.94,
'begins':5.94,
'building':5.94,
'business':5.94,
'carpet':5.94,
'chick':5.94,
'choose':5.94,
'consent':5.94,
'continental':5.94,
'correspondence':5.94,
'custom':5.94,
'decided':5.94,
'diary':5.94,
'echo':5.94,
'elevation':5.94,
'european':5.94,
'exports':5.94,
'finds':5.94,
'forum':5.94,
'framework':5.94,
'frank':5.94,
'gather':5.94,
'germany':5.94,
'image':5.94,
'impression':5.94,
'include':5.94,
'inherent':5.94,
'intention':5.94,
'investor':5.94,
'jet':5.94,
'joyce':5.94,
'kings':5.94,
'knew':5.94,
'larger':5.94,
'letter':5.94,
'listen':5.94,
'looking':5.94,
'mba':5.94,
'member':5.94,
'men':5.94,
'movement':5.94,
'nation\'s':5.94,
'obama':5.94,
'ok':5.94,
'oooh':5.94,
'option':5.94,
'phoenix':5.94,
'player':5.94,
'portfolio':5.94,
'preparations':5.94,
'presidential':5.94,
'prom':5.94,
'proper':5.94,
'pulse':5.94,
'reality':5.94,
'regularly':5.94,
'reservations':5.94,
'salmon':5.94,
'scene':5.94,
'societies':5.94,
'submitted':5.94,
'substantial':5.94,
'swift':5.94,
'technique':5.94,
'thnx':5.94,
'thx':5.94,
'tide':5.94,
'trends':5.94,
'visual':5.94,
'wallet':5.94,
'wear':5.94,
'formation':5.94,
'cloth':5.94,
'delicate':5.94,
'echoes':5.94,
'geography':5.94,
'processing':5.94,
'swinging':5.94,
'1970\'s':5.92,
'aides':5.92,
'bank':5.92,
'banks':5.92,
'beer':5.92,
'boobs':5.92,
'capital':5.92,
'chapters':5.92,
'chicks':5.92,
'chiefs':5.92,
'christianity':5.92,
'citizen':5.92,
'collections':5.92,
'conclude':5.92,
'constant':5.92,
'covered':5.92,
'devices':5.92,
'diagram':5.92,
'directors':5.92,
'doubtless':5.92,
'equity':5.92,
'fields':5.92,
'florence':5.92,
'forecast':5.92,
'get':5.92,
'group':5.92,
'guy':5.92,
'hah':5.92,
'harvard':5.92,
'historic':5.92,
'i':5.92,
'laboratory':5.92,
'linux':5.92,
'opens':5.92,
'orlando':5.92,
'pants':5.92,
'patterns':5.92,
'private':5.92,
'publishing':5.92,
'raining':5.92,
'residential':5.92,
'retirement':5.92,
'runnin':5.92,
'salon':5.92,
'sends':5.92,
'shorts':5.92,
'shown':5.92,
'skinny':5.92,
'solid':5.92,
'stoked':5.92,
'substantially':5.92,
'teen':5.92,
'theatrical':5.92,
'toyota':5.92,
'translated':5.92,
'tribe':5.92,
'umbrella':5.92,
'vienna':5.92,
'views':5.92,
'viva':5.92,
'washed':5.92,
'wholly':5.92,
'alternatives':5.92,
'applies':5.92,
'generated':5.92,
'merchant':5.92,
'missionary':5.92,
'vine':5.92,
'vive':5.91,
'add':5.9,
'addition':5.9,
'alike':5.9,
'attributed':5.9,
'blu-ray':5.9,
'both':5.9,
'brought':5.9,
'buyers':5.9,
'chillin':5.9,
'co-op':5.9,
'conception':5.9,
'conclusions':5.9,
'considered':5.9,
'daughter-in-law':5.9,
'diaries':5.9,
'dividend':5.9,
'doe':5.9,
'establish':5.9,
'exist':5.9,
'existence':5.9,
'expect':5.9,
'fact':5.9,
'featured':5.9,
'feel':5.9,
'gin':5.9,
'grew':5.9,
'hand':5.9,
'hosting':5.9,
'legacy':5.9,
'letters':5.9,
'lip':5.9,
'lolz':5.9,
'magazine':5.9,
'majority':5.9,
'mall':5.9,
'man':5.9,
'modest':5.9,
'naked':5.9,
'neighbors':5.9,
'nokia':5.9,
'notebook':5.9,
'now':5.9,
'pass':5.9,
'peak':5.9,
'permit':5.9,
'personally':5.9,
'planes':5.9,
'ratings':5.9,
'recording':5.9,
'replies':5.9,
'results':5.9,
'retail':5.9,
'scenes':5.9,
'scores':5.9,
'seattle':5.9,
'settlement':5.9,
'speak':5.9,
'stanford':5.9,
'strategic':5.9,
'symbols':5.9,
'talked':5.9,
'thousand':5.9,
'twenty':5.9,
'winter':5.9,
'yeah':5.9,
'angle':5.9,
'bun':5.9,
'displayed':5.9,
'dolly':5.9,
'illustrate':5.9,
'pockets':5.9,
'puppet':5.9,
'sensory':5.9,
'grande':5.9,
'mixture':5.9,
'myth':5.9,
'admiral':5.89,
'intensity':5.89,
'access':5.88,
'adobe':5.88,
'airport':5.88,
'allied':5.88,
'applications':5.88,
'architects':5.88,
'audio':5.88,
'austria':5.88,
'celeb':5.88,
'chosen':5.88,
'city\'s':5.88,
'coordinator':5.88,
'cyber':5.88,
'deserves':5.88,
'distinguish':5.88,
'drivin':5.88,
'entire':5.88,
'evidently':5.88,
'expanded':5.88,
'feedback':5.88,
'field':5.88,
'flew':5.88,
'founder':5.88,
'hip':5.88,
'includes':5.88,
'keeps':5.88,
'leaders':5.88,
'lmaooo':5.88,
'mary':5.88,
'mood':5.88,
'mrs':5.88,
'october':5.88,
'organism':5.88,
'outlook':5.88,
'philharmonic':5.88,
'physical':5.88,
'poland':5.88,
'primary':5.88,
'printed':5.88,
'privacy':5.88,
'pro':5.88,
'producer':5.88,
'railroad':5.88,
'researchers':5.88,
'scout':5.88,
'sequence':5.88,
'sovereign':5.88,
'speaking':5.88,
'sustained':5.88,
'town':5.88,
'twilight':5.88,
'victoria':5.88,
'weather':5.88,
'whole':5.88,
'yeh':5.88,
'pun':5.88,
'demonstration':5.88,
'misty':5.88,
'sovereignty':5.88,
'scripture':5.88,
'sleigh':5.88,
'flex':5.87,
'2morrow':5.86,
'adopted':5.86,
'aim':5.86,
'amounts':5.86,
'applied':5.86,
'arrangement':5.86,
'articles':5.86,
'balls':5.86,
'barbie':5.86,
'bear':5.86,
'boogie':5.86,
'bridge':5.86,
'brooks':5.86,
'brother-in-law':5.86,
'chrome':5.86,
'club':5.86,
'columbus':5.86,
'connect':5.86,
'constitutional':5.86,
'contemporary':5.86,
'country\'s':5.86,
'credit':5.86,
'credits':5.86,
'curve':5.86,
'diverse':5.86,
'dj':5.86,
'effort':5.86,
'engineering':5.86,
'equipment':5.86,
'figures':5.86,
'freeway':5.86,
'front-page':5.86,
'frontier':5.86,
'hotter':5.86,
'household':5.86,
'integration':5.86,
'introduce':5.86,
'japan\'s':5.86,
'jennifer':5.86,
'keep':5.86,
'layout':5.86,
'lens':5.86,
'leo':5.86,
'located':5.86,
'metro':5.86,
'newman':5.86,
'nut':5.86,
'nuts':5.86,
'observations':5.86,
'obtain':5.86,
'pc':5.86,
'position':5.86,
'potter':5.86,
'president':5.86,
'productions':5.86,
'property':5.86,
'pumping':5.86,
'revelation':5.86,
'road':5.86,
'sand':5.86,
'seat':5.86,
'services':5.86,
'sound':5.86,
'survival':5.86,
'teens':5.86,
'thursday':5.86,
'trained':5.86,
'variations':5.86,
'viewers':5.86,
'wrapped':5.86,
'attitudes':5.86,
'autonomy':5.86,
'concentrated':5.86,
'deeper':5.86,
'fifteen':5.86,
'fourteen':5.86,
'gum':5.86,
'liquid':5.86,
'organizational':5.86,
'output':5.86,
'phenomena':5.86,
'seal':5.86,
'concentration':5.85,
'props':5.85,
'construct':5.85,
'amount':5.84,
'angeles':5.84,
'appear':5.84,
'arena':5.84,
'banking':5.84,
'baseball':5.84,
'begun':5.84,
'being':5.84,
'benz':5.84,
'blogs':5.84,
'buck':5.84,
'canadian':5.84,
'checks':5.84,
'chicago':5.84,
'circles':5.84,
'classes':5.84,
'colorado':5.84,
'coming':5.84,
'conducting':5.84,
'crossword':5.84,
'curry':5.84,
'decide':5.84,
'descriptions':5.84,
'desktop':5.84,
'element':5.84,
'enter':5.84,
'escaped':5.84,
'ethnic':5.84,
'experimental':5.84,
'feelings':5.84,
'germans':5.84,
'gets':5.84,
'grain':5.84,
'grammar':5.84,
'gravity':5.84,
'hear':5.84,
'her':5.84,
'history':5.84,
'individuals':5.84,
'landed':5.84,
'lands':5.84,
'lays':5.84,
'maryland':5.84,
'matrix':5.84,
'mexico':5.84,
'nationwide':5.84,
'ooh':5.84,
'oral':5.84,
'patents':5.84,
'poster':5.84,
'producing':5.84,
'programming':5.84,
'prophet':5.84,
'provisions':5.84,
'puff':5.84,
'quartet':5.84,
'realize':5.84,
'really':5.84,
'responses':5.84,
'sample':5.84,
'shoe':5.84,
'showing':5.84,
'ski':5.84,
'stages':5.84,
'stored':5.84,
'suggestion':5.84,
'tall':5.84,
'telephone':5.84,
'theoretical':5.84,
'uk':5.84,
'urban':5.84,
'watching':5.84,
'web':5.84,
'absorption':5.84,
'constructed':5.84,
'dimensions':5.84,
'examples':5.84,
'interpretation':5.84,
'programme':5.84,
'relating':5.84,
'shades':5.84,
'subtle':5.84,
'instruction':5.83,
'rotation':5.83,
'wagon':5.83,
'10:00:00PM':5.82,
'7-9pm':5.82,
'apply':5.82,
'arising':5.82,
'bar':5.82,
'becoming':5.82,
'blogging':5.82,
'closer':5.82,
'come':5.82,
'communications':5.82,
'connection':5.82,
'consistent':5.82,
'cow':5.82,
'detail':5.82,
'diplomatic':5.82,
'east':5.82,
'eatin':5.82,
'emphasized':5.82,
'endowment':5.82,
'entered':5.82,
'expressed':5.82,
'fig':5.82,
'have':5.82,
'hearing':5.82,
'homey':5.82,
'hundred':5.82,
'investment':5.82,
'involved':5.82,
'irish':5.82,
'jean':5.82,
'key':5.82,
'landing':5.82,
'lived':5.82,
'maine':5.82,
'maker':5.82,
'many':5.82,
'met':5.82,
'montreal':5.82,
'nashville':5.82,
'opinion':5.82,
'owl':5.82,
'pair':5.82,
'path':5.82,
'peoples':5.82,
'philosophers':5.82,
'publisher':5.82,
'quickly':5.82,
'realised':5.82,
'regarded':5.82,
'royal':5.82,
'sane':5.82,
'sister-in-law':5.82,
'southwest':5.82,
'spanish':5.82,
'sum':5.82,
'talks':5.82,
'teen-agers':5.82,
'tennessee':5.82,
'toronto':5.82,
'upper':5.82,
'woot':5.82,
'workin':5.82,
'diversity':5.82,
'ideology':5.82,
'mist':5.82,
'movements':5.82,
'outline':5.82,
'continually':5.81,
'obtaining':5.81,
'06:00:00PM':5.8,
'accordingly':5.8,
'acquisition':5.8,
'addressed':5.8,
'analysis':5.8,
'appearance':5.8,
'attention':5.8,
'attitude':5.8,
'bean':5.8,
'becomes':5.8,
'belong':5.8,
'brings':5.8,
'caffeine':5.8,
'changing':5.8,
'climate':5.8,
'commonly':5.8,
'courses':5.8,
'crib':5.8,
'definition':5.8,
'determine':5.8,
'director':5.8,
'double':5.8,
'dude':5.8,
'entre':5.8,
'establishing':5.8,
'extended':5.8,
'finding':5.8,
'god\'s':5.8,
'gradually':5.8,
'group\'s':5.8,
'grove':5.8,
'hai':5.8,
'headed':5.8,
'ice':5.8,
'interior':5.8,
'kentucky':5.8,
'known':5.8,
'league':5.8,
'liberal':5.8,
'lmao':5.8,
'master\'s':5.8,
'men\'s':5.8,
'mix':5.8,
'model':5.8,
'mostly':5.8,
'mouth':5.8,
'networks':5.8,
'northeast':5.8,
'outside':5.8,
'paper':5.8,
'pardon':5.8,
'perceive':5.8,
'pilots':5.8,
'podcast':5.8,
'practice':5.8,
'psychology':5.8,
'pumped':5.8,
'rapid':5.8,
'reconstruction':5.8,
'rehearsal':5.8,
'responsible':5.8,
'roads':5.8,
'root':5.8,
'rubber':5.8,
'sales':5.8,
'sending':5.8,
'shaped':5.8,
'simultaneously':5.8,
'spoke':5.8,
'stock':5.8,
'tended':5.8,
'vivo':5.8,
'vote':5.8,
'wind':5.8,
'write':5.8,
'yellow':5.8,
'seated':5.8,
'behaviour':5.8,
'description':5.8,
'dimension':5.8,
'gender':5.8,
'impulse':5.8,
'involve':5.8,
'maintaining':5.8,
'manufacture':5.8,
'occupation':5.8,
'provinces':5.8,
'quantity':5.8,
'sentiment':5.8,
'natives':5.79,
'thirty':5.79,
'arch':5.79,
'actions':5.78,
'added':5.78,
'additional':5.78,
'admission':5.78,
'ahhhh':5.78,
'ambassador':5.78,
'amber':5.78,
'anna':5.78,
'annie':5.78,
'attributes':5.78,
'auction':5.78,
'aware':5.78,
'backup':5.78,
'britain':5.78,
'carefully':5.78,
'century':5.78,
'challenge':5.78,
'characters':5.78,
'colleague':5.78,
'containing':5.78,
'contest':5.78,
'convince':5.78,
'downtown':5.78,
'drives':5.78,
'ebay':5.78,
'egyptian':5.78,
'entering':5.78,
'featuring':5.78,
'fed':5.78,
'fibers':5.78,
'fitted':5.78,
'flows':5.78,
'founding':5.78,
'frequent':5.78,
'having':5.78,
'hd':5.78,
'hosted':5.78,
'hottest':5.78,
'intervals':5.78,
'inventory':5.78,
'lift':5.78,
'link':5.78,
'lot':5.78,
'march':5.78,
'mare':5.78,
'morality':5.78,
'newton':5.78,
'optical':5.78,
'passages':5.78,
'plasma':5.78,
'plates':5.78,
'poker':5.78,
'pops':5.78,
'possibly':5.78,
'realized':5.78,
'record':5.78,
'resident':5.78,
'respond':5.78,
'rural':5.78,
'shuttle':5.78,
'society':5.78,
'texts':5.78,
'total':5.78,
'trying':5.78,
'uploaded':5.78,
'various':5.78,
'volume':5.78,
'wheel':5.78,
'woo':5.78,
'workers':5.78,
'workout':5.78,
'yess':5.78,
'sober':5.78,
'components':5.78,
'defined':5.78,
'flashing':5.78,
'momento':5.78,
'movin':5.78,
'rollin':5.78,
'rover':5.78,
'vessel':5.78,
'printing':5.77,
'spatial':5.77,
'corresponding':5.77,
'accord':5.76,
'afterwards':5.76,
'apparatus':5.76,
'approaching':5.76,
'boston':5.76,
'brands':5.76,
'characteristic':5.76,
'city':5.76,
'coach':5.76,
'commission':5.76,
'continue':5.76,
'continuing':5.76,
'days':5.76,
'deeply':5.76,
'describes':5.76,
'diana':5.76,
'discussing':5.76,
'do':5.76,
'eastern':5.76,
'emotion':5.76,
'ensemble':5.76,
'episode':5.76,
'essentially':5.76,
'everywhere':5.76,
'experiment':5.76,
'facilities':5.76,
'functions':5.76,
'ginger':5.76,
'glass':5.76,
'greece':5.76,
'historical':5.76,
'horny':5.76,
'install':5.76,
'jessica':5.76,
'just':5.76,
'lamp':5.76,
'lincoln':5.76,
'magnitude':5.76,
'maintain':5.76,
'major':5.76,
'makers':5.76,
'makeup':5.76,
'manor':5.76,
'manual':5.76,
'mechanisms':5.76,
'michelle':5.76,
'motion':5.76,
'outfit':5.76,
'oxford':5.76,
'payments':5.76,
'permitted':5.76,
'preparing':5.76,
'preview':5.76,
'privately':5.76,
'probability':5.76,
'producers':5.76,
'products':5.76,
'ps3':5.76,
'publication':5.76,
'race':5.76,
'rachel':5.76,
'referring':5.76,
'remix':5.76,
'representing':5.76,
'republic':5.76,
'sees':5.76,
'selling':5.76,
'slide':5.76,
'species':5.76,
'staying':5.76,
'supplied':5.76,
'supply':5.76,
'things':5.76,
'tokyo':5.76,
'viewing':5.76,
'vital':5.76,
'voice':5.76,
'wednesdays':5.76,
'whisper':5.76,
'workshop':5.76,
'chiefly':5.76,
'dimensional':5.76,
'handed':5.76,
'interval':5.76,
'ladder':5.76,
'oooooh':5.76,
'perception':5.76,
'pupils':5.76,
'shield':5.76,
'thoroughly':5.76,
'considerably':5.75,
'manuscript':5.75,
'symbolic':5.74,
'07:00:00PM':5.74,
'awake':5.74,
'booty':5.74,
'cadillac':5.74,
'call':5.74,
'calling':5.74,
'catch':5.74,
'challenges':5.74,
'chelsea':5.74,
'chile':5.74,
'concentrate':5.74,
'deep':5.74,
'details':5.74,
'diplomacy':5.74,
'dragon':5.74,
'employee':5.74,
'endorsed':5.74,
'entry':5.74,
'estates':5.74,
'everyday':5.74,
'expected':5.74,
'forth':5.74,
'fundamental':5.74,
'gf':5.74,
'given':5.74,
'i\'m':5.74,
'inclined':5.74,
'kept':5.74,
'kinds':5.74,
'lace':5.74,
'mac':5.74,
'manage':5.74,
'much':5.74,
'name':5.74,
'newsstands':5.74,
'ninja':5.74,
'nite':5.74,
'observed':5.74,
'orientation':5.74,
'owners':5.74,
'powder':5.74,
'presented':5.74,
'princeton':5.74,
'project':5.74,
'prove':5.74,
'quarters':5.74,
'reach':5.74,
'responded':5.74,
'rio':5.74,
'screen':5.74,
'serves':5.74,
'settled':5.74,
'showed':5.74,
'situated':5.74,
'spare':5.74,
'spokeswoman':5.74,
'suitcase':5.74,
'suits':5.74,
'swag':5.74,
'team\'s':5.74,
'thin':5.74,
'time':5.74,
'todays':5.74,
'training':5.74,
'transactions':5.74,
'treasury':5.74,
'walkin':5.74,
'warrior':5.74,
'wash':5.74,
'wives':5.74,
'cave':5.73,
'involves':5.73,
'mechanical':5.73,
'sphere':5.73,
'structural':5.73,
'identification':5.73,
'shell':5.73,
'nod':5.72,
'pose':5.72,
'3g':5.72,
'09:00:00PM':5.72,
'adding':5.72,
'affiliation':5.72,
'alexander':5.72,
'apt':5.72,
'argentina':5.72,
'blend':5.72,
'canal':5.72,
'card':5.72,
'channels':5.72,
'click':5.72,
'detailed':5.72,
'distinguishable':5.72,
'dvr':5.72,
'ears':5.72,
'euro':5.72,
'expanding':5.72,
'funky':5.72,
'goldman':5.72,
'happening':5.72,
'hypothesis':5.72,
'implementation':5.72,
'import':5.72,
'individual':5.72,
'jewish':5.72,
'kathryn':5.72,
'knowin':5.72,
'marine':5.72,
'midtown':5.72,
'missouri':5.72,
'modification':5.72,
'move':5.72,
'near':5.72,
'passenger':5.72,
'passengers':5.72,
'pen':5.72,
'persuade':5.72,
'philadelphia':5.72,
'plate':5.72,
'publications':5.72,
'quietly':5.72,
'races':5.72,
'rank':5.72,
'registered':5.72,
'responsibility':5.72,
'roles':5.72,
'satellite':5.72,
'script':5.72,
'seek':5.72,
'signed':5.72,
'source':5.72,
'spectrum':5.72,
'stage':5.72,
'surrounds':5.72,
'taxi':5.72,
'three':5.72,
'towards':5.72,
'translation':5.72,
'ultimately':5.72,
'update':5.72,
'uses':5.72,
'view':5.72,
'waking':5.72,
'whiskey':5.72,
'winds':5.72,
'with':5.72,
'wrap':5.72,
'contains':5.71,
'employer':5.71,
'fifty':5.71,
'immense':5.71,
'opinions':5.71,
'temperatures':5.71,
'fella':5.71,
'flippin':5.71,
'hears':5.71,
'scope':5.71,
'soil':5.71,
'timber':5.71,
'objective':5.7,
'willow':5.7,
'1960\'s':5.7,
'05:00:00PM':5.7,
'accumulation':5.7,
'android':5.7,
'appointed':5.7,
'approximately':5.7,
'arrangements':5.7,
'atm':5.7,
'attribute':5.7,
'banner':5.7,
'become':5.7,
'biggie':5.7,
'bunch':5.7,
'churches':5.7,
'contain':5.7,
'data':5.7,
'demonstrated':5.7,
'developer':5.7,
'disc':5.7,
'discussion':5.7,
'dozens':5.7,
'driver':5.7,
'earliest':5.7,
'elementary':5.7,
'engine':5.7,
'extremely':5.7,
'feat':5.7,
'feeling':5.7,
'fill':5.7,
'fried':5.7,
'grade':5.7,
'hat':5.7,
'hold':5.7,
'identical':5.7,
'jackson':5.7,
'january':5.7,
'johnny':5.7,
'journal':5.7,
'manhattan':5.7,
'master':5.7,
'max':5.7,
'michael':5.7,
'migration':5.7,
'mild':5.7,
'mmm':5.7,
'multiple':5.7,
'noon':5.7,
'northwest':5.7,
'observer':5.7,
'placing':5.7,
'pocket':5.7,
'prevented':5.7,
'rally':5.7,
'rankings':5.7,
'raymond':5.7,
'reforms':5.7,
'regular':5.7,
'rolling':5.7,
'roman':5.7,
'running':5.7,
'sippin':5.7,
'sonic':5.7,
'streaming':5.7,
'superbowl':5.7,
'synthesis':5.7,
'thickness':5.7,
'thumb':5.7,
'tonite':5.7,
'vertical':5.7,
'walks':5.7,
'want':5.7,
'wassup':5.7,
'watch':5.7,
'wendy':5.7,
'whites':5.7,
'written':5.7,
'xo':5.7,
'yaa':5.7,
'correlation':5.69,
'jungle':5.69,
'keepin':5.69,
'paragraph':5.69,
'yonder':5.69,
'determining':5.69,
'dusk':5.69,
'gal':5.69,
'hindu':5.69,
'mechanism':5.69,
'\#jobs':5.68,
'affiliate':5.68,
'amongst':5.68,
'angles':5.68,
'announce':5.68,
'appears':5.68,
'associated':5.68,
'avenue':5.68,
'bars':5.68,
'be':5.68,
'benjamin':5.68,
'bond':5.68,
'broadcasting':5.68,
'button':5.68,
'cabinet':5.68,
'cent':5.68,
'character':5.68,
'civic':5.68,
'climb':5.68,
'clinton':5.68,
'countries':5.68,
'database':5.68,
'degrees':5.68,
'direct':5.68,
'emerged':5.68,
'emphasis':5.68,
'enterprises':5.68,
'exchange':5.68,
'footage':5.68,
'foreign':5.68,
'formula':5.68,
'fort':5.68,
'gaga':5.68,
'getting':5.68,
'graham':5.68,
'grasp':5.68,
'greenwich':5.68,
'grounds':5.68,
'jill':5.68,
'laude':5.68,
'location':5.68,
'logo':5.68,
'machines':5.68,
'managed':5.68,
'marching':5.68,
'mars':5.68,
'merchants':5.68,
'mission':5.68,
'mississippi':5.68,
'moment':5.68,
'moves':5.68,
'nearby':5.68,
'nuevo':5.68,
'often':5.68,
'organs':5.68,
'permanent':5.68,
'perspectives':5.68,
'physiological':5.68,
'playoff':5.68,
'portland':5.68,
'program':5.68,
'publicity':5.68,
'publishers':5.68,
'pursue':5.68,
'response':5.68,
'resume':5.68,
'role':5.68,
'salt':5.68,
'seeks':5.68,
'sitting':5.68,
'southeast':5.68,
'speaker':5.68,
'speaks':5.68,
'spoken':5.68,
'stimulus':5.68,
'suggests':5.68,
'sydney':5.68,
'tot':5.68,
'trustee':5.68,
'usb':5.68,
'west':5.68,
'moses':5.67,
'occurring':5.67,
'saddle':5.67,
'samples':5.67,
'tail':5.67,
'thrust':5.67,
'vow':5.67,
'conversion':5.67,
'evident':5.67,
'\#travel':5.66,
'21st':5.66,
'08:00:00PM':5.66,
'absorbed':5.66,
'african':5.66,
'alexandra':5.66,
'among':5.66,
'aspect':5.66,
'association':5.66,
'auto':5.66,
'blue':5.66,
'bold':5.66,
'british':5.66,
'casa':5.66,
'cents':5.66,
'chose':5.66,
'claus':5.66,
'collect':5.66,
'compete':5.66,
'concluded':5.66,
'conclusion':5.66,
'continues':5.66,
'coverage':5.66,
'cup':5.66,
'customer':5.66,
'describe':5.66,
'developmental':5.66,
'digest':5.66,
'discussions':5.66,
'drawn':5.66,
'drew':5.66,
'early':5.66,
'electric':5.66,
'entrance':5.66,
'exchanges':5.66,
'follow':5.66,
'foundation':5.66,
'glove':5.66,
'gps':5.66,
'groups':5.66,
'ham':5.66,
'immediately':5.66,
'indiana':5.66,
'indie':5.66,
'intense':5.66,
'leopard':5.66,
'louisiana':5.66,
'mane':5.66,
'manufacturing':5.66,
'members':5.66,
'molecules':5.66,
'obama\'s':5.66,
'occupy':5.66,
'oooo':5.66,
'parkway':5.66,
'passed':5.66,
'people\'s':5.66,
'phillips':5.66,
'playoffs':5.66,
'practices':5.66,
'prepare':5.66,
'priority':5.66,
'reap':5.66,
'regard':5.66,
'residents':5.66,
'rode':5.66,
'roll':5.66,
'roots':5.66,
'rugged':5.66,
'sake':5.66,
'sandy':5.66,
'served':5.66,
'seven':5.66,
'several':5.66,
'shareholders':5.66,
'sidney':5.66,
'sign':5.66,
'silkk':5.66,
'sol':5.66,
'son-in-law':5.66,
'stretch':5.66,
'tenure':5.66,
'timing':5.66,
'tongues':5.66,
'tower':5.66,
'upstairs':5.66,
'usually':5.66,
'verse':5.66,
'wrapping':5.66,
'yard':5.66,
'adequate':5.66,
'explains':5.66,
'doorway':5.65,
'drinkin':5.65,
'examined':5.65,
'height':5.65,
'influenced':5.65,
'mami':5.65,
'mathematics':5.65,
'organisation':5.65,
'phases':5.65,
'realm':5.65,
'remarked':5.65,
'structures':5.65,
'consisting':5.65,
'clown':5.65,
'equations':5.65,
'hum':5.65,
'1950\'s':5.64,
'2day':5.64,
'ac':5.64,
'alaska':5.64,
'amanda':5.64,
'asterisk':5.64,
'bag':5.64,
'bra':5.64,
'businesses':5.64,
'cable':5.64,
'charger':5.64,
'chester':5.64,
'chinese':5.64,
'circular':5.64,
'civilian':5.64,
'civilians':5.64,
'closely':5.64,
'cognitive':5.64,
'combo':5.64,
'commons':5.64,
'competition':5.64,
'construction':5.64,
'designated':5.64,
'dive':5.64,
'editor':5.64,
'employees':5.64,
'entitled':5.64,
'escape':5.64,
'every':5.64,
'evidence':5.64,
'expects':5.64,
'financing':5.64,
'flown':5.64,
'followers':5.64,
'gucci':5.64,
'guess':5.64,
'hamilton':5.64,
'handful':5.64,
'heads':5.64,
'heights':5.64,
'holding':5.64,
'importa':5.64,
'influence':5.64,
'inning':5.64,
'involvement':5.64,
'kathleen':5.64,
'kit':5.64,
'layer':5.64,
'lit':5.64,
'means':5.64,
'mtv':5.64,
'mystery':5.64,
'night\'s':5.64,
'obvious':5.64,
'oriented':5.64,
'owned':5.64,
'pace':5.64,
'pennsylvania':5.64,
'portions':5.64,
'presidents':5.64,
'probably':5.64,
'provision':5.64,
'purposes':5.64,
'rainy':5.64,
'rang':5.64,
'rangers':5.64,
'recommend':5.64,
'ricky':5.64,
'secular':5.64,
'senior':5.64,
'serving':5.64,
'sheets':5.64,
'southern':5.64,
'soy':5.64,
'speakers':5.64,
'spin':5.64,
'states':5.64,
'streets':5.64,
'symbol':5.64,
'techniques':5.64,
'tee':5.64,
'tends':5.64,
'tokio':5.64,
'trend':5.64,
'upload':5.64,
'use':5.64,
'vast':5.64,
'venture':5.64,
'veterans':5.64,
'wholesalers':5.64,
'wrote':5.64,
'elite':5.64,
'genes':5.64,
'hydrogen':5.63,
'intentions':5.63,
'lungs':5.63,
'measuring':5.63,
'origin':5.63,
'peripheral':5.63,
'twentieth':5.63,
'riders':5.62,
'spaces':5.62,
'vary':5.62,
'accent':5.62,
'airline':5.62,
'alma':5.62,
'appeal':5.62,
'around':5.62,
'assistant':5.62,
'associate':5.62,
'became':5.62,
'behavioral':5.62,
'bottle':5.62,
'buildings':5.62,
'buzz':5.62,
'can':5.62,
'catching':5.62,
'characteristics':5.62,
'charlie':5.62,
'clock':5.62,
'cloud':5.62,
'comments':5.62,
'corazon':5.62,
'cycle':5.62,
'describing':5.62,
'dice':5.62,
'display':5.62,
'dudes':5.62,
'dutch':5.62,
'espn':5.62,
'eve':5.62,
'ford':5.62,
'formal':5.62,
'fry':5.62,
'heading':5.62,
'heather':5.62,
'homies':5.62,
'instances':5.62,
'jews':5.62,
'leaves':5.62,
'leg':5.62,
'lolol':5.62,
'managing':5.62,
'material':5.62,
'media':5.62,
'microsoft':5.62,
'next':5.62,
'oct':5.62,
'organisms':5.62,
'page':5.62,
'pages':5.62,
'pitch':5.62,
'poll':5.62,
'printer':5.62,
'proportion':5.62,
'proportions':5.62,
'referred':5.62,
'reflects':5.62,
'reply':5.62,
'resulted':5.62,
'rockville':5.62,
'runs':5.62,
'sacred':5.62,
'sells':5.62,
'sidewalk':5.62,
'snowed':5.62,
'status':5.62,
'they':5.62,
'through':5.62,
'tommy':5.62,
'transaction':5.62,
'tub':5.62,
'variable':5.62,
'vday':5.62,
'virtual':5.62,
'watches':5.62,
'we\'ve':5.62,
'widely':5.62,
'ya\'ll':5.62,
'york':5.62,
'ideological':5.61,
'midst':5.61,
'comparatively':5.6,
'address':5.6,
'airlines':5.6,
'ancient':5.6,
'apartment':5.6,
'apparent':5.6,
'arranged':5.6,
'assembly':5.6,
'bathroom':5.6,
'bees':5.6,
'bon':5.6,
'brasil':5.6,
'called':5.6,
'caroline':5.6,
'centers':5.6,
'central':5.6,
'chapter':5.6,
'cleaning':5.6,
'columns':5.6,
'combined':5.6,
'concrete':5.6,
'considering':5.6,
'consulting':5.6,
'covers':5.6,
'crew':5.6,
'edinburgh':5.6,
'efforts':5.6,
'eleven':5.6,
'enormous':5.6,
'entirely':5.6,
'evan':5.6,
'francisco':5.6,
'frequency':5.6,
'function':5.6,
'got':5.6,
'historians':5.6,
'hop':5.6,
'idol':5.6,
'immediate':5.6,
'indianapolis':5.6,
'involving':5.6,
'layers':5.6,
'level':5.6,
'links':5.6,
'lisa':5.6,
'mouths':5.6,
'news':5.6,
'occasional':5.6,
'outcome':5.6,
'pat':5.6,
'patent':5.6,
'perceived':5.6,
'pick':5.6,
'pope':5.6,
'priest':5.6,
'pronounced':5.6,
'quotes':5.6,
'reaching':5.6,
'relatively':5.6,
'reminded':5.6,
'reynolds':5.6,
'runner':5.6,
'saying':5.6,
'seeking':5.6,
'specific':5.6,
'spell':5.6,
'stand':5.6,
'suggested':5.6,
'title':5.6,
'topics':5.6,
'trustees':5.6,
'twice':5.6,
'utilities':5.6,
'veteran':5.6,
'viewed':5.6,
'virtually':5.6,
'walker':5.6,
'watchin':5.6,
'your':5.6,
'accustomed':5.6,
'deed':5.6,
'besos':5.59,
'classroom':5.59,
'comparative':5.59,
'constituted':5.59,
'indicating':5.59,
'occurs':5.59,
'parallel':5.59,
'sentences':5.59,
'vita':5.59,
'habits':5.58,
'\#iphone':5.58,
'allison':5.58,
'appeals':5.58,
'apps':5.58,
'arizona':5.58,
'attached':5.58,
'bags':5.58,
'barack':5.58,
'bears':5.58,
'bell':5.58,
'brand':5.58,
'broad':5.58,
'broader':5.58,
'bulletin':5.58,
'cara':5.58,
'casey':5.58,
'cerebral':5.58,
'chew':5.58,
'circle':5.58,
'cities':5.58,
'client':5.58,
'comes':5.58,
'comment':5.58,
'consists':5.58,
'corrected':5.58,
'current':5.58,
'danny':5.58,
'decisions':5.58,
'delaware':5.58,
'described':5.58,
'did':5.58,
'estimated':5.58,
'even':5.58,
'example':5.58,
'executive':5.58,
'feet':5.58,
'filling':5.58,
'finally':5.58,
'financed':5.58,
'fingers':5.58,
'formed':5.58,
'front':5.58,
'gran':5.58,
'hier':5.58,
'hips':5.58,
'i\'ve':5.58,
'ibm':5.58,
'identify':5.58,
'intro':5.58,
'kennedy':5.58,
'laura':5.58,
'lay':5.58,
'lets':5.58,
'lewis':5.58,
'linda':5.58,
'long-term':5.58,
'looked':5.58,
'lords':5.58,
'man\'s':5.58,
'marie':5.58,
'massachusetts':5.58,
'microphone':5.58,
'mills':5.58,
'ministry':5.58,
'mumbai':5.58,
'named':5.58,
'navy':5.58,
'operative':5.58,
'overnight':5.58,
'peep':5.58,
'pot':5.58,
'pursuit':5.58,
'rapidly':5.58,
'recorded':5.58,
'returning':5.58,
'rooms':5.58,
'seats':5.58,
'set':5.58,
'shortly':5.58,
'shoutout':5.58,
'soho':5.58,
'solely':5.58,
'stuff':5.58,
'suburban':5.58,
'talkin':5.58,
'teenage':5.58,
'thighs':5.58,
'thing':5.58,
'times':5.58,
'traders':5.58,
'trending':5.58,
'tries':5.58,
'valve':5.58,
'vermont':5.58,
'voters':5.58,
'waist':5.58,
'warming':5.58,
'we\'ll':5.58,
'yang':5.58,
'declare':5.57,
'departments':5.57,
'mathematical':5.57,
'sow':5.57,
'density':5.57,
'colony':5.56,
'component':5.56,
'illusion':5.56,
'sip':5.56,
'stride':5.56,
'summary':5.56,
'\#musicmonday':5.56,
'acted':5.56,
'aide':5.56,
'alot':5.56,
'analyst':5.56,
'announces':5.56,
'aspects':5.56,
'associates':5.56,
'attempt':5.56,
'basically':5.56,
'blowin':5.56,
'bong':5.56,
'brush':5.56,
'camps':5.56,
'cap':5.56,
'change':5.56,
'characterized':5.56,
'christopher':5.56,
'civil':5.56,
'clients':5.56,
'columnist':5.56,
'connecticut':5.56,
'consider':5.56,
'consumers':5.56,
'contents':5.56,
'dial':5.56,
'directly':5.56,
'discussed':5.56,
'electron':5.56,
'elle':5.56,
'era':5.56,
'evaluate':5.56,
'explanation':5.56,
'extends':5.56,
'fairfield':5.56,
'format':5.56,
'forming':5.56,
'golf':5.56,
'hampshire':5.56,
'his':5.56,
'inbox':5.56,
'indication':5.56,
'ink':5.56,
'innings':5.56,
'jay-z':5.56,
'jumped':5.56,
'kelly':5.56,
'lauren':5.56,
'leather':5.56,
'license':5.56,
'makes':5.56,
'manchester':5.56,
'marathon':5.56,
'matches':5.56,
'measured':5.56,
'method':5.56,
'mounted':5.56,
'nickel':5.56,
'on':5.56,
'opera':5.56,
'organizations':5.56,
'pan':5.56,
'passage':5.56,
'password':5.56,
'place':5.56,
'playa':5.56,
'presidency':5.56,
'product':5.56,
'promo':5.56,
'quarter':5.56,
'range':5.56,
'ranked':5.56,
'recent':5.56,
'red':5.56,
'regarding':5.56,
'remained':5.56,
'rolls':5.56,
'solo':5.56,
'stay':5.56,
'steppin':5.56,
'stepping':5.56,
'studying':5.56,
'substance':5.56,
'systematic':5.56,
'titles':5.56,
'tons':5.56,
'treated':5.56,
'turkish':5.56,
'type':5.56,
'varied':5.56,
'verbal':5.56,
'vida':5.56,
'vodka':5.56,
'voices':5.56,
'voltage':5.56,
'winding':5.56,
'wisconsin':5.56,
'woke':5.56,
'word':5.56,
'worker':5.56,
'working':5.56,
'yonkers':5.56,
'input':5.55,
'analyses':5.55,
'array':5.55,
'calculations':5.55,
'dixie':5.55,
'floss':5.55,
'molecular':5.55,
'pavement':5.55,
'tame':5.55,
'warriors':5.55,
'gospel':5.55,
'theological':5.55,
'depth':5.54,
'acquisitions':5.54,
'adam':5.54,
'apparently':5.54,
'attempts':5.54,
'attended':5.54,
'awwww':5.54,
'biological':5.54,
'bobby':5.54,
'box':5.54,
'bradley':5.54,
'brooklyn':5.54,
'brunswick':5.54,
'bud':5.54,
'buena':5.54,
'cardinal':5.54,
'catherine':5.54,
'chapel':5.54,
'chest':5.54,
'circulation':5.54,
'criteria':5.54,
'cuban':5.54,
'dame':5.54,
'daniel':5.54,
'dealing':5.54,
'device':5.54,
'direction':5.54,
'doctors':5.54,
'domain':5.54,
'dubai':5.54,
'ear':5.54,
'electrical':5.54,
'emily':5.54,
'every1':5.54,
'firmly':5.54,
'frame':5.54,
'gee':5.54,
'go':5.54,
'gusta':5.54,
'hablar':5.54,
'handle':5.54,
'him':5.54,
'holdings':5.54,
'journalist':5.54,
'lap':5.54,
'look':5.54,
'machinery':5.54,
'materials':5.54,
'mount':5.54,
'mysterious':5.54,
'nicholas':5.54,
'notes':5.54,
'ny':5.54,
'obviously':5.54,
'pamela':5.54,
'panel':5.54,
'particular':5.54,
'person':5.54,
'posted':5.54,
'ppl':5.54,
'prompted':5.54,
'properties':5.54,
'quarterback':5.54,
'rating':5.54,
'rearrange':5.54,
'reason':5.54,
'regions':5.54,
'reminder':5.54,
'rims':5.54,
'ross':5.54,
'round':5.54,
'say':5.54,
'signs':5.54,
'simpson':5.54,
'sources':5.54,
'sponsored':5.54,
'stephen':5.54,
'suggesting':5.54,
'suzanne':5.54,
'texas':5.54,
'thursdays':5.54,
'timeline':5.54,
'tools':5.54,
'trio':5.54,
'yale':5.54,
'abstract':5.53,
'accordance':5.53,
'fellas':5.53,
'lean':5.53,
'outward':5.53,
'particle':5.53,
'pipe':5.53,
'rely':5.53,
'sheet':5.53,
'sole':5.53,
'whatcha':5.53,
'arrow':5.52,
'blazing':5.52,
'wherever':5.52,
'aimed':5.52,
'alice':5.52,
'alicia':5.52,
'amendment':5.52,
'amy':5.52,
'appointment':5.52,
'archive':5.52,
'article':5.52,
'beth':5.52,
'beverly':5.52,
'beyonce':5.52,
'bottles':5.52,
'boxes':5.52,
'branch':5.52,
'carol':5.52,
'categories':5.52,
'class':5.52,
'coaches':5.52,
'compounds':5.52,
'could':5.52,
'cube':5.52,
'demonstrate':5.52,
'edition':5.52,
'employers':5.52,
'episodes':5.52,
'eva':5.52,
'exceptions':5.52,
'extension':5.52,
'filled':5.52,
'findings':5.52,
'graphic':5.52,
'headline':5.52,
'horizontal':5.52,
'jefferson':5.52,
'jets':5.52,
'jose':5.52,
'josh':5.52,
'journalism':5.52,
'monica':5.52,
'montgomery':5.52,
'nbc':5.52,
'north':5.52,
'outer':5.52,
'papi':5.52,
'parker':5.52,
'patrick':5.52,
'peas':5.52,
'plains':5.52,
'pm':5.52,
'positions':5.52,
'posting':5.52,
'ray':5.52,
'reaction':5.52,
'reference':5.52,
'reflected':5.52,
'remain':5.52,
'reserves':5.52,
'rockefeller':5.52,
'room':5.52,
'russian':5.52,
'ryan':5.52,
'sara':5.52,
'she\'s':5.52,
'subjective':5.52,
'suggest':5.52,
'summit':5.52,
'synagogue':5.52,
'taylor':5.52,
'throughout':5.52,
'tony':5.52,
'traded':5.52,
'trailer':5.52,
'twitters':5.52,
'u':5.52,
'url':5.52,
'usage':5.52,
'vet':5.52,
'vikings':5.52,
'whispers':5.52,
'beso':5.52,
'respiratory':5.52,
'fills':5.51,
'behaviors':5.51,
'breed':5.51,
'layin':5.51,
'maze':5.51,
'measurement':5.51,
'occurrence':5.51,
'priests':5.51,
'receptor':5.51,
'slope':5.51,
'tener':5.51,
'thong':5.51,
'account':5.5,
'adds':5.5,
'anglo':5.5,
'application':5.5,
'arm':5.5,
'atoms':5.5,
'austin':5.5,
'behavior':5.5,
'beyond':5.5,
'bloggers':5.5,
'bow':5.5,
'brief':5.5,
'buffalo':5.5,
'capacity':5.5,
'chairwoman':5.5,
'channel':5.5,
'charlotte':5.5,
'christine':5.5,
'clay':5.5,
'consumer':5.5,
'count':5.5,
'crews':5.5,
'david':5.5,
'democrats':5.5,
'doing':5.5,
'editions':5.5,
'effects':5.5,
'equivalent':5.5,
'eyed':5.5,
'faculty':5.5,
'feels':5.5,
'fellow':5.5,
'figure':5.5,
'finals':5.5,
'fm':5.5,
'footsteps':5.5,
'frequently':5.5,
'generation':5.5,
'genetic':5.5,
'glasses':5.5,
'halfway':5.5,
'handled':5.5,
'in':5.5,
'insure':5.5,
'investors':5.5,
'item':5.5,
'jack':5.5,
'jane':5.5,
'jones':5.5,
'leven':5.5,
'manifest':5.5,
'map':5.5,
'maria':5.5,
'melissa':5.5,
'minute':5.5,
'normally':5.5,
'pastor':5.5,
'patricia':5.5,
'pole':5.5,
'preface':5.5,
'prep':5.5,
'quiet':5.5,
'ran':5.5,
'rated':5.5,
'repertory':5.5,
'retailers':5.5,
'retain':5.5,
'rub':5.5,
'russia':5.5,
'settings':5.5,
'skin':5.5,
'sms':5.5,
'specifically':5.5,
'steven':5.5,
'stevie':5.5,
'texting':5.5,
'tie':5.5,
'transmission':5.5,
'unit':5.5,
'variation':5.5,
'vol':5.5,
'wanna':5.5,
'we\'re':5.5,
'wearing':5.5,
'westside':5.5,
'wild':5.5,
'womb':5.5,
'works':5.5,
'yankee':5.5,
'responsibilities':5.49,
'awaits':5.49,
'interface':5.49,
'mics':5.49,
'modified':5.49,
'remark':5.49,
'supervision':5.49,
'weave':5.49,
'flame':5.49,
'interpreted':5.49,
'11:00:00AM':5.48,
'acts':5.48,
'actual':5.48,
'adviser':5.48,
'advisers':5.48,
'ahhh':5.48,
'alabama':5.48,
'along':5.48,
'announced':5.48,
'approached':5.48,
'attempted':5.48,
'ballin':5.48,
'beta':5.48,
'bling':5.48,
'canon':5.48,
'cheap':5.48,
'church':5.48,
'cincinnati':5.48,
'column':5.48,
'compound':5.48,
'concept':5.48,
'consultant':5.48,
'convention':5.48,
'coupe':5.48,
'cumulative':5.48,
'demo':5.48,
'donald':5.48,
'elements':5.48,
'encountered':5.48,
'everytime':5.48,
'exception':5.48,
'export':5.48,
'extensive':5.48,
'external':5.48,
'fb':5.48,
'felt':5.48,
'guild':5.48,
'habit':5.48,
'he\'ll':5.48,
'here':5.48,
'highway':5.48,
'holmes':5.48,
'ikea':5.48,
'indicate':5.48,
'janet':5.48,
'joan':5.48,
'jump':5.48,
'kate':5.48,
'katherine':5.48,
'katie':5.48,
'katy':5.48,
'korean':5.48,
'laurie':5.48,
'led':5.48,
'lei':5.48,
'longest':5.48,
'luna':5.48,
'madison':5.48,
'mark':5.48,
'may':5.48,
'methods':5.48,
'mixed':5.48,
'motor':5.48,
'naval':5.48,
'nicole':5.48,
'nose':5.48,
'oklahoma':5.48,
'ole':5.48,
'operates':5.48,
'palm':5.48,
'particles':5.48,
'pepper':5.48,
'physics':5.48,
'picking':5.48,
'portion':5.48,
'post':5.48,
'powell':5.48,
'predicted':5.48,
'quoted':5.48,
'remote':5.48,
'requested':5.48,
'roller':5.48,
'route':5.48,
'run':5.48,
'sally':5.48,
'sell':5.48,
'server':5.48,
'sessions':5.48,
'shape':5.48,
'spread':5.48,
'square':5.48,
'stephanie':5.48,
'surfaces':5.48,
'surname':5.48,
'tampa':5.48,
'township':5.48,
'trek':5.48,
'tried':5.48,
'truck':5.48,
'tweet':5.48,
'user':5.48,
'users':5.48,
'utility':5.48,
'vamos':5.48,
'wells':5.48,
'ceiling':5.47,
'dwell':5.47,
'elaborate':5.47,
'grip':5.47,
'halls':5.47,
'loaded':5.47,
'metabolism':5.47,
'spinning':5.47,
'\#nowplaying':5.46,
'alex':5.46,
'appeared':5.46,
'arlington':5.46,
'blogger':5.46,
'bricks':5.46,
'cam':5.46,
'caption':5.46,
'carries':5.46,
'carroll':5.46,
'cavalry':5.46,
'challenged':5.46,
'chi':5.46,
'chronicle':5.46,
'coalition':5.46,
'colonies':5.46,
'competitive':5.46,
'conducted':5.46,
'consisted':5.46,
'contract':5.46,
'developers':5.46,
'diameter':5.46,
'directed':5.46,
'distributed':5.46,
'domestic':5.46,
'dozen':5.46,
'enough':5.46,
'equation':5.46,
'expectations':5.46,
'explain':5.46,
'followed':5.46,
'fox':5.46,
'further':5.46,
'gears':5.46,
'guts':5.46,
'helen':5.46,
'index':5.46,
'instructions':5.46,
'jeffrey':5.46,
'jerry':5.46,
'lafayette':5.46,
'laid':5.46,
'let\'s':5.46,
'linked':5.46,
'list':5.46,
'local':5.46,
'loop':5.46,
'manufacturers':5.46,
'math':5.46,
'matthew':5.46,
'meeting':5.46,
'megan':5.46,
'mornin':5.46,
'needed':5.46,
'object':5.46,
'organ':5.46,
'particularly':5.46,
'philly':5.46,
'process':5.46,
'projects':5.46,
'pulls':5.46,
'quote':5.46,
'rebecca':5.46,
'reform':5.46,
'religious':5.46,
'richmond':5.46,
'sandra':5.46,
'segment':5.46,
'sent':5.46,
'series':5.46,
'serve':5.46,
'sharon':5.46,
'shipping':5.46,
'shoulder':5.46,
'stacks':5.46,
'statements':5.46,
'surrounding':5.46,
'therapy':5.46,
'thy':5.46,
'twitter':5.46,
'uno':5.46,
'vincent':5.46,
'watched':5.46,
'wide':5.46,
'william':5.46,
'dome':5.46,
'filter':5.46,
'notions':5.46,
'unfold':5.46,
'administered':5.45,
'furthermore':5.45,
'situations':5.45,
'sociology':5.45,
'subsequent':5.45,
'sway':5.45,
'wrists':5.45,
'drawers':5.45,
'undoubtedly':5.45,
'2nite':5.44,
'amp':5.44,
'anita':5.44,
'area':5.44,
'arthur':5.44,
'assigned':5.44,
'aug':5.44,
'axis':5.44,
'battery':5.44,
'beside':5.44,
'bob':5.44,
'brown':5.44,
'calculation':5.44,
'carolina':5.44,
'carried':5.44,
'centres':5.44,
'chair':5.44,
'charter':5.44,
'columbia':5.44,
'company':5.44,
'consist':5.44,
'cope':5.44,
'counter':5.44,
'curtains':5.44,
'deck':5.44,
'den':5.44,
'doors':5.44,
'earl':5.44,
'editors':5.44,
'evelyn':5.44,
'fisher':5.44,
'flow':5.44,
'georgia':5.44,
'i\'d':5.44,
'imports':5.44,
'jay':5.44,
'joel':5.44,
'jordan':5.44,
'kong':5.44,
'lab':5.44,
'lateral':5.44,
'mass':5.44,
'meant':5.44,
'metal':5.44,
'mister':5.44,
'montana':5.44,
'moore':5.44,
'noche':5.44,
'nov':5.44,
'operating':5.44,
'overall':5.44,
'passes':5.44,
'passing':5.44,
'paul':5.44,
'phrase':5.44,
'possess':5.44,
'quantitative':5.44,
'recently':5.44,
'refers':5.44,
'represent':5.44,
'saw':5.44,
'search':5.44,
'sept':5.44,
'seventy':5.44,
'signal':5.44,
'solomon':5.44,
'stations':5.44,
'storage':5.44,
'street':5.44,
'subject':5.44,
'submit':5.44,
'surround':5.44,
'ten':5.44,
'tenants':5.44,
'thurs':5.44,
'tone':5.44,
'tongue':5.44,
'trunk':5.44,
'tweeted':5.44,
'versions':5.44,
'wagner':5.44,
'wax':5.44,
'wilson':5.44,
'worked':5.44,
'yen':5.44,
'zion':5.44,
'measurements':5.44,
'reactions':5.44,
'adjacent':5.43,
'bailar':5.43,
'kara':5.43,
'modes':5.43,
'proposition':5.43,
'remainder':5.43,
'steam':5.43,
'10:00:00AM':5.42,
'again':5.42,
'also':5.42,
'ashley':5.42,
'aye':5.42,
'background':5.42,
'bailey':5.42,
'barrel':5.42,
'bedford':5.42,
'booth':5.42,
'bowl':5.42,
'businessman':5.42,
'calls':5.42,
'came':5.42,
'carolyn':5.42,
'category':5.42,
'centre':5.42,
'chip':5.42,
'com':5.42,
'comprehensive':5.42,
'compromise':5.42,
'conductor':5.42,
'course':5.42,
'crow':5.42,
'dennis':5.42,
'derived':5.42,
'duration':5.42,
'enzyme':5.42,
'ever':5.42,
'financial':5.42,
'floors':5.42,
'frances':5.42,
'gene':5.42,
'going':5.42,
'gotten':5.42,
'he':5.42,
'himself':5.42,
'hockey':5.42,
'hopkins':5.42,
'initial':5.42,
'inner':5.42,
'instance':5.42,
'jeanne':5.42,
'jeremy':5.42,
'jr':5.42,
'julia':5.42,
'julie':5.42,
'listenin':5.42,
'livingston':5.42,
'memphis':5.42,
'mentioned':5.42,
'mercury':5.42,
'mini':5.42,
'monthly':5.42,
'nine':5.42,
'note':5.42,
'nowadays':5.42,
'om':5.42,
'oprah':5.42,
'pasa':5.42,
'penn':5.42,
'peter':5.42,
'point':5.42,
'polls':5.42,
'presentation':5.42,
'primarily':5.42,
'ranks':5.42,
'references':5.42,
'resulting':5.42,
'riley':5.42,
'rolled':5.42,
'roof':5.42,
'sam':5.42,
'sean':5.42,
'secretary':5.42,
'select':5.42,
'signals':5.42,
'snl':5.42,
'spencer':5.42,
'state\'s':5.42,
'subjects':5.42,
'tables':5.42,
'tell':5.42,
'terry':5.42,
'theory':5.42,
'tom':5.42,
'topic':5.42,
'toss':5.42,
'treasury\'s':5.42,
'tweets':5.42,
'yorkers':5.42,
'you\'ll':5.42,
'calculated':5.42,
'configuration':5.42,
'inhabitants':5.42,
'statute':5.42,
'interlude':5.41,
'clerk':5.41,
'constitutes':5.41,
'cylinder':5.41,
'knocks':5.41,
'ratio':5.41,
'tissue':5.41,
'variables':5.41,
'vector':5.41,
'vols':5.41,
'whassup':5.41,
'width':5.41,
'absolute':5.4,
'ah':5.4,
'alison':5.4,
'anne':5.4,
'arabia':5.4,
'arkansas':5.4,
'boldface':5.4,
'cast':5.4,
'chamber':5.4,
'china':5.4,
'claimed':5.4,
'conquest':5.4,
'consecutive':5.4,
'daily':5.4,
'dana':5.4,
'definitions':5.4,
'distribution':5.4,
'dna':5.4,
'document':5.4,
'each':5.4,
'earlier':5.4,
'embassy':5.4,
'esp':5.4,
'estimate':5.4,
'fam':5.4,
'figured':5.4,
'fuel':5.4,
'gulf':5.4,
'headquarters':5.4,
'healthcare':5.4,
'hee':5.4,
'holds':5.4,
'inside':5.4,
'intent':5.4,
'jan':5.4,
'johnson':5.4,
'joseph':5.4,
'lah':5.4,
'lawrence':5.4,
'lick':5.4,
'lou':5.4,
'lung':5.4,
'main':5.4,
'malcolm':5.4,
'margaret':5.4,
'matter':5.4,
'mexican':5.4,
'ministers':5.4,
'mixtape':5.4,
'nancy':5.4,
'oakland':5.4,
'obedience':5.4,
'one':5.4,
'paula':5.4,
'picks':5.4,
'processes':5.4,
'putting':5.4,
'ranging':5.4,
'reminds':5.4,
'reorganization':5.4,
'represents':5.4,
'rien':5.4,
'riverdale':5.4,
'sarah':5.4,
'seen':5.4,
'statistical':5.4,
'stayed':5.4,
'stomach':5.4,
'string':5.4,
'sushi':5.4,
'tap':5.4,
'testament':5.4,
'thee':5.4,
'they\'ll':5.4,
'transfer':5.4,
'two':5.4,
'xxx':5.4,
'origins':5.4,
'actin':5.39,
'cielo':5.39,
'defence':5.39,
'dub':5.39,
'empirical':5.39,
'explicitly':5.39,
'jive':5.39,
'reprinted':5.39,
'spins':5.39,
'\#letsbehonest':5.38,
'ahh':5.38,
'am':5.38,
'announcement':5.38,
'arms':5.38,
'baltimore':5.38,
'basis':5.38,
'butler':5.38,
'camino':5.38,
'carved':5.38,
'clark':5.38,
'coefficient':5.38,
'comp':5.38,
'control':5.38,
'copy':5.38,
'core':5.38,
'curriculum':5.38,
'dec':5.38,
'deemed':5.38,
'detective':5.38,
'different':5.38,
'doctrine':5.38,
'door':5.38,
'files':5.38,
'following':5.38,
'grams':5.38,
'hp':5.38,
'hudson':5.38,
'i\'ll':5.38,
'industry':5.38,
'items':5.38,
'jamie':5.38,
'jesse':5.38,
'latin':5.38,
'let':5.38,
'lite':5.38,
'lookin':5.38,
'machine':5.38,
'manner':5.38,
'mit':5.38,
'nelson':5.38,
'nitrogen':5.38,
'nucleus':5.38,
'official':5.38,
'overtime':5.38,
'personnel':5.38,
'pitching':5.38,
'projected':5.38,
'province':5.38,
'rope':5.38,
'said':5.38,
'second':5.38,
'securities':5.38,
'send':5.38,
'sensitivity':5.38,
'shall':5.38,
'soldiers':5.38,
'standards':5.38,
'statistically':5.38,
'steps':5.38,
'steve':5.38,
'tan':5.38,
'technical':5.38,
'text':5.38,
'thread':5.38,
'tierra':5.38,
'timbaland':5.38,
'tricks':5.38,
'tunnel':5.38,
'twelve':5.38,
'wants':5.38,
'wednesday':5.38,
'whew':5.38,
'wordpress':5.38,
'would':5.38,
'yards':5.38,
'year':5.38,
'yesterday\'s':5.38,
'comparison':5.37,
'ella':5.37,
'givin':5.37,
'hem':5.37,
'parish':5.37,
'silently':5.37,
'sits':5.37,
'whispering':5.37,
'illusions':5.36,
'asked':5.36,
'bee':5.36,
'briefing':5.36,
'britney':5.36,
'capitol':5.36,
'caps':5.36,
'claire':5.36,
'clip':5.36,
'clips':5.36,
'colonial':5.36,
'constitute':5.36,
'contracts':5.36,
'covering':5.36,
'customs':5.36,
'dash':5.36,
'delta':5.36,
'dishes':5.36,
'economic':5.36,
'edit':5.36,
'eileen':5.36,
'establishment':5.36,
'finger':5.36,
'georgetown':5.36,
'gloria':5.36,
'greene':5.36,
'gud':5.36,
'hall':5.36,
'hay':5.36,
'heard':5.36,
'jimmy':5.36,
'linear':5.36,
'liquor':5.36,
'listing':5.36,
'lmaoo':5.36,
'mason':5.36,
'miller':5.36,
'milwaukee':5.36,
'monde':5.36,
'mouse':5.36,
'moving':5.36,
'msn':5.36,
'nba':5.36,
'nude':5.36,
'nuestro':5.36,
'overview':5.36,
'oz':5.36,
'pattern':5.36,
'port':5.36,
'possession':5.36,
'press':5.36,
'principal':5.36,
'pronto':5.36,
'quiero':5.36,
'rabbi':5.36,
'reposing':5.36,
'russell':5.36,
'same':5.36,
'si':5.36,
'sim':5.36,
'sit':5.36,
'sold':5.36,
'sounded':5.36,
'staff':5.36,
'standing':5.36,
'stocks':5.36,
'structure':5.36,
'stuart':5.36,
'subsequently':5.36,
'sympathy':5.36,
'taiwan':5.36,
'target':5.36,
'teeth':5.36,
'trenton':5.36,
'tres':5.36,
'trucks':5.36,
'tuesdays':5.36,
'tummy':5.36,
'tweeting':5.36,
'verb':5.36,
'vest':5.36,
'wakes':5.36,
'walter':5.36,
'we\'d':5.36,
'westchester':5.36,
'wi':5.36,
'wright':5.36,
'you\'d':5.36,
'yugoslavia':5.36,
'emperor':5.35,
'thesis':5.35,
'chevy':5.35,
'della':5.35,
'finite':5.35,
'loot':5.35,
'motive':5.35,
'define':5.34,
'\#news':5.34,
'adams':5.34,
'advised':5.34,
'andrea':5.34,
'anonymity':5.34,
'anthony':5.34,
'anything':5.34,
'anywhere':5.34,
'arc':5.34,
'areas':5.34,
'ay':5.34,
'backs':5.34,
'bros':5.34,
'campaign':5.34,
'candidate':5.34,
'carter':5.34,
'checked':5.34,
'classified':5.34,
'colts':5.34,
'comparable':5.34,
'crossing':5.34,
'currently':5.34,
'denver':5.34,
'ding':5.34,
'doctor':5.34,
'drank':5.34,
'editorial':5.34,
'flick':5.34,
'fur':5.34,
'gear':5.34,
'geek':5.34,
'german':5.34,
'giant':5.34,
'giants':5.34,
'hampton':5.34,
'harold':5.34,
'ily':5.34,
'iron':5.34,
'karen':5.34,
'korea':5.34,
'liebe':5.34,
'lillian':5.34,
'log':5.34,
'manufacturer':5.34,
'massive':5.34,
'maureen':5.34,
'mc':5.34,
'middle':5.34,
'moderate':5.34,
'nog':5.34,
'noticed':5.34,
'occurred':5.34,
'ohhhh':5.34,
'orleans':5.34,
'ounce':5.34,
'pack':5.34,
'percent':5.34,
'phil':5.34,
'physician':5.34,
'rate':5.34,
'regional':5.34,
'request':5.34,
'revolution':5.34,
'rihanna':5.34,
'roosevelt':5.34,
'session':5.34,
'six':5.34,
'sullivan':5.34,
'surgeon':5.34,
'susan':5.34,
'sylvia':5.34,
'then':5.34,
'they\'re':5.34,
'thinkin':5.34,
'tmrw':5.34,
'transmitted':5.34,
'tube':5.34,
'typing':5.34,
'upon':5.34,
'walmart':5.34,
'whitman':5.34,
'whitney':5.34,
'wider':5.34,
'within':5.34,
'yo':5.34,
'blink':5.33,
'noches':5.33,
'threshold':5.33,
'bringin':5.33,
'tutti':5.33,
'verdad':5.33,
'abraham':5.32,
'alter':5.32,
'andre':5.32,
'beep':5.32,
'bench':5.32,
'bucket':5.32,
'calif':5.32,
'chin':5.32,
'commerce':5.32,
'compare':5.32,
'cover':5.32,
'currents':5.32,
'deepest':5.32,
'dorothy':5.32,
'editorials':5.32,
'emeritus':5.32,
'endless':5.32,
'estimates':5.32,
'evaluation':5.32,
'firm':5.32,
'francis':5.32,
'general':5.32,
'gregory':5.32,
'hoffman':5.32,
'hour':5.32,
'identified':5.32,
'indicates':5.32,
'jacqueline':5.32,
'joshua':5.32,
'kristen':5.32,
'label':5.32,
'literally':5.32,
'louise':5.32,
'mas':5.32,
'measure':5.32,
'medium':5.32,
'mention':5.32,
'michigan':5.32,
'names':5.32,
'nassau':5.32,
'negotiations':5.32,
'nineteenth':5.32,
'pa':5.32,
'palmer':5.32,
'partly':5.32,
'peeps':5.32,
'plz':5.32,
'posts':5.32,
'presumably':5.32,
'quite':5.32,
'rebounds':5.32,
'remind':5.32,
'reserve':5.32,
'review':5.32,
'rite':5.32,
'rye':5.32,
'selena':5.32,
'site':5.32,
'skip':5.32,
'someone\'s':5.32,
'speech':5.32,
'step':5.32,
'subway':5.32,
'surface':5.32,
'table':5.32,
'taking':5.32,
'tells':5.32,
'ticket':5.32,
'ting':5.32,
'tribes':5.32,
'turning':5.32,
'two-year':5.32,
'types':5.32,
'urself':5.32,
'vancouver':5.32,
'varies':5.32,
'yield':5.32,
'zone':5.32,
'preceding':5.31,
'affecting':5.31,
'alles':5.31,
'bop':5.31,
'consume':5.31,
'discipline':5.31,
'disposition':5.31,
'gypsy':5.31,
'heed':5.31,
'ion':5.31,
'shelf':5.31,
'stash':5.31,
'varying':5.31,
'vivir':5.31,
'\#fact':5.3,
'*estimated':5.3,
'actually':5.3,
'aire':5.3,
'ancora':5.3,
'atlanta':5.3,
'barnes':5.3,
'bat':5.3,
'biblical':5.3,
'bishop':5.3,
'bonnie':5.3,
'boundary':5.3,
'brad':5.3,
'brian':5.3,
'bring':5.3,
'calendar':5.3,
'carnegie':5.3,
'catholic':5.3,
'center':5.3,
'chairman':5.3,
'chrysler':5.3,
'circuits':5.3,
'colin':5.3,
'constantly':5.3,
'cornell':5.3,
'correspondent':5.3,
'counts':5.3,
'county':5.3,
'creature':5.3,
'dave':5.3,
'drake':5.3,
'editing':5.3,
'eight':5.3,
'elevator':5.3,
'glen':5.3,
'irene':5.3,
'jk':5.3,
'junior':5.3,
'km/h':5.3,
'lee':5.3,
'lesson':5.3,
'levels':5.3,
'lexington':5.3,
'md':5.3,
'medicare':5.3,
'mic':5.3,
'mike':5.3,
'miles':5.3,
'miriam':5.3,
'mph':5.3,
'murphy':5.3,
'neck':5.3,
'nova':5.3,
'number':5.3,
'one\'s':5.3,
'patch':5.3,
'pay':5.3,
'peggy':5.3,
'placed':5.3,
'pounds':5.3,
'president\'s':5.3,
'profile':5.3,
'quiz':5.3,
'rail':5.3,
'randy':5.3,
'reviews':5.3,
'ritual':5.3,
'robert':5.3,
'roberts':5.3,
'roger':5.3,
'samuel':5.3,
'scales':5.3,
'sec':5.3,
'seth':5.3,
'seymour':5.3,
'silly':5.3,
'singular':5.3,
'somebody':5.3,
'someone':5.3,
'spray':5.3,
'suit':5.3,
'system':5.3,
'tactics':5.3,
'telling':5.3,
'tend':5.3,
'third':5.3,
'transition':5.3,
'trump':5.3,
'via':5.3,
'vids':5.3,
'visitation':5.3,
'washing':5.3,
'ways':5.3,
'weekly':5.3,
'windy':5.3,
'year\'s':5.3,
'you\'re':5.3,
'hitherto':5.29,
'incorporated':5.29,
'prescribed':5.29,
'assumption':5.29,
'cama':5.29,
'clergy':5.29,
'heel':5.29,
'playas':5.29,
'rodeo':5.29,
'shakin':5.29,
'transferred':5.29,
'2-bath':5.28,
'alert':5.28,
'already':5.28,
'annual':5.28,
'assessment':5.28,
'beef':5.28,
'behalf':5.28,
'borough':5.28,
'code':5.28,
'comin':5.28,
'congregation':5.28,
'copies':5.28,
'craig':5.28,
'cuore':5.28,
'dean':5.28,
'declared':5.28,
'defended':5.28,
'diplomat':5.28,
'dot':5.28,
'empire':5.28,
'estar':5.28,
'esther':5.28,
'etsy':5.28,
'eventually':5.28,
'extract':5.28,
'feelin':5.28,
'follower':5.28,
'form':5.28,
'gates':5.28,
'handling':5.28,
'hannah':5.28,
'happen':5.28,
'harriet':5.28,
'harvey':5.28,
'held':5.28,
'holla':5.28,
'inches':5.28,
'institute':5.28,
'interviewed':5.28,
'jacobs':5.28,
'james':5.28,
'l':5.28,
'length':5.28,
'mag':5.28,
'martha':5.28,
'meanwhile':5.28,
'minutes':5.28,
'mode':5.28,
'morton':5.28,
'nonprofit':5.28,
'ora':5.28,
'packed':5.28,
'packing':5.28,
'pandora':5.28,
'parameter':5.28,
'posse':5.28,
'preacher':5.28,
'representatives':5.28,
'rewind':5.28,
'says':5.28,
'scheduled':5.28,
'secrets':5.28,
'section':5.28,
'serum':5.28,
'sheila':5.28,
'someday':5.28,
'sometimes':5.28,
'somewhere':5.28,
'sort':5.28,
'stands':5.28,
'state':5.28,
'stats':5.28,
'stays':5.28,
'temporal':5.28,
'that\'s':5.28,
'theodore':5.28,
'theology':5.28,
'tracks':5.28,
'tyler':5.28,
'unions':5.28,
'version':5.28,
'wandering':5.28,
'years':5.28,
'york\'s':5.28,
'specified':5.28,
'leben':5.27,
'anyhow':5.27,
'bumpin':5.27,
'governed':5.27,
'holdin':5.27,
'implies':5.27,
'moet':5.27,
'quieres':5.27,
'revised':5.27,
'semi':5.27,
'africa':5.26,
'agency':5.26,
'asking':5.26,
'based':5.26,
'berlin':5.26,
'bid':5.26,
'boyz':5.26,
'carrier':5.26,
'carrying':5.26,
'clinton\'s':5.26,
'commander':5.26,
'companies':5.26,
'conan':5.26,
'conference':5.26,
'converted':5.26,
'counsel':5.26,
'cynthia':5.26,
'dale':5.26,
'department':5.26,
'desk':5.26,
'detected':5.26,
'dias':5.26,
'digging':5.26,
'directions':5.26,
'doris':5.26,
'dormir':5.26,
'dramatic':5.26,
'drove':5.26,
'edward':5.26,
'elliott':5.26,
'facility':5.26,
'facing':5.26,
'fare':5.26,
'floyd':5.26,
'foto':5.26,
'frog':5.26,
'george':5.26,
'glenn':5.26,
'goes':5.26,
'ground':5.26,
'guidelines':5.26,
'hispanic':5.26,
'hmmmm':5.26,
'houston':5.26,
'jake':5.26,
'jim':5.26,
'justin':5.26,
'kay':5.26,
'lines':5.26,
'mainly':5.26,
'marcus':5.26,
'marshall':5.26,
'martin':5.26,
'matt':5.26,
'mayor':5.26,
'mr':5.26,
'mundo':5.26,
'nc':5.26,
'nearly':5.26,
'nina':5.26,
'papers':5.26,
'perry':5.26,
'philip':5.26,
'piece':5.26,
'plot':5.26,
'pouring':5.26,
'preliminary':5.26,
'print':5.26,
'prudential':5.26,
'qual':5.26,
'reasons':5.26,
'reed':5.26,
'register':5.26,
'richard':5.26,
'robinson':5.26,
'roslyn':5.26,
'semester':5.26,
'sergeant':5.26,
'shift':5.26,
'shirley':5.26,
'siempre':5.26,
'sir':5.26,
'spot':5.26,
'stated':5.26,
'statement':5.26,
'tool':5.26,
'uniform':5.26,
'units':5.26,
'walls':5.26,
'week\'s':5.26,
'lend':5.26,
'hangin':5.25,
'borne':5.24,
'differentiation':5.24,
'intermediate':5.24,
'motives':5.24,
'\#followfriday':5.24,
'a':5.24,
'abc':5.24,
'asks':5.24,
'beijing':5.24,
'bet':5.24,
'boeing':5.24,
'chart':5.24,
'depend':5.24,
'diplomats':5.24,
'doin':5.24,
'donna':5.24,
'douglas':5.24,
'drivers':5.24,
'edited':5.24,
'elaine':5.24,
'ellis':5.24,
'encounter':5.24,
'evans':5.24,
'faced':5.24,
'fifth':5.24,
'fin':5.24,
'five':5.24,
'franklin':5.24,
'garage':5.24,
'generally':5.24,
'goin':5.24,
'harry':5.24,
'industries':5.24,
'insurance':5.24,
'iowa':5.24,
'irving':5.24,
'jajaja':5.24,
'kirk':5.24,
'lieutenant':5.24,
'longtime':5.24,
'matters':5.24,
'mid':5.24,
'minnesota':5.24,
'morgan':5.24,
'namely':5.24,
'nathan':5.24,
'oliver':5.24,
'parliamentary':5.24,
'partially':5.24,
'parts':5.24,
'persian':5.24,
'pon':5.24,
'poppin':5.24,
'publicly':5.24,
'returns':5.24,
'ringing':5.24,
'rookie':5.24,
'salomon':5.24,
'sat':5.24,
'seem':5.24,
'sf':5.24,
'should':5.24,
'since':5.24,
'socialist':5.24,
'sorts':5.24,
'spending':5.24,
'stanley':5.24,
'substances':5.24,
'there\'s':5.24,
'ties':5.24,
'ton':5.24,
'toujours':5.24,
'turned':5.24,
'txt':5.24,
'vessels':5.24,
'veux':5.24,
'way':5.24,
'wee':5.24,
'woah':5.24,
'work':5.24,
'fraction':5.23,
'depths':5.22,
'destino':5.22,
'nelly':5.22,
'rug':5.22,
'shed':5.22,
'18th':5.22,
'adjustment':5.22,
'afterward':5.22,
'ali':5.22,
'and':5.22,
'anderson':5.22,
'andrew':5.22,
'any':5.22,
'artery':5.22,
'as':5.22,
'baila':5.22,
'barbara':5.22,
'bernstein':5.22,
'bio':5.22,
'bits':5.22,
'briefs':5.22,
'cause':5.22,
'charles':5.22,
'chris':5.22,
'como':5.22,
'counties':5.22,
'counting':5.22,
'dc':5.22,
'defend':5.22,
'defending':5.22,
'dems':5.22,
'dexter':5.22,
'does':5.22,
'drama':5.22,
'excess':5.22,
'file':5.22,
'for':5.22,
'fordham':5.22,
'hartford':5.22,
'hours':5.22,
'immigrants':5.22,
'joe':5.22,
'kim':5.22,
'knicks':5.22,
'lambert':5.22,
'lane':5.22,
'lcd':5.22,
'lg':5.22,
'lois':5.22,
'mano':5.22,
'mia':5.22,
'mill':5.22,
'mondo':5.22,
'motors':5.22,
'nets':5.22,
'northern':5.22,
'officer':5.22,
'ohio':5.22,
'order':5.22,
'others':5.22,
'palabras':5.22,
'psychological':5.22,
'pump':5.22,
'real-estate':5.22,
'ridge':5.22,
'seems':5.22,
'sentence':5.22,
'suffolk':5.22,
'swallow':5.22,
'systems':5.22,
'tal':5.22,
'ted':5.22,
'thru':5.22,
'till':5.22,
'tim':5.22,
'tissues':5.22,
'too':5.22,
'trance':5.22,
'trick':5.22,
'typical':5.22,
'undertaken':5.22,
'usual':5.22,
'veins':5.22,
'whoa':5.22,
'wrist':5.22,
'ya':5.22,
'yankees':5.22,
'bibliography':5.21,
'masses':5.21,
'mente':5.21,
'norms':5.21,
'twist':5.21,
'criterion':5.2,
'eastside':5.2,
'mio':5.2,
'node':5.2,
'nombre':5.2,
'repeats':5.2,
'thereafter':5.2,
'agency\'s':5.2,
'alcohol':5.2,
'another':5.2,
'app':5.2,
'ask':5.2,
'berkeley':5.2,
'bonds':5.2,
'briefly':5.2,
'cab':5.2,
'carry':5.2,
'checking':5.2,
'continued':5.2,
'cunningham':5.2,
'dallas':5.2,
'dare':5.2,
'decade':5.2,
'dia':5.2,
'donde':5.2,
'during':5.2,
'economist':5.2,
'four':5.2,
'goldberg':5.2,
'gurl':5.2,
'happens':5.2,
'hebrew':5.2,
'immigration':5.2,
'inch':5.2,
'initially':5.2,
'intended':5.2,
'internal':5.2,
'itself':5.2,
'jaw':5.2,
'jeff':5.2,
'jersey':5.2,
'jetzt':5.2,
'john\'s':5.2,
'journalists':5.2,
'kevin':5.2,
'klein':5.2,
'knocking':5.2,
'lightning':5.2,
'lil':5.2,
'linger':5.2,
'loads':5.2,
'lobby':5.2,
'marketing':5.2,
'maurice':5.2,
'mayor\'s':5.2,
'medieval':5.2,
'mejor':5.2,
'moreover':5.2,
'necessity':5.2,
'negotiating':5.2,
'objects':5.2,
'pattinson':5.2,
'peel':5.2,
'percentage':5.2,
'physicians':5.2,
'pitcher':5.2,
'poco':5.2,
'retiring':5.2,
'return':5.2,
'retweeting':5.2,
'rick':5.2,
'rochester':5.2,
'rodriguez':5.2,
'rosen':5.2,
'russians':5.2,
'rutgers':5.2,
'secondary':5.2,
'sections':5.2,
'shes':5.2,
'slang':5.2,
'snap':5.2,
'tape':5.2,
'tighter':5.2,
'tires':5.2,
'turn':5.2,
'turns':5.2,
'van':5.2,
'viento':5.2,
'vuelve':5.2,
'warner':5.2,
'williams':5.2,
'yi':5.2,
'lotta':5.19,
'amar':5.19,
'dogg':5.19,
'dominant':5.19,
'retained':5.19,
'searched':5.19,
'turnin':5.19,
'kickin':5.18,
'ph':5.18,
'squad':5.18,
'tasks':5.18,
'duro':5.18,
'advocate':5.18,
'ahora':5.18,
'allan':5.18,
'back':5.18,
'barney':5.18,
'barry':5.18,
'basement':5.18,
'blowing':5.18,
'boards':5.18,
'bones':5.18,
'brick':5.18,
'candidates':5.18,
'cape':5.18,
'cha':5.18,
'chancellor':5.18,
'chap':5.18,
'china\'s':5.18,
'claim':5.18,
'classification':5.18,
'closet':5.18,
'cnn':5.18,
'collar':5.18,
'context':5.18,
'crawling':5.18,
'deborah':5.18,
'defense':5.18,
'democrat':5.18,
'election':5.18,
'etc':5.18,
'existing':5.18,
'from':5.18,
'gate':5.18,
'governor\'s':5.18,
'hardcore':5.18,
'has':5.18,
'hasta':5.18,
'horn':5.18,
'imperial':5.18,
'is':5.18,
'jacob':5.18,
'joint':5.18,
'jonathan':5.18,
'judith':5.18,
'kita':5.18,
'knees':5.18,
'legal':5.18,
'leonard':5.18,
'leslie':5.18,
'letting':5.18,
'lloyd':5.18,
'longer':5.18,
'lynn':5.18,
'minister':5.18,
'mon':5.18,
'monitor':5.18,
'month':5.18,
'mt':5.18,
'muy':5.18,
'ninth':5.18,
'notion':5.18,
'o\'connor':5.18,
'ore':5.18,
'pac':5.18,
'penis':5.18,
'pete':5.18,
'phyllis':5.18,
'plug':5.18,
'pour':5.18,
'public':5.18,
'ra':5.18,
'render':5.18,
'reporters':5.18,
'retreat':5.18,
'returned':5.18,
'reuters':5.18,
'ritmo':5.18,
'roar':5.18,
'sera':5.18,
'shaw':5.18,
'simon':5.18,
'slick':5.18,
'sox':5.18,
'stepped':5.18,
'stuffed':5.18,
'take':5.18,
'urge':5.18,
'woh':5.18,
'yah':5.18,
'fuse':5.17,
'capitalism':5.16,
'doet':5.16,
'examine':5.16,
'laced':5.16,
'lado':5.16,
'spine':5.16,
'zeit':5.16,
'census':5.16,
'\#tinychat':5.16,
'14th':5.16,
'81st':5.16,
'about':5.16,
'after-tax':5.16,
'apartments':5.16,
'are':5.16,
'ballot':5.16,
'barometer':5.16,
'basic':5.16,
'basin':5.16,
'betty':5.16,
'chain':5.16,
'cooper':5.16,
'cuomo':5.16,
'cyrus':5.16,
'depot':5.16,
'diane':5.16,
'diddy':5.16,
'dios':5.16,
'dos':5.16,
'downstairs':5.16,
'ds':5.16,
'ed':5.16,
'effect':5.16,
'ellen':5.16,
'feb':5.16,
'floor':5.16,
'fuego':5.16,
'gordon':5.16,
'greg':5.16,
'hari':5.16,
'hype':5.16,
'lang':5.16,
'leon':5.16,
'locker':5.16,
'lt':5.16,
'mil':5.16,
'mira':5.16,
'months':5.16,
'murray':5.16,
'nfl':5.16,
'notice':5.16,
'occur':5.16,
'ones':5.16,
'permission':5.16,
'platform':5.16,
'pointing':5.16,
'population':5.16,
'prevent':5.16,
'prolonged':5.16,
'react':5.16,
'remaining':5.16,
'reporter':5.16,
'rosenberg':5.16,
'sabes':5.16,
'she\'ll':5.16,
'staten':5.16,
'station':5.16,
'stein':5.16,
'such':5.16,
'suga':5.16,
'sweep':5.16,
'tendency':5.16,
'tested':5.16,
'their':5.16,
'thermal':5.16,
'troops':5.16,
'turner':5.16,
'utah':5.16,
'verizon':5.16,
'viene':5.16,
'vou':5.16,
'wears':5.16,
'whereby':5.16,
'ions':5.15,
'ing':5.15,
'posterior':5.15,
'anterior':5.14,
'bearing':5.14,
'complexity':5.14,
'copyright':5.14,
'haffi':5.14,
'lui':5.14,
'melting':5.14,
'10th':5.14,
'02:00:00PM':5.14,
'a1':5.14,
'adjusted':5.14,
'ann':5.14,
'antonio':5.14,
'aw':5.14,
'baller':5.14,
'ben':5.14,
'besides':5.14,
'bruce':5.14,
'calle':5.14,
'calor':5.14,
'cohen':5.14,
'conduct':5.14,
'cosa':5.14,
'district':5.14,
'eddie':5.14,
'endlessly':5.14,
'englewood':5.14,
'estoy':5.14,
'factors':5.14,
'farther':5.14,
'firms':5.14,
'fyi':5.14,
'gail':5.14,
'garcia':5.14,
'gente':5.14,
'governor':5.14,
'greenberg':5.14,
'harrison':5.14,
'havin':5.14,
'henry':5.14,
'hmmm':5.14,
'hypnotized':5.14,
'israelis':5.14,
'it\'ll':5.14,
'keith':5.14,
'knw':5.14,
'larry':5.14,
'laying':5.14,
'lesbian':5.14,
'louis':5.14,
'lovato':5.14,
'mets':5.14,
'mitchell':5.14,
'mu':5.14,
'onto':5.14,
'operated':5.14,
'pad':5.14,
'pittsburgh':5.14,
'poi':5.14,
'pre':5.14,
'puerto':5.14,
'regardless':5.14,
'region':5.14,
'rendered':5.14,
'repeat':5.14,
'retired':5.14,
'roberta':5.14,
'roy':5.14,
'seemed':5.14,
'shake':5.14,
'silence':5.14,
'somehow':5.14,
'soooo':5.14,
'stem':5.14,
'still':5.14,
'subsidies':5.14,
'supposed':5.14,
'tak':5.14,
'thou':5.14,
'thus':5.14,
'toes':5.14,
'track':5.14,
'verte':5.14,
'volver':5.14,
'weil':5.14,
'wet':5.14,
'y\'all':5.14,
'yearning':5.14,
'jar':5.12,
'callin':5.12,
'hierarchy':5.12,
'latter':5.12,
'mirada':5.12,
'pum':5.12,
'territories':5.12,
'\#fb':5.12,
'1-bath':5.12,
'9th':5.12,
'#NAME?':5.12,
'anata':5.12,
'ankle':5.12,
'anyway':5.12,
'anyways':5.12,
'aww':5.12,
'backed':5.12,
'bare':5.12,
'bernard':5.12,
'boom':5.12,
'bulk':5.12,
'c\'mon':5.12,
'c-after':5.12,
'c/o':5.12,
'cell':5.12,
'collins':5.12,
'comer':5.12,
'committee':5.12,
'contained':5.12,
'cops':5.12,
'coro':5.12,
'creo':5.12,
'crush':5.12,
'debating':5.12,
'deja':5.12,
'del':5.12,
'digo':5.12,
'duke':5.12,
'eleanor':5.12,
'extreme':5.12,
'foster':5.12,
'here\'s':5.12,
'hillary':5.12,
'jah':5.12,
'jason':5.12,
'jerusalem':5.12,
'juga':5.12,
'jurisdiction':5.12,
'kalo':5.12,
'kansas':5.12,
'ken':5.12,
'meine':5.12,
'ncaa':5.12,
'nyt':5.12,
'office':5.12,
'pas':5.12,
'policies':5.12,
'rear':5.12,
'reported':5.12,
'reporting':5.12,
'retweet':5.12,
'rounds':5.12,
'sais':5.12,
'shadows':5.12,
'side':5.12,
'silent':5.12,
'single':5.12,
'sixth':5.12,
'soldier':5.12,
'stairs':5.12,
'tau':5.12,
'territory':5.12,
'testimony':5.12,
'tex':5.12,
'tumbling':5.12,
'ty':5.12,
'typically':5.12,
'viii':5.12,
'von':5.12,
'wander':5.12,
'while':5.12,
'willie':5.12,
'wire':5.12,
'xx':5.12,
'ye':5.12,
'torch':5.11,
'brotha':5.1,
'conmigo':5.1,
'edges':5.1,
'amino':5.1,
'pause':5.1,
'populations':5.1,
'sealed':5.1,
'ren':5.1,
'20th':5.1,
'4th':5.1,
'\@dealsplus':5.1,
'aaron':5.1,
'according':5.1,
'administrative':5.1,
'albert':5.1,
'alleen':5.1,
'allen':5.1,
'ave':5.1,
'average':5.1,
'bases':5.1,
'before':5.1,
'bellwether':5.1,
'betta':5.1,
'between':5.1,
'bryan':5.1,
'bus':5.1,
'butt':5.1,
'ca':5.1,
'careful':5.1,
'carlos':5.1,
'cells':5.1,
'ceo':5.1,
'circuit':5.1,
'cliff':5.1,
'commissioner':5.1,
'consumption':5.1,
'curtis':5.1,
'davis':5.1,
'dealt':5.1,
'differential':5.1,
'dr':5.1,
'either':5.1,
'et':5.1,
'extent':5.1,
'factor':5.1,
'ff':5.1,
'gary':5.1,
'goldstein':5.1,
'he\'s':5.1,
'hou':5.1,
'huntington':5.1,
'ian':5.1,
'investigate':5.1,
'jb':5.1,
'jon':5.1,
'koch':5.1,
'lists':5.1,
'managers':5.1,
'mans':5.1,
'marc':5.1,
'marks':5.1,
'mata':5.1,
'merger':5.1,
'mich':5.1,
'minneapolis':5.1,
'mother-in-law':5.1,
'nhl':5.1,
'nick':5.1,
'o\'brien':5.1,
'obey':5.1,
'omg':5.1,
'phat':5.1,
'pin':5.1,
'protestant':5.1,
'puts':5.1,
'quien':5.1,
'replacement':5.1,
'requests':5.1,
'rev':5.1,
'rogers':5.1,
'routine':5.1,
'sai':5.1,
'schwartz':5.1,
'smith':5.1,
'smokin':5.1,
'sobre':5.1,
'sont':5.1,
'stack':5.1,
'steelers':5.1,
'tablet':5.1,
'thats':5.1,
'there':5.1,
'these':5.1,
'toe':5.1,
'tooo':5.1,
'wayne':5.1,
'welfare':5.1,
'wolf':5.1,
'youre':5.1,
'youu':5.1,
'specimen':5.09,
'fait':5.08,
'hump':5.08,
'kg':5.08,
'trace':5.08,
'assuming':5.08,
'dmc':5.08,
'glue':5.08,
'neutral':5.08,
'provincial':5.08,
'questa':5.08,
'sempre':5.08,
'unto':5.08,
'whispered':5.08,
'\#ohjustlikeme':5.08,
'12th':5.08,
'admitted':5.08,
'after':5.08,
'agent':5.08,
'albany':5.08,
'alfred':5.08,
'amid':5.08,
'az':5.08,
'base':5.08,
'berger':5.08,
'booked':5.08,
'bronxville':5.08,
'budget':5.08,
'buss':5.08,
'c-included':5.08,
'canaan':5.08,
'ch':5.08,
'commissioners':5.08,
'copie':5.08,
'cord':5.08,
'countdown':5.08,
'department\'s':5.08,
'districts':5.08,
'doug':5.08,
'eric':5.08,
'eugene':5.08,
'factory':5.08,
'falta':5.08,
'february':5.08,
'fence':5.08,
'fui':5.08,
'gilbert':5.08,
'hart':5.08,
'hij':5.08,
'hun':5.08,
'indonesia':5.08,
'jo':5.08,
'john':5.08,
'juan':5.08,
'knee':5.08,
'laws':5.08,
'listed':5.08,
'manhasset':5.08,
'marion':5.08,
'martinez':5.08,
'medicaid':5.08,
'medicine':5.08,
'meyer':5.08,
'might':5.08,
'morgen':5.08,
'morris':5.08,
'nas':5.08,
'necessarily':5.08,
'norman':5.08,
'noted':5.08,
'occasionally':5.08,
'ohhh':5.08,
'ooo':5.08,
'para':5.08,
'pls':5.08,
'quiere':5.08,
'requirement':5.08,
'schemes':5.08,
'scott':5.08,
'seconds':5.08,
'sen':5.08,
'sets':5.08,
'settle':5.08,
'seventh':5.08,
'so':5.08,
'soledad':5.08,
'specimens':5.08,
'squeeze':5.08,
'steel':5.08,
'stevens':5.08,
'stewart':5.08,
'stick':5.08,
'suis':5.08,
'tag':5.08,
'tattoo':5.08,
'therefore':5.08,
'timothy':5.08,
'told':5.08,
'transit':5.08,
'underground':5.08,
'va':5.08,
'wanted':5.08,
'week':5.08,
'yr':5.08,
'z':5.08,
'tiempo':5.06,
'denn':5.06,
'km':5.06,
'komt':5.06,
'mientras':5.06,
'swallowed':5.06,
'todas':5.06,
'puede':5.06,
'17th':5.06,
'19th':5.06,
'atl':5.06,
'aus':5.06,
'banker':5.06,
'belt':5.06,
'bend':5.06,
'cali':5.06,
'changed':5.06,
'changes':5.06,
'chill':5.06,
'committees':5.06,
'convo':5.06,
'corporation':5.06,
'decision':5.06,
'diego':5.06,
'diffusion':5.06,
'eighth':5.06,
'federation':5.06,
'five-year':5.06,
'flatbush':5.06,
'follows':5.06,
'frederick':5.06,
'ganas':5.06,
'gb':5.06,
'grab':5.06,
'hughes':5.06,
'ihn':5.06,
'interview':5.06,
'interviews':5.06,
'jag':5.06,
'kenneth':5.06,
'kerry':5.06,
'kimi':5.06,
'lakers':5.06,
'las':5.06,
'm':5.06,
'marilyn':5.06,
'mj':5.06,
'monitoring':5.06,
'moscow':5.06,
'moved':5.06,
'mujer':5.06,
'nel':5.06,
'nyu':5.06,
'one-year':5.06,
'p':5.06,
'phase':5.06,
'poder':5.06,
'primitive':5.06,
'rattle':5.06,
'reign':5.06,
'restated':5.06,
'rod':5.06,
'ruth':5.06,
'screening':5.06,
'sherman':5.06,
'socks':5.06,
'sought':5.06,
'speculation':5.06,
'spokesman':5.06,
'stones':5.06,
'streak':5.06,
'swept':5.06,
'sympathies':5.06,
'td':5.06,
'this':5.06,
'thompson':5.06,
'thunder':5.06,
'tiene':5.06,
'tin':5.06,
'tryin':5.06,
'tx':5.06,
'voy':5.06,
'vuoi':5.06,
'weeks':5.06,
'who':5.06,
'whoever':5.06,
'wil':5.06,
'avec':5.05,
'consequently':5.04,
'dynamite':5.04,
'judgement':5.04,
'thereby':5.04,
'voz':5.04,
'wooden':5.04,
'conquer':5.04,
'loco':5.04,
'onset':5.04,
'\'the':5.04,
'7th':5.04,
'8th':5.04,
'ada':5.04,
'advertising':5.04,
'anders':5.04,
'aqui':5.04,
'aunque':5.04,
'b-included':5.04,
'bbm':5.04,
'been':5.04,
'biz':5.04,
'blair':5.04,
'blaze':5.04,
'bone':5.04,
'bosnian':5.04,
'break':5.04,
'bronx':5.04,
'cc':5.04,
'charged':5.04,
'cole':5.04,
'complex':5.04,
'dee':5.04,
'doc':5.04,
'edith':5.04,
'esta':5.04,
'fla':5.04,
'fleet':5.04,
'fred':5.04,
'fue':5.04,
'harlem':5.04,
'hav':5.04,
'herz':5.04,
'hmm':5.04,
'hombre':5.04,
'hoy':5.04,
'hrs':5.04,
'hut':5.04,
'into':5.04,
'j':5.04,
'llegar':5.04,
'mai':5.04,
'margin':5.04,
'measures':5.04,
'mei':5.04,
'mile':5.04,
'milton':5.04,
'mm':5.04,
'myers':5.04,
'nun':5.04,
'occupied':5.04,
'officially':5.04,
'other':5.04,
'ova':5.04,
'patient':5.04,
'presbyterian':5.04,
'ps':5.04,
'put':5.04,
'replace':5.04,
'robertson':5.04,
'rochelle':5.04,
'rss':5.04,
's':5.04,
'searching':5.04,
'sha':5.04,
'sides':5.04,
'sittin':5.04,
'size':5.04,
'somos':5.04,
'spend':5.04,
'standin':5.04,
'stare':5.04,
'statistics':5.04,
'stone':5.04,
'sub':5.04,
'takes':5.04,
'tanto':5.04,
'that\'ll':5.04,
'theyre':5.04,
'tweetdeck':5.04,
'undercover':5.04,
'ves':5.04,
'vos':5.04,
'w/':5.04,
'whilst':5.04,
'wipe':5.04,
'corners':5.02,
'luz':5.02,
'nena':5.02,
'adesso':5.02,
'alle':5.02,
'betcha':5.02,
'curtain':5.02,
'getcha':5.02,
'mash':5.02,
'preach':5.02,
'puedo':5.02,
'strings':5.02,
'tubes':5.02,
'veo':5.02,
'\#quote':5.02,
'6th':5.02,
'\@theellenshow':5.02,
'administrator':5.02,
'analysts':5.02,
'anyone':5.02,
'apologize':5.02,
'blacks':5.02,
'blvd':5.02,
'bu':5.02,
'burke':5.02,
'buses':5.02,
'c-net':5.02,
'carl':5.02,
'case':5.02,
'coleman':5.02,
'competing':5.02,
'controls':5.02,
'conventional':5.02,
'cuando':5.02,
'diagnostic':5.02,
'disclosure':5.02,
'documents':5.02,
'doy':5.02,
'draft':5.02,
'esse':5.02,
'estou':5.02,
'final':5.02,
'flat':5.02,
'flip':5.02,
'foot':5.02,
'gettin':5.02,
'gotta':5.02,
'happened':5.02,
'heeft':5.02,
'hot':5.02,
'ii':5.02,
'im':5.02,
'implied':5.02,
'industrial':5.02,
'israel\'s':5.02,
'it':5.02,
'ive':5.02,
'jerome':5.02,
'kaplan':5.02,
'kent':5.02,
'levine':5.02,
'lik':5.02,
'manager':5.02,
'marcia':5.02,
'mayer':5.02,
'meer':5.02,
'mi':5.02,
'mismo':5.02,
'nacht':5.02,
'necesito':5.02,
'necessary':5.02,
'newark':5.02,
'noch':5.02,
'ordinary':5.02,
'os':5.02,
'parameters':5.02,
'parking':5.02,
'pentagon':5.02,
'phantom':5.02,
'porque':5.02,
'pr':5.02,
'procedures':5.02,
'quarterly':5.02,
'random':5.02,
'rc':5.02,
'requiring':5.02,
'richardson':5.02,
'roth':5.02,
'sama':5.02,
'san':5.02,
'sc':5.02,
'schedule':5.02,
'setting':5.02,
'sleeve':5.02,
'slice':5.02,
'solitude':5.02,
'some':5.02,
'sou':5.02,
'stake':5.02,
'stamford':5.02,
'switch':5.02,
'teh':5.02,
'themselves':5.02,
'todd':5.02,
'tu':5.02,
'twittering':5.02,
'uni':5.02,
'veil':5.02,
'vous':5.02,
'vp':5.02,
'wana':5.02,
'westport':5.02,
'where':5.02,
'you\'ve':5.02,
'binding':5.01,
'\'cause':5,
'agents':5,
'alguien':5,
'assess':5,
'b-net':5,
'because':5,
'becker':5,
'boot':5,
'cada':5,
'carbon':5,
'coeur':5,
'commands':5,
'cosas':5,
'das':5,
'dated':5,
'diggin':5,
'executives':5,
'flipmode':5,
'forex':5,
'fourth':5,
'gosh':5,
'governing':5,
'herbert':5,
'hoo':5,
'hora':5,
'hush':5,
'id':5,
'indicated':5,
'jus':5,
'k':5,
'katz':5,
'kaufman':5,
'ku':5,
'la':5,
'listings':5,
'liver':5,
'luther':5,
'marjorie':5,
'marvin':5,
'mee':5,
'membrane':5,
'mir':5,
'neil':5,
'o\'neill':5,
'odds':5,
'offices':5,
'otra':5,
'par':5,
'paying':5,
'peculiar':5,
'pensar':5,
'per':5,
'plain':5,
'price':5,
'priced':5,
'pursued':5,
'quero':5,
'questions':5,
'reports':5,
'ridgewood':5,
'ron':5,
'ronald':5,
'sentir':5,
'shaggy':5,
'situation':5,
'some1':5,
'something':5,
'standard':5,
'stir':5,
'su':5,
'supervisor':5,
'thereof':5,
'throat':5,
'throw':5,
'til':5,
'todo':5,
'tp':5,
'tra':5,
'trop':5,
'tweeters':5,
'using':5,
'vid':5,
'voglio':5,
'wa':5,
'waan':5,
'warren':5,
'weighted':5,
'where\'s':5,
'whereas':5,
'who\'s':5,
'wig':5,
'zu':5,
'zum':5,
'stretched':4.99,
'forty':4.99,
'16th':4.98,
'57th':4.98,
'5th':4.98,
'\@addthis':4.98,
'\@idothat2':4.98,
'ai':4.98,
'bei':4.98,
'billy':4.98,
'bisa':4.98,
'btw':4.98,
'by':4.98,
'cloudy':4.98,
'compared':4.98,
'corp':4.98,
'cuba':4.98,
'd8':4.98,
'dartmouth':4.98,
'dei':4.98,
'denk':4.98,
'don':4.98,
'edge':4.98,
'edwards':4.98,
'een':4.98,
'ein':4.98,
'eine':4.98,
'episcopal':4.98,
'este':4.98,
'exec':4.98,
'hace':4.98,
'hits':4.98,
'hoes':4.98,
'howard':4.98,
'io':4.98,
'jadi':4.98,
'jeder':4.98,
'judicial':4.98,
'knot':4.98,
'line':4.98,
'mb':4.98,
'meu':4.98,
'mij':4.98,
'nails':4.98,
'needs':4.98,
'novo':4.98,
'nw':4.98,
'officers':4.98,
'ogni':4.98,
'ons':4.98,
'or':4.98,
'parliament':4.98,
'part':4.98,
'paso':4.98,
'piel':4.98,
'pork':4.98,
'pound':4.98,
'pres':4.98,
'question':4.98,
'rappers':4.98,
'rather':4.98,
'requirements':4.98,
'roundup':4.98,
'scarsdale':4.98,
'schneider':4.98,
'som':4.98,
'somethin':4.98,
'soooooo':4.98,
'stared':4.98,
'sumthin':4.98,
'syracuse':4.98,
'the':4.98,
'they\'d':4.98,
'they\'ve':4.98,
'three-year':4.98,
'throws':4.98,
'to':4.98,
'tudo':4.98,
'tuesday':4.98,
'wall':4.98,
'walsh':4.98,
'why':4.98,
'yesterday':4.98,
'clause':4.98,
'clit':4.98,
'hence':4.98,
'ml':4.98,
'babylon':4.98,
'pp':4.98,
'shi':4.97,
'\#tweetmyjobs':4.96,
'11th':4.96,
'3rd':4.96,
'accounts':4.96,
'aight':4.96,
'aku':4.96,
'alan':4.96,
'algo':4.96,
'ama':4.96,
'anybody':4.96,
'assumed':4.96,
'baru':4.96,
'bem':4.96,
'bin':4.96,
'borders':4.96,
'cbs':4.96,
'cf':4.96,
'cleveland':4.96,
'coal':4.96,
'colonel':4.96,
'comme':4.96,
'company\'s':4.96,
'dan':4.96,
'def':4.96,
'dried':4.96,
'drops':4.96,
'dug':4.96,
'eq':4.96,
'esto':4.96,
'fe':4.96,
'fone':4.96,
'frm':4.96,
'haar':4.96,
'hacer':4.96,
'hail':4.96,
'iii':4.96,
'incidence':4.96,
'investigators':4.96,
'ist':4.96,
'its':4.96,
'kann':4.96,
'keer':4.96,
'ko':4.96,
'larchmont':4.96,
'med':4.96,
'memorial':4.96,
'miley':4.96,
'montclair':4.96,
'napoleon':4.96,
'nuff':4.96,
'nxt':4.96,
'o':4.96,
'op-ed':4.96,
'ordered':4.96,
'outro':4.96,
'pelo':4.96,
'perhaps':4.96,
'pero':4.96,
'raton':4.96,
'ri':4.96,
'rita':4.96,
'schon':4.96,
'sein':4.96,
'semana':4.96,
'tengo':4.96,
'thick':4.96,
'tyson':4.96,
'ufc':4.96,
'ur':4.96,
'vi':4.96,
'when':4.96,
'wis':4.96,
'yall':4.96,
'yorker':4.96,
'anche':4.96,
'jour':4.96,
'mou':4.96,
'regiment':4.96,
'socialism':4.96,
'staan':4.96,
'temps':4.96,
'veces':4.96,
'\'s':4.94,
'[a1]':4.94,
'aber':4.94,
'acabo':4.94,
'across':4.94,
'agenda':4.94,
'aka':4.94,
'alibi':4.94,
'av':4.94,
'bam':4.94,
'banging':4.94,
'bein':4.94,
'bennett':4.94,
'boca':4.94,
'campbell':4.94,
'chase':4.94,
'close':4.94,
'co':4.94,
'contrast':4.94,
'council':4.94,
'cuerpo':4.94,
'debate':4.94,
'dinkins':4.94,
'dip':4.94,
'dm':4.94,
'ele':4.94,
'fazer':4.94,
'federal':4.94,
'foi':4.94,
'ftw':4.94,
'g':4.94,
'geez':4.94,
'gen':4.94,
'gw':4.94,
'he\'d':4.94,
'hooked':4.94,
'hs':4.94,
'inter':4.94,
'ix':4.94,
'iya':4.94,
'jaja':4.94,
'jou':4.94,
'makin':4.94,
'menos':4.94,
'mesmo':4.94,
'mins':4.94,
'mo':4.94,
'msg':4.94,
'naughty':4.94,
'needing':4.94,
'nie':4.94,
'nih':4.94,
'noi':4.94,
'noting':4.94,
'nou':4.94,
'of':4.94,
'ohne':4.94,
'once':4.94,
'popped':4.94,
'procedure':4.94,
'quel':4.94,
'rap':4.94,
'razor':4.94,
'reportedly':4.94,
'restructuring':4.94,
'row':4.94,
'rubin':4.94,
'sayin':4.94,
'sixty':4.94,
'stood':4.94,
'stormy':4.94,
'tackle':4.94,
'takin':4.94,
'temperature':4.94,
'term':4.94,
'termed':4.94,
'tes':4.94,
'testified':4.94,
'that':4.94,
'those':4.94,
'ti':4.94,
'tuo':4.94,
'una':4.94,
'until':4.94,
'vez':4.94,
'which':4.94,
'whom':4.94,
'antes':4.94,
'bajo':4.94,
'dmx':4.94,
'dripping':4.94,
'han':4.94,
'homeboy':4.94,
'inna':4.94,
'kon':4.94,
'questo':4.94,
'swell':4.94,
'xi':4.94,
'youll':4.94,
'doo':4.94,
'forma':4.94,
'marginal':4.94,
'nate':4.94,
'ojos':4.94,
'vie':4.94,
'zie':4.94,
'fold':4.94,
'ad':4.92,
'affect':4.92,
'agencies':4.92,
'ainda':4.92,
'alla':4.92,
'ar':4.92,
'armies':4.92,
'atleast':4.92,
'au':4.92,
'b6':4.92,
'bishops':4.92,
'bo':4.92,
'c\'est':4.92,
'cm':4.92,
'common':4.92,
'contigo':4.92,
'crave':4.92,
'da':4.92,
'decir':4.92,
'disclosed':4.92,
'dole':4.92,
'dom':4.92,
'economists':4.92,
'filing':4.92,
'fl':4.92,
'fr':4.92,
'gap':4.92,
'gerald':4.92,
'gorbachev':4.92,
'hast':4.92,
'homie':4.92,
'illinois':4.92,
'instead':4.92,
'interim':4.92,
'itu':4.92,
'judge':4.92,
'lebron':4.92,
'marked':4.92,
'mes':4.92,
'nato':4.92,
'ni':4.92,
'nye':4.92,
'only':4.92,
'pt':4.92,
'pushin':4.92,
'reais':4.92,
'representative':4.92,
'reviewer':4.92,
'ruled':4.92,
'sabe':4.92,
'shadow':4.92,
'strap':4.92,
'strip':4.92,
'sua':4.92,
'suppose':4.92,
'task':4.92,
'tenho':4.92,
'them':4.92,
'thomas':4.92,
'tix':4.92,
'todos':4.92,
'trans':4.92,
'twitpic':4.92,
'une':4.92,
'var':4.92,
'wha':4.92,
'whenever':4.92,
'whether':4.92,
'wordt':4.92,
'x':4.92,
'bist':4.92,
'dans':4.92,
'discourse':4.92,
'elke':4.92,
'ey':4.92,
'kau':4.92,
'peasant':4.92,
'pretending':4.92,
'puttin':4.92,
'siento':4.92,
'sola':4.92,
'spinal':4.92,
've':4.92,
'bizarre':4.92,
'weet':4.92,
'moi':4.91,
'\#in2010':4.9,
'#NAME?':4.9,
'al':4.9,
'andy':4.9,
'at':4.9,
'bis':4.9,
'bloomberg':4.9,
'border':4.9,
'brb':4.9,
'campaigns':4.9,
'charge':4.9,
'chu':4.9,
'dig':4.9,
'dukakis':4.9,
'edwin':4.9,
'ela':4.9,
'eres':4.9,
'esa':4.9,
'finance':4.9,
'fog':4.9,
'gt':4.9,
'heute':4.9,
'hpa':4.9,
'ie':4.9,
'jonas':4.9,
'kinda':4.9,
'koto':4.9,
'kt':4.9,
'law':4.9,
'levin':4.9,
'lu':4.9,
'maar':4.9,
'mack':4.9,
'melt':4.9,
'merrill':4.9,
'nee':4.9,
'nh':4.9,
'obliged':4.9,
'ook':4.9,
'pointed':4.9,
'pra':4.9,
'rental':4.9,
'sector':4.9,
'sleepy':4.9,
'sometime':4.9,
'soo':4.9,
'sticks':4.9,
'subsidiary':4.9,
'te':4.9,
'testing':4.9,
'tiny':4.9,
'trey':4.9,
'uma':4.9,
'ven':4.9,
'wer':4.9,
'xm':4.9,
'yuh':4.9,
'yup':4.9,
'zo':4.9,
'deine':4.9,
'dre':4.9,
'fi':4.9,
'kommt':4.9,
'macht':4.9,
'mig':4.9,
'sono':4.9,
'static':4.9,
'toi':4.9,
'vii':4.9,
'broads':4.9,
'moe':4.9,
'liefde':4.89,
'aiyyo':4.89,
'2nd':4.88,
'\@tommcfly':4.88,
'age':4.88,
'ago':4.88,
'allein':4.88,
'b4':4.88,
'billboard':4.88,
'black':4.88,
'bt':4.88,
'causes':4.88,
'chuck':4.88,
'cited':4.88,
'dass':4.88,
'dejes':4.88,
'dentro':4.88,
'der':4.88,
'digg':4.88,
'drifting':4.88,
'du':4.88,
'elderly':4.88,
'frost':4.88,
'guard':4.88,
'herman':4.88,
'het':4.88,
'ir':4.88,
'issued':4.88,
'it\'s':4.88,
'judges':4.88,
'junto':4.88,
'lectures':4.88,
'lieu':4.88,
'mais':4.88,
'memo':4.88,
'mg':4.88,
'mis':4.88,
'moody\'s':4.88,
'nevertheless':4.88,
'oil':4.88,
'operator':4.88,
'previous':4.88,
'prior':4.88,
're':4.88,
'regulators':4.88,
'remarks':4.88,
'rt':4.88,
'scale':4.88,
'se':4.88,
'sei':4.88,
'sgt':4.88,
'sie':4.88,
'siegel':4.88,
'sp':4.88,
'st':4.88,
'thang':4.88,
'toilet':4.88,
'tryna':4.88,
'ummm':4.88,
'veel':4.88,
'viel':4.88,
'went':4.88,
'whose':4.88,
'eg':4.88,
'igual':4.88,
'qui':4.88,
'substitute':4.88,
'nous':4.88,
'senza':4.88,
'\#random':4.86,
'\@donniewahlberg':4.86,
'\@ladygaga':4.86,
'accounting':4.86,
'ap':4.86,
'arnold':4.86,
'b-after':4.86,
'bb':4.86,
'bk':4.86,
'bush':4.86,
'bustin':4.86,
'cia':4.86,
'circumstances':4.86,
'cont':4.86,
'cud':4.86,
'diff':4.86,
'divisions':4.86,
'dus':4.86,
'echt':4.86,
'elsewhere':4.86,
'ft':4.86,
'gonna':4.86,
'haben':4.86,
'hath':4.86,
'hong':4.86,
'how\'s':4.86,
'hr':4.86,
'ira':4.86,
'ish':4.86,
'ja':4.86,
'jst':4.86,
'knock':4.86,
'le':4.86,
'mah':4.86,
'mask':4.86,
'mehr':4.86,
'mijn':4.86,
'missy':4.86,
'nadie':4.86,
'nonetheless':4.86,
'nu':4.86,
'og':4.86,
'oi':4.86,
'oo':4.86,
'ou':4.86,
'rd':4.86,
'recuerdo':4.86,
'ridin':4.86,
'sensitive':4.86,
'seo':4.86,
'shapiro':4.86,
'sm':4.86,
'smoked':4.86,
'sooooo':4.86,
'sr':4.86,
'staring':4.86,
'tellin':4.86,
'tempted':4.86,
'tract':4.86,
'voor':4.86,
'vor':4.86,
'vt':4.86,
'w':4.86,
'were':4.86,
'wie':4.86,
'ze':4.86,
'toch':4.86,
'askin':4.86,
'cinta':4.86,
'eminem':4.86,
'geld':4.86,
'ibid':4.86,
'isn':4.86,
'kane':4.86,
'labour':4.86,
'pienso':4.86,
'soc':4.86,
'miedo':4.85,
'tienes':4.85,
'explicit':4.85,
'\@taylorswift13':4.84,
'abt':4.84,
'administration':4.84,
'amo':4.84,
'an':4.84,
'awhile':4.84,
'b':4.84,
'b7':4.84,
'capitalist':4.84,
'crawl':4.84,
'd1':4.84,
'dam':4.84,
'dats':4.84,
'decades':4.84,
'dem':4.84,
'desde':4.84,
'di':4.84,
'en':4.84,
'est':4.84,
'filed':4.84,
'friedman':4.84,
'hab':4.84,
'harris':4.84,
'hm':4.84,
'hows':4.84,
'ht':4.84,
'investigating':4.84,
'invisible':4.84,
'jd':4.84,
'ka':4.84,
'ke':4.84,
'keine':4.84,
'lo':4.84,
'maintenance':4.84,
'mar':4.84,
'mining':4.84,
'mn':4.84,
'nao':4.84,
'need':4.84,
'nt':4.84,
'o_o':4.84,
'oh':4.84,
'proc':4.84,
'rates':4.84,
'reagan':4.84,
'sanders':4.84,
'secret':4.84,
'setup':4.84,
'sharp':4.84,
'sih':4.84,
'sta':4.84,
't':4.84,
'temp':4.84,
'tooth':4.84,
'vc':4.84,
'vernon':4.84,
'ward':4.84,
'duele':4.84,
'horns':4.84,
'inevitably':4.84,
'jeg':4.84,
'kneel':4.84,
'partial':4.84,
'puedes':4.84,
'throwin':4.84,
'zeg':4.84,
'geen':4.83,
'louder':4.83,
'tutto':4.83,
'tout':4.83,
'temptation':4.83,
'\#omgfacts':4.82,
'\@stephenfry':4.82,
'\@tweetmeme':4.82,
'acho':4.82,
'addenda':4.82,
'administration\'s':4.82,
'altered':4.82,
'ang':4.82,
'att':4.82,
'ayer':4.82,
'b/c':4.82,
'b2':4.82,
'beard':4.82,
'cane':4.82,
'cases':4.82,
'causing':4.82,
'che':4.82,
'col':4.82,
'cum':4.82,
'de':4.82,
'deh':4.82,
'demi':4.82,
'doch':4.82,
'duties':4.82,
'eso':4.82,
'examination':4.82,
'exposure':4.82,
'finna':4.82,
'flipped':4.82,
'gm':4.82,
'hes':4.82,
'hid':4.82,
'hoje':4.82,
'hood':4.82,
'impact':4.82,
'israeli':4.82,
'lagi':4.82,
'll':4.82,
'mideast':4.82,
'municipal':4.82,
'must':4.82,
'n':4.82,
'ne':4.82,
'ng':4.82,
'ot':4.82,
'over':4.82,
'pst':4.82,
'quando':4.82,
'ralph':4.82,
'repeated':4.82,
'rushing':4.82,
'sellin':4.82,
'sich':4.82,
'smell':4.82,
'ticking':4.82,
'tt':4.82,
'udah':4.82,
'vegan':4.82,
'wah':4.82,
'warum':4.82,
'witness':4.82,
'wut':4.82,
'assumptions':4.82,
'dawg':4.82,
'dro':4.82,
'gaan':4.82,
'nerve':4.82,
'scheme':4.82,
'sus':4.82,
'vas':4.82,
'vein':4.82,
'werden':4.82,
'otro':4.81,
'toda':4.81,
'detection':4.81,
'\@jonathanrknight':4.8,
'advisory':4.8,
'ak':4.8,
'ao':4.8,
'apa':4.8,
'asap':4.8,
'bankers':4.8,
'bij':4.8,
'bosnia':4.8,
'c1':4.8,
'cock':4.8,
'det':4.8,
'dey':4.8,
'didn':4.8,
'eds':4.8,
'el':4.8,
'es':4.8,
'eu':4.8,
'fa':4.8,
'giuliani':4.8,
'h':4.8,
'it\'d':4.8,
'johns':4.8,
'judy':4.8,
'kan':4.8,
'lautner':4.8,
'lejos':4.8,
'ltd':4.8,
'lugar':4.8,
'meetings':4.8,
'mein':4.8,
'mental':4.8,
'naar':4.8,
'nai':4.8,
'nd':4.8,
'nerd':4.8,
'nom':4.8,
'olvidar':4.8,
'one-time':4.8,
'orthodox':4.8,
'pataki':4.8,
'pe':4.8,
'proceedings':4.8,
'pussy':4.8,
'rehabilitation':4.8,
'rep':4.8,
'sachs':4.8,
'slightly':4.8,
'superintendent':4.8,
'sur':4.8,
'versus':4.8,
'wats':4.8,
'wen':4.8,
'what':4.8,
'what\'s':4.8,
'whos':4.8,
'widespread':4.8,
'yrs':4.8,
'zonder':4.8,
'petition':4.8,
'gimmie':4.8,
'jamais':4.8,
'laat':4.8,
'manos':4.8,
'niets':4.8,
'passive':4.8,
'tous':4.8,
'mase':4.79,
'wij':4.79,
'\#p2':4.78,
'1/2-bath':4.78,
'aja':4.78,
'asi':4.78,
'at;t':4.78,
'b1':4.78,
'bc':4.78,
'belly':4.78,
'blizzard':4.78,
'ce':4.78,
'conditions':4.78,
'confess':4.78,
'dann':4.78,
'des':4.78,
'dha':4.78,
'difference':4.78,
'em':4.78,
'ep':4.78,
'essa':4.78,
'exit':4.78,
'fate':4.78,
'fo':4.78,
'funk':4.78,
'gat':4.78,
'gimme':4.78,
'gli':4.78,
'israel':4.78,
'je':4.78,
'juss':4.78,
'kein':4.78,
'llorar':4.78,
'meds':4.78,
'military':4.78,
'mud':4.78,
'nasdaq':4.78,
'nos':4.78,
'nur':4.78,
'ohh':4.78,
'pena':4.78,
'r':4.78,
'reserved':4.78,
'reversed':4.78,
'sigo':4.78,
'sooo':4.78,
'stakes':4.78,
'suddenly':4.78,
'though':4.78,
'throwing':4.78,
'tht':4.78,
'ver':4.78,
'wallace':4.78,
'wel':4.78,
'wieder':4.78,
'witnesses':4.78,
'wud':4.78,
'tijd':4.78,
'unseen':4.78,
'\@aplusk':4.76,
'\@ddlovato':4.76,
'ahl':4.76,
'aside':4.76,
'bak':4.76,
'board':4.76,
'buat':4.76,
'chilling':4.76,
'cnt':4.76,
'coz':4.76,
'dat':4.76,
'departure':4.76,
'dolor':4.76,
'economics':4.76,
'else':4.76,
'ese':4.76,
'essay':4.76,
'gas':4.76,
'gd':4.76,
'governors':4.76,
'kobe':4.76,
'lately':4.76,
'les':4.76,
'management':4.76,
'min':4.76,
'officials':4.76,
'ought':4.76,
'oughta':4.76,
'pieces':4.76,
'pig':4.76,
'por':4.76,
'pulled':4.76,
'quand':4.76,
're-election':4.76,
'repair':4.76,
'report':4.76,
'sa':4.76,
'sans':4.76,
'sho':4.76,
'sinai':4.76,
'somewhat':4.76,
'spent':4.76,
'ta':4.76,
'targets':4.76,
'telerate':4.76,
'tem':4.76,
'th':4.76,
'tha':4.76,
'took':4.76,
'trippin':4.76,
'tuh':4.76,
'tupac':4.76,
'weer':4.76,
'weiss':4.76,
'wenn':4.76,
'whats':4.76,
'wore':4.76,
'would\'ve':4.76,
'woulda':4.76,
'xd':4.76,
'af':4.76,
'coulda':4.76,
'drift':4.76,
'goed':4.76,
'ihr':4.76,
'niente':4.76,
'tek':4.76,
'sword':4.75,
'&c':4.74,
'\'em':4.74,
'\@adamlambert':4.74,
'admit':4.74,
'alley':4.74,
'authority':4.74,
'b-includes':4.74,
'colo':4.74,
'corner':4.74,
'dag':4.74,
'dah':4.74,
'dealers':4.74,
'depending':4.74,
'dow':4.74,
'faz':4.74,
'fml':4.74,
'gona':4.74,
'had':4.74,
'heavily':4.74,
'hook':4.74,
'imma':4.74,
'judgment':4.74,
'licht':4.74,
'load':4.74,
'long':4.74,
'mines':4.74,
'minha':4.74,
'muito':4.74,
'myspace':4.74,
'older':4.74,
'operate':4.74,
'otherwise':4.74,
'policy':4.74,
'pull':4.74,
'quem':4.74,
'res':4.74,
'resist':4.74,
'saber':4.74,
'smaller':4.74,
'smh':4.74,
'than':4.74,
'trials':4.74,
'yu':4.74,
'zijn':4.74,
'ci':4.73,
'cling':4.73,
'niemand':4.73,
'possessed':4.73,
'refrain':4.73,
'thangs':4.73,
'weg':4.73,
'bwoy':4.73,
'\#tcot':4.72,
'\'i':4.72,
'43d':4.72,
'\@johncmayer':4.72,
'a3':4.72,
'alien':4.72,
'assume':4.72,
'bent':4.72,
'bestie':4.72,
'citing':4.72,
'claims':4.72,
'condition':4.72,
'ct':4.72,
'd':4.72,
'dealer':4.72,
'depends':4.72,
'e':4.72,
'estas':4.72,
'fits':4.72,
'government\'s':4.72,
'guessing':4.72,
'huh':4.72,
'loca':4.72,
'medical':4.72,
'meh':4.72,
'melhor':4.72,
'offset':4.72,
'period':4.72,
'pulmonary':4.72,
'redman':4.72,
'repeatedly':4.72,
'ses':4.72,
'sum1':4.72,
'surrounded':4.72,
'tho':4.72,
'umm':4.72,
'underneath':4.72,
'vai':4.72,
'wake':4.72,
'wird':4.72,
'wk':4.72,
'wo':4.72,
'doesn':4.71,
'ei':4.71,
'induced':4.71,
'interference':4.71,
'komm':4.71,
'obligations':4.71,
'perder':4.71,
'pues':4.71,
'tus':4.71,
'voel':4.71,
'boundaries':4.7,
'affairs':4.7,
'almost':4.7,
'boi':4.7,
'c':4.7,
'chasing':4.7,
'corporate':4.7,
'corps':4.7,
'cos':4.7,
'crossed':4.7,
'duty':4.7,
'except':4.7,
'excessive':4.7,
'geef':4.7,
'gue':4.7,
'hella':4.7,
'hound':4.7,
'however':4.7,
'inc':4.7,
'isso':4.7,
'kno':4.7,
'lawmakers':4.7,
'legislative':4.7,
'legislature':4.7,
'loc':4.7,
'los':4.7,
'mau':4.7,
'maybe':4.7,
'mere':4.7,
'nail':4.7,
'neva':4.7,
'nichts':4.7,
'nuh':4.7,
'nya':4.7,
'payment':4.7,
'pullin':4.7,
'rocky':4.7,
'sd':4.7,
'senate':4.7,
'ser':4.7,
'serbian':4.7,
'seriously':4.7,
'slight':4.7,
'striking':4.7,
'tweeps':4.7,
'wrk':4.7,
'wth':4.7,
'yet':4.7,
'frontin':4.69,
'iets':4.69,
'sind':4.69,
'weh':4.69,
'shakes':4.69,
'uns':4.69,
'zou':4.69,
'pasar':4.68,
'ab':4.68,
'ba':4.68,
'bodies':4.68,
'borrowed':4.68,
'clinical':4.68,
'cross':4.68,
'curb':4.68,
'cuz':4.68,
'deputy':4.68,
'doen':4.68,
'dun':4.68,
'einen':4.68,
'gibt':4.68,
'har':4.68,
'how':4.68,
'inevitable':4.68,
'institutions':4.68,
'islam':4.68,
'knives':4.68,
'kono':4.68,
'nem':4.68,
'oldest':4.68,
'op':4.68,
'overcast':4.68,
'patient\'s':4.68,
'pq':4.68,
'qtr':4.68,
'rapper':4.68,
'requires':4.68,
'ruling':4.68,
'shady':4.68,
'sodium':4.68,
'spots':4.68,
'threw':4.68,
'uu':4.68,
'wereld':4.68,
'wht':4.68,
'wir':4.68,
'bg':4.67,
'bump':4.67,
'dein':4.67,
'dependence':4.67,
'flesh':4.67,
'hustle':4.67,
'immer':4.67,
'nooit':4.67,
'dicen':4.67,
'tumble':4.67,
'13th':4.66,
'agora':4.66,
'borrow':4.66,
'drip':4.66,
'forms':4.66,
'freakin':4.66,
'ga':4.66,
'hole':4.66,
'if':4.66,
'inquiry':4.66,
'islamic':4.66,
'iz':4.66,
'minor':4.66,
'nach':4.66,
'nuttin':4.66,
'odd':4.66,
'pile':4.66,
'punk':4.66,
'quisiera':4.66,
'ruff':4.66,
'seu':4.66,
'shorty':4.66,
'strung':4.66,
'ter':4.66,
'theres':4.66,
'tua':4.66,
'um':4.66,
'v':4.66,
'wanting':4.66,
'yeltsin':4.66,
'yur':4.66,
'indirect':4.65,
'rappin':4.65,
'raps':4.65,
'stripped':4.65,
'tire':4.65,
'undone':4.65,
'wolves':4.65,
'mek':4.65,
'\#ff':4.64,
'b3':4.64,
'crazy':4.64,
'dry':4.64,
'euch':4.64,
'f':4.64,
'freak':4.64,
'freaky':4.64,
'fucking':4.64,
'fuera':4.64,
'ganz':4.64,
'government':4.64,
'heb':4.64,
'hv':4.64,
'ini':4.64,
'kijk':4.64,
'lbs':4.64,
'left':4.64,
'lust':4.64,
'muslim':4.64,
'nj':4.64,
'po':4.64,
'pretend':4.64,
'que':4.64,
'slit':4.64,
'soviet':4.64,
'un':4.64,
'yer':4.64,
'compton':4.63,
'fragments':4.63,
'geht':4.63,
'stares':4.63,
'stiff':4.63,
'wasn':4.63,
'zij':4.63,
'\#mm':4.62,
'**municipal':4.62,
'\@joejonas':4.62,
'\@nickjonas':4.62,
'altijd':4.62,
'bull':4.62,
'bureau':4.62,
'dick':4.62,
'diet':4.62,
'gasoline':4.62,
'gov':4.62,
'governments':4.62,
'gray':4.62,
'holes':4.62,
'holler':4.62,
'lease':4.62,
'lebanon':4.62,
'noone':4.62,
'ol\'':4.62,
'out':4.62,
'palestinian':4.62,
'past':4.62,
'peasants':4.62,
'pigs':4.62,
'pressed':4.62,
'serbs':4.62,
'short-term':4.62,
'wid':4.62,
'year-ago':4.62,
'atomic':4.61,
'daze':4.61,
'feds':4.61,
'ib':4.61,
'jij':4.61,
'inspection':4.61,
'dit':4.6,
'76th':4.6,
'bind':4.6,
'bound':4.6,
'bruh':4.6,
'commercial':4.6,
'dar':4.6,
'differences':4.6,
'fiscal':4.6,
'flames':4.6,
'half':4.6,
'ik':4.6,
'jg':4.6,
'later':4.6,
'lemme':4.6,
'li':4.6,
'little':4.6,
'mal':4.6,
'nme':4.6,
'nunca':4.6,
'obligation':4.6,
'pretax':4.6,
'q':4.6,
'recall':4.6,
'sack':4.6,
'shawty':4.6,
'sticky':4.6,
'tight':4.6,
'trigger':4.6,
'under':4.6,
'used':4.6,
'vs':4.6,
'was':4.6,
'bark':4.59,
'disguise':4.59,
'gots':4.59,
'och':4.59,
'seldom':4.59,
'dir':4.58,
'tarde':4.58,
'\@revrunwisdom':4.58,
'aan':4.58,
'als':4.58,
'although':4.58,
'bgt':4.58,
'busy':4.58,
'c-includes':4.58,
'cit':4.58,
'congressional':4.58,
'differ':4.58,
'emo':4.58,
'excuse':4.58,
'fighter':4.58,
'gtgt':4.58,
'gunna':4.58,
'hazy':4.58,
'hit':4.58,
'insisted':4.58,
'inspector':4.58,
'institution':4.58,
'jigga':4.58,
'jurors':4.58,
'kom':4.58,
'lls':4.58,
'lock':4.58,
'police':4.58,
'radical':4.58,
'saudi':4.58,
'senator':4.58,
'stops':4.58,
'whatever':4.58,
'durch':4.57,
'unreal':4.57,
'zal':4.57,
'dose':4.56,
'uit':4.56,
'\#idothat2':4.56,
'-p':4.56,
'\@jordanknight':4.56,
'ads':4.56,
'anymore':4.56,
'auf':4.56,
'blade':4.56,
'blues':4.56,
'bout':4.56,
'boxing':4.56,
'broker':4.56,
'bust':4.56,
'crunk':4.56,
'gon':4.56,
'grey':4.56,
'hoe':4.56,
'kuwait':4.56,
'merely':4.56,
'outta':4.56,
'queda':4.56,
'seh':4.56,
'stoned':4.56,
'w/o':4.56,
'yg':4.56,
'separately':4.55,
'uhh':4.55,
'gaat':4.55,
'appendix':4.54,
'\#epicpetwars':4.54,
'\#formspringme':4.54,
'\#shoutout':4.54,
'arbitrary':4.54,
'axe':4.54,
'beneath':4.54,
'bit':4.54,
'blocks':4.54,
'bom':4.54,
'caused':4.54,
'command':4.54,
'conn':4.54,
'conservative':4.54,
'depois':4.54,
'eh':4.54,
'er':4.54,
'gah':4.54,
'gut':4.54,
'hw':4.54,
'ima':4.54,
'institutional':4.54,
'iv':4.54,
'ludacris':4.54,
'narrow':4.54,
'oder':4.54,
'pending':4.54,
'pirate':4.54,
'prolly':4.54,
'regulation':4.54,
'rs':4.54,
'senators':4.54,
'sheesh':4.54,
'terms':4.54,
'twisting':4.54,
'urged':4.54,
'chains':4.53,
'chloride':4.53,
'waits':4.53,
'06:00:00AM':4.52,
'blew':4.52,
'clique':4.52,
'crucial':4.52,
'dependent':4.52,
'former':4.52,
'gak':4.52,
'hadn':4.52,
'investigations':4.52,
'leave':4.52,
'muss':4.52,
'omfg':4.52,
'previously':4.52,
'rule':4.52,
'shud':4.52,
'small':4.52,
'und':4.52,
'utterly':4.52,
'weight':4.52,
'cocked':4.51,
'daar':4.51,
'pill':4.51,
'\@iamdiddy':4.5,
'ages':4.5,
'arab':4.5,
'corporations':4.5,
'disposed':4.5,
'distance':4.5,
'dong':4.5,
'few':4.5,
'govt':4.5,
'nah':4.5,
'outst':4.5,
'palestinians':4.5,
'prob':4.5,
'randomly':4.5,
'regulatory':4.5,
'reverse':4.5,
'sangre':4.5,
'temporary':4.5,
'goo':4.49,
'couldn':4.49,
'army':4.48,
'blow':4.48,
'bs':4.48,
'despite':4.48,
'dunno':4.48,
'elections':4.48,
'essays':4.48,
'grabbed':4.48,
'heck':4.48,
'hidden':4.48,
'issue':4.48,
'lehman':4.48,
'ms':4.48,
'negroes':4.48,
'nuthin':4.48,
'snoop':4.48,
'y':4.48,
'ashes':4.47,
'commanded':4.47,
'nerves':4.47,
'spill':4.47,
'craving':4.46,
'crowds':4.46,
'gats':4.46,
'hammer':4.46,
'isn\'t':4.46,
'kicks':4.46,
'lecture':4.46,
'neither':4.46,
'ol':4.46,
'plastic':4.46,
'politics':4.46,
'required':4.46,
'rigid':4.46,
'rumble':4.46,
'scarcely':4.46,
'short':4.46,
'shorter':4.46,
'shy':4.46,
'skool':4.46,
'thrown':4.46,
'tossed':4.46,
'tricky':4.46,
'compelled':4.45,
'infantry':4.45,
'auch':4.44,
'bail':4.44,
'blank':4.44,
'bottom':4.44,
'busta':4.44,
'cop':4.44,
'correction':4.44,
'court\'s':4.44,
'errands':4.44,
'ew':4.44,
'gay':4.44,
'gaza':4.44,
'harder':4.44,
'haze':4.44,
'liable':4.44,
'push':4.44,
'rag':4.44,
'require':4.44,
'tank':4.44,
'triste':4.44,
'unusual':4.44,
'wat':4.44,
'weed':4.44,
'glocks':4.44,
'longing':4.43,
'removal':4.43,
'-d':4.42,
'behind':4.42,
'below':4.42,
'boa':4.42,
'breaks':4.42,
'commercials':4.42,
'constraints':4.42,
'controlling':4.42,
'dum':4.42,
'emotional':4.42,
'hides':4.42,
'knocked':4.42,
'na':4.42,
'nada':4.42,
'niet':4.42,
'notwithstanding':4.42,
'recalled':4.42,
'regulations':4.42,
'remains':4.42,
'republican':4.42,
'sem':4.42,
'serious':4.42,
'stumble':4.42,
'treatment':4.42,
'vietnam':4.42,
'regime':4.42,
'\#retweetthisif':4.4,
'beats':4.4,
'brokers':4.4,
'controlled':4.4,
'cus':4.4,
'desert':4.4,
'detroit':4.4,
'fuk':4.4,
'glock':4.4,
'hearings':4.4,
'lobbying':4.4,
'morir':4.4,
'muero':4.4,
'nicht':4.4,
'opposite':4.4,
'shyt':4.4,
'tied':4.4,
'wouldn':4.4,
'dich':4.4,
'\#haiti':4.38,
'03:00:00AM':4.38,
'authorities':4.38,
'chills':4.38,
'competitors':4.38,
'economy':4.38,
'effing':4.38,
'far':4.38,
'frozen':4.38,
'mortality':4.38,
'plaintiff':4.38,
'prices':4.38,
'rarely':4.38,
'rebel':4.38,
'resistance':4.38,
'slips':4.38,
'tangled':4.38,
'acids':4.38,
'naive':4.37,
'querer':4.37,
'shack':4.36,
'\@jonasbrothers':4.36,
'billings':4.36,
'consequence':4.36,
'custody':4.36,
'dang':4.36,
'divided':4.36,
'division':4.36,
'duh':4.36,
'end':4.36,
'grease':4.36,
'hide':4.36,
'irregular':4.36,
'juvenile':4.36,
'morn':4.36,
'needle':4.36,
'operations':4.36,
'pulling':4.36,
'reducing':4.36,
'sharply':4.36,
'strange':4.36,
'tease':4.36,
'burnin':4.35,
'strictly':4.35,
'storms':4.34,
'adios':4.34,
'arent':4.34,
'blown':4.34,
'burst':4.34,
'congress':4.34,
'ditch':4.34,
'droppin':4.34,
'faded':4.34,
'hiding':4.34,
'ho':4.34,
'hurry':4.34,
'icy':4.34,
'loud':4.34,
'replaced':4.34,
'ripping':4.34,
'shook':4.34,
'vampire':4.34,
'hesitate':4.33,
'cease':4.32,
'communist':4.32,
'eff':4.32,
'fbi':4.32,
'gop':4.32,
'howling':4.32,
'hunt':4.32,
'reduced':4.32,
'scattered':4.32,
'separate':4.32,
'slowly':4.32,
'surgical':4.32,
'tripping':4.32,
'waited':4.32,
'yikes':4.32,
'bias':4.31,
'blunt':4.31,
'shaking':4.31,
'din':4.31,
'smack':4.31,
'affected':4.3,
'brokerage':4.3,
'drop':4.3,
'formerly':4.3,
'gore':4.3,
'guards':4.3,
'hadn\'t':4.3,
'ich':4.3,
'iran':4.3,
'legislation':4.3,
'monday':4.3,
'muslims':4.3,
'naw':4.3,
'pit':4.3,
'sneak':4.3,
'so-called':4.3,
'sudden':4.3,
'sue':4.3,
'tick':4.3,
'rowdy':4.29,
'slippin':4.29,
'chased':4.29,
'divide':4.29,
'leavin':4.29,
'mortal':4.29,
'rebellion':4.29,
'aged':4.28,
'aids':4.28,
'bieber':4.28,
'fooling':4.28,
'guerrillas':4.28,
'idk':4.28,
'il':4.28,
'jury':4.28,
'nor':4.28,
'petroleum':4.28,
'pimpin':4.28,
'rules':4.28,
'spider':4.28,
'swore':4.28,
'taken':4.28,
'tests':4.28,
'wasn\'t':4.28,
'moan':4.27,
'warn':4.27,
'\@justinbieber':4.26,
'blows':4.26,
'defendants':4.26,
'fck':4.26,
'fires':4.26,
'intervention':4.26,
'lawyers':4.26,
'non':4.26,
'outlaw':4.26,
'owing':4.26,
'sht':4.26,
'split':4.26,
'storm':4.26,
'concerning':4.24,
'contrary':4.24,
'04:00:00AM':4.24,
'bah':4.24,
'barely':4.24,
'but':4.24,
'courts':4.24,
'kanye':4.24,
'lower':4.24,
'minority':4.24,
'orders':4.24,
'pounding':4.24,
'protests':4.24,
'psychiatric':4.24,
'questioned':4.24,
'raw':4.24,
'rebels':4.24,
'sag':4.24,
'shoulda':4.24,
'smash':4.24,
'spy':4.24,
'stern':4.24,
'stray':4.24,
'swear':4.24,
'unless':4.24,
'worn':4.23,
'dues':4.22,
'freaks':4.22,
'\#iranelection':4.22,
'away':4.22,
'backwards':4.22,
'beware':4.22,
'blast':4.22,
'breakin':4.22,
'bush\'s':4.22,
'calories':4.22,
'cold':4.22,
'concerned':4.22,
'due':4.22,
'grind':4.22,
'iranian':4.22,
'labor':4.22,
'limit':4.22,
'limited':4.22,
'loan':4.22,
'mutha':4.22,
'python':4.22,
'republicans':4.22,
'scratch':4.22,
'veto':4.22,
'waitin':4.22,
'wtf':4.22,
'waar':4.21,
'beat':4.2,
'blah':4.2,
'darn':4.2,
'default':4.2,
'dnt':4.2,
'expenditure':4.2,
'exposed':4.2,
'grrr':4.2,
'legislators':4.2,
'levy':4.2,
'lone':4.2,
'mccain':4.2,
'periods':4.2,
'politically':4.2,
'pow':4.2,
'prosecutors':4.2,
'screw':4.2,
'uh':4.2,
'verdict':4.2,
'weird':4.2,
'whips':4.2,
'underlying':4.19,
'objection':4.18,
'arsenal':4.18,
'boss':4.18,
'capture':4.18,
'chemical':4.18,
'dis':4.18,
'ex':4.18,
'exam':4.18,
'explode':4.18,
'forces':4.18,
'grr':4.18,
'porn':4.18,
'prey':4.18,
'reduce':4.18,
'smells':4.18,
'unpublished':4.18,
'warcraft':4.18,
'implications':4.17,
'uptight':4.17,
'acute':4.16,
'blades':4.16,
'astray':4.16,
'bash':4.16,
'chop':4.16,
'clinic':4.16,
'froze':4.16,
'gambling':4.16,
'heat':4.16,
'nowhere':4.16,
'palin':4.16,
'sigh':4.16,
'stranger':4.16,
'strangers':4.16,
'sucking':4.16,
'sweat':4.16,
'vice':4.16,
'crowd':4.14,
'demand':4.14,
'drag':4.14,
'fuck':4.14,
'havent':4.14,
'minimum':4.14,
'pee':4.14,
'pirates':4.14,
'pushing':4.14,
'shark':4.14,
'ripped':4.13,
'strict':4.13,
'decrease':4.12,
'drain':4.12,
'messing':4.12,
'renal':4.12,
'05:00:00AM':4.12,
'aren\'t':4.12,
'attorney':4.12,
'bother':4.12,
'fuss':4.12,
'hittin':4.12,
'negro':4.12,
'nonsense':4.12,
'nope':4.12,
'political':4.12,
'reductions':4.12,
'rush':4.12,
'shallow':4.12,
'taxpayers':4.12,
'twisted':4.12,
'blunts':4.11,
'abyss':4.1,
'lesser':4.1,
'liability':4.1,
'murda':4.1,
'conviction':4.1,
'cost':4.1,
'demanded':4.1,
'enforcement':4.1,
'erase':4.1,
'freaking':4.1,
'hard':4.1,
'heavy':4.1,
'hunting':4.1,
'laundry':4.1,
'less':4.1,
'numb':4.1,
'pills':4.1,
'pushed':4.1,
'rid':4.1,
'sacrifice':4.1,
'takeover':4.1,
'wack':4.1,
'ego':4.08,
'rumors':4.08,
'servant':4.08,
'weary':4.08,
'conservatives':4.08,
'crumble':4.08,
'cutting':4.08,
'fallin':4.08,
'freeze':4.08,
'hung':4.08,
'knife':4.08,
'plea':4.08,
'stopping':4.08,
'surrender':4.08,
'temper':4.08,
'wont':4.08,
'cardiac':4.06,
'fading':4.06,
'blinding':4.06,
'concerns':4.06,
'flushing':4.06,
'haiti':4.06,
'kurupt':4.06,
'mondays':4.06,
'prosecutor':4.06,
'sour':4.06,
'test':4.06,
'toll':4.06,
'unfollow':4.06,
'collide':4.04,
'fade':4.04,
'needles':4.04,
'chemicals':4.04,
'colder':4.04,
'concern':4.04,
'discharge':4.04,
'dominated':4.04,
'fall':4.04,
'hollow':4.04,
'hospice':4.04,
'hunter':4.04,
'imposed':4.04,
'reduction':4.04,
'shootin':4.04,
'spittin':4.04,
'unknown':4.04,
'unlike':4.04,
'welt':4.04,
'worm':4.04,
'rust':4.02,
'distant':4.02,
'affair':4.02,
'aint':4.02,
'block':4.02,
'consequences':4.02,
'dropping':4.02,
'ending':4.02,
'goodbyes':4.02,
'hasn\'t':4.02,
'imprecisely':4.02,
'incident':4.02,
'investigation':4.02,
'off':4.02,
'strife':4.02,
'strikes':4.02,
'weren\'t':4.02,
'ain\'t':4,
'alleged':4,
'arafat':4,
'bum':4,
'ceased':4,
'cracks':4,
'creeping':4,
'defensive':4,
'didn\'t':4,
'didnt':4,
'downs':4,
'force':4,
'least':4,
'limits':4,
'racial':4,
'ridiculous':4,
'rip':4,
'roughly':4,
'twit':4,
'zombies':4,
'accidentally':3.98,
'avoided':3.98,
'bite':3.98,
'breaking':3.98,
'demands':3.98,
'diagnosis':3.98,
'fled':3.98,
'hardly':3.98,
'humidity':3.98,
'isnt':3.98,
'old':3.98,
'punks':3.98,
'terminal':3.98,
'ruins':3.98,
'cracked':3.98,
'slam':3.98,
'argh':3.96,
'bang':3.96,
'bye':3.96,
'closing':3.96,
'dagger':3.96,
'expense':3.96,
'fists':3.96,
'iraqi':3.96,
'loose':3.96,
'minus':3.96,
'slugs':3.96,
'strike':3.96,
'tough':3.96,
'trial':3.96,
'unclear':3.96,
'killa':3.96,
'skull':3.96,
'charges':3.94,
'darker':3.94,
'erroneously':3.94,
'mess':3.94,
'pakistan':3.94,
'reluctant':3.94,
'slumdog':3.94,
'strapped':3.94,
'dizzy':3.94,
'executed':3.94,
'honky':3.94,
'homework':3.92,
'nixon':3.92,
'omitted':3.92,
'stained':3.92,
'ughh':3.92,
'jaded':3.92,
'dusty':3.92,
'absent':3.9,
'alarm':3.9,
'artificial':3.9,
'defendant':3.9,
'dim':3.9,
'doesnt':3.9,
'impose':3.9,
'iraq':3.9,
'issues':3.9,
'killas':3.9,
'misses':3.9,
'neediest':3.9,
'nothing':3.9,
'opponent':3.9,
'quit':3.9,
'slipping':3.9,
'stop':3.9,
'bald':3.9,
'begged':3.9,
'dropped':3.88,
'drunk':3.88,
'mortgage':3.88,
'nooo':3.88,
'shout':3.88,
'artillery':3.88,
'goddamn':3.88,
'rags':3.88,
'restless':3.88,
'uncertain':3.88,
'fiends':3.88,
'ass':3.86,
'farewell':3.86,
'fuckin':3.86,
'hang':3.86,
'not':3.86,
'sanctions':3.86,
'stopped':3.86,
'subjected':3.86,
'tremble':3.86,
'voodoo':3.86,
'wouldnt':3.86,
'slipped':3.86,
'mold':3.85,
'shiver':3.85,
'allegations':3.84,
'armed':3.84,
'ended':3.84,
'excuses':3.84,
'gripe':3.84,
'lawyer':3.84,
'messed':3.84,
'none':3.84,
'offline':3.84,
'pleaded':3.84,
'rent':3.84,
'shouldn\'t':3.84,
'snatch':3.84,
'ghosts':3.84,
'hatin':3.84,
'fragile':3.83,
'baddest':3.82,
'blood':3.82,
'creep':3.82,
'dark':3.82,
'darkness':3.82,
'eliminate':3.82,
'forgetting':3.82,
'gang':3.82,
'hanging':3.82,
'hardest':3.82,
'haven\'t':3.82,
'junk':3.82,
'loans':3.82,
'oppose':3.82,
'slip':3.82,
'sos':3.82,
'thirst':3.82,
'erased':3.82,
'vain':3.82,
'fades':3.81,
'aggressive':3.8,
'costs':3.8,
'critics':3.8,
'fire':3.8,
'fist':3.8,
'interment':3.8,
'ow':3.8,
'pale':3.8,
'protesters':3.8,
'witch':3.8,
'chronic':3.79,
'thirsty':3.79,
'thorns':3.79,
'sink':3.79,
'battles':3.78,
'bugs':3.78,
'court':3.78,
'ends':3.78,
'exams':3.78,
'predeceased':3.78,
'risks':3.78,
'rusty':3.78,
'slow':3.78,
'wouldn\'t':3.78,
'bothered':3.78,
'unnecessary':3.78,
'nothings':3.76,
'resigned':3.76,
'symptoms':3.76,
'yell':3.76,
'gutter':3.76,
'hangs':3.76,
'void':3.76,
'bailout':3.74,
'boo':3.74,
'critic\'s':3.74,
'denying':3.74,
'last':3.74,
'noise':3.74,
'obsession':3.74,
'reckless':3.74,
'shove':3.74,
'stomp':3.74,
'wait':3.74,
'sucka':3.74,
'pimp':3.73,
'stranded':3.73,
'tearing':3.73,
'strain':3.73,
'crack':3.72,
'fewer':3.72,
'gross':3.72,
'kick':3.72,
'oops':3.72,
'operation':3.72,
'removed':3.72,
'withdrawal':3.72,
'crowded':3.71,
'lacking':3.71,
'revenge':3.71,
'foolish':3.7,
'con':3.7,
'crooked':3.7,
'demanding':3.7,
'dirt':3.7,
'don\'t':3.7,
'dont':3.7,
'goodbye':3.7,
'locked':3.7,
'remove':3.7,
'sentenced':3.7,
'wasnt':3.7,
'won\'t':3.7,
'abnormal':3.69,
'hustler':3.69,
'controversy':3.68,
'disagree':3.68,
'fees':3.68,
'hitting':3.68,
'kicking':3.68,
'mean':3.68,
'missed':3.68,
'rival':3.68,
'sucker':3.68,
'waiting':3.68,
'wrath':3.68,
'plead':3.67,
'closed':3.66,
'deadline':3.66,
'down':3.66,
'low':3.66,
'messy':3.66,
'outdated':3.66,
'patients':3.66,
'pressure':3.66,
'snitch':3.66,
'sorry':3.66,
'stuck':3.66,
'anti':3.65,
'complications':3.65,
'disappear':3.65,
'snakes':3.65,
'lesions':3.65,
'bill':3.64,
'blocked':3.64,
'bore':3.64,
'cuts':3.64,
'darkest':3.64,
'delete':3.64,
'ghost':3.64,
'miss':3.64,
'nobody':3.64,
'nothin':3.64,
'shocked':3.64,
'swine':3.64,
'uncertainty':3.64,
'fooled':3.63,
'awkward':3.62,
'baghdad':3.62,
'begging':3.62,
'brat':3.62,
'doesn\'t':3.62,
'haunt':3.62,
'hussein':3.62,
'incompletely':3.62,
'limitations':3.62,
'risk':3.62,
'tore':3.62,
'bacteria':3.61,
'crude':3.6,
'dust':3.6,
'falls':3.6,
'flies':3.6,
'indicted':3.6,
'madness':3.6,
'mistaken':3.6,
'shattered':3.6,
'suspects':3.6,
'acid':3.59,
'pistol':3.59,
'decreased':3.58,
'absence':3.58,
'couldnt':3.58,
'excluded':3.58,
'gossip':3.58,
'leaving':3.58,
'punch':3.58,
'shotgun':3.58,
'sirens':3.58,
'restricted':3.57,
'darkened':3.57,
'slut':3.57,
'servants':3.56,
'afghanistan':3.56,
'confrontation':3.56,
'confusing':3.56,
'denial':3.56,
'empty':3.56,
'fucked':3.56,
'gloom':3.56,
'misidentified':3.56,
'mob':3.56,
'offense':3.56,
'piss':3.56,
'protest':3.56,
'runaway':3.56,
'shut':3.56,
'sorely':3.56,
'dire':3.55,
'stains':3.55,
'taxation':3.55,
'flee':3.54,
'haunted':3.54,
'bug':3.54,
'caught':3.54,
'chained':3.54,
'crushed':3.54,
'despise':3.54,
'dispute':3.54,
'expensive':3.54,
'forsaken':3.54,
'hospitals':3.54,
'owe':3.54,
'poor\'s':3.54,
'rough':3.54,
'shock':3.54,
'slug':3.54,
'without':3.54,
'drunken':3.53,
'missin':3.53,
'separation':3.53,
'spite':3.53,
'addicted':3.52,
'apart':3.52,
'fallen':3.52,
'suspected':3.52,
'suspicion':3.52,
'teardrops':3.52,
'tomb':3.52,
'ugh':3.52,
'warned':3.52,
'untrue':3.51,
'casket':3.5,
'dope':3.5,
'foe':3.5,
'hospital':3.5,
'paranoid':3.5,
'snake':3.5,
'struck':3.5,
'deficiency':3.49,
'pressures':3.49,
'cant':3.48,
'inmates':3.48,
'no':3.48,
'opponents':3.48,
'opposition':3.48,
'sucked':3.48,
'tobacco':3.48,
'unlikely':3.48,
'zombie':3.48,
'screams':3.48,
'sinking':3.48,
'swollen':3.48,
'deceive':3.47,
'monsters':3.47,
'urine':3.47,
'chaos':3.46,
'creepy':3.46,
'fee':3.46,
'insanity':3.46,
'isolated':3.46,
'late':3.46,
'misspelled':3.46,
'misstated':3.46,
'misunderstood':3.46,
'monster':3.46,
'refuse':3.46,
'shoot':3.46,
'sting':3.46,
'thorn':3.46,
'wreck':3.46,
'fright':3.45,
'radiation':3.45,
'stab':3.45,
'confined':3.44,
'delays':3.44,
'deny':3.44,
'fault':3.44,
'forgot':3.44,
'ghetto':3.44,
'litigation':3.44,
'poop':3.44,
'seized':3.44,
'zero':3.44,
'cage':3.44,
'disappeared':3.44,
'trap':3.44,
'diss':3.43,
'foes':3.43,
'smashed':3.42,
'anxious':3.42,
'can\'t':3.42,
'cut':3.42,
'erroneous':3.42,
'gangsta':3.42,
'gone':3.42,
'ignorant':3.42,
'invasion':3.42,
'lame':3.42,
'obsessed':3.42,
'raging':3.42,
'shatter':3.42,
'shouting':3.42,
'troubles':3.42,
'disturbed':3.41,
'zit':3.41,
'against':3.4,
'condolences':3.4,
'muthafucka':3.4,
'separated':3.4,
'struggle':3.4,
'whores':3.4,
'deception':3.39,
'stain':3.39,
'unconscious':3.39,
'delay':3.38,
'difficulty':3.38,
'discontinued':3.38,
'eliminated':3.38,
'haunting':3.38,
'hungry':3.38,
'refused':3.38,
'wicked':3.38,
'blinded':3.37,
'hunger':3.37,
'torn':3.37,
'phony':3.36,
'argued':3.36,
'beast':3.36,
'bullet':3.36,
'busted':3.36,
'critic':3.36,
'dammit':3.36,
'deleted':3.36,
'dentist':3.36,
'forbidden':3.36,
'killin':3.36,
'syndrome':3.36,
'tornado':3.36,
'weapon':3.36,
'emptiness':3.35,
'injection':3.35,
'burnt':3.34,
'complicated':3.34,
'crap':3.34,
'never':3.34,
'politicians':3.34,
'tired':3.34,
'traffic':3.34,
'unfair':3.34,
'vulnerable':3.34,
'warning':3.34,
'fucker':3.33,
'sinner':3.33,
'envy':3.33,
'whack':3.32,
'alone':3.32,
'bleeds':3.32,
'cannot':3.32,
'confusion':3.32,
'couldn\'t':3.32,
'expenses':3.32,
'ignored':3.32,
'nigga':3.32,
'noose':3.32,
'opposed':3.32,
'restrictions':3.32,
'scars':3.32,
'shots':3.32,
'savage':3.31,
'choke':3.31,
'cigarettes':3.31,
'doubts':3.3,
'fool':3.3,
'fury':3.3,
'lowest':3.3,
'suckers':3.3,
'whip':3.3,
'helpless':3.29,
'rats':3.29,
'conspiracy':3.28,
'crashing':3.28,
'falling':3.28,
'fools':3.28,
'lazy':3.28,
'nuclear':3.28,
'scar':3.28,
'suspicious':3.28,
'scarred':3.27,
'screamed':3.27,
'cough':3.26,
'damned':3.26,
'frown':3.24,
'pimps':3.24,
'vengeance':3.24,
'canceled':3.24,
'cavity':3.24,
'delayed':3.24,
'dull':3.24,
'fat':3.24,
'jerk':3.24,
'missile':3.24,
'remorse':3.24,
'rot':3.24,
'screwed':3.24,
'gangstas':3.23,
'captured':3.22,
'critical':3.22,
'fell':3.22,
'forget':3.22,
'freezing':3.22,
'ignore':3.22,
'losers':3.22,
'lynch':3.22,
'wasting':3.22,
'defect':3.21,
'frightened':3.2,
'combat':3.2,
'convicted':3.2,
'defeat':3.2,
'dirty':3.2,
'dread':3.2,
'drug':3.2,
'inferior':3.2,
'screamin':3.2,
'cryin':3.19,
'liar':3.18,
'aching':3.18,
'difficult':3.18,
'faggot':3.18,
'FALSE':3.18,
'forgotten':3.18,
'garbage':3.18,
'kicked':3.18,
'scandal':3.18,
'sinners':3.18,
'suspension':3.18,
'woe':3.18,
'accusations':3.16,
'complain':3.16,
'declined':3.16,
'disorders':3.16,
'doubt':3.16,
'forced':3.16,
'lack':3.16,
'severe':3.16,
'smoke':3.16,
'yuck':3.16,
'feared':3.14,
'gangster':3.14,
'argument':3.14,
'avoid':3.14,
'bitch':3.14,
'bruise':3.14,
'dismissed':3.14,
'disorder':3.14,
'exhausted':3.14,
'incorrectly':3.14,
'isolation':3.14,
'scream':3.14,
'slapped':3.14,
'spit':3.14,
'suck':3.14,
'sucks':3.14,
'suspect':3.14,
'whore':3.14,
'wrong':3.14,
'cursed':3.12,
'doom':3.12,
'desperate':3.12,
'lonesome':3.12,
'regret':3.12,
'rob':3.12,
'defects':3.1,
'ambulance':3.1,
'annoy':3.1,
'conflict':3.1,
'criticism':3.1,
'execution':3.1,
'fought':3.1,
'indictment':3.1,
'pity':3.1,
'smoking':3.1,
'stink':3.1,
'tear':3.1,
'unable':3.1,
'cigarette':3.09,
'beg':3.08,
'prejudice':3.08,
'bullshit':3.08,
'decay':3.08,
'decline':3.08,
'deficit':3.08,
'difficulties':3.08,
'graves':3.08,
'regrets':3.08,
'suspended':3.08,
'trapped':3.08,
'yelling':3.08,
'aging':3.06,
'arguing':3.06,
'bullets':3.06,
'dumb':3.06,
'emergency':3.06,
'greed':3.06,
'idiot':3.06,
'idiots':3.06,
'inadequate':3.06,
'refugees':3.06,
'turmoil':3.06,
'rotting':3.04,
'greedy':3.04,
'havoc':3.04,
'arguments':3.04,
'bled':3.04,
'bored':3.04,
'complaints':3.04,
'horror':3.04,
'insane':3.04,
'jealousy':3.04,
'lawsuits':3.04,
'rat':3.04,
'resignation':3.04,
'scare':3.04,
'anxiety':3.03,
'fiend':3.02,
'hostile':3.02,
'weeping':3.02,
'broken':3.02,
'criticized':3.02,
'offensive':3.02,
'trembling':3.02,
'argue':3,
'argues':3,
'bitter':3,
'condemned':3,
'fights':3,
'muthafuckin':3,
'vicious':3,
'battle':2.98,
'confused':2.98,
'crappy':2.98,
'damn':2.98,
'guns':2.98,
'ignorance':2.98,
'missing':2.98,
'niggaz':2.98,
'problem':2.98,
'worthless':2.98,
'insecure':2.98,
'coffin':2.96,
'conflicts':2.96,
'damages':2.96,
'lawsuit':2.96,
'niggas':2.96,
'screaming':2.96,
'wound':2.96,
'bloody':2.94,
'cemetery':2.94,
'choking':2.94,
'explosion':2.94,
'foul':2.94,
'nervous':2.94,
'sore':2.94,
'tension':2.94,
'thief':2.94,
'thug':2.94,
'unfortunate':2.94,
'weakness':2.94,
'breakdown':2.94,
'bury':2.93,
'accused':2.92,
'awful':2.92,
'burn':2.92,
'cries':2.92,
'hangover':2.92,
'mistakes':2.92,
'problems':2.92,
'riot':2.92,
'sleepless':2.92,
'demon':2.92,
'boring':2.9,
'bruised':2.9,
'burned':2.9,
'collapse':2.9,
'complained':2.9,
'debt':2.9,
'fake':2.9,
'frustrated':2.9,
'impossible':2.9,
'ouch':2.9,
'deadly':2.9,
'disrespect':2.9,
'drown':2.9,
'badly':2.88,
'banned':2.88,
'burning':2.88,
'cancelled':2.88,
'dislike':2.88,
'threats':2.88,
'sins':2.88,
'bombs':2.86,
'complaint':2.86,
'errors':2.86,
'illegal':2.86,
'lonely':2.86,
'mourns':2.86,
'prisoner':2.86,
'stress':2.86,
'tax':2.86,
'violations':2.86,
'widow':2.86,
'addict':2.84,
'buried':2.84,
'devils':2.84,
'dump':2.84,
'hater':2.84,
'incorrect':2.84,
'infection':2.84,
'neglected':2.84,
'penalty':2.84,
'terrible':2.84,
'unkind':2.84,
'weak':2.84,
'annoying':2.82,
'bills':2.82,
'blame':2.82,
'burden':2.82,
'complaining':2.82,
'danger':2.82,
'demise':2.82,
'despair':2.82,
'disabled':2.82,
'discrimination':2.82,
'filthy':2.82,
'gun':2.82,
'lied':2.82,
'missiles':2.82,
'mourners':2.82,
'obituary':2.82,
'prosecution':2.82,
'worry':2.82,
'mafia':2.81,
'wounds':2.8,
'burns':2.78,
'cowards':2.78,
'fever':2.78,
'mistake':2.78,
'trouble':2.78,
'troubled':2.78,
'wasted':2.78,
'bitches':2.76,
'bleeding':2.76,
'fighting':2.76,
'lose':2.76,
'lost':2.76,
'pathetic':2.76,
'unfortunately':2.76,
'neglect':2.76,
'defeated':2.74,
'loses':2.74,
'stressed':2.74,
'ugly':2.74,
'violation':2.74,
'unholy':2.73,
'addiction':2.72,
'arrests':2.72,
'disgrace':2.72,
'heartbreaker':2.72,
'mourn':2.72,
'struggling':2.72,
'desperation':2.7,
'distress':2.7,
'fight':2.7,
'spam':2.7,
'taxes':2.7,
'waste':2.7,
'worse':2.7,
'sorrows':2.69,
'bleed':2.69,
'ache':2.68,
'bastards':2.68,
'fears':2.68,
'injuries':2.68,
'jealous':2.68,
'misery':2.68,
'ruin':2.68,
'shame':2.68,
'stupid':2.68,
'trash':2.68,
'deaf':2.67,
'afraid':2.66,
'ban':2.66,
'drugs':2.66,
'loneliness':2.66,
'penalties':2.66,
'surgery':2.66,
'tensions':2.66,
'bad':2.64,
'curse':2.64,
'demons':2.64,
'enemy':2.64,
'guilty':2.64,
'inflation':2.64,
'motherfucking':2.64,
'sin':2.64,
'heartaches':2.63,
'\#fail':2.62,
'beaten':2.62,
'lies':2.62,
'losing':2.62,
'nasty':2.62,
'retarded':2.62,
'rude':2.62,
'threatened':2.62,
'violated':2.62,
'thugs':2.61,
'abortion':2.6,
'brutal':2.6,
'crash':2.6,
'error':2.6,
'lie':2.6,
'mad':2.6,
'selfish':2.6,
'stole':2.6,
'worries':2.6,
'ashamed':2.59,
'infections':2.59,
'annoyed':2.58,
'blind':2.58,
'cheated':2.58,
'damage':2.58,
'disgusting':2.58,
'guilt':2.58,
'lying':2.58,
'motherfuckin':2.58,
'rotten':2.58,
'scared':2.58,
'scary':2.58,
'shitty':2.58,
'starving':2.58,
'stroke':2.58,
'betrayed':2.57,
'nightmares':2.56,
'assault':2.56,
'beating':2.56,
'grave':2.56,
'hopeless':2.56,
'loss':2.56,
'rage':2.56,
'satan':2.56,
'upset':2.56,
'corpse':2.55,
'abandoned':2.54,
'broke':2.54,
'cocaine':2.54,
'denied':2.54,
'harm':2.54,
'hurricane':2.54,
'miserable':2.54,
'pissed':2.54,
'ruined':2.54,
'tumor':2.53,
'attacked':2.52,
'bastard':2.52,
'destroy':2.52,
'failing':2.52,
'shooting':2.52,
'useless':2.52,
'motherfuckers':2.51,
'betray':2.5,
'psycho':2.5,
'shit':2.5,
'shot':2.5,
'stolen':2.5,
'crisis':2.48,
'damaged':2.48,
'haters':2.48,
'recession':2.48,
'saddam':2.48,
'slap':2.48,
'attacks':2.46,
'crashed':2.46,
'losses':2.46,
'panic':2.46,
'steal':2.46,
'stealing':2.46,
'tears':2.46,
'burial':2.44,
'cheat':2.44,
'dangerous':2.44,
'drowning':2.44,
'enemies':2.44,
'hating':2.44,
'prisoners':2.44,
'saddened':2.44,
'arrest':2.42,
'attack':2.42,
'flood':2.42,
'ill':2.42,
'killer':2.42,
'negative':2.42,
'worried':2.42,
'wounded':2.42,
'nigger':2.41,
'slaughter':2.41,
'asshole':2.4,
'flu':2.4,
'weapons':2.4,
'graveside':2.38,
'sad':2.38,
'victim':2.38,
'hurting':2.36,
'threat':2.36,
'frustration':2.34,
'hate':2.34,
'tragic':2.34,
'grief':2.33,
'accident':2.32,
'angry':2.32,
'fear':2.32,
'nightmare':2.32,
'poor':2.32,
'victims':2.32,
'anger':2.3,
'fired':2.3,
'fraud':2.3,
'theft':2.3,
'thieves':2.29,
'heartache':2.28,
'sadly':2.28,
'cheating':2.26,
'destruction':2.26,
'disappointed':2.26,
'bombing':2.24,
'devil':2.24,
'horrible':2.24,
'suffered':2.24,
'hatred':2.22,
'weep':2.22,
'hell':2.22,
'holocaust':2.22,
'injured':2.22,
'suffering':2.22,
'cried':2.2,
'crime':2.2,
'loser':2.2,
'depressed':2.18,
'divorce':2.18,
'hurt':2.18,
'robbed':2.18,
'tsunami':2.18,
'agony':2.16,
'drowned':2.16,
'homeless':2.16,
'pollution':2.16,
'corruption':2.14,
'crimes':2.14,
'hated':2.14,
'hurts':2.14,
'painful':2.12,
'sorrow':2.12,
'unemployment':2.12,
'unhappy':2.12,
'heartbreak':2.11,
'dying':2.1,
'funeral':2.1,
'pain':2.1,
'worst':2.1,
'dies':2.08,
'racist':2.08,
'rejected':2.08,
'robbery':2.08,
'suffer':2.08,
'virus':2.08,
'bankruptcy':2.06,
'fails':2.06,
'failure':2.06,
'hates':2.06,
'prison':2.06,
'slave':2.06,
'slaves':2.06,
'tragedy':2.06,
'violent':2.06,
'crying':2.04,
'destroyed':2.04,
'injury':2.04,
'rejection':2.02,
'motherfucker':2.02,
'sick':2.02,
'slavery':2.02,
'dead':2,
'disease':2,
'illness':2,
'killers':2,
'punishment':2,
'criminal':1.98,
'depression':1.98,
'headache':1.98,
'poverty':1.98,
'tumors':1.98,
'bomb':1.96,
'disaster':1.96,
'fail':1.96,
'poison':1.94,
'depressing':1.9,
'earthquake':1.9,
'evil':1.9,
'wars':1.9,
'abuse':1.88,
'diseases':1.88,
'sadness':1.88,
'violence':1.86,
'cruel':1.84,
'cry':1.84,
'failed':1.84,
'sickness':1.84,
'abused':1.83,
'tortured':1.82,
'fatal':1.8,
'killings':1.8,
'murdered':1.8,
'war':1.8,
'kills':1.78,
'jail':1.76,
'terror':1.76,
'die':1.74,
'killing':1.7,
'arrested':1.64,
'deaths':1.64,
'raped':1.64,
'torture':1.58,
'died':1.56,
'kill':1.56,
'killed':1.56,
'cancer':1.54,
'death':1.54,
'murder':1.48,
'terrorism':1.48,
'rape':1.44,
'suicide':1.3,
'terrorist':1.3
} | 15.071401 | 37 | 0.600363 |
happiness_dictionary={'laughter':8.5,
'happiness':8.44,
'love':8.42,
'happy':8.3,
'laughed':8.26,
'laugh':8.22,
'laughing':8.2,
'excellent':8.18,
'laughs':8.18,
'joy':8.16,
'successful':8.16,
'win':8.12,
'rainbow':8.1,
'smile':8.1,
'won':8.1,
'pleasure':8.08,
'smiled':8.08,
'rainbows':8.06,
'winning':8.04,
'celebration':8.02,
'enjoyed':8.02,
'healthy':8.02,
'music':8.02,
'celebrating':8,
'congratulations':8,
'weekend':8,
'celebrate':7.98,
'comedy':7.98,
'jokes':7.98,
'rich':7.98,
'victory':7.98,
'christmas':7.96,
'free':7.96,
'friendship':7.96,
'fun':7.96,
'holidays':7.96,
'loved':7.96,
'loves':7.96,
'loving':7.96,
'beach':7.94,
'hahaha':7.94,
'kissing':7.94,
'sunshine':7.94,
'beautiful':7.92,
'delicious':7.92,
'friends':7.92,
'funny':7.92,
'outstanding':7.92,
'paradise':7.92,
'sweetest':7.92,
'vacation':7.92,
'butterflies':7.92,
'freedom':7.9,
'flower':7.88,
'great':7.88,
'sunlight':7.88,
'sweetheart':7.88,
'sweetness':7.88,
'award':7.86,
'chocolate':7.86,
'hahahaha':7.86,
'heaven':7.86,
'peace':7.86,
'splendid':7.86,
'success':7.86,
'enjoying':7.84,
'kissed':7.84,
'attraction':7.82,
'celebrated':7.8,
'hero':7.8,
'hugs':7.8,
'positive':7.8,
'sun':7.8,
'birthday':7.78,
'blessed':7.78,
'fantastic':7.78,
'winner':7.78,
'delight':7.78,
'beauty':7.76,
'butterfly':7.76,
'entertainment':7.76,
'funniest':7.76,
'honesty':7.76,
'sky':7.76,
'smiles':7.76,
'succeed':7.76,
'wonderful':7.76,
'glorious':7.74,
'kisses':7.74,
'promotion':7.74,
'family':7.72,
'gift':7.72,
'humor':7.72,
'romantic':7.72,
'cupcakes':7.7,
'festival':7.7,
'hahahahaha':7.7,
'honour':7.7,
'relax':7.7,
'weekends':7.7,
'angel':7.68,
'b-day':7.68,
'bonus':7.68,
'brilliant':7.68,
'diamonds':7.68,
'holiday':7.68,
'lucky':7.68,
'mother':7.68,
'super':7.68,
'amazing':7.66,
'angels':7.66,
'enjoy':7.66,
'friend':7.66,
'friendly':7.66,
'mother\'s':7.66,
'profit':7.66,
'finest':7.66,
'bday':7.64,
'champion':7.64,
'grandmother':7.64,
'haha':7.64,
'kiss':7.64,
'kitten':7.64,
'miracle':7.64,
'mom':7.64,
'sweet':7.64,
'blessings':7.62,
'bright':7.62,
'cutest':7.62,
'entertaining':7.62,
'excited':7.62,
'excitement':7.62,
'joke':7.62,
'millionaire':7.62,
'prize':7.62,
'succeeded':7.62,
'successfully':7.62,
'winners':7.62,
'shines':7.6,
'awesome':7.6,
'genius':7.6,
'achievement':7.58,
'cake':7.58,
'cheers':7.58,
'exciting':7.58,
'goodness':7.58,
'hug':7.58,
'income':7.58,
'party':7.58,
'puppy':7.58,
'smiling':7.58,
'song':7.58,
'succeeding':7.58,
'tasty':7.58,
'victories':7.58,
'achieved':7.56,
'billion':7.56,
'cakes':7.56,
'easier':7.56,
'flowers':7.56,
'gifts':7.56,
'gold':7.56,
'merry':7.56,
'families':7.54,
'handsome':7.54,
'lovers':7.54,
'affection':7.53,
'candy':7.52,
'cute':7.52,
'diamond':7.52,
'earnings':7.52,
'interesting':7.52,
'peacefully':7.52,
'praise':7.52,
'relaxing':7.52,
'roses':7.52,
'saturdays':7.52,
'faithful':7.51,
'heavens':7.51,
'cherish':7.5,
'comfort':7.5,
'congrats':7.5,
'cupcake':7.5,
'earn':7.5,
'extraordinary':7.5,
'glory':7.5,
'hilarious':7.5,
'moonlight':7.5,
'optimistic':7.5,
'peaceful':7.5,
'romance':7.5,
'feast':7.49,
'attractive':7.48,
'glad':7.48,
'grandma':7.48,
'internet':7.48,
'pleasant':7.48,
'profits':7.48,
'smart':7.48,
'x-mas':7.48,
'babies':7.46,
'cheer':7.46,
'courage':7.46,
'enthusiasm':7.46,
'honest':7.46,
'loyal':7.46,
'opportunities':7.46,
'triumph':7.46,
'wow':7.46,
'jewels':7.46,
'forests':7.45,
'apple':7.44,
'dreams':7.44,
'fantasy':7.44,
'food':7.44,
'honey':7.44,
'miracles':7.44,
'sex':7.44,
'sing':7.44,
'starlight':7.44,
'thankful':7.44,
'wins':7.44,
'achieve':7.42,
'adored':7.42,
'cash':7.42,
'dances':7.42,
'gorgeous':7.42,
'grandchildren':7.42,
'incredible':7.42,
'lunch':7.42,
'mommy':7.42,
'parties':7.42,
'perfect':7.42,
'saturday':7.42,
'surprise':7.42,
'truth':7.42,
'blessing':7.4,
'creative':7.4,
'dinner':7.4,
'kindness':7.4,
'pleased':7.4,
'sexy':7.4,
'strength':7.4,
'thank':7.4,
'thanks':7.4,
'thanksgiving':7.4,
'treasure':7.4,
'valentine':7.4,
'riches':7.39,
'awarded':7.38,
'fabulous':7.38,
'grandfather':7.38,
'heavenly':7.38,
'hope':7.38,
'kids':7.38,
'magical':7.38,
'million':7.38,
'nice':7.38,
'sundays':7.38,
'wealth':7.38,
'fantasies':7.36,
'cares':7.36,
'dance':7.36,
'daughters':7.36,
'favorable':7.36,
'friend\'s':7.36,
'generosity':7.36,
'grateful':7.36,
'inspired':7.36,
'mothers':7.36,
'parents':7.36,
'valentine\'s':7.36,
'intelligent':7.35,
'liberation':7.35,
'melody':7.35,
'wonderland':7.35,
'beloved':7.34,
'caring':7.34,
'homemade':7.34,
'inspiring':7.34,
'movies':7.34,
'precious':7.34,
'respect':7.34,
'satisfaction':7.34,
'satisfy':7.34,
'wedding':7.34,
'accomplished':7.32,
'adorable':7.32,
'championship':7.32,
'comfortable':7.32,
'cuddle':7.32,
'games':7.32,
'grandson':7.32,
'life':7.32,
'lovely':7.32,
'pretty':7.32,
'proud':7.32,
'rose':7.32,
'united':7.32,
'fruits':7.31,
'adventure':7.3,
'couple':7.3,
'dollars':7.3,
'eating':7.3,
'fortune':7.3,
'generous':7.3,
'golden':7.3,
'hahah':7.3,
'hooray':7.3,
'intelligence':7.3,
'lover':7.3,
'luxury':7.3,
'money':7.3,
'passion':7.3,
'prosperity':7.3,
'remarkable':7.3,
'sweetie':7.3,
'valentines':7.3,
'educated':7.29,
'gently':7.29,
'baby':7.28,
'books':7.28,
'bride':7.28,
'cherished':7.28,
'cookies':7.28,
'dessert':7.28,
'employed':7.28,
'glow':7.28,
'god':7.28,
'great-grandchildren':7.28,
'helped':7.28,
'independence':7.28,
'likes':7.28,
'luckily':7.28,
'moon':7.28,
'perfectly':7.28,
'satisfied':7.28,
'sunday':7.28,
'juicy':7.27,
'championships':7.26,
'divine':7.26,
'dreaming':7.26,
'foods':7.26,
'fresh':7.26,
'gladly':7.26,
'greatest':7.26,
'hearts':7.26,
'luck':7.26,
'millions':7.26,
'musicians':7.26,
'play':7.26,
'progress':7.26,
'savings':7.26,
'appreciation':7.24,
'bliss':7.24,
'bloom':7.24,
'book':7.24,
'child':7.24,
'companion':7.24,
'computer':7.24,
'gardens':7.24,
'gentle':7.24,
'hahahah':7.24,
'helpful':7.24,
'impressed':7.24,
'kind':7.24,
'knowledge':7.24,
'liberty':7.24,
'mama':7.24,
'nature':7.24,
'pal':7.24,
'passionate':7.24,
'promoted':7.24,
'reward':7.24,
'warmth':7.24,
'xmas':7.24,
'danced':7.22,
'amazed':7.22,
'appreciate':7.22,
'brother':7.22,
'confidence':7.22,
'darling':7.22,
'encouraging':7.22,
'energy':7.22,
'films':7.22,
'garden':7.22,
'graduated':7.22,
'guitar':7.22,
'health':7.22,
'heart':7.22,
'honor':7.22,
'like':7.22,
'musical':7.22,
'pets':7.22,
'relaxed':7.22,
'salary':7.22,
'star':7.22,
'sweeter':7.22,
'trust':7.22,
'yummy':7.22,
'ecstasy':7.2,
'eternal':7.2,
'approved':7.2,
'benefits':7.2,
'cartoon':7.2,
'comforted':7.2,
'cool':7.2,
'discount':7.2,
'good':7.2,
'google':7.2,
'ladies':7.2,
'libraries':7.2,
'luv':7.2,
'perfection':7.2,
'presents':7.2,
'prizes':7.2,
'special':7.2,
'wishes':7.2,
'alive':7.18,
'awards':7.18,
'bed':7.18,
'best':7.18,
'coffee':7.18,
'comfy':7.18,
'fiesta':7.18,
'genuine':7.18,
'helping':7.18,
'imagine':7.18,
'leisure':7.18,
'meal':7.18,
'promise':7.18,
'respected':7.18,
'rest':7.18,
'travel':7.18,
'abundant':7.16,
'attracted':7.16,
'devoted':7.16,
'favourite':7.16,
'granddaughter':7.16,
'heroes':7.16,
'ideas':7.16,
'liked':7.16,
'oceans':7.16,
'pizza':7.16,
'skies':7.16,
'sleep':7.16,
'spring':7.16,
'sunset':7.16,
'welcome':7.16,
'1st':7.14,
'adoring':7.14,
'brighter':7.14,
'children\'s':7.14,
'cure':7.14,
'fireworks':7.14,
'home':7.14,
'honored':7.14,
'journey':7.14,
'lovin':7.14,
'opportunity':7.14,
'paid':7.14,
'parks':7.14,
'playing':7.14,
'shine':7.14,
'strawberry':7.14,
'summertime':7.14,
'wealthy':7.14,
'appreciated':7.12,
'artistic':7.12,
'birth':7.12,
'children':7.12,
'fruit':7.12,
'inspire':7.12,
'juice':7.12,
'laptop':7.12,
'partners':7.12,
'son':7.12,
'stronger':7.12,
'superman':7.12,
'tree':7.12,
'valuable':7.12,
'woman\'s':7.12,
'women':7.12,
'glowing':7.1,
'admiration':7.1,
'carnival':7.1,
'computers':7.1,
'confident':7.1,
'cookie':7.1,
'cutie':7.1,
'dearest':7.1,
'dream':7.1,
'freely':7.1,
'fridays':7.1,
'plants':7.1,
'quality':7.1,
'rabbit':7.1,
'resort':7.1,
'shopping':7.1,
'sincere':7.1,
'snack':7.1,
'stars':7.1,
'toys':7.1,
'useful':7.1,
'wise':7.1,
'yum':7.1,
'desirable':7.08,
'sparkle':7.08,
'bless':7.08,
'comic':7.08,
'cooking':7.08,
'dancing':7.08,
'earned':7.08,
'equality':7.08,
'faith':7.08,
'graduate':7.08,
'improvements':7.08,
'memories':7.08,
'park':7.08,
'pet':7.08,
'powerful':7.08,
'princess':7.08,
'qualities':7.08,
'thrill':7.08,
'TRUE':7.08,
'wonder':7.08,
'everlasting':7.06,
'mamma':7.06,
'caress':7.06,
'charm':7.06,
'clever':7.06,
'father':7.06,
'grand':7.06,
'hehehe':7.06,
'idea':7.06,
'pearl':7.06,
'pictures':7.06,
'restaurant':7.06,
'sandwich':7.06,
'sharing':7.06,
'strong':7.06,
'talent':7.06,
'talented':7.06,
'tenderness':7.06,
'weddings':7.06,
'dove':7.04,
'awsome':7.04,
'cherry':7.04,
'daughter':7.04,
'eat':7.04,
'favorite':7.04,
'girlfriend':7.04,
'hoping':7.04,
'impressive':7.04,
'loyalty':7.04,
'parent':7.04,
'relationship':7.04,
'safe':7.04,
'scholarship':7.04,
'shining':7.04,
'sunrise':7.04,
'yoga':7.04,
'respects':7.02,
'fairy':7.02,
'humanity':7.02,
'productivity':7.02,
'brave':7.02,
'colours':7.02,
'correct':7.02,
'dad':7.02,
'daddy':7.02,
'dollar':7.02,
'easily':7.02,
'fans':7.02,
'goal':7.02,
'hawaii':7.02,
'honestly':7.02,
'inspiration':7.02,
'olympics':7.02,
'saints':7.02,
'sleeping':7.02,
'wisdom':7.02,
'believed':7,
'better':7,
'color':7,
'colors':7,
'dad\'s':7,
'determination':7,
'discovered':7,
'gentlemen':7,
'girl':7,
'harmony':7,
'hello':7,
'hopes':7,
'noble':7,
'praised':7,
'reliable':7,
'trip':7,
'agreed':6.98,
'approval':6.98,
'brothers':6.98,
'concerts':6.98,
'cooperation':6.98,
'encouraged':6.98,
'giving':6.98,
'goals':6.98,
'ideal':6.98,
'intellectual':6.98,
'invitation':6.98,
'marry':6.98,
'musician':6.98,
'outdoors':6.98,
'photography':6.98,
'plenty':6.98,
'rome':6.98,
'trees':6.98,
'trips':6.98,
'unique':6.98,
'wildlife':6.98,
'lullaby':6.98,
'thrills':6.98,
'abroad':6.96,
'bath':6.96,
'benefit':6.96,
'birds':6.96,
'dads':6.96,
'elegant':6.96,
'eternally':6.96,
'fair':6.96,
'fancy':6.96,
'great-grandfather':6.96,
'imagination':6.96,
'improving':6.96,
'mountains':6.96,
'ocean':6.96,
'pancakes':6.96,
'photograph':6.96,
'praying':6.96,
'present':6.96,
'reunion':6.96,
'safely':6.96,
'saving':6.96,
'singing':6.96,
'songs':6.96,
'sunny':6.96,
'terrific':6.96,
'theater':6.96,
'vanilla':6.96,
'adore':6.96,
'gentleman':6.96,
'autumn':6.94,
'cinema':6.94,
'college':6.94,
'concert':6.94,
'correctly':6.94,
'cozy':6.94,
'dear':6.94,
'earning':6.94,
'earns':6.94,
'gardening':6.94,
'girls':6.94,
'massage':6.94,
'outdoor':6.94,
'photos':6.94,
'piano':6.94,
'sea':6.94,
'trusted':6.94,
'albums':6.92,
'dignity':6.92,
'favored':6.92,
'fitness':6.92,
'game':6.92,
'healing':6.92,
'learned':6.92,
'learning':6.92,
'prayers':6.92,
'promote':6.92,
'secure':6.92,
'spa':6.92,
'unity':6.92,
'wish':6.92,
'youtube':6.92,
'favour':6.92,
'clean':6.9,
'dynamic':6.9,
'encourage':6.9,
'infant':6.9,
'jewelry':6.9,
'necklace':6.9,
'paintings':6.9,
'stability':6.9,
'voyage':6.9,
'worthy':6.9,
'fulfill':6.9,
'eternity':6.9,
'accuracy':6.88,
'bookstores':6.88,
'breeze':6.88,
'bunny':6.88,
'cheese':6.88,
'comics':6.88,
'donated':6.88,
'easter':6.88,
'education':6.88,
'email':6.88,
'farmer':6.88,
'female':6.88,
'flavor':6.88,
'friday':6.88,
'moms':6.88,
'photo':6.88,
'pillow':6.88,
'pure':6.88,
'saved':6.88,
'shakespeare':6.88,
'survived':6.88,
'taste':6.88,
'valued':6.88,
'vitamin':6.88,
'infants':6.88,
'silk':6.88,
'dreamed':6.87,
'\#music':6.86,
'acceptance':6.86,
'banana':6.86,
'breakfast':6.86,
'cooperative':6.86,
'dancer':6.86,
'grace':6.86,
'greatly':6.86,
'guarantee':6.86,
'improved':6.86,
'improvement':6.86,
'independent':6.86,
'liking':6.86,
'paris':6.86,
'pasta':6.86,
'photographs':6.86,
'recipes':6.86,
'relationships':6.86,
'relief':6.86,
'sailing':6.86,
'science':6.86,
'seas':6.86,
'toast':6.86,
'truly':6.86,
'platinum':6.86,
'superstar':6.86,
'understands':6.86,
'accurately':6.84,
'advantage':6.84,
'belonging':6.84,
'buddy':6.84,
'childhood':6.84,
'daylight':6.84,
'discover':6.84,
'forgiveness':6.84,
'great-grandmother':6.84,
'hopefully':6.84,
'horses':6.84,
'interested':6.84,
'kid':6.84,
'live':6.84,
'lol':6.84,
'movie':6.84,
'popularity':6.84,
'solution':6.84,
'swim':6.84,
'toy':6.84,
'understanding':6.84,
'universe':6.84,
'woman':6.84,
'woohoo':6.84,
'rivers':6.84,
'sail':6.84,
'cared':6.83,
'active':6.82,
'artists':6.82,
'babe':6.82,
'believes':6.82,
'born':6.82,
'champagne':6.82,
'compassion':6.82,
'completed':6.82,
'create':6.82,
'dedicated':6.82,
'experienced':6.82,
'fathers':6.82,
'first':6.82,
'gains':6.82,
'heal':6.82,
'new':6.82,
'significant':6.82,
'singer':6.82,
'surprisingly':6.82,
'young':6.82,
'mansion':6.82,
'prevail':6.82,
'qualified':6.81,
'air':6.8,
'amazon':6.8,
'animal':6.8,
'bedroom':6.8,
'camera':6.8,
'cream':6.8,
'dreamer':6.8,
'forgiven':6.8,
'highest':6.8,
'horse':6.8,
'magic':6.8,
'manners':6.8,
'naturally':6.8,
'novels':6.8,
'performers':6.8,
'pies':6.8,
'protect':6.8,
'santa':6.8,
'shared':6.8,
'smooth':6.8,
'together':6.8,
'uncle':6.8,
'efficient':6.8,
'elevated':6.8,
'cafe':6.78,
'coke':6.78,
'completion':6.78,
'coolest':6.78,
'creation':6.78,
'dogs':6.78,
'effectiveness':6.78,
'esteemed':6.78,
'finished':6.78,
'glee':6.78,
'green':6.78,
'heartbeat':6.78,
'island':6.78,
'jukebox':6.78,
'medal':6.78,
'mom\'s':6.78,
'museums':6.78,
'painting':6.78,
'pie':6.78,
'pool':6.78,
'reading':6.78,
'real':6.78,
'ruby':6.78,
'share':6.78,
'sons':6.78,
'traveling':6.78,
'variety':6.78,
'wonders':6.78,
'worth':6.78,
'guaranteed':6.78,
'raindrops':6.78,
'visions':6.78,
'pearls':6.77,
'america':6.76,
'easy':6.76,
'effective':6.76,
'future':6.76,
'humans':6.76,
'intimate':6.76,
'married':6.76,
'muffin':6.76,
'papa':6.76,
'plus':6.76,
'popcorn':6.76,
'savior':6.76,
'seasons':6.76,
'shop':6.76,
'sister':6.76,
'style':6.76,
'supporter':6.76,
'switzerland':6.76,
'tenderly':6.76,
'top':6.76,
'oxygen':6.76,
'rhyme':6.76,
'allright':6.74,
'american':6.74,
'artist':6.74,
'capable':6.74,
'complete':6.74,
'convenient':6.74,
'courtesy':6.74,
'donate':6.74,
'drinks':6.74,
'father\'s':6.74,
'fine':6.74,
'focused':6.74,
'guitars':6.74,
'hi':6.74,
'integrity':6.74,
'justice':6.74,
'lake':6.74,
'mankind':6.74,
'mentor':6.74,
'merit':6.74,
'performance':6.74,
'plant':6.74,
'prepared':6.74,
'raise':6.74,
'romeo':6.74,
'shiny':6.74,
'sugar':6.74,
'surprising':6.74,
'technology':6.74,
'treat':6.74,
'university':6.74,
'wishing':6.74,
'yes':6.74,
'desires':6.73,
'wished':6.73,
'4-bedroom':6.72,
'attract':6.72,
'bike':6.72,
'car':6.72,
'civilization':6.72,
'classy':6.72,
'confirmed':6.72,
'costumes':6.72,
'creating':6.72,
'culture':6.72,
'finish':6.72,
'gallery':6.72,
'knowing':6.72,
'lifelong':6.72,
'momma':6.72,
'neat':6.72,
'niece':6.72,
'online':6.72,
'orchestra':6.72,
'plays':6.72,
'revenue':6.72,
'shower':6.72,
'spiritual':6.72,
'surprised':6.72,
'tremendous':6.72,
'values':6.72,
'villages':6.72,
'warm':6.72,
'doggy':6.71,
'hallelujah':6.71,
'candle':6.71,
'secured':6.71,
'valid':6.71,
'agree':6.7,
'anniversary':6.7,
'antiques':6.7,
'believe':6.7,
'bucks':6.7,
'cruise':6.7,
'dancers':6.7,
'dine':6.7,
'dog':6.7,
'florida':6.7,
'grandsons':6.7,
'grants':6.7,
'hired':6.7,
'learn':6.7,
'marriage':6.7,
'mum':6.7,
'partner':6.7,
'productive':6.7,
'rockin':6.7,
'teaches':6.7,
'treats':6.7,
'tv':6.7,
'water':6.7,
'grin':6.69,
'invention':6.69,
'virtues':6.69,
'brains':6.69,
'sensation':6.68,
'ability':6.68,
'ace':6.68,
'animals':6.68,
'bake':6.68,
'bridegroom':6.68,
'desire':6.68,
'famous':6.68,
'forest':6.68,
'fountain':6.68,
'goodmorning':6.68,
'greater':6.68,
'grow':6.68,
'heritage':6.68,
'landscape':6.68,
'liberties':6.68,
'living':6.68,
'lyrics':6.68,
'mercy':6.68,
'museum':6.68,
'novel':6.68,
'palace':6.68,
'pianist':6.68,
'potential':6.68,
'power':6.68,
'privilege':6.68,
'proceed':6.68,
'promised':6.68,
'river':6.68,
'scotland':6.68,
'shares':6.68,
'skating':6.68,
'thanx':6.68,
'theatre':6.68,
'tours':6.68,
'well':6.68,
'acceptable':6.67,
'possibilities':6.67,
'accurate':6.67,
'candles':6.67,
'approve':6.66,
'assets':6.66,
'aunt':6.66,
'career':6.66,
'charms':6.66,
'communicate':6.66,
'competent':6.66,
'currency':6.66,
'dedication':6.66,
'dvd':6.66,
'eligible':6.66,
'fan':6.66,
'firefighters':6.66,
'greet':6.66,
'motivation':6.66,
'nieces':6.66,
'personality':6.66,
'powers':6.66,
'raises':6.66,
'sculpture':6.66,
'survivors':6.66,
'tea':6.66,
'television':6.66,
'tour':6.66,
'pony':6.65,
'rhythm':6.65,
'bird':6.64,
'care':6.64,
'cat':6.64,
'cook':6.64,
'corn':6.64,
'deposits':6.64,
'expert':6.64,
'high':6.64,
'holy':6.64,
'invite':6.64,
'leading':6.64,
'photographer':6.64,
'picture':6.64,
'promising':6.64,
'recover':6.64,
'recovered':6.64,
'recovery':6.64,
'salad':6.64,
'shops':6.64,
'solutions':6.64,
'sparks':6.64,
'sport':6.64,
'supreme':6.64,
'theaters':6.64,
'tunes':6.64,
'unite':6.64,
'volunteers':6.64,
'simplicity':6.62,
'attained':6.62,
'book\'s':6.62,
'cameras':6.62,
'chatting':6.62,
'crown':6.62,
'disney':6.62,
'dresses':6.62,
'heartfelt':6.62,
'homes':6.62,
'husband':6.62,
'immortal':6.62,
'invest':6.62,
'kitty':6.62,
'offer':6.62,
'organized':6.62,
'performances':6.62,
'perfume':6.62,
'pray':6.62,
'rescue':6.62,
'restaurants':6.62,
'salaries':6.62,
'sisters':6.62,
'slept':6.62,
'steak':6.62,
'stories':6.62,
'varieties':6.62,
'vision':6.62,
'wife':6.62,
'youth':6.62,
'zoo':6.62,
'stimulation':6.61,
'touching':6.61,
'furnished':6.6,
'suitable':6.6,
'album':6.6,
'amour':6.6,
'art':6.6,
'beam':6.6,
'captain':6.6,
'certainty':6.6,
'child\'s':6.6,
'clothing':6.6,
'conservation':6.6,
'desired':6.6,
'dress':6.6,
'favorited':6.6,
'females':6.6,
'growth':6.6,
'helps':6.6,
'highly':6.6,
'ideals':6.6,
'lady':6.6,
'lime':6.6,
'popular':6.6,
'proposal':6.6,
'protected':6.6,
'relatives':6.6,
'rhymes':6.6,
'singers':6.6,
'specialty':6.6,
'spirit':6.6,
'starry':6.6,
'stroll':6.6,
'supported':6.6,
'therapeutic':6.6,
'unlimited':6.6,
'visiting':6.6,
'expressions':6.6,
'efficiency':6.59,
'sleeps':6.59,
'vocals':6.59,
'impress':6.58,
'sympathetic':6.58,
'advance':6.58,
'advanced':6.58,
'arts':6.58,
'available':6.58,
'baking':6.58,
'classic':6.58,
'classical':6.58,
'colour':6.58,
'drawing':6.58,
'english':6.58,
'exhibition':6.58,
'expecting':6.58,
'fish':6.58,
'goodnight':6.58,
'invented':6.58,
'islands':6.58,
'language':6.58,
'majesty':6.58,
'me':6.58,
'preferred':6.58,
'radio':6.58,
'ready':6.58,
'relative':6.58,
'sale':6.58,
'solve':6.58,
'springs':6.58,
'student':6.58,
'symphony':6.58,
'traditions':6.58,
'understood':6.58,
'upgrade':6.58,
'usa':6.58,
'saviour':6.57,
'skill':6.57,
'belonged':6.56,
'muscles':6.56,
'able':6.56,
'ahaha':6.56,
'butter':6.56,
'circus':6.56,
'cosmic':6.56,
'coupon':6.56,
'diploma':6.56,
'donations':6.56,
'e-mail':6.56,
'encore':6.56,
'film':6.56,
'guidance':6.56,
'illustration':6.56,
'increase':6.56,
'international':6.56,
'ipod':6.56,
'morning':6.56,
'natural':6.56,
'okay':6.56,
'preservation':6.56,
'progressive':6.56,
'protection':6.56,
'raised':6.56,
'showers':6.56,
'tacos':6.56,
'teach':6.56,
'traveler':6.56,
'understand':6.56,
'universities':6.56,
'worldwide':6.56,
'privileges':6.55,
'accepted':6.54,
'adoption':6.54,
'asset':6.54,
'blanket':6.54,
'cats':6.54,
'cleaned':6.54,
'coin':6.54,
'cooked':6.54,
'crystal':6.54,
'dawn':6.54,
'dearly':6.54,
'discovery':6.54,
'done':6.54,
'eager':6.54,
'emails':6.54,
'exercises':6.54,
'found':6.54,
'give':6.54,
'groovy':6.54,
'haven':6.54,
'invited':6.54,
'iphone':6.54,
'moral':6.54,
'nephew':6.54,
'orange':6.54,
'overcome':6.54,
'pays':6.54,
'potato':6.54,
'premiere':6.54,
'pride':6.54,
'receiving':6.54,
'recognition':6.54,
'reindeer':6.54,
'right':6.54,
'rising':6.54,
'save':6.54,
'scholars':6.54,
'shelter':6.54,
'solar':6.54,
'spontaneous':6.54,
'tasting':6.54,
'ultimate':6.54,
'visit':6.54,
'advantages':6.53,
'sailed':6.53,
'feather':6.52,
'ambitious':6.52,
'baker':6.52,
'brain':6.52,
'champ':6.52,
'communication':6.52,
'compensation':6.52,
'ease':6.52,
'ethics':6.52,
'extra':6.52,
'fries':6.52,
'growing':6.52,
'guest':6.52,
'incredibly':6.52,
'initiative':6.52,
'jesus':6.52,
'lips':6.52,
'literature':6.52,
'nights':6.52,
'phenomenon':6.52,
'planet':6.52,
'poem':6.52,
'poet':6.52,
'prefer':6.52,
'read':6.52,
'sang':6.52,
'soup':6.52,
'surf':6.52,
'swimming':6.52,
'videos':6.52,
'wings':6.52,
'world':6.52,
'amore':6.51,
'bounce':6.51,
'cultures':6.51,
'eden':6.51,
'interaction':6.51,
'mercedes':6.51,
'velvet':6.51,
'balanced':6.51,
'agriculture':6.5,
'allies':6.5,
'americans':6.5,
'bells':6.5,
'chips':6.5,
'contribute':6.5,
'couples':6.5,
'cousins':6.5,
'deals':6.5,
'determined':6.5,
'eaten':6.5,
'fame':6.5,
'gives':6.5,
'hire':6.5,
'innocence':6.5,
'ipad':6.5,
'leadership':6.5,
'legend':6.5,
'lounge':6.5,
'mature':6.5,
'newest':6.5,
'newly':6.5,
'performing':6.5,
'receive':6.5,
'recipe':6.5,
'roast':6.5,
'starting':6.5,
'stunning':6.5,
'tales':6.5,
'elder':6.49,
'grows':6.49,
'herb':6.49,
'illustrations':6.49,
'rays':6.49,
'relevant':6.49,
'sanity':6.49,
'acoustic':6.48,
'always':6.48,
'answers':6.48,
'bible':6.48,
'boost':6.48,
'clap':6.48,
'dining':6.48,
'electronics':6.48,
'exclusive':6.48,
'family\'s':6.48,
'gathering':6.48,
'hehe':6.48,
'humble':6.48,
'information':6.48,
'italian':6.48,
'library':6.48,
'mate':6.48,
'modern':6.48,
'offers':6.48,
'paperbacks':6.48,
'perform':6.48,
'poems':6.48,
'potatoes':6.48,
'prayer':6.48,
'pumpkin':6.48,
'restored':6.48,
'rights':6.48,
'scholar':6.48,
'screenplay':6.48,
'shopper':6.48,
'sings':6.48,
'soft':6.48,
'starbucks':6.48,
'story':6.48,
'supporting':6.48,
'video':6.48,
'instrumental':6.48,
'backyard':6.47,
'drums':6.47,
'virtue':6.47,
'activities':6.46,
'athletic':6.46,
'clothes':6.46,
'cultivated':6.46,
'forever':6.46,
'goods':6.46,
'grass':6.46,
'higher':6.46,
'literary':6.46,
'london':6.46,
'memory':6.46,
'mint':6.46,
'nephews':6.46,
'prime':6.46,
'prospect':6.46,
'reception':6.46,
'recommended':6.46,
'research':6.46,
'resource':6.46,
'resources':6.46,
'riverside':6.46,
'rocking':6.46,
'scored':6.46,
'talking':6.46,
'believer':6.46,
'functioning':6.46,
'poets':6.46,
'boats':6.45,
'remedy':6.45,
'tender':6.45,
'aaah':6.44,
'beatles':6.44,
'chance':6.44,
'coast':6.44,
'draw':6.44,
'earth':6.44,
'eats':6.44,
'effectively':6.44,
'familiar':6.44,
'fast':6.44,
'forgive':6.44,
'gained':6.44,
'graphics':6.44,
'improve':6.44,
'increases':6.44,
'infinite':6.44,
'languages':6.44,
'likely':6.44,
'nap':6.44,
'philosophy':6.44,
'phone':6.44,
'prince':6.44,
'princes':6.44,
'professional':6.44,
'revival':6.44,
'rice':6.44,
'rides':6.44,
'satisfactory':6.44,
'scientific':6.44,
'scoring':6.44,
'sis':6.44,
'soccer':6.44,
'supermarkets':6.44,
'support':6.44,
'teachers':6.44,
'teaching':6.44,
'wage':6.44,
'whale':6.44,
'wink':6.44,
'wit':6.44,
'accept':6.42,
'assist':6.42,
'band':6.42,
'chat':6.42,
'composer':6.42,
'contribution':6.42,
'cousin':6.42,
'curves':6.42,
'dates':6.42,
'delivered':6.42,
'environmental':6.42,
'evening':6.42,
'feed':6.42,
'fest':6.42,
'gaming':6.42,
'india':6.42,
'interests':6.42,
'jazz':6.42,
'novelist':6.42,
'panties':6.42,
'partnership':6.42,
'party\'s':6.42,
'portrait':6.42,
'remember':6.42,
'residence':6.42,
'shore':6.42,
'simply':6.42,
'stream':6.42,
'traveled':6.42,
'wine':6.42,
'wondered':6.42,
'farming':6.42,
'hats':6.41,
'hearted':6.41,
'1980s':6.4,
'actress':6.4,
'adopt':6.4,
'altogether':6.4,
'architecture':6.4,
'australia':6.4,
'baked':6.4,
'buying':6.4,
'ceremony':6.4,
'charity':6.4,
'chicken':6.4,
'chorus':6.4,
'consciousness':6.4,
'cultivation':6.4,
'dating':6.4,
'deserve':6.4,
'destination':6.4,
'documentary':6.4,
'drawings':6.4,
'educational':6.4,
'electronic':6.4,
'equally':6.4,
'europe':6.4,
'floating':6.4,
'futures':6.4,
'gain':6.4,
'generations':6.4,
'gmail':6.4,
'hills':6.4,
'increasing':6.4,
'kidding':6.4,
'launch':6.4,
'light':6.4,
'mountain':6.4,
'participate':6.4,
'pics':6.4,
'playin':6.4,
'poetry':6.4,
'possibility':6.4,
'provide':6.4,
'resolved':6.4,
'shores':6.4,
'studies':6.4,
'summer':6.4,
'tennis':6.4,
'touch':6.4,
'touched':6.4,
'tradition':6.4,
'twins':6.4,
'visits':6.4,
'wages':6.4,
'waves':6.4,
'willing':6.4,
'younger':6.4,
'exercised':6.39,
'enabled':6.39,
'greeks':6.39,
'purely':6.39,
'seeds':6.39,
'sixteen':6.39,
'softly':6.39,
'cradle':6.38,
'80\'s':6.38,
'americas':6.38,
'arose':6.38,
'bigger':6.38,
'boyfriend':6.38,
'breath':6.38,
'committed':6.38,
'contributing':6.38,
'craft':6.38,
'designers':6.38,
'development':6.38,
'distinction':6.38,
'faster':6.38,
'functional':6.38,
'giveaway':6.38,
'increased':6.38,
'lamb':6.38,
'leader':6.38,
'lottery':6.38,
'maximum':6.38,
'meet':6.38,
'neighborhood':6.38,
'ownership':6.38,
'painter':6.38,
'played':6.38,
'preserve':6.38,
'purchased':6.38,
'queens':6.38,
'reasonable':6.38,
'revenues':6.38,
'rocket':6.38,
'sails':6.38,
'saves':6.38,
'score':6.38,
'seeing':6.38,
'silver':6.38,
'skills':6.38,
'sung':6.38,
'tasted':6.38,
'tastes':6.38,
'thinks':6.38,
'thought':6.38,
'touches':6.38,
'we':6.38,
'agricultural':6.38,
'belle':6.37,
'explore':6.37,
'sketch':6.37,
'voluntary':6.37,
'acquire':6.36,
'april':6.36,
'architect':6.36,
'broadway':6.36,
'calm':6.36,
'climbed':6.36,
'colleagues':6.36,
'curious':6.36,
'definite':6.36,
'democracy':6.36,
'deposit':6.36,
'developed':6.36,
'distinguished':6.36,
'dressed':6.36,
'drink':6.36,
'employment':6.36,
'farms':6.36,
'fashion':6.36,
'gravy':6.36,
'guiding':6.36,
'imagined':6.36,
'innocent':6.36,
'instantly':6.36,
'interest':6.36,
'justified':6.36,
'logical':6.36,
'mail':6.36,
'maintained':6.36,
'mario':6.36,
'mobile':6.36,
'mp3':6.36,
'obtained':6.36,
'original':6.36,
'patience':6.36,
'performed':6.36,
'please':6.36,
'prayed':6.36,
'rain':6.36,
'rational':6.36,
'relation':6.36,
'rings':6.36,
'rise':6.36,
'rudolph':6.36,
'teacher':6.36,
'technologies':6.36,
'value':6.36,
'vegas':6.36,
'volunteer':6.36,
'wifi':6.36,
'revealed':6.35,
'branches':6.35,
'existed':6.35,
'spotlight':6.35,
'bread':6.34,
'castle':6.34,
'cheddar':6.34,
'clouds':6.34,
'clubs':6.34,
'colleges':6.34,
'completely':6.34,
'connected':6.34,
'december':6.34,
'dew':6.34,
'employ':6.34,
'exists':6.34,
'expedition':6.34,
'experience':6.34,
'farmers':6.34,
'firefox':6.34,
'football':6.34,
'grant':6.34,
'hiring':6.34,
'hollywood':6.34,
'house':6.34,
'illustrated':6.34,
'images':6.34,
'jeans':6.34,
'largest':6.34,
'linguistic':6.34,
'lord':6.34,
'purchase':6.34,
'received':6.34,
'released':6.34,
'saint':6.34,
'scientists':6.34,
'september':6.34,
'soon':6.34,
'soul':6.34,
'soundtrack':6.34,
'studio':6.34,
'tickets':6.34,
'wave':6.34,
'continuity':6.33,
'equilibrium':6.33,
'activity':6.32,
'agreement':6.32,
'amor':6.32,
'arrival':6.32,
'arrive':6.32,
'asian':6.32,
'bbq':6.32,
'bedtime':6.32,
'berry':6.32,
'brunch':6.32,
'commitment':6.32,
'date':6.32,
'deal':6.32,
'democratic':6.32,
'design':6.32,
'designer':6.32,
'devotion':6.32,
'experiences':6.32,
'fly':6.32,
'foxy':6.32,
'france':6.32,
'handy':6.32,
'importance':6.32,
'important':6.32,
'jamaica':6.32,
'jobs':6.32,
'june':6.32,
'kin':6.32,
'lights':6.32,
'mornings':6.32,
'newspaper':6.32,
'offering':6.32,
'organic':6.32,
'parade':6.32,
'pink':6.32,
'published':6.32,
'reader':6.32,
'remembered':6.32,
'resolve':6.32,
'ring':6.32,
'rofl':6.32,
'selected':6.32,
'snow':6.32,
'streams':6.32,
'sufficient':6.32,
'sufficiently':6.32,
'sure':6.32,
'universal':6.32,
'unlocked':6.32,
'visitors':6.32,
'waters':6.32,
'women\'s':6.32,
'worship':6.32,
'writers':6.32,
'assembled':6.31,
'chickens':6.31,
'wheat':6.31,
'connections':6.31,
'scent':6.31,
'volumes':6.31,
'whistle':6.31,
'absolutely':6.3,
'atmosphere':6.3,
'belongs':6.3,
'bought':6.3,
'chess':6.3,
'christian':6.3,
'clear':6.3,
'clearer':6.3,
'commonwealth':6.3,
'conversations':6.3,
'designed':6.3,
'downloaded':6.3,
'earrings':6.3,
'engineer':6.3,
'epic':6.3,
'exercise':6.3,
'expansion':6.3,
'feeding':6.3,
'flowing':6.3,
'headphones':6.3,
'indians':6.3,
'joined':6.3,
'lipstick':6.3,
'metropolitan':6.3,
'mine':6.3,
'myself':6.3,
'paint':6.3,
'painted':6.3,
'plane':6.3,
'produced':6.3,
'protecting':6.3,
'reasoning':6.3,
'relations':6.3,
'salvation':6.3,
'sciences':6.3,
'sense':6.3,
'software':6.3,
'suite':6.3,
'surplus':6.3,
'swing':6.3,
'visited':6.3,
'cheeks':6.29,
'observation':6.29,
'calcium':6.29,
'conceived':6.29,
'rum':6.29,
'amigo':6.28,
'babes':6.28,
'begin':6.28,
'breathe':6.28,
'bridegroom\'s':6.28,
'buy':6.28,
'community':6.28,
'cooler':6.28,
'country':6.28,
'disco':6.28,
'emerging':6.28,
'england':6.28,
'experts':6.28,
'fairly':6.28,
'fix':6.28,
'founded':6.28,
'globe':6.28,
'honorary':6.28,
'hoped':6.28,
'introduced':6.28,
'lead':6.28,
'listening':6.28,
'lots':6.28,
'market':6.28,
'monkey':6.28,
'olympic':6.28,
'pioneer':6.28,
'plaza':6.28,
'professionals':6.28,
'reflect':6.28,
'remembering':6.28,
'reputation':6.28,
'sentimental':6.28,
'skype':6.28,
'students':6.28,
'sweden':6.28,
'technological':6.28,
'themes':6.28,
'thinking':6.28,
'tips':6.28,
'vehicles':6.28,
'village':6.28,
'virginia':6.28,
'website':6.28,
'white':6.28,
'wines':6.28,
'reasonably':6.27,
'uptown':6.27,
'aims':6.27,
'observe':6.27,
'regards':6.27,
'allows':6.26,
'appropriate':6.26,
'australian':6.26,
'blackberry':6.26,
'breathing':6.26,
'camp':6.26,
'cars':6.26,
'considerable':6.26,
'costume':6.26,
'degree':6.26,
'develop':6.26,
'egypt':6.26,
'events':6.26,
'flag':6.26,
'gave':6.26,
'gods':6.26,
'gr8':6.26,
'hotels':6.26,
'human':6.26,
'indian':6.26,
'leap':6.26,
'lifetime':6.26,
'magnetic':6.26,
'mirror':6.26,
'mmmm':6.26,
'occasion':6.26,
'produce':6.26,
'prominent':6.26,
'promises':6.26,
'proved':6.26,
'raising':6.26,
'school':6.26,
'shirt':6.26,
'spark':6.26,
'surely':6.26,
'team':6.26,
'travelers':6.26,
'upcoming':6.26,
'us':6.26,
'valley':6.26,
'vintage':6.26,
'proteins':6.25,
'almighty':6.24,
'horizon':6.24,
'insight':6.24,
'ooooh':6.24,
'poetic':6.24,
'spirits':6.24,
'aboard':6.24,
'acknowledge':6.24,
'actors':6.24,
'advances':6.24,
'aid':6.24,
'answer':6.24,
'athletes':6.24,
'bowling':6.24,
'boy':6.24,
'built':6.24,
'choice':6.24,
'constitution':6.24,
'conversation':6.24,
'cowboy':6.24,
'day':6.24,
'deliver':6.24,
'developments':6.24,
'distinctive':6.24,
'dvds':6.24,
'edison':6.24,
'eighteen':6.24,
'enterprise':6.24,
'eyes':6.24,
'flying':6.24,
'grad':6.24,
'grammy':6.24,
'grill':6.24,
'halloween':6.24,
'holland':6.24,
'jelly':6.24,
'jingle':6.24,
'legitimate':6.24,
'making':6.24,
'more':6.24,
'options':6.24,
'possible':6.24,
'practical':6.24,
'proceeds':6.24,
'proposed':6.24,
'provides':6.24,
'queen':6.24,
'revolutionary':6.24,
'rises':6.24,
'samsung':6.24,
'self':6.24,
'show':6.24,
'sooner':6.24,
'speed':6.24,
'strategy':6.24,
'tale':6.24,
'tip':6.24,
'updating':6.24,
'vip':6.24,
'websites':6.24,
'worlds':6.24,
'writing':6.24,
'xbox':6.24,
'you':6.24,
'yours':6.24,
'yourself':6.24,
'collective':6.23,
'embrace':6.22,
'produces':6.22,
'meanings':6.22,
'accompanied':6.22,
'advice':6.22,
'all':6.22,
'answered':6.22,
'architectural':6.22,
'asia':6.22,
'authors':6.22,
'avid':6.22,
'batman':6.22,
'big':6.22,
'breast':6.22,
'bro':6.22,
'build':6.22,
'chef':6.22,
'clowns':6.22,
'contacts':6.22,
'contributions':6.22,
'cotton':6.22,
'cowboys':6.22,
'decent':6.22,
'designs':6.22,
'downloading':6.22,
'environment':6.22,
'evolution':6.22,
'farm':6.22,
'finishing':6.22,
'fit':6.22,
'foundations':6.22,
'full':6.22,
'guys':6.22,
'instrument':6.22,
'join':6.22,
'karma':6.22,
'knight':6.22,
'lives':6.22,
'logic':6.22,
'milk':6.22,
'most':6.22,
'neon':6.22,
'night':6.22,
'package':6.22,
'participation':6.22,
'penny':6.22,
'pregnant':6.22,
'properly':6.22,
'quest':6.22,
'restoration':6.22,
'seventeen':6.22,
'social':6.22,
'styles':6.22,
'supports':6.22,
'tech':6.22,
'thai':6.22,
'thoughts':6.22,
'today':6.22,
'transformation':6.22,
'treaty':6.22,
'tribute':6.22,
'aesthetic':6.21,
'upside':6.21,
'behold':6.2,
'dough':6.2,
'sands':6.2,
'3-bedroom':6.2,
'actor':6.2,
'agreements':6.2,
'arise':6.2,
'assured':6.2,
'bubble':6.2,
'cereal':6.2,
'definitely':6.2,
'dime':6.2,
'engage':6.2,
'erected':6.2,
'estate':6.2,
'ethical':6.2,
'everybody':6.2,
'faces':6.2,
'feeds':6.2,
'haircut':6.2,
'halo':6.2,
'jacket':6.2,
'joining':6.2,
'kingdom':6.2,
'lifted':6.2,
'listened':6.2,
'meat':6.2,
'menu':6.2,
'nurse':6.2,
'opening':6.2,
'pension':6.2,
'phd':6.2,
'phones':6.2,
'plans':6.2,
'premier':6.2,
'proposals':6.2,
'protein':6.2,
'providence':6.2,
'recommendations':6.2,
'sexual':6.2,
'soda':6.2,
'spain':6.2,
'stable':6.2,
'succession':6.2,
'supporters':6.2,
'taco':6.2,
'think':6.2,
'trading':6.2,
'upward':6.2,
'yields':6.2,
'sailor':6.19,
'dynamics':6.19,
'lyrical':6.19,
'copper':6.18,
'realise':6.18,
'righteous':6.18,
'transformed':6.18,
'venus':6.18,
'80s':6.18,
'advocates':6.18,
'aha':6.18,
'ate':6.18,
'atlantic':6.18,
'awareness':6.18,
'balance':6.18,
'blonde':6.18,
'burger':6.18,
'buyer':6.18,
'certificate':6.18,
'chances':6.18,
'chief':6.18,
'clearly':6.18,
'cultural':6.18,
'draws':6.18,
'driving':6.18,
'duck':6.18,
'eagle':6.18,
'emotions':6.18,
'established':6.18,
'experiments':6.18,
'expression':6.18,
'fishing':6.18,
'fri':6.18,
'fully':6.18,
'informed':6.18,
'initiated':6.18,
'italy':6.18,
'king':6.18,
'land':6.18,
'lion':6.18,
'miami':6.18,
'midnight':6.18,
'mineral':6.18,
'nomination':6.18,
'oak':6.18,
'occasions':6.18,
'philosophical':6.18,
'playlist':6.18,
'profound':6.18,
'provided':6.18,
'resolution':6.18,
'riding':6.18,
'safety':6.18,
'scientist':6.18,
'she':6.18,
'sight':6.18,
'spice':6.18,
'steady':6.18,
'survey':6.18,
'swiss':6.18,
't-shirt':6.18,
'tiger':6.18,
'tomorrow':6.18,
'tourist':6.18,
'tournament':6.18,
'trade':6.18,
'trains':6.18,
'tune':6.18,
'victor':6.18,
'walking':6.18,
'wireless':6.18,
'www':6.18,
'yea':6.18,
'beds':6.17,
'preference':6.17,
'applying':6.16,
'crop':6.16,
'enable':6.16,
'interactions':6.16,
'narrative':6.16,
'railway':6.16,
'afford':6.16,
'allowing':6.16,
'automobile':6.16,
'bands':6.16,
'boys':6.16,
'cds':6.16,
'christ':6.16,
'dictionary':6.16,
'downloads':6.16,
'eagles':6.16,
'engaged':6.16,
'especially':6.16,
'fiction':6.16,
'grocery':6.16,
'hotel':6.16,
'houses':6.16,
'hubby':6.16,
'included':6.16,
'lemon':6.16,
'mellow':6.16,
'minds':6.16,
'my':6.16,
'own':6.16,
'pacific':6.16,
'people':6.16,
'planning':6.16,
'polish':6.16,
'premium':6.16,
'providing':6.16,
'readers':6.16,
'rocked':6.16,
'sausage':6.16,
'south':6.16,
'transportation':6.16,
'turkey':6.16,
'wed':6.16,
'wheels':6.16,
'woods':6.16,
'yacht':6.16,
'livin':6.15,
'believing':6.14,
'chemistry':6.14,
'continuous':6.14,
'persons':6.14,
'seed':6.14,
'sheep':6.14,
'successive':6.14,
'adult':6.14,
'amsterdam':6.14,
'arises':6.14,
'arrived':6.14,
'asleep':6.14,
'aviation':6.14,
'basketball':6.14,
'browser':6.14,
'cathedral':6.14,
'cd':6.14,
'cheek':6.14,
'combination':6.14,
'conscious':6.14,
'cricket':6.14,
'debut':6.14,
'dividends':6.14,
'drinking':6.14,
'elizabeth':6.14,
'eye':6.14,
'generate':6.14,
'granted':6.14,
'guests':6.14,
'huge':6.14,
'jumping':6.14,
'kindle':6.14,
'launches':6.14,
'mend':6.14,
'models':6.14,
'mutual':6.14,
'offered':6.14,
'places':6.14,
'plan':6.14,
'principles':6.14,
'recovering':6.14,
'respectively':6.14,
'restore':6.14,
'ride':6.14,
'rock':6.14,
'shirts':6.14,
'sony':6.14,
'strategies':6.14,
'strongly':6.14,
'temple':6.14,
'thousands':6.14,
'tonight':6.14,
'trail':6.14,
'twin':6.14,
'up':6.14,
'updates':6.14,
'vagina':6.14,
'yahoo':6.14,
'receives':6.13,
'exclusively':6.12,
'writings':6.12,
'destiny':6.12,
'outcomes':6.12,
'quicker':6.12,
'boulevard':6.12,
'chapels':6.12,
'consideration':6.12,
'digital':6.12,
'dish':6.12,
'eat-in':6.12,
'ensure':6.12,
'event':6.12,
'everyone':6.12,
'face':6.12,
'focus':6.12,
'funds':6.12,
'garlic':6.12,
'investing':6.12,
'keyboard':6.12,
'knows':6.12,
'leaf':6.12,
'males':6.12,
'maps':6.12,
'masters':6.12,
'networking':6.12,
'nursing':6.12,
'patiently':6.12,
'proceeded':6.12,
'proceeding':6.12,
'profession':6.12,
'robot':6.12,
'snowing':6.12,
'studied':6.12,
'study':6.12,
'theme':6.12,
'toward':6.12,
'traditional':6.12,
'treasurer':6.12,
'university\'s':6.12,
'v-day':6.12,
'very':6.12,
'voted':6.12,
'wii':6.12,
'waving':6.11,
'extending':6.1,
'readily':6.1,
'mirrors':6.1,
'nearer':6.1,
'nurses':6.1,
'preserved':6.1,
'senses':6.1,
'aah':6.1,
'acknowledged':6.1,
'beers':6.1,
'bentley':6.1,
'brazil':6.1,
'cattle':6.1,
'challenging':6.1,
'check':6.1,
'chili':6.1,
'citizens':6.1,
'collection':6.1,
'comprehend':6.1,
'customers':6.1,
'elected':6.1,
'electricity':6.1,
'enters':6.1,
'essence':6.1,
'fab':6.1,
'forthcoming':6.1,
'forward':6.1,
'guide':6.1,
'herself':6.1,
'increasingly':6.1,
'info':6.1,
'investments':6.1,
'justification':6.1,
'karaoke':6.1,
'keeping':6.1,
'know':6.1,
'launched':6.1,
'life\'s':6.1,
'madame':6.1,
'markets':6.1,
'moments':6.1,
'nike':6.1,
'november':6.1,
'open':6.1,
'oscar':6.1,
'owner':6.1,
'practically':6.1,
'precise':6.1,
'release':6.1,
'romans':6.1,
'security':6.1,
'shade':6.1,
'shoulders':6.1,
'soap':6.1,
'springfield':6.1,
'start':6.1,
'telecommunications':6.1,
'tomorrow\'s':6.1,
'trinity':6.1,
'western':6.1,
'window':6.1,
'woof':6.1,
'yay':6.1,
'roam':6.09,
'dawning':6.08,
'choir':6.08,
'crops':6.08,
'elvis':6.08,
'significance':6.08,
'throne':6.08,
'velocity':6.08,
'acquainted':6.08,
'ahead':6.08,
'alright':6.08,
'audiences':6.08,
'ball':6.08,
'belief':6.08,
'bff':6.08,
'boat':6.08,
'boots':6.08,
'california':6.08,
'centuries':6.08,
'cheaper':6.08,
'clue':6.08,
'coat':6.08,
'consensus':6.08,
'contact':6.08,
'deserved':6.08,
'drive':6.08,
'facebook':6.08,
'freelance':6.08,
'greek':6.08,
'grown':6.08,
'help':6.08,
'housing':6.08,
'instant':6.08,
'integrated':6.08,
'introduction':6.08,
'legit':6.08,
'ma':6.08,
'message':6.08,
'negotiate':6.08,
'neighbor':6.08,
'neighborhoods':6.08,
'numerous':6.08,
'our':6.08,
'oven':6.08,
'picked':6.08,
'reached':6.08,
'recognize':6.08,
'recognized':6.08,
'rider':6.08,
'shows':6.08,
'significantly':6.08,
'specialist':6.08,
'suggestions':6.08,
'superior':6.08,
'tempo':6.08,
'tourists':6.08,
'ups':6.08,
'validity':6.08,
'vehicle':6.08,
'votes':6.08,
'theories':6.06,
'associations':6.06,
'attachment':6.06,
'fluid':6.06,
'shells':6.06,
'1970s':6.06,
'adults':6.06,
'advocacy':6.06,
'bella':6.06,
'brazilian':6.06,
'bueno':6.06,
'certain':6.06,
'certainly':6.06,
'combinations':6.06,
'composed':6.06,
'composition':6.06,
'couch':6.06,
'created':6.06,
'creek':6.06,
'dimes':6.06,
'distinct':6.06,
'equal':6.06,
'facts':6.06,
'flight':6.06,
'gaze':6.06,
'goodman':6.06,
'harbor':6.06,
'hey':6.06,
'historian':6.06,
'host':6.06,
'icon':6.06,
'influences':6.06,
'instruments':6.06,
'landmark':6.06,
'large':6.06,
'latest':6.06,
'leads':6.06,
'legs':6.06,
'liverpool':6.06,
'magazines':6.06,
'membership':6.06,
'muscle':6.06,
'nation':6.06,
'outlets':6.06,
'overseas':6.06,
'peanut':6.06,
'personal':6.06,
'photoshop':6.06,
'preparation':6.06,
'quantities':6.06,
'racing':6.06,
'reflection':6.06,
'representation':6.06,
'respective':6.06,
'see':6.06,
'servings':6.06,
'shoes':6.06,
'slim':6.06,
'sports':6.06,
'starring':6.06,
'straight':6.06,
'talk':6.06,
'towns':6.06,
'updated':6.06,
'wood':6.06,
'solving':6.04,
'bridges':6.04,
'climbing':6.04,
'geographical':6.04,
'skirt':6.04,
'1960s':6.04,
'academy':6.04,
'accompanying':6.04,
'acquired':6.04,
'acting':6.04,
'alumni':6.04,
'america\'s':6.04,
'approaches':6.04,
'bass':6.04,
'beginning':6.04,
'bringing':6.04,
'campus':6.04,
'casino':6.04,
'choices':6.04,
'contributed':6.04,
'exact':6.04,
'expand':6.04,
'express':6.04,
'fave':6.04,
'feliz':6.04,
'folks':6.04,
'fund':6.04,
'furniture':6.04,
'groove':6.04,
'hair':6.04,
'hint':6.04,
'installed':6.04,
'interactive':6.04,
'kitchen':6.04,
'melbourne':6.04,
'mind':6.04,
'numbers':6.04,
'perspective':6.04,
'points':6.04,
'prevention':6.04,
'professor':6.04,
'prospective':6.04,
'prospects':6.04,
'purple':6.04,
'purpose':6.04,
'replied':6.04,
'sauce':6.04,
'signing':6.04,
'sofa':6.04,
'supplies':6.04,
'tops':6.04,
'transport':6.04,
'union':6.04,
'visible':6.04,
'vocal':6.04,
'washington':6.04,
'words':6.04,
'xp':6.04,
'carriage':6.02,
'beings':6.02,
'colored':6.02,
'considerations':6.02,
'nearest':6.02,
'porch':6.02,
'relate':6.02,
'seventeenth':6.02,
'vibe':6.02,
'1980\'s':6.02,
'acres':6.02,
'aircraft':6.02,
'amen':6.02,
'basket':6.02,
'blog':6.02,
'cards':6.02,
'celebrity':6.02,
'christians':6.02,
'concepts':6.02,
'content':6.02,
'creates':6.02,
'delivery':6.02,
'developing':6.02,
'doll':6.02,
'download':6.02,
'eggs':6.02,
'engineers':6.02,
'essential':6.02,
'fixed':6.02,
'float':6.02,
'fridge':6.02,
'fund-raising':6.02,
'inn':6.02,
'jam':6.02,
'japanese':6.02,
'male':6.02,
'monetary':6.02,
'native':6.02,
'newspapers':6.02,
'objectives':6.02,
'pregnancy':6.02,
'presence':6.02,
'production':6.02,
'programs':6.02,
'pub':6.02,
'quick':6.02,
'rare':6.02,
'records':6.02,
'retire':6.02,
'simple':6.02,
'sophisticated':6.02,
'teams':6.02,
'totally':6.02,
'try':6.02,
'unwind':6.02,
'voting':6.02,
'walk':6.02,
'will':6.02,
'windows':6.02,
'wondering':6.02,
'writes':6.02,
'xoxo':6.02,
'rains':6.01,
'1990\'s':6,
'act':6,
'adapted':6,
'alliance':6,
'allow':6,
'applicable':6,
'archives':6,
'attend':6,
'attending':6,
'automatic':6,
'automatically':6,
'avatar':6,
'beans':6,
'beliefs':6,
'bien':6,
'biggest':6,
'brew':6,
'brook':6,
'cambridge':6,
'concentrations':6,
'conscience':6,
'continent':6,
'crimson':6,
'eighteenth':6,
'exactly':6,
'extend':6,
'favor':6,
'finale':6,
'find':6,
'fireplace':6,
'fixing':6,
'glance':6,
'global':6,
'ha':6,
'hands':6,
'heating':6,
'indeed':6,
'integral':6,
'itunes':6,
'japan':6,
'jenny':6,
'king\'s':6,
'lawn':6,
'lighting':6,
'likewise':6,
'lmfao':6,
'make':6,
'meaning':6,
'mega':6,
'metals':6,
'mucho':6,
'nations':6,
'network':6,
'olive':6,
'opened':6,
'oregon':6,
'owns':6,
'participants':6,
'pilot':6,
'principle':6,
'religion':6,
'result':6,
'service':6,
'sights':6,
'sites':6,
'sponsor':6,
'started':6,
'stereo':6,
'stores':6,
'successor':6,
'survive':6,
'surviving':6,
'today\'s':6,
'tuned':6,
'virgin':6,
'vista':6,
'walked':6,
'2-car':5.98,
'action':5.98,
'afternoon':5.98,
'anytime':5.98,
'attempting':5.98,
'audience':5.98,
'august':5.98,
'author':5.98,
'awww':5.98,
'bbc':5.98,
'began':5.98,
'biography':5.98,
'broadcast':5.98,
'canada':5.98,
'communities':5.98,
'contributor':5.98,
'creatures':5.98,
'declaration':5.98,
'dell':5.98,
'dialogue':5.98,
'drum':5.98,
'ebook':5.98,
'egg':5.98,
'explained':5.98,
'fabric':5.98,
'father-in-law':5.98,
'feature':5.98,
'ferry':5.98,
'fingertips':5.98,
'flash':5.98,
'flights':5.98,
'folk':5.98,
'gathered':5.98,
'grammys':5.98,
'heh':5.98,
'hill':5.98,
'http':5.98,
'identity':5.98,
'informal':5.98,
'ireland':5.98,
'java':5.98,
'july':5.98,
'keys':5.98,
'lego':5.98,
'lessons':5.98,
'looks':5.98,
'macbook':5.98,
'mcdonalds':5.98,
'meets':5.98,
'messages':5.98,
'national':5.98,
'netherlands':5.98,
'nintendo':5.98,
'normal':5.98,
'nyc':5.98,
'organization':5.98,
'originally':5.98,
'ours':5.98,
'ourselves':5.98,
'pairs':5.98,
'pic':5.98,
'planned':5.98,
'pop':5.98,
'prose':5.98,
'recordings':5.98,
'represented':5.98,
'robin':5.98,
'schools':5.98,
'singapore':5.98,
'sounds':5.98,
'specialized':5.98,
'store':5.98,
'sweater':5.98,
'tonight\'s':5.98,
'train':5.98,
'triple':5.98,
'wing':5.98,
'faire':5.98,
'lasts':5.98,
'nana':5.98,
'precisely':5.98,
'probable':5.98,
'refer':5.98,
'spoon':5.98,
'similarly':5.98,
'glimpse':5.98,
'souls':5.98,
'above':5.96,
'academic':5.96,
'allowed':5.96,
'assistance':5.96,
'authorized':5.96,
'bacon':5.96,
'bay':5.96,
'bf':5.96,
'body':5.96,
'collected':5.96,
'convinced':5.96,
'destined':5.96,
'discuss':5.96,
'driven':5.96,
'everyone\'s':5.96,
'everything':5.96,
'fav':5.96,
'features':5.96,
'flickr':5.96,
'french':5.96,
'gig':5.96,
'gracias':5.96,
'gym':5.96,
'head':5.96,
'heels':5.96,
'hundreds':5.96,
'including':5.96,
'islanders':5.96,
'jeep':5.96,
'job':5.96,
'largely':5.96,
'made':5.96,
'mambo':5.96,
'match':5.96,
'memoir':5.96,
'mighty':5.96,
'mmmmm':5.96,
'net':5.96,
'netflix':5.96,
'players':5.96,
'potentially':5.96,
'presently':5.96,
'proof':5.96,
'reaches':5.96,
'reflecting':5.96,
'related':5.96,
'releases':5.96,
'reveal':5.96,
'reveals':5.96,
'rocks':5.96,
'roommate':5.96,
'season':5.96,
'selection':5.96,
'ship':5.96,
'ships':5.96,
'similar':5.96,
'space':5.96,
'stadium':5.96,
'starts':5.96,
'taught':5.96,
'world\'s':5.96,
'writer':5.96,
'yep':5.96,
'justify':5.96,
'pupil':5.96,
'spreading':5.96,
'wales':5.96,
'whoo':5.96,
'deeds':5.96,
'exhibit':5.96,
'fiddle':5.96,
'exceed':5.96,
'3d':5.94,
'alternative':5.94,
'approach':5.94,
'awe':5.94,
'ballet':5.94,
'begins':5.94,
'building':5.94,
'business':5.94,
'carpet':5.94,
'chick':5.94,
'choose':5.94,
'consent':5.94,
'continental':5.94,
'correspondence':5.94,
'custom':5.94,
'decided':5.94,
'diary':5.94,
'echo':5.94,
'elevation':5.94,
'european':5.94,
'exports':5.94,
'finds':5.94,
'forum':5.94,
'framework':5.94,
'frank':5.94,
'gather':5.94,
'germany':5.94,
'image':5.94,
'impression':5.94,
'include':5.94,
'inherent':5.94,
'intention':5.94,
'investor':5.94,
'jet':5.94,
'joyce':5.94,
'kings':5.94,
'knew':5.94,
'larger':5.94,
'letter':5.94,
'listen':5.94,
'looking':5.94,
'mba':5.94,
'member':5.94,
'men':5.94,
'movement':5.94,
'nation\'s':5.94,
'obama':5.94,
'ok':5.94,
'oooh':5.94,
'option':5.94,
'phoenix':5.94,
'player':5.94,
'portfolio':5.94,
'preparations':5.94,
'presidential':5.94,
'prom':5.94,
'proper':5.94,
'pulse':5.94,
'reality':5.94,
'regularly':5.94,
'reservations':5.94,
'salmon':5.94,
'scene':5.94,
'societies':5.94,
'submitted':5.94,
'substantial':5.94,
'swift':5.94,
'technique':5.94,
'thnx':5.94,
'thx':5.94,
'tide':5.94,
'trends':5.94,
'visual':5.94,
'wallet':5.94,
'wear':5.94,
'formation':5.94,
'cloth':5.94,
'delicate':5.94,
'echoes':5.94,
'geography':5.94,
'processing':5.94,
'swinging':5.94,
'1970\'s':5.92,
'aides':5.92,
'bank':5.92,
'banks':5.92,
'beer':5.92,
'boobs':5.92,
'capital':5.92,
'chapters':5.92,
'chicks':5.92,
'chiefs':5.92,
'christianity':5.92,
'citizen':5.92,
'collections':5.92,
'conclude':5.92,
'constant':5.92,
'covered':5.92,
'devices':5.92,
'diagram':5.92,
'directors':5.92,
'doubtless':5.92,
'equity':5.92,
'fields':5.92,
'florence':5.92,
'forecast':5.92,
'get':5.92,
'group':5.92,
'guy':5.92,
'hah':5.92,
'harvard':5.92,
'historic':5.92,
'i':5.92,
'laboratory':5.92,
'linux':5.92,
'opens':5.92,
'orlando':5.92,
'pants':5.92,
'patterns':5.92,
'private':5.92,
'publishing':5.92,
'raining':5.92,
'residential':5.92,
'retirement':5.92,
'runnin':5.92,
'salon':5.92,
'sends':5.92,
'shorts':5.92,
'shown':5.92,
'skinny':5.92,
'solid':5.92,
'stoked':5.92,
'substantially':5.92,
'teen':5.92,
'theatrical':5.92,
'toyota':5.92,
'translated':5.92,
'tribe':5.92,
'umbrella':5.92,
'vienna':5.92,
'views':5.92,
'viva':5.92,
'washed':5.92,
'wholly':5.92,
'alternatives':5.92,
'applies':5.92,
'generated':5.92,
'merchant':5.92,
'missionary':5.92,
'vine':5.92,
'vive':5.91,
'add':5.9,
'addition':5.9,
'alike':5.9,
'attributed':5.9,
'blu-ray':5.9,
'both':5.9,
'brought':5.9,
'buyers':5.9,
'chillin':5.9,
'co-op':5.9,
'conception':5.9,
'conclusions':5.9,
'considered':5.9,
'daughter-in-law':5.9,
'diaries':5.9,
'dividend':5.9,
'doe':5.9,
'establish':5.9,
'exist':5.9,
'existence':5.9,
'expect':5.9,
'fact':5.9,
'featured':5.9,
'feel':5.9,
'gin':5.9,
'grew':5.9,
'hand':5.9,
'hosting':5.9,
'legacy':5.9,
'letters':5.9,
'lip':5.9,
'lolz':5.9,
'magazine':5.9,
'majority':5.9,
'mall':5.9,
'man':5.9,
'modest':5.9,
'naked':5.9,
'neighbors':5.9,
'nokia':5.9,
'notebook':5.9,
'now':5.9,
'pass':5.9,
'peak':5.9,
'permit':5.9,
'personally':5.9,
'planes':5.9,
'ratings':5.9,
'recording':5.9,
'replies':5.9,
'results':5.9,
'retail':5.9,
'scenes':5.9,
'scores':5.9,
'seattle':5.9,
'settlement':5.9,
'speak':5.9,
'stanford':5.9,
'strategic':5.9,
'symbols':5.9,
'talked':5.9,
'thousand':5.9,
'twenty':5.9,
'winter':5.9,
'yeah':5.9,
'angle':5.9,
'bun':5.9,
'displayed':5.9,
'dolly':5.9,
'illustrate':5.9,
'pockets':5.9,
'puppet':5.9,
'sensory':5.9,
'grande':5.9,
'mixture':5.9,
'myth':5.9,
'admiral':5.89,
'intensity':5.89,
'access':5.88,
'adobe':5.88,
'airport':5.88,
'allied':5.88,
'applications':5.88,
'architects':5.88,
'audio':5.88,
'austria':5.88,
'celeb':5.88,
'chosen':5.88,
'city\'s':5.88,
'coordinator':5.88,
'cyber':5.88,
'deserves':5.88,
'distinguish':5.88,
'drivin':5.88,
'entire':5.88,
'evidently':5.88,
'expanded':5.88,
'feedback':5.88,
'field':5.88,
'flew':5.88,
'founder':5.88,
'hip':5.88,
'includes':5.88,
'keeps':5.88,
'leaders':5.88,
'lmaooo':5.88,
'mary':5.88,
'mood':5.88,
'mrs':5.88,
'october':5.88,
'organism':5.88,
'outlook':5.88,
'philharmonic':5.88,
'physical':5.88,
'poland':5.88,
'primary':5.88,
'printed':5.88,
'privacy':5.88,
'pro':5.88,
'producer':5.88,
'railroad':5.88,
'researchers':5.88,
'scout':5.88,
'sequence':5.88,
'sovereign':5.88,
'speaking':5.88,
'sustained':5.88,
'town':5.88,
'twilight':5.88,
'victoria':5.88,
'weather':5.88,
'whole':5.88,
'yeh':5.88,
'pun':5.88,
'demonstration':5.88,
'misty':5.88,
'sovereignty':5.88,
'scripture':5.88,
'sleigh':5.88,
'flex':5.87,
'2morrow':5.86,
'adopted':5.86,
'aim':5.86,
'amounts':5.86,
'applied':5.86,
'arrangement':5.86,
'articles':5.86,
'balls':5.86,
'barbie':5.86,
'bear':5.86,
'boogie':5.86,
'bridge':5.86,
'brooks':5.86,
'brother-in-law':5.86,
'chrome':5.86,
'club':5.86,
'columbus':5.86,
'connect':5.86,
'constitutional':5.86,
'contemporary':5.86,
'country\'s':5.86,
'credit':5.86,
'credits':5.86,
'curve':5.86,
'diverse':5.86,
'dj':5.86,
'effort':5.86,
'engineering':5.86,
'equipment':5.86,
'figures':5.86,
'freeway':5.86,
'front-page':5.86,
'frontier':5.86,
'hotter':5.86,
'household':5.86,
'integration':5.86,
'introduce':5.86,
'japan\'s':5.86,
'jennifer':5.86,
'keep':5.86,
'layout':5.86,
'lens':5.86,
'leo':5.86,
'located':5.86,
'metro':5.86,
'newman':5.86,
'nut':5.86,
'nuts':5.86,
'observations':5.86,
'obtain':5.86,
'pc':5.86,
'position':5.86,
'potter':5.86,
'president':5.86,
'productions':5.86,
'property':5.86,
'pumping':5.86,
'revelation':5.86,
'road':5.86,
'sand':5.86,
'seat':5.86,
'services':5.86,
'sound':5.86,
'survival':5.86,
'teens':5.86,
'thursday':5.86,
'trained':5.86,
'variations':5.86,
'viewers':5.86,
'wrapped':5.86,
'attitudes':5.86,
'autonomy':5.86,
'concentrated':5.86,
'deeper':5.86,
'fifteen':5.86,
'fourteen':5.86,
'gum':5.86,
'liquid':5.86,
'organizational':5.86,
'output':5.86,
'phenomena':5.86,
'seal':5.86,
'concentration':5.85,
'props':5.85,
'construct':5.85,
'amount':5.84,
'angeles':5.84,
'appear':5.84,
'arena':5.84,
'banking':5.84,
'baseball':5.84,
'begun':5.84,
'being':5.84,
'benz':5.84,
'blogs':5.84,
'buck':5.84,
'canadian':5.84,
'checks':5.84,
'chicago':5.84,
'circles':5.84,
'classes':5.84,
'colorado':5.84,
'coming':5.84,
'conducting':5.84,
'crossword':5.84,
'curry':5.84,
'decide':5.84,
'descriptions':5.84,
'desktop':5.84,
'element':5.84,
'enter':5.84,
'escaped':5.84,
'ethnic':5.84,
'experimental':5.84,
'feelings':5.84,
'germans':5.84,
'gets':5.84,
'grain':5.84,
'grammar':5.84,
'gravity':5.84,
'hear':5.84,
'her':5.84,
'history':5.84,
'individuals':5.84,
'landed':5.84,
'lands':5.84,
'lays':5.84,
'maryland':5.84,
'matrix':5.84,
'mexico':5.84,
'nationwide':5.84,
'ooh':5.84,
'oral':5.84,
'patents':5.84,
'poster':5.84,
'producing':5.84,
'programming':5.84,
'prophet':5.84,
'provisions':5.84,
'puff':5.84,
'quartet':5.84,
'realize':5.84,
'really':5.84,
'responses':5.84,
'sample':5.84,
'shoe':5.84,
'showing':5.84,
'ski':5.84,
'stages':5.84,
'stored':5.84,
'suggestion':5.84,
'tall':5.84,
'telephone':5.84,
'theoretical':5.84,
'uk':5.84,
'urban':5.84,
'watching':5.84,
'web':5.84,
'absorption':5.84,
'constructed':5.84,
'dimensions':5.84,
'examples':5.84,
'interpretation':5.84,
'programme':5.84,
'relating':5.84,
'shades':5.84,
'subtle':5.84,
'instruction':5.83,
'rotation':5.83,
'wagon':5.83,
'10:00:00PM':5.82,
'7-9pm':5.82,
'apply':5.82,
'arising':5.82,
'bar':5.82,
'becoming':5.82,
'blogging':5.82,
'closer':5.82,
'come':5.82,
'communications':5.82,
'connection':5.82,
'consistent':5.82,
'cow':5.82,
'detail':5.82,
'diplomatic':5.82,
'east':5.82,
'eatin':5.82,
'emphasized':5.82,
'endowment':5.82,
'entered':5.82,
'expressed':5.82,
'fig':5.82,
'have':5.82,
'hearing':5.82,
'homey':5.82,
'hundred':5.82,
'investment':5.82,
'involved':5.82,
'irish':5.82,
'jean':5.82,
'key':5.82,
'landing':5.82,
'lived':5.82,
'maine':5.82,
'maker':5.82,
'many':5.82,
'met':5.82,
'montreal':5.82,
'nashville':5.82,
'opinion':5.82,
'owl':5.82,
'pair':5.82,
'path':5.82,
'peoples':5.82,
'philosophers':5.82,
'publisher':5.82,
'quickly':5.82,
'realised':5.82,
'regarded':5.82,
'royal':5.82,
'sane':5.82,
'sister-in-law':5.82,
'southwest':5.82,
'spanish':5.82,
'sum':5.82,
'talks':5.82,
'teen-agers':5.82,
'tennessee':5.82,
'toronto':5.82,
'upper':5.82,
'woot':5.82,
'workin':5.82,
'diversity':5.82,
'ideology':5.82,
'mist':5.82,
'movements':5.82,
'outline':5.82,
'continually':5.81,
'obtaining':5.81,
'06:00:00PM':5.8,
'accordingly':5.8,
'acquisition':5.8,
'addressed':5.8,
'analysis':5.8,
'appearance':5.8,
'attention':5.8,
'attitude':5.8,
'bean':5.8,
'becomes':5.8,
'belong':5.8,
'brings':5.8,
'caffeine':5.8,
'changing':5.8,
'climate':5.8,
'commonly':5.8,
'courses':5.8,
'crib':5.8,
'definition':5.8,
'determine':5.8,
'director':5.8,
'double':5.8,
'dude':5.8,
'entre':5.8,
'establishing':5.8,
'extended':5.8,
'finding':5.8,
'god\'s':5.8,
'gradually':5.8,
'group\'s':5.8,
'grove':5.8,
'hai':5.8,
'headed':5.8,
'ice':5.8,
'interior':5.8,
'kentucky':5.8,
'known':5.8,
'league':5.8,
'liberal':5.8,
'lmao':5.8,
'master\'s':5.8,
'men\'s':5.8,
'mix':5.8,
'model':5.8,
'mostly':5.8,
'mouth':5.8,
'networks':5.8,
'northeast':5.8,
'outside':5.8,
'paper':5.8,
'pardon':5.8,
'perceive':5.8,
'pilots':5.8,
'podcast':5.8,
'practice':5.8,
'psychology':5.8,
'pumped':5.8,
'rapid':5.8,
'reconstruction':5.8,
'rehearsal':5.8,
'responsible':5.8,
'roads':5.8,
'root':5.8,
'rubber':5.8,
'sales':5.8,
'sending':5.8,
'shaped':5.8,
'simultaneously':5.8,
'spoke':5.8,
'stock':5.8,
'tended':5.8,
'vivo':5.8,
'vote':5.8,
'wind':5.8,
'write':5.8,
'yellow':5.8,
'seated':5.8,
'behaviour':5.8,
'description':5.8,
'dimension':5.8,
'gender':5.8,
'impulse':5.8,
'involve':5.8,
'maintaining':5.8,
'manufacture':5.8,
'occupation':5.8,
'provinces':5.8,
'quantity':5.8,
'sentiment':5.8,
'natives':5.79,
'thirty':5.79,
'arch':5.79,
'actions':5.78,
'added':5.78,
'additional':5.78,
'admission':5.78,
'ahhhh':5.78,
'ambassador':5.78,
'amber':5.78,
'anna':5.78,
'annie':5.78,
'attributes':5.78,
'auction':5.78,
'aware':5.78,
'backup':5.78,
'britain':5.78,
'carefully':5.78,
'century':5.78,
'challenge':5.78,
'characters':5.78,
'colleague':5.78,
'containing':5.78,
'contest':5.78,
'convince':5.78,
'downtown':5.78,
'drives':5.78,
'ebay':5.78,
'egyptian':5.78,
'entering':5.78,
'featuring':5.78,
'fed':5.78,
'fibers':5.78,
'fitted':5.78,
'flows':5.78,
'founding':5.78,
'frequent':5.78,
'having':5.78,
'hd':5.78,
'hosted':5.78,
'hottest':5.78,
'intervals':5.78,
'inventory':5.78,
'lift':5.78,
'link':5.78,
'lot':5.78,
'march':5.78,
'mare':5.78,
'morality':5.78,
'newton':5.78,
'optical':5.78,
'passages':5.78,
'plasma':5.78,
'plates':5.78,
'poker':5.78,
'pops':5.78,
'possibly':5.78,
'realized':5.78,
'record':5.78,
'resident':5.78,
'respond':5.78,
'rural':5.78,
'shuttle':5.78,
'society':5.78,
'texts':5.78,
'total':5.78,
'trying':5.78,
'uploaded':5.78,
'various':5.78,
'volume':5.78,
'wheel':5.78,
'woo':5.78,
'workers':5.78,
'workout':5.78,
'yess':5.78,
'sober':5.78,
'components':5.78,
'defined':5.78,
'flashing':5.78,
'momento':5.78,
'movin':5.78,
'rollin':5.78,
'rover':5.78,
'vessel':5.78,
'printing':5.77,
'spatial':5.77,
'corresponding':5.77,
'accord':5.76,
'afterwards':5.76,
'apparatus':5.76,
'approaching':5.76,
'boston':5.76,
'brands':5.76,
'characteristic':5.76,
'city':5.76,
'coach':5.76,
'commission':5.76,
'continue':5.76,
'continuing':5.76,
'days':5.76,
'deeply':5.76,
'describes':5.76,
'diana':5.76,
'discussing':5.76,
'do':5.76,
'eastern':5.76,
'emotion':5.76,
'ensemble':5.76,
'episode':5.76,
'essentially':5.76,
'everywhere':5.76,
'experiment':5.76,
'facilities':5.76,
'functions':5.76,
'ginger':5.76,
'glass':5.76,
'greece':5.76,
'historical':5.76,
'horny':5.76,
'install':5.76,
'jessica':5.76,
'just':5.76,
'lamp':5.76,
'lincoln':5.76,
'magnitude':5.76,
'maintain':5.76,
'major':5.76,
'makers':5.76,
'makeup':5.76,
'manor':5.76,
'manual':5.76,
'mechanisms':5.76,
'michelle':5.76,
'motion':5.76,
'outfit':5.76,
'oxford':5.76,
'payments':5.76,
'permitted':5.76,
'preparing':5.76,
'preview':5.76,
'privately':5.76,
'probability':5.76,
'producers':5.76,
'products':5.76,
'ps3':5.76,
'publication':5.76,
'race':5.76,
'rachel':5.76,
'referring':5.76,
'remix':5.76,
'representing':5.76,
'republic':5.76,
'sees':5.76,
'selling':5.76,
'slide':5.76,
'species':5.76,
'staying':5.76,
'supplied':5.76,
'supply':5.76,
'things':5.76,
'tokyo':5.76,
'viewing':5.76,
'vital':5.76,
'voice':5.76,
'wednesdays':5.76,
'whisper':5.76,
'workshop':5.76,
'chiefly':5.76,
'dimensional':5.76,
'handed':5.76,
'interval':5.76,
'ladder':5.76,
'oooooh':5.76,
'perception':5.76,
'pupils':5.76,
'shield':5.76,
'thoroughly':5.76,
'considerably':5.75,
'manuscript':5.75,
'symbolic':5.74,
'07:00:00PM':5.74,
'awake':5.74,
'booty':5.74,
'cadillac':5.74,
'call':5.74,
'calling':5.74,
'catch':5.74,
'challenges':5.74,
'chelsea':5.74,
'chile':5.74,
'concentrate':5.74,
'deep':5.74,
'details':5.74,
'diplomacy':5.74,
'dragon':5.74,
'employee':5.74,
'endorsed':5.74,
'entry':5.74,
'estates':5.74,
'everyday':5.74,
'expected':5.74,
'forth':5.74,
'fundamental':5.74,
'gf':5.74,
'given':5.74,
'i\'m':5.74,
'inclined':5.74,
'kept':5.74,
'kinds':5.74,
'lace':5.74,
'mac':5.74,
'manage':5.74,
'much':5.74,
'name':5.74,
'newsstands':5.74,
'ninja':5.74,
'nite':5.74,
'observed':5.74,
'orientation':5.74,
'owners':5.74,
'powder':5.74,
'presented':5.74,
'princeton':5.74,
'project':5.74,
'prove':5.74,
'quarters':5.74,
'reach':5.74,
'responded':5.74,
'rio':5.74,
'screen':5.74,
'serves':5.74,
'settled':5.74,
'showed':5.74,
'situated':5.74,
'spare':5.74,
'spokeswoman':5.74,
'suitcase':5.74,
'suits':5.74,
'swag':5.74,
'team\'s':5.74,
'thin':5.74,
'time':5.74,
'todays':5.74,
'training':5.74,
'transactions':5.74,
'treasury':5.74,
'walkin':5.74,
'warrior':5.74,
'wash':5.74,
'wives':5.74,
'cave':5.73,
'involves':5.73,
'mechanical':5.73,
'sphere':5.73,
'structural':5.73,
'identification':5.73,
'shell':5.73,
'nod':5.72,
'pose':5.72,
'3g':5.72,
'09:00:00PM':5.72,
'adding':5.72,
'affiliation':5.72,
'alexander':5.72,
'apt':5.72,
'argentina':5.72,
'blend':5.72,
'canal':5.72,
'card':5.72,
'channels':5.72,
'click':5.72,
'detailed':5.72,
'distinguishable':5.72,
'dvr':5.72,
'ears':5.72,
'euro':5.72,
'expanding':5.72,
'funky':5.72,
'goldman':5.72,
'happening':5.72,
'hypothesis':5.72,
'implementation':5.72,
'import':5.72,
'individual':5.72,
'jewish':5.72,
'kathryn':5.72,
'knowin':5.72,
'marine':5.72,
'midtown':5.72,
'missouri':5.72,
'modification':5.72,
'move':5.72,
'near':5.72,
'passenger':5.72,
'passengers':5.72,
'pen':5.72,
'persuade':5.72,
'philadelphia':5.72,
'plate':5.72,
'publications':5.72,
'quietly':5.72,
'races':5.72,
'rank':5.72,
'registered':5.72,
'responsibility':5.72,
'roles':5.72,
'satellite':5.72,
'script':5.72,
'seek':5.72,
'signed':5.72,
'source':5.72,
'spectrum':5.72,
'stage':5.72,
'surrounds':5.72,
'taxi':5.72,
'three':5.72,
'towards':5.72,
'translation':5.72,
'ultimately':5.72,
'update':5.72,
'uses':5.72,
'view':5.72,
'waking':5.72,
'whiskey':5.72,
'winds':5.72,
'with':5.72,
'wrap':5.72,
'contains':5.71,
'employer':5.71,
'fifty':5.71,
'immense':5.71,
'opinions':5.71,
'temperatures':5.71,
'fella':5.71,
'flippin':5.71,
'hears':5.71,
'scope':5.71,
'soil':5.71,
'timber':5.71,
'objective':5.7,
'willow':5.7,
'1960\'s':5.7,
'05:00:00PM':5.7,
'accumulation':5.7,
'android':5.7,
'appointed':5.7,
'approximately':5.7,
'arrangements':5.7,
'atm':5.7,
'attribute':5.7,
'banner':5.7,
'become':5.7,
'biggie':5.7,
'bunch':5.7,
'churches':5.7,
'contain':5.7,
'data':5.7,
'demonstrated':5.7,
'developer':5.7,
'disc':5.7,
'discussion':5.7,
'dozens':5.7,
'driver':5.7,
'earliest':5.7,
'elementary':5.7,
'engine':5.7,
'extremely':5.7,
'feat':5.7,
'feeling':5.7,
'fill':5.7,
'fried':5.7,
'grade':5.7,
'hat':5.7,
'hold':5.7,
'identical':5.7,
'jackson':5.7,
'january':5.7,
'johnny':5.7,
'journal':5.7,
'manhattan':5.7,
'master':5.7,
'max':5.7,
'michael':5.7,
'migration':5.7,
'mild':5.7,
'mmm':5.7,
'multiple':5.7,
'noon':5.7,
'northwest':5.7,
'observer':5.7,
'placing':5.7,
'pocket':5.7,
'prevented':5.7,
'rally':5.7,
'rankings':5.7,
'raymond':5.7,
'reforms':5.7,
'regular':5.7,
'rolling':5.7,
'roman':5.7,
'running':5.7,
'sippin':5.7,
'sonic':5.7,
'streaming':5.7,
'superbowl':5.7,
'synthesis':5.7,
'thickness':5.7,
'thumb':5.7,
'tonite':5.7,
'vertical':5.7,
'walks':5.7,
'want':5.7,
'wassup':5.7,
'watch':5.7,
'wendy':5.7,
'whites':5.7,
'written':5.7,
'xo':5.7,
'yaa':5.7,
'correlation':5.69,
'jungle':5.69,
'keepin':5.69,
'paragraph':5.69,
'yonder':5.69,
'determining':5.69,
'dusk':5.69,
'gal':5.69,
'hindu':5.69,
'mechanism':5.69,
'\#jobs':5.68,
'affiliate':5.68,
'amongst':5.68,
'angles':5.68,
'announce':5.68,
'appears':5.68,
'associated':5.68,
'avenue':5.68,
'bars':5.68,
'be':5.68,
'benjamin':5.68,
'bond':5.68,
'broadcasting':5.68,
'button':5.68,
'cabinet':5.68,
'cent':5.68,
'character':5.68,
'civic':5.68,
'climb':5.68,
'clinton':5.68,
'countries':5.68,
'database':5.68,
'degrees':5.68,
'direct':5.68,
'emerged':5.68,
'emphasis':5.68,
'enterprises':5.68,
'exchange':5.68,
'footage':5.68,
'foreign':5.68,
'formula':5.68,
'fort':5.68,
'gaga':5.68,
'getting':5.68,
'graham':5.68,
'grasp':5.68,
'greenwich':5.68,
'grounds':5.68,
'jill':5.68,
'laude':5.68,
'location':5.68,
'logo':5.68,
'machines':5.68,
'managed':5.68,
'marching':5.68,
'mars':5.68,
'merchants':5.68,
'mission':5.68,
'mississippi':5.68,
'moment':5.68,
'moves':5.68,
'nearby':5.68,
'nuevo':5.68,
'often':5.68,
'organs':5.68,
'permanent':5.68,
'perspectives':5.68,
'physiological':5.68,
'playoff':5.68,
'portland':5.68,
'program':5.68,
'publicity':5.68,
'publishers':5.68,
'pursue':5.68,
'response':5.68,
'resume':5.68,
'role':5.68,
'salt':5.68,
'seeks':5.68,
'sitting':5.68,
'southeast':5.68,
'speaker':5.68,
'speaks':5.68,
'spoken':5.68,
'stimulus':5.68,
'suggests':5.68,
'sydney':5.68,
'tot':5.68,
'trustee':5.68,
'usb':5.68,
'west':5.68,
'moses':5.67,
'occurring':5.67,
'saddle':5.67,
'samples':5.67,
'tail':5.67,
'thrust':5.67,
'vow':5.67,
'conversion':5.67,
'evident':5.67,
'\#travel':5.66,
'21st':5.66,
'08:00:00PM':5.66,
'absorbed':5.66,
'african':5.66,
'alexandra':5.66,
'among':5.66,
'aspect':5.66,
'association':5.66,
'auto':5.66,
'blue':5.66,
'bold':5.66,
'british':5.66,
'casa':5.66,
'cents':5.66,
'chose':5.66,
'claus':5.66,
'collect':5.66,
'compete':5.66,
'concluded':5.66,
'conclusion':5.66,
'continues':5.66,
'coverage':5.66,
'cup':5.66,
'customer':5.66,
'describe':5.66,
'developmental':5.66,
'digest':5.66,
'discussions':5.66,
'drawn':5.66,
'drew':5.66,
'early':5.66,
'electric':5.66,
'entrance':5.66,
'exchanges':5.66,
'follow':5.66,
'foundation':5.66,
'glove':5.66,
'gps':5.66,
'groups':5.66,
'ham':5.66,
'immediately':5.66,
'indiana':5.66,
'indie':5.66,
'intense':5.66,
'leopard':5.66,
'louisiana':5.66,
'mane':5.66,
'manufacturing':5.66,
'members':5.66,
'molecules':5.66,
'obama\'s':5.66,
'occupy':5.66,
'oooo':5.66,
'parkway':5.66,
'passed':5.66,
'people\'s':5.66,
'phillips':5.66,
'playoffs':5.66,
'practices':5.66,
'prepare':5.66,
'priority':5.66,
'reap':5.66,
'regard':5.66,
'residents':5.66,
'rode':5.66,
'roll':5.66,
'roots':5.66,
'rugged':5.66,
'sake':5.66,
'sandy':5.66,
'served':5.66,
'seven':5.66,
'several':5.66,
'shareholders':5.66,
'sidney':5.66,
'sign':5.66,
'silkk':5.66,
'sol':5.66,
'son-in-law':5.66,
'stretch':5.66,
'tenure':5.66,
'timing':5.66,
'tongues':5.66,
'tower':5.66,
'upstairs':5.66,
'usually':5.66,
'verse':5.66,
'wrapping':5.66,
'yard':5.66,
'adequate':5.66,
'explains':5.66,
'doorway':5.65,
'drinkin':5.65,
'examined':5.65,
'height':5.65,
'influenced':5.65,
'mami':5.65,
'mathematics':5.65,
'organisation':5.65,
'phases':5.65,
'realm':5.65,
'remarked':5.65,
'structures':5.65,
'consisting':5.65,
'clown':5.65,
'equations':5.65,
'hum':5.65,
'1950\'s':5.64,
'2day':5.64,
'ac':5.64,
'alaska':5.64,
'amanda':5.64,
'asterisk':5.64,
'bag':5.64,
'bra':5.64,
'businesses':5.64,
'cable':5.64,
'charger':5.64,
'chester':5.64,
'chinese':5.64,
'circular':5.64,
'civilian':5.64,
'civilians':5.64,
'closely':5.64,
'cognitive':5.64,
'combo':5.64,
'commons':5.64,
'competition':5.64,
'construction':5.64,
'designated':5.64,
'dive':5.64,
'editor':5.64,
'employees':5.64,
'entitled':5.64,
'escape':5.64,
'every':5.64,
'evidence':5.64,
'expects':5.64,
'financing':5.64,
'flown':5.64,
'followers':5.64,
'gucci':5.64,
'guess':5.64,
'hamilton':5.64,
'handful':5.64,
'heads':5.64,
'heights':5.64,
'holding':5.64,
'importa':5.64,
'influence':5.64,
'inning':5.64,
'involvement':5.64,
'kathleen':5.64,
'kit':5.64,
'layer':5.64,
'lit':5.64,
'means':5.64,
'mtv':5.64,
'mystery':5.64,
'night\'s':5.64,
'obvious':5.64,
'oriented':5.64,
'owned':5.64,
'pace':5.64,
'pennsylvania':5.64,
'portions':5.64,
'presidents':5.64,
'probably':5.64,
'provision':5.64,
'purposes':5.64,
'rainy':5.64,
'rang':5.64,
'rangers':5.64,
'recommend':5.64,
'ricky':5.64,
'secular':5.64,
'senior':5.64,
'serving':5.64,
'sheets':5.64,
'southern':5.64,
'soy':5.64,
'speakers':5.64,
'spin':5.64,
'states':5.64,
'streets':5.64,
'symbol':5.64,
'techniques':5.64,
'tee':5.64,
'tends':5.64,
'tokio':5.64,
'trend':5.64,
'upload':5.64,
'use':5.64,
'vast':5.64,
'venture':5.64,
'veterans':5.64,
'wholesalers':5.64,
'wrote':5.64,
'elite':5.64,
'genes':5.64,
'hydrogen':5.63,
'intentions':5.63,
'lungs':5.63,
'measuring':5.63,
'origin':5.63,
'peripheral':5.63,
'twentieth':5.63,
'riders':5.62,
'spaces':5.62,
'vary':5.62,
'accent':5.62,
'airline':5.62,
'alma':5.62,
'appeal':5.62,
'around':5.62,
'assistant':5.62,
'associate':5.62,
'became':5.62,
'behavioral':5.62,
'bottle':5.62,
'buildings':5.62,
'buzz':5.62,
'can':5.62,
'catching':5.62,
'characteristics':5.62,
'charlie':5.62,
'clock':5.62,
'cloud':5.62,
'comments':5.62,
'corazon':5.62,
'cycle':5.62,
'describing':5.62,
'dice':5.62,
'display':5.62,
'dudes':5.62,
'dutch':5.62,
'espn':5.62,
'eve':5.62,
'ford':5.62,
'formal':5.62,
'fry':5.62,
'heading':5.62,
'heather':5.62,
'homies':5.62,
'instances':5.62,
'jews':5.62,
'leaves':5.62,
'leg':5.62,
'lolol':5.62,
'managing':5.62,
'material':5.62,
'media':5.62,
'microsoft':5.62,
'next':5.62,
'oct':5.62,
'organisms':5.62,
'page':5.62,
'pages':5.62,
'pitch':5.62,
'poll':5.62,
'printer':5.62,
'proportion':5.62,
'proportions':5.62,
'referred':5.62,
'reflects':5.62,
'reply':5.62,
'resulted':5.62,
'rockville':5.62,
'runs':5.62,
'sacred':5.62,
'sells':5.62,
'sidewalk':5.62,
'snowed':5.62,
'status':5.62,
'they':5.62,
'through':5.62,
'tommy':5.62,
'transaction':5.62,
'tub':5.62,
'variable':5.62,
'vday':5.62,
'virtual':5.62,
'watches':5.62,
'we\'ve':5.62,
'widely':5.62,
'ya\'ll':5.62,
'york':5.62,
'ideological':5.61,
'midst':5.61,
'comparatively':5.6,
'address':5.6,
'airlines':5.6,
'ancient':5.6,
'apartment':5.6,
'apparent':5.6,
'arranged':5.6,
'assembly':5.6,
'bathroom':5.6,
'bees':5.6,
'bon':5.6,
'brasil':5.6,
'called':5.6,
'caroline':5.6,
'centers':5.6,
'central':5.6,
'chapter':5.6,
'cleaning':5.6,
'columns':5.6,
'combined':5.6,
'concrete':5.6,
'considering':5.6,
'consulting':5.6,
'covers':5.6,
'crew':5.6,
'edinburgh':5.6,
'efforts':5.6,
'eleven':5.6,
'enormous':5.6,
'entirely':5.6,
'evan':5.6,
'francisco':5.6,
'frequency':5.6,
'function':5.6,
'got':5.6,
'historians':5.6,
'hop':5.6,
'idol':5.6,
'immediate':5.6,
'indianapolis':5.6,
'involving':5.6,
'layers':5.6,
'level':5.6,
'links':5.6,
'lisa':5.6,
'mouths':5.6,
'news':5.6,
'occasional':5.6,
'outcome':5.6,
'pat':5.6,
'patent':5.6,
'perceived':5.6,
'pick':5.6,
'pope':5.6,
'priest':5.6,
'pronounced':5.6,
'quotes':5.6,
'reaching':5.6,
'relatively':5.6,
'reminded':5.6,
'reynolds':5.6,
'runner':5.6,
'saying':5.6,
'seeking':5.6,
'specific':5.6,
'spell':5.6,
'stand':5.6,
'suggested':5.6,
'title':5.6,
'topics':5.6,
'trustees':5.6,
'twice':5.6,
'utilities':5.6,
'veteran':5.6,
'viewed':5.6,
'virtually':5.6,
'walker':5.6,
'watchin':5.6,
'your':5.6,
'accustomed':5.6,
'deed':5.6,
'besos':5.59,
'classroom':5.59,
'comparative':5.59,
'constituted':5.59,
'indicating':5.59,
'occurs':5.59,
'parallel':5.59,
'sentences':5.59,
'vita':5.59,
'habits':5.58,
'\#iphone':5.58,
'allison':5.58,
'appeals':5.58,
'apps':5.58,
'arizona':5.58,
'attached':5.58,
'bags':5.58,
'barack':5.58,
'bears':5.58,
'bell':5.58,
'brand':5.58,
'broad':5.58,
'broader':5.58,
'bulletin':5.58,
'cara':5.58,
'casey':5.58,
'cerebral':5.58,
'chew':5.58,
'circle':5.58,
'cities':5.58,
'client':5.58,
'comes':5.58,
'comment':5.58,
'consists':5.58,
'corrected':5.58,
'current':5.58,
'danny':5.58,
'decisions':5.58,
'delaware':5.58,
'described':5.58,
'did':5.58,
'estimated':5.58,
'even':5.58,
'example':5.58,
'executive':5.58,
'feet':5.58,
'filling':5.58,
'finally':5.58,
'financed':5.58,
'fingers':5.58,
'formed':5.58,
'front':5.58,
'gran':5.58,
'hier':5.58,
'hips':5.58,
'i\'ve':5.58,
'ibm':5.58,
'identify':5.58,
'intro':5.58,
'kennedy':5.58,
'laura':5.58,
'lay':5.58,
'lets':5.58,
'lewis':5.58,
'linda':5.58,
'long-term':5.58,
'looked':5.58,
'lords':5.58,
'man\'s':5.58,
'marie':5.58,
'massachusetts':5.58,
'microphone':5.58,
'mills':5.58,
'ministry':5.58,
'mumbai':5.58,
'named':5.58,
'navy':5.58,
'operative':5.58,
'overnight':5.58,
'peep':5.58,
'pot':5.58,
'pursuit':5.58,
'rapidly':5.58,
'recorded':5.58,
'returning':5.58,
'rooms':5.58,
'seats':5.58,
'set':5.58,
'shortly':5.58,
'shoutout':5.58,
'soho':5.58,
'solely':5.58,
'stuff':5.58,
'suburban':5.58,
'talkin':5.58,
'teenage':5.58,
'thighs':5.58,
'thing':5.58,
'times':5.58,
'traders':5.58,
'trending':5.58,
'tries':5.58,
'valve':5.58,
'vermont':5.58,
'voters':5.58,
'waist':5.58,
'warming':5.58,
'we\'ll':5.58,
'yang':5.58,
'declare':5.57,
'departments':5.57,
'mathematical':5.57,
'sow':5.57,
'density':5.57,
'colony':5.56,
'component':5.56,
'illusion':5.56,
'sip':5.56,
'stride':5.56,
'summary':5.56,
'\#musicmonday':5.56,
'acted':5.56,
'aide':5.56,
'alot':5.56,
'analyst':5.56,
'announces':5.56,
'aspects':5.56,
'associates':5.56,
'attempt':5.56,
'basically':5.56,
'blowin':5.56,
'bong':5.56,
'brush':5.56,
'camps':5.56,
'cap':5.56,
'change':5.56,
'characterized':5.56,
'christopher':5.56,
'civil':5.56,
'clients':5.56,
'columnist':5.56,
'connecticut':5.56,
'consider':5.56,
'consumers':5.56,
'contents':5.56,
'dial':5.56,
'directly':5.56,
'discussed':5.56,
'electron':5.56,
'elle':5.56,
'era':5.56,
'evaluate':5.56,
'explanation':5.56,
'extends':5.56,
'fairfield':5.56,
'format':5.56,
'forming':5.56,
'golf':5.56,
'hampshire':5.56,
'his':5.56,
'inbox':5.56,
'indication':5.56,
'ink':5.56,
'innings':5.56,
'jay-z':5.56,
'jumped':5.56,
'kelly':5.56,
'lauren':5.56,
'leather':5.56,
'license':5.56,
'makes':5.56,
'manchester':5.56,
'marathon':5.56,
'matches':5.56,
'measured':5.56,
'method':5.56,
'mounted':5.56,
'nickel':5.56,
'on':5.56,
'opera':5.56,
'organizations':5.56,
'pan':5.56,
'passage':5.56,
'password':5.56,
'place':5.56,
'playa':5.56,
'presidency':5.56,
'product':5.56,
'promo':5.56,
'quarter':5.56,
'range':5.56,
'ranked':5.56,
'recent':5.56,
'red':5.56,
'regarding':5.56,
'remained':5.56,
'rolls':5.56,
'solo':5.56,
'stay':5.56,
'steppin':5.56,
'stepping':5.56,
'studying':5.56,
'substance':5.56,
'systematic':5.56,
'titles':5.56,
'tons':5.56,
'treated':5.56,
'turkish':5.56,
'type':5.56,
'varied':5.56,
'verbal':5.56,
'vida':5.56,
'vodka':5.56,
'voices':5.56,
'voltage':5.56,
'winding':5.56,
'wisconsin':5.56,
'woke':5.56,
'word':5.56,
'worker':5.56,
'working':5.56,
'yonkers':5.56,
'input':5.55,
'analyses':5.55,
'array':5.55,
'calculations':5.55,
'dixie':5.55,
'floss':5.55,
'molecular':5.55,
'pavement':5.55,
'tame':5.55,
'warriors':5.55,
'gospel':5.55,
'theological':5.55,
'depth':5.54,
'acquisitions':5.54,
'adam':5.54,
'apparently':5.54,
'attempts':5.54,
'attended':5.54,
'awwww':5.54,
'biological':5.54,
'bobby':5.54,
'box':5.54,
'bradley':5.54,
'brooklyn':5.54,
'brunswick':5.54,
'bud':5.54,
'buena':5.54,
'cardinal':5.54,
'catherine':5.54,
'chapel':5.54,
'chest':5.54,
'circulation':5.54,
'criteria':5.54,
'cuban':5.54,
'dame':5.54,
'daniel':5.54,
'dealing':5.54,
'device':5.54,
'direction':5.54,
'doctors':5.54,
'domain':5.54,
'dubai':5.54,
'ear':5.54,
'electrical':5.54,
'emily':5.54,
'every1':5.54,
'firmly':5.54,
'frame':5.54,
'gee':5.54,
'go':5.54,
'gusta':5.54,
'hablar':5.54,
'handle':5.54,
'him':5.54,
'holdings':5.54,
'journalist':5.54,
'lap':5.54,
'look':5.54,
'machinery':5.54,
'materials':5.54,
'mount':5.54,
'mysterious':5.54,
'nicholas':5.54,
'notes':5.54,
'ny':5.54,
'obviously':5.54,
'pamela':5.54,
'panel':5.54,
'particular':5.54,
'person':5.54,
'posted':5.54,
'ppl':5.54,
'prompted':5.54,
'properties':5.54,
'quarterback':5.54,
'rating':5.54,
'rearrange':5.54,
'reason':5.54,
'regions':5.54,
'reminder':5.54,
'rims':5.54,
'ross':5.54,
'round':5.54,
'say':5.54,
'signs':5.54,
'simpson':5.54,
'sources':5.54,
'sponsored':5.54,
'stephen':5.54,
'suggesting':5.54,
'suzanne':5.54,
'texas':5.54,
'thursdays':5.54,
'timeline':5.54,
'tools':5.54,
'trio':5.54,
'yale':5.54,
'abstract':5.53,
'accordance':5.53,
'fellas':5.53,
'lean':5.53,
'outward':5.53,
'particle':5.53,
'pipe':5.53,
'rely':5.53,
'sheet':5.53,
'sole':5.53,
'whatcha':5.53,
'arrow':5.52,
'blazing':5.52,
'wherever':5.52,
'aimed':5.52,
'alice':5.52,
'alicia':5.52,
'amendment':5.52,
'amy':5.52,
'appointment':5.52,
'archive':5.52,
'article':5.52,
'beth':5.52,
'beverly':5.52,
'beyonce':5.52,
'bottles':5.52,
'boxes':5.52,
'branch':5.52,
'carol':5.52,
'categories':5.52,
'class':5.52,
'coaches':5.52,
'compounds':5.52,
'could':5.52,
'cube':5.52,
'demonstrate':5.52,
'edition':5.52,
'employers':5.52,
'episodes':5.52,
'eva':5.52,
'exceptions':5.52,
'extension':5.52,
'filled':5.52,
'findings':5.52,
'graphic':5.52,
'headline':5.52,
'horizontal':5.52,
'jefferson':5.52,
'jets':5.52,
'jose':5.52,
'josh':5.52,
'journalism':5.52,
'monica':5.52,
'montgomery':5.52,
'nbc':5.52,
'north':5.52,
'outer':5.52,
'papi':5.52,
'parker':5.52,
'patrick':5.52,
'peas':5.52,
'plains':5.52,
'pm':5.52,
'positions':5.52,
'posting':5.52,
'ray':5.52,
'reaction':5.52,
'reference':5.52,
'reflected':5.52,
'remain':5.52,
'reserves':5.52,
'rockefeller':5.52,
'room':5.52,
'russian':5.52,
'ryan':5.52,
'sara':5.52,
'she\'s':5.52,
'subjective':5.52,
'suggest':5.52,
'summit':5.52,
'synagogue':5.52,
'taylor':5.52,
'throughout':5.52,
'tony':5.52,
'traded':5.52,
'trailer':5.52,
'twitters':5.52,
'u':5.52,
'url':5.52,
'usage':5.52,
'vet':5.52,
'vikings':5.52,
'whispers':5.52,
'beso':5.52,
'respiratory':5.52,
'fills':5.51,
'behaviors':5.51,
'breed':5.51,
'layin':5.51,
'maze':5.51,
'measurement':5.51,
'occurrence':5.51,
'priests':5.51,
'receptor':5.51,
'slope':5.51,
'tener':5.51,
'thong':5.51,
'account':5.5,
'adds':5.5,
'anglo':5.5,
'application':5.5,
'arm':5.5,
'atoms':5.5,
'austin':5.5,
'behavior':5.5,
'beyond':5.5,
'bloggers':5.5,
'bow':5.5,
'brief':5.5,
'buffalo':5.5,
'capacity':5.5,
'chairwoman':5.5,
'channel':5.5,
'charlotte':5.5,
'christine':5.5,
'clay':5.5,
'consumer':5.5,
'count':5.5,
'crews':5.5,
'david':5.5,
'democrats':5.5,
'doing':5.5,
'editions':5.5,
'effects':5.5,
'equivalent':5.5,
'eyed':5.5,
'faculty':5.5,
'feels':5.5,
'fellow':5.5,
'figure':5.5,
'finals':5.5,
'fm':5.5,
'footsteps':5.5,
'frequently':5.5,
'generation':5.5,
'genetic':5.5,
'glasses':5.5,
'halfway':5.5,
'handled':5.5,
'in':5.5,
'insure':5.5,
'investors':5.5,
'item':5.5,
'jack':5.5,
'jane':5.5,
'jones':5.5,
'leven':5.5,
'manifest':5.5,
'map':5.5,
'maria':5.5,
'melissa':5.5,
'minute':5.5,
'normally':5.5,
'pastor':5.5,
'patricia':5.5,
'pole':5.5,
'preface':5.5,
'prep':5.5,
'quiet':5.5,
'ran':5.5,
'rated':5.5,
'repertory':5.5,
'retailers':5.5,
'retain':5.5,
'rub':5.5,
'russia':5.5,
'settings':5.5,
'skin':5.5,
'sms':5.5,
'specifically':5.5,
'steven':5.5,
'stevie':5.5,
'texting':5.5,
'tie':5.5,
'transmission':5.5,
'unit':5.5,
'variation':5.5,
'vol':5.5,
'wanna':5.5,
'we\'re':5.5,
'wearing':5.5,
'westside':5.5,
'wild':5.5,
'womb':5.5,
'works':5.5,
'yankee':5.5,
'responsibilities':5.49,
'awaits':5.49,
'interface':5.49,
'mics':5.49,
'modified':5.49,
'remark':5.49,
'supervision':5.49,
'weave':5.49,
'flame':5.49,
'interpreted':5.49,
'11:00:00AM':5.48,
'acts':5.48,
'actual':5.48,
'adviser':5.48,
'advisers':5.48,
'ahhh':5.48,
'alabama':5.48,
'along':5.48,
'announced':5.48,
'approached':5.48,
'attempted':5.48,
'ballin':5.48,
'beta':5.48,
'bling':5.48,
'canon':5.48,
'cheap':5.48,
'church':5.48,
'cincinnati':5.48,
'column':5.48,
'compound':5.48,
'concept':5.48,
'consultant':5.48,
'convention':5.48,
'coupe':5.48,
'cumulative':5.48,
'demo':5.48,
'donald':5.48,
'elements':5.48,
'encountered':5.48,
'everytime':5.48,
'exception':5.48,
'export':5.48,
'extensive':5.48,
'external':5.48,
'fb':5.48,
'felt':5.48,
'guild':5.48,
'habit':5.48,
'he\'ll':5.48,
'here':5.48,
'highway':5.48,
'holmes':5.48,
'ikea':5.48,
'indicate':5.48,
'janet':5.48,
'joan':5.48,
'jump':5.48,
'kate':5.48,
'katherine':5.48,
'katie':5.48,
'katy':5.48,
'korean':5.48,
'laurie':5.48,
'led':5.48,
'lei':5.48,
'longest':5.48,
'luna':5.48,
'madison':5.48,
'mark':5.48,
'may':5.48,
'methods':5.48,
'mixed':5.48,
'motor':5.48,
'naval':5.48,
'nicole':5.48,
'nose':5.48,
'oklahoma':5.48,
'ole':5.48,
'operates':5.48,
'palm':5.48,
'particles':5.48,
'pepper':5.48,
'physics':5.48,
'picking':5.48,
'portion':5.48,
'post':5.48,
'powell':5.48,
'predicted':5.48,
'quoted':5.48,
'remote':5.48,
'requested':5.48,
'roller':5.48,
'route':5.48,
'run':5.48,
'sally':5.48,
'sell':5.48,
'server':5.48,
'sessions':5.48,
'shape':5.48,
'spread':5.48,
'square':5.48,
'stephanie':5.48,
'surfaces':5.48,
'surname':5.48,
'tampa':5.48,
'township':5.48,
'trek':5.48,
'tried':5.48,
'truck':5.48,
'tweet':5.48,
'user':5.48,
'users':5.48,
'utility':5.48,
'vamos':5.48,
'wells':5.48,
'ceiling':5.47,
'dwell':5.47,
'elaborate':5.47,
'grip':5.47,
'halls':5.47,
'loaded':5.47,
'metabolism':5.47,
'spinning':5.47,
'\#nowplaying':5.46,
'alex':5.46,
'appeared':5.46,
'arlington':5.46,
'blogger':5.46,
'bricks':5.46,
'cam':5.46,
'caption':5.46,
'carries':5.46,
'carroll':5.46,
'cavalry':5.46,
'challenged':5.46,
'chi':5.46,
'chronicle':5.46,
'coalition':5.46,
'colonies':5.46,
'competitive':5.46,
'conducted':5.46,
'consisted':5.46,
'contract':5.46,
'developers':5.46,
'diameter':5.46,
'directed':5.46,
'distributed':5.46,
'domestic':5.46,
'dozen':5.46,
'enough':5.46,
'equation':5.46,
'expectations':5.46,
'explain':5.46,
'followed':5.46,
'fox':5.46,
'further':5.46,
'gears':5.46,
'guts':5.46,
'helen':5.46,
'index':5.46,
'instructions':5.46,
'jeffrey':5.46,
'jerry':5.46,
'lafayette':5.46,
'laid':5.46,
'let\'s':5.46,
'linked':5.46,
'list':5.46,
'local':5.46,
'loop':5.46,
'manufacturers':5.46,
'math':5.46,
'matthew':5.46,
'meeting':5.46,
'megan':5.46,
'mornin':5.46,
'needed':5.46,
'object':5.46,
'organ':5.46,
'particularly':5.46,
'philly':5.46,
'process':5.46,
'projects':5.46,
'pulls':5.46,
'quote':5.46,
'rebecca':5.46,
'reform':5.46,
'religious':5.46,
'richmond':5.46,
'sandra':5.46,
'segment':5.46,
'sent':5.46,
'series':5.46,
'serve':5.46,
'sharon':5.46,
'shipping':5.46,
'shoulder':5.46,
'stacks':5.46,
'statements':5.46,
'surrounding':5.46,
'therapy':5.46,
'thy':5.46,
'twitter':5.46,
'uno':5.46,
'vincent':5.46,
'watched':5.46,
'wide':5.46,
'william':5.46,
'dome':5.46,
'filter':5.46,
'notions':5.46,
'unfold':5.46,
'administered':5.45,
'furthermore':5.45,
'situations':5.45,
'sociology':5.45,
'subsequent':5.45,
'sway':5.45,
'wrists':5.45,
'drawers':5.45,
'undoubtedly':5.45,
'2nite':5.44,
'amp':5.44,
'anita':5.44,
'area':5.44,
'arthur':5.44,
'assigned':5.44,
'aug':5.44,
'axis':5.44,
'battery':5.44,
'beside':5.44,
'bob':5.44,
'brown':5.44,
'calculation':5.44,
'carolina':5.44,
'carried':5.44,
'centres':5.44,
'chair':5.44,
'charter':5.44,
'columbia':5.44,
'company':5.44,
'consist':5.44,
'cope':5.44,
'counter':5.44,
'curtains':5.44,
'deck':5.44,
'den':5.44,
'doors':5.44,
'earl':5.44,
'editors':5.44,
'evelyn':5.44,
'fisher':5.44,
'flow':5.44,
'georgia':5.44,
'i\'d':5.44,
'imports':5.44,
'jay':5.44,
'joel':5.44,
'jordan':5.44,
'kong':5.44,
'lab':5.44,
'lateral':5.44,
'mass':5.44,
'meant':5.44,
'metal':5.44,
'mister':5.44,
'montana':5.44,
'moore':5.44,
'noche':5.44,
'nov':5.44,
'operating':5.44,
'overall':5.44,
'passes':5.44,
'passing':5.44,
'paul':5.44,
'phrase':5.44,
'possess':5.44,
'quantitative':5.44,
'recently':5.44,
'refers':5.44,
'represent':5.44,
'saw':5.44,
'search':5.44,
'sept':5.44,
'seventy':5.44,
'signal':5.44,
'solomon':5.44,
'stations':5.44,
'storage':5.44,
'street':5.44,
'subject':5.44,
'submit':5.44,
'surround':5.44,
'ten':5.44,
'tenants':5.44,
'thurs':5.44,
'tone':5.44,
'tongue':5.44,
'trunk':5.44,
'tweeted':5.44,
'versions':5.44,
'wagner':5.44,
'wax':5.44,
'wilson':5.44,
'worked':5.44,
'yen':5.44,
'zion':5.44,
'measurements':5.44,
'reactions':5.44,
'adjacent':5.43,
'bailar':5.43,
'kara':5.43,
'modes':5.43,
'proposition':5.43,
'remainder':5.43,
'steam':5.43,
'10:00:00AM':5.42,
'again':5.42,
'also':5.42,
'ashley':5.42,
'aye':5.42,
'background':5.42,
'bailey':5.42,
'barrel':5.42,
'bedford':5.42,
'booth':5.42,
'bowl':5.42,
'businessman':5.42,
'calls':5.42,
'came':5.42,
'carolyn':5.42,
'category':5.42,
'centre':5.42,
'chip':5.42,
'com':5.42,
'comprehensive':5.42,
'compromise':5.42,
'conductor':5.42,
'course':5.42,
'crow':5.42,
'dennis':5.42,
'derived':5.42,
'duration':5.42,
'enzyme':5.42,
'ever':5.42,
'financial':5.42,
'floors':5.42,
'frances':5.42,
'gene':5.42,
'going':5.42,
'gotten':5.42,
'he':5.42,
'himself':5.42,
'hockey':5.42,
'hopkins':5.42,
'initial':5.42,
'inner':5.42,
'instance':5.42,
'jeanne':5.42,
'jeremy':5.42,
'jr':5.42,
'julia':5.42,
'julie':5.42,
'listenin':5.42,
'livingston':5.42,
'memphis':5.42,
'mentioned':5.42,
'mercury':5.42,
'mini':5.42,
'monthly':5.42,
'nine':5.42,
'note':5.42,
'nowadays':5.42,
'om':5.42,
'oprah':5.42,
'pasa':5.42,
'penn':5.42,
'peter':5.42,
'point':5.42,
'polls':5.42,
'presentation':5.42,
'primarily':5.42,
'ranks':5.42,
'references':5.42,
'resulting':5.42,
'riley':5.42,
'rolled':5.42,
'roof':5.42,
'sam':5.42,
'sean':5.42,
'secretary':5.42,
'select':5.42,
'signals':5.42,
'snl':5.42,
'spencer':5.42,
'state\'s':5.42,
'subjects':5.42,
'tables':5.42,
'tell':5.42,
'terry':5.42,
'theory':5.42,
'tom':5.42,
'topic':5.42,
'toss':5.42,
'treasury\'s':5.42,
'tweets':5.42,
'yorkers':5.42,
'you\'ll':5.42,
'calculated':5.42,
'configuration':5.42,
'inhabitants':5.42,
'statute':5.42,
'interlude':5.41,
'clerk':5.41,
'constitutes':5.41,
'cylinder':5.41,
'knocks':5.41,
'ratio':5.41,
'tissue':5.41,
'variables':5.41,
'vector':5.41,
'vols':5.41,
'whassup':5.41,
'width':5.41,
'absolute':5.4,
'ah':5.4,
'alison':5.4,
'anne':5.4,
'arabia':5.4,
'arkansas':5.4,
'boldface':5.4,
'cast':5.4,
'chamber':5.4,
'china':5.4,
'claimed':5.4,
'conquest':5.4,
'consecutive':5.4,
'daily':5.4,
'dana':5.4,
'definitions':5.4,
'distribution':5.4,
'dna':5.4,
'document':5.4,
'each':5.4,
'earlier':5.4,
'embassy':5.4,
'esp':5.4,
'estimate':5.4,
'fam':5.4,
'figured':5.4,
'fuel':5.4,
'gulf':5.4,
'headquarters':5.4,
'healthcare':5.4,
'hee':5.4,
'holds':5.4,
'inside':5.4,
'intent':5.4,
'jan':5.4,
'johnson':5.4,
'joseph':5.4,
'lah':5.4,
'lawrence':5.4,
'lick':5.4,
'lou':5.4,
'lung':5.4,
'main':5.4,
'malcolm':5.4,
'margaret':5.4,
'matter':5.4,
'mexican':5.4,
'ministers':5.4,
'mixtape':5.4,
'nancy':5.4,
'oakland':5.4,
'obedience':5.4,
'one':5.4,
'paula':5.4,
'picks':5.4,
'processes':5.4,
'putting':5.4,
'ranging':5.4,
'reminds':5.4,
'reorganization':5.4,
'represents':5.4,
'rien':5.4,
'riverdale':5.4,
'sarah':5.4,
'seen':5.4,
'statistical':5.4,
'stayed':5.4,
'stomach':5.4,
'string':5.4,
'sushi':5.4,
'tap':5.4,
'testament':5.4,
'thee':5.4,
'they\'ll':5.4,
'transfer':5.4,
'two':5.4,
'xxx':5.4,
'origins':5.4,
'actin':5.39,
'cielo':5.39,
'defence':5.39,
'dub':5.39,
'empirical':5.39,
'explicitly':5.39,
'jive':5.39,
'reprinted':5.39,
'spins':5.39,
'\#letsbehonest':5.38,
'ahh':5.38,
'am':5.38,
'announcement':5.38,
'arms':5.38,
'baltimore':5.38,
'basis':5.38,
'butler':5.38,
'camino':5.38,
'carved':5.38,
'clark':5.38,
'coefficient':5.38,
'comp':5.38,
'control':5.38,
'copy':5.38,
'core':5.38,
'curriculum':5.38,
'dec':5.38,
'deemed':5.38,
'detective':5.38,
'different':5.38,
'doctrine':5.38,
'door':5.38,
'files':5.38,
'following':5.38,
'grams':5.38,
'hp':5.38,
'hudson':5.38,
'i\'ll':5.38,
'industry':5.38,
'items':5.38,
'jamie':5.38,
'jesse':5.38,
'latin':5.38,
'let':5.38,
'lite':5.38,
'lookin':5.38,
'machine':5.38,
'manner':5.38,
'mit':5.38,
'nelson':5.38,
'nitrogen':5.38,
'nucleus':5.38,
'official':5.38,
'overtime':5.38,
'personnel':5.38,
'pitching':5.38,
'projected':5.38,
'province':5.38,
'rope':5.38,
'said':5.38,
'second':5.38,
'securities':5.38,
'send':5.38,
'sensitivity':5.38,
'shall':5.38,
'soldiers':5.38,
'standards':5.38,
'statistically':5.38,
'steps':5.38,
'steve':5.38,
'tan':5.38,
'technical':5.38,
'text':5.38,
'thread':5.38,
'tierra':5.38,
'timbaland':5.38,
'tricks':5.38,
'tunnel':5.38,
'twelve':5.38,
'wants':5.38,
'wednesday':5.38,
'whew':5.38,
'wordpress':5.38,
'would':5.38,
'yards':5.38,
'year':5.38,
'yesterday\'s':5.38,
'comparison':5.37,
'ella':5.37,
'givin':5.37,
'hem':5.37,
'parish':5.37,
'silently':5.37,
'sits':5.37,
'whispering':5.37,
'illusions':5.36,
'asked':5.36,
'bee':5.36,
'briefing':5.36,
'britney':5.36,
'capitol':5.36,
'caps':5.36,
'claire':5.36,
'clip':5.36,
'clips':5.36,
'colonial':5.36,
'constitute':5.36,
'contracts':5.36,
'covering':5.36,
'customs':5.36,
'dash':5.36,
'delta':5.36,
'dishes':5.36,
'economic':5.36,
'edit':5.36,
'eileen':5.36,
'establishment':5.36,
'finger':5.36,
'georgetown':5.36,
'gloria':5.36,
'greene':5.36,
'gud':5.36,
'hall':5.36,
'hay':5.36,
'heard':5.36,
'jimmy':5.36,
'linear':5.36,
'liquor':5.36,
'listing':5.36,
'lmaoo':5.36,
'mason':5.36,
'miller':5.36,
'milwaukee':5.36,
'monde':5.36,
'mouse':5.36,
'moving':5.36,
'msn':5.36,
'nba':5.36,
'nude':5.36,
'nuestro':5.36,
'overview':5.36,
'oz':5.36,
'pattern':5.36,
'port':5.36,
'possession':5.36,
'press':5.36,
'principal':5.36,
'pronto':5.36,
'quiero':5.36,
'rabbi':5.36,
'reposing':5.36,
'russell':5.36,
'same':5.36,
'si':5.36,
'sim':5.36,
'sit':5.36,
'sold':5.36,
'sounded':5.36,
'staff':5.36,
'standing':5.36,
'stocks':5.36,
'structure':5.36,
'stuart':5.36,
'subsequently':5.36,
'sympathy':5.36,
'taiwan':5.36,
'target':5.36,
'teeth':5.36,
'trenton':5.36,
'tres':5.36,
'trucks':5.36,
'tuesdays':5.36,
'tummy':5.36,
'tweeting':5.36,
'verb':5.36,
'vest':5.36,
'wakes':5.36,
'walter':5.36,
'we\'d':5.36,
'westchester':5.36,
'wi':5.36,
'wright':5.36,
'you\'d':5.36,
'yugoslavia':5.36,
'emperor':5.35,
'thesis':5.35,
'chevy':5.35,
'della':5.35,
'finite':5.35,
'loot':5.35,
'motive':5.35,
'define':5.34,
'\#news':5.34,
'adams':5.34,
'advised':5.34,
'andrea':5.34,
'anonymity':5.34,
'anthony':5.34,
'anything':5.34,
'anywhere':5.34,
'arc':5.34,
'areas':5.34,
'ay':5.34,
'backs':5.34,
'bros':5.34,
'campaign':5.34,
'candidate':5.34,
'carter':5.34,
'checked':5.34,
'classified':5.34,
'colts':5.34,
'comparable':5.34,
'crossing':5.34,
'currently':5.34,
'denver':5.34,
'ding':5.34,
'doctor':5.34,
'drank':5.34,
'editorial':5.34,
'flick':5.34,
'fur':5.34,
'gear':5.34,
'geek':5.34,
'german':5.34,
'giant':5.34,
'giants':5.34,
'hampton':5.34,
'harold':5.34,
'ily':5.34,
'iron':5.34,
'karen':5.34,
'korea':5.34,
'liebe':5.34,
'lillian':5.34,
'log':5.34,
'manufacturer':5.34,
'massive':5.34,
'maureen':5.34,
'mc':5.34,
'middle':5.34,
'moderate':5.34,
'nog':5.34,
'noticed':5.34,
'occurred':5.34,
'ohhhh':5.34,
'orleans':5.34,
'ounce':5.34,
'pack':5.34,
'percent':5.34,
'phil':5.34,
'physician':5.34,
'rate':5.34,
'regional':5.34,
'request':5.34,
'revolution':5.34,
'rihanna':5.34,
'roosevelt':5.34,
'session':5.34,
'six':5.34,
'sullivan':5.34,
'surgeon':5.34,
'susan':5.34,
'sylvia':5.34,
'then':5.34,
'they\'re':5.34,
'thinkin':5.34,
'tmrw':5.34,
'transmitted':5.34,
'tube':5.34,
'typing':5.34,
'upon':5.34,
'walmart':5.34,
'whitman':5.34,
'whitney':5.34,
'wider':5.34,
'within':5.34,
'yo':5.34,
'blink':5.33,
'noches':5.33,
'threshold':5.33,
'bringin':5.33,
'tutti':5.33,
'verdad':5.33,
'abraham':5.32,
'alter':5.32,
'andre':5.32,
'beep':5.32,
'bench':5.32,
'bucket':5.32,
'calif':5.32,
'chin':5.32,
'commerce':5.32,
'compare':5.32,
'cover':5.32,
'currents':5.32,
'deepest':5.32,
'dorothy':5.32,
'editorials':5.32,
'emeritus':5.32,
'endless':5.32,
'estimates':5.32,
'evaluation':5.32,
'firm':5.32,
'francis':5.32,
'general':5.32,
'gregory':5.32,
'hoffman':5.32,
'hour':5.32,
'identified':5.32,
'indicates':5.32,
'jacqueline':5.32,
'joshua':5.32,
'kristen':5.32,
'label':5.32,
'literally':5.32,
'louise':5.32,
'mas':5.32,
'measure':5.32,
'medium':5.32,
'mention':5.32,
'michigan':5.32,
'names':5.32,
'nassau':5.32,
'negotiations':5.32,
'nineteenth':5.32,
'pa':5.32,
'palmer':5.32,
'partly':5.32,
'peeps':5.32,
'plz':5.32,
'posts':5.32,
'presumably':5.32,
'quite':5.32,
'rebounds':5.32,
'remind':5.32,
'reserve':5.32,
'review':5.32,
'rite':5.32,
'rye':5.32,
'selena':5.32,
'site':5.32,
'skip':5.32,
'someone\'s':5.32,
'speech':5.32,
'step':5.32,
'subway':5.32,
'surface':5.32,
'table':5.32,
'taking':5.32,
'tells':5.32,
'ticket':5.32,
'ting':5.32,
'tribes':5.32,
'turning':5.32,
'two-year':5.32,
'types':5.32,
'urself':5.32,
'vancouver':5.32,
'varies':5.32,
'yield':5.32,
'zone':5.32,
'preceding':5.31,
'affecting':5.31,
'alles':5.31,
'bop':5.31,
'consume':5.31,
'discipline':5.31,
'disposition':5.31,
'gypsy':5.31,
'heed':5.31,
'ion':5.31,
'shelf':5.31,
'stash':5.31,
'varying':5.31,
'vivir':5.31,
'\#fact':5.3,
'*estimated':5.3,
'actually':5.3,
'aire':5.3,
'ancora':5.3,
'atlanta':5.3,
'barnes':5.3,
'bat':5.3,
'biblical':5.3,
'bishop':5.3,
'bonnie':5.3,
'boundary':5.3,
'brad':5.3,
'brian':5.3,
'bring':5.3,
'calendar':5.3,
'carnegie':5.3,
'catholic':5.3,
'center':5.3,
'chairman':5.3,
'chrysler':5.3,
'circuits':5.3,
'colin':5.3,
'constantly':5.3,
'cornell':5.3,
'correspondent':5.3,
'counts':5.3,
'county':5.3,
'creature':5.3,
'dave':5.3,
'drake':5.3,
'editing':5.3,
'eight':5.3,
'elevator':5.3,
'glen':5.3,
'irene':5.3,
'jk':5.3,
'junior':5.3,
'km/h':5.3,
'lee':5.3,
'lesson':5.3,
'levels':5.3,
'lexington':5.3,
'md':5.3,
'medicare':5.3,
'mic':5.3,
'mike':5.3,
'miles':5.3,
'miriam':5.3,
'mph':5.3,
'murphy':5.3,
'neck':5.3,
'nova':5.3,
'number':5.3,
'one\'s':5.3,
'patch':5.3,
'pay':5.3,
'peggy':5.3,
'placed':5.3,
'pounds':5.3,
'president\'s':5.3,
'profile':5.3,
'quiz':5.3,
'rail':5.3,
'randy':5.3,
'reviews':5.3,
'ritual':5.3,
'robert':5.3,
'roberts':5.3,
'roger':5.3,
'samuel':5.3,
'scales':5.3,
'sec':5.3,
'seth':5.3,
'seymour':5.3,
'silly':5.3,
'singular':5.3,
'somebody':5.3,
'someone':5.3,
'spray':5.3,
'suit':5.3,
'system':5.3,
'tactics':5.3,
'telling':5.3,
'tend':5.3,
'third':5.3,
'transition':5.3,
'trump':5.3,
'via':5.3,
'vids':5.3,
'visitation':5.3,
'washing':5.3,
'ways':5.3,
'weekly':5.3,
'windy':5.3,
'year\'s':5.3,
'you\'re':5.3,
'hitherto':5.29,
'incorporated':5.29,
'prescribed':5.29,
'assumption':5.29,
'cama':5.29,
'clergy':5.29,
'heel':5.29,
'playas':5.29,
'rodeo':5.29,
'shakin':5.29,
'transferred':5.29,
'2-bath':5.28,
'alert':5.28,
'already':5.28,
'annual':5.28,
'assessment':5.28,
'beef':5.28,
'behalf':5.28,
'borough':5.28,
'code':5.28,
'comin':5.28,
'congregation':5.28,
'copies':5.28,
'craig':5.28,
'cuore':5.28,
'dean':5.28,
'declared':5.28,
'defended':5.28,
'diplomat':5.28,
'dot':5.28,
'empire':5.28,
'estar':5.28,
'esther':5.28,
'etsy':5.28,
'eventually':5.28,
'extract':5.28,
'feelin':5.28,
'follower':5.28,
'form':5.28,
'gates':5.28,
'handling':5.28,
'hannah':5.28,
'happen':5.28,
'harriet':5.28,
'harvey':5.28,
'held':5.28,
'holla':5.28,
'inches':5.28,
'institute':5.28,
'interviewed':5.28,
'jacobs':5.28,
'james':5.28,
'l':5.28,
'length':5.28,
'mag':5.28,
'martha':5.28,
'meanwhile':5.28,
'minutes':5.28,
'mode':5.28,
'morton':5.28,
'nonprofit':5.28,
'ora':5.28,
'packed':5.28,
'packing':5.28,
'pandora':5.28,
'parameter':5.28,
'posse':5.28,
'preacher':5.28,
'representatives':5.28,
'rewind':5.28,
'says':5.28,
'scheduled':5.28,
'secrets':5.28,
'section':5.28,
'serum':5.28,
'sheila':5.28,
'someday':5.28,
'sometimes':5.28,
'somewhere':5.28,
'sort':5.28,
'stands':5.28,
'state':5.28,
'stats':5.28,
'stays':5.28,
'temporal':5.28,
'that\'s':5.28,
'theodore':5.28,
'theology':5.28,
'tracks':5.28,
'tyler':5.28,
'unions':5.28,
'version':5.28,
'wandering':5.28,
'years':5.28,
'york\'s':5.28,
'specified':5.28,
'leben':5.27,
'anyhow':5.27,
'bumpin':5.27,
'governed':5.27,
'holdin':5.27,
'implies':5.27,
'moet':5.27,
'quieres':5.27,
'revised':5.27,
'semi':5.27,
'africa':5.26,
'agency':5.26,
'asking':5.26,
'based':5.26,
'berlin':5.26,
'bid':5.26,
'boyz':5.26,
'carrier':5.26,
'carrying':5.26,
'clinton\'s':5.26,
'commander':5.26,
'companies':5.26,
'conan':5.26,
'conference':5.26,
'converted':5.26,
'counsel':5.26,
'cynthia':5.26,
'dale':5.26,
'department':5.26,
'desk':5.26,
'detected':5.26,
'dias':5.26,
'digging':5.26,
'directions':5.26,
'doris':5.26,
'dormir':5.26,
'dramatic':5.26,
'drove':5.26,
'edward':5.26,
'elliott':5.26,
'facility':5.26,
'facing':5.26,
'fare':5.26,
'floyd':5.26,
'foto':5.26,
'frog':5.26,
'george':5.26,
'glenn':5.26,
'goes':5.26,
'ground':5.26,
'guidelines':5.26,
'hispanic':5.26,
'hmmmm':5.26,
'houston':5.26,
'jake':5.26,
'jim':5.26,
'justin':5.26,
'kay':5.26,
'lines':5.26,
'mainly':5.26,
'marcus':5.26,
'marshall':5.26,
'martin':5.26,
'matt':5.26,
'mayor':5.26,
'mr':5.26,
'mundo':5.26,
'nc':5.26,
'nearly':5.26,
'nina':5.26,
'papers':5.26,
'perry':5.26,
'philip':5.26,
'piece':5.26,
'plot':5.26,
'pouring':5.26,
'preliminary':5.26,
'print':5.26,
'prudential':5.26,
'qual':5.26,
'reasons':5.26,
'reed':5.26,
'register':5.26,
'richard':5.26,
'robinson':5.26,
'roslyn':5.26,
'semester':5.26,
'sergeant':5.26,
'shift':5.26,
'shirley':5.26,
'siempre':5.26,
'sir':5.26,
'spot':5.26,
'stated':5.26,
'statement':5.26,
'tool':5.26,
'uniform':5.26,
'units':5.26,
'walls':5.26,
'week\'s':5.26,
'lend':5.26,
'hangin':5.25,
'borne':5.24,
'differentiation':5.24,
'intermediate':5.24,
'motives':5.24,
'\#followfriday':5.24,
'a':5.24,
'abc':5.24,
'asks':5.24,
'beijing':5.24,
'bet':5.24,
'boeing':5.24,
'chart':5.24,
'depend':5.24,
'diplomats':5.24,
'doin':5.24,
'donna':5.24,
'douglas':5.24,
'drivers':5.24,
'edited':5.24,
'elaine':5.24,
'ellis':5.24,
'encounter':5.24,
'evans':5.24,
'faced':5.24,
'fifth':5.24,
'fin':5.24,
'five':5.24,
'franklin':5.24,
'garage':5.24,
'generally':5.24,
'goin':5.24,
'harry':5.24,
'industries':5.24,
'insurance':5.24,
'iowa':5.24,
'irving':5.24,
'jajaja':5.24,
'kirk':5.24,
'lieutenant':5.24,
'longtime':5.24,
'matters':5.24,
'mid':5.24,
'minnesota':5.24,
'morgan':5.24,
'namely':5.24,
'nathan':5.24,
'oliver':5.24,
'parliamentary':5.24,
'partially':5.24,
'parts':5.24,
'persian':5.24,
'pon':5.24,
'poppin':5.24,
'publicly':5.24,
'returns':5.24,
'ringing':5.24,
'rookie':5.24,
'salomon':5.24,
'sat':5.24,
'seem':5.24,
'sf':5.24,
'should':5.24,
'since':5.24,
'socialist':5.24,
'sorts':5.24,
'spending':5.24,
'stanley':5.24,
'substances':5.24,
'there\'s':5.24,
'ties':5.24,
'ton':5.24,
'toujours':5.24,
'turned':5.24,
'txt':5.24,
'vessels':5.24,
'veux':5.24,
'way':5.24,
'wee':5.24,
'woah':5.24,
'work':5.24,
'fraction':5.23,
'depths':5.22,
'destino':5.22,
'nelly':5.22,
'rug':5.22,
'shed':5.22,
'18th':5.22,
'adjustment':5.22,
'afterward':5.22,
'ali':5.22,
'and':5.22,
'anderson':5.22,
'andrew':5.22,
'any':5.22,
'artery':5.22,
'as':5.22,
'baila':5.22,
'barbara':5.22,
'bernstein':5.22,
'bio':5.22,
'bits':5.22,
'briefs':5.22,
'cause':5.22,
'charles':5.22,
'chris':5.22,
'como':5.22,
'counties':5.22,
'counting':5.22,
'dc':5.22,
'defend':5.22,
'defending':5.22,
'dems':5.22,
'dexter':5.22,
'does':5.22,
'drama':5.22,
'excess':5.22,
'file':5.22,
'for':5.22,
'fordham':5.22,
'hartford':5.22,
'hours':5.22,
'immigrants':5.22,
'joe':5.22,
'kim':5.22,
'knicks':5.22,
'lambert':5.22,
'lane':5.22,
'lcd':5.22,
'lg':5.22,
'lois':5.22,
'mano':5.22,
'mia':5.22,
'mill':5.22,
'mondo':5.22,
'motors':5.22,
'nets':5.22,
'northern':5.22,
'officer':5.22,
'ohio':5.22,
'order':5.22,
'others':5.22,
'palabras':5.22,
'psychological':5.22,
'pump':5.22,
'real-estate':5.22,
'ridge':5.22,
'seems':5.22,
'sentence':5.22,
'suffolk':5.22,
'swallow':5.22,
'systems':5.22,
'tal':5.22,
'ted':5.22,
'thru':5.22,
'till':5.22,
'tim':5.22,
'tissues':5.22,
'too':5.22,
'trance':5.22,
'trick':5.22,
'typical':5.22,
'undertaken':5.22,
'usual':5.22,
'veins':5.22,
'whoa':5.22,
'wrist':5.22,
'ya':5.22,
'yankees':5.22,
'bibliography':5.21,
'masses':5.21,
'mente':5.21,
'norms':5.21,
'twist':5.21,
'criterion':5.2,
'eastside':5.2,
'mio':5.2,
'node':5.2,
'nombre':5.2,
'repeats':5.2,
'thereafter':5.2,
'agency\'s':5.2,
'alcohol':5.2,
'another':5.2,
'app':5.2,
'ask':5.2,
'berkeley':5.2,
'bonds':5.2,
'briefly':5.2,
'cab':5.2,
'carry':5.2,
'checking':5.2,
'continued':5.2,
'cunningham':5.2,
'dallas':5.2,
'dare':5.2,
'decade':5.2,
'dia':5.2,
'donde':5.2,
'during':5.2,
'economist':5.2,
'four':5.2,
'goldberg':5.2,
'gurl':5.2,
'happens':5.2,
'hebrew':5.2,
'immigration':5.2,
'inch':5.2,
'initially':5.2,
'intended':5.2,
'internal':5.2,
'itself':5.2,
'jaw':5.2,
'jeff':5.2,
'jersey':5.2,
'jetzt':5.2,
'john\'s':5.2,
'journalists':5.2,
'kevin':5.2,
'klein':5.2,
'knocking':5.2,
'lightning':5.2,
'lil':5.2,
'linger':5.2,
'loads':5.2,
'lobby':5.2,
'marketing':5.2,
'maurice':5.2,
'mayor\'s':5.2,
'medieval':5.2,
'mejor':5.2,
'moreover':5.2,
'necessity':5.2,
'negotiating':5.2,
'objects':5.2,
'pattinson':5.2,
'peel':5.2,
'percentage':5.2,
'physicians':5.2,
'pitcher':5.2,
'poco':5.2,
'retiring':5.2,
'return':5.2,
'retweeting':5.2,
'rick':5.2,
'rochester':5.2,
'rodriguez':5.2,
'rosen':5.2,
'russians':5.2,
'rutgers':5.2,
'secondary':5.2,
'sections':5.2,
'shes':5.2,
'slang':5.2,
'snap':5.2,
'tape':5.2,
'tighter':5.2,
'tires':5.2,
'turn':5.2,
'turns':5.2,
'van':5.2,
'viento':5.2,
'vuelve':5.2,
'warner':5.2,
'williams':5.2,
'yi':5.2,
'lotta':5.19,
'amar':5.19,
'dogg':5.19,
'dominant':5.19,
'retained':5.19,
'searched':5.19,
'turnin':5.19,
'kickin':5.18,
'ph':5.18,
'squad':5.18,
'tasks':5.18,
'duro':5.18,
'advocate':5.18,
'ahora':5.18,
'allan':5.18,
'back':5.18,
'barney':5.18,
'barry':5.18,
'basement':5.18,
'blowing':5.18,
'boards':5.18,
'bones':5.18,
'brick':5.18,
'candidates':5.18,
'cape':5.18,
'cha':5.18,
'chancellor':5.18,
'chap':5.18,
'china\'s':5.18,
'claim':5.18,
'classification':5.18,
'closet':5.18,
'cnn':5.18,
'collar':5.18,
'context':5.18,
'crawling':5.18,
'deborah':5.18,
'defense':5.18,
'democrat':5.18,
'election':5.18,
'etc':5.18,
'existing':5.18,
'from':5.18,
'gate':5.18,
'governor\'s':5.18,
'hardcore':5.18,
'has':5.18,
'hasta':5.18,
'horn':5.18,
'imperial':5.18,
'is':5.18,
'jacob':5.18,
'joint':5.18,
'jonathan':5.18,
'judith':5.18,
'kita':5.18,
'knees':5.18,
'legal':5.18,
'leonard':5.18,
'leslie':5.18,
'letting':5.18,
'lloyd':5.18,
'longer':5.18,
'lynn':5.18,
'minister':5.18,
'mon':5.18,
'monitor':5.18,
'month':5.18,
'mt':5.18,
'muy':5.18,
'ninth':5.18,
'notion':5.18,
'o\'connor':5.18,
'ore':5.18,
'pac':5.18,
'penis':5.18,
'pete':5.18,
'phyllis':5.18,
'plug':5.18,
'pour':5.18,
'public':5.18,
'ra':5.18,
'render':5.18,
'reporters':5.18,
'retreat':5.18,
'returned':5.18,
'reuters':5.18,
'ritmo':5.18,
'roar':5.18,
'sera':5.18,
'shaw':5.18,
'simon':5.18,
'slick':5.18,
'sox':5.18,
'stepped':5.18,
'stuffed':5.18,
'take':5.18,
'urge':5.18,
'woh':5.18,
'yah':5.18,
'fuse':5.17,
'capitalism':5.16,
'doet':5.16,
'examine':5.16,
'laced':5.16,
'lado':5.16,
'spine':5.16,
'zeit':5.16,
'census':5.16,
'\#tinychat':5.16,
'14th':5.16,
'81st':5.16,
'about':5.16,
'after-tax':5.16,
'apartments':5.16,
'are':5.16,
'ballot':5.16,
'barometer':5.16,
'basic':5.16,
'basin':5.16,
'betty':5.16,
'chain':5.16,
'cooper':5.16,
'cuomo':5.16,
'cyrus':5.16,
'depot':5.16,
'diane':5.16,
'diddy':5.16,
'dios':5.16,
'dos':5.16,
'downstairs':5.16,
'ds':5.16,
'ed':5.16,
'effect':5.16,
'ellen':5.16,
'feb':5.16,
'floor':5.16,
'fuego':5.16,
'gordon':5.16,
'greg':5.16,
'hari':5.16,
'hype':5.16,
'lang':5.16,
'leon':5.16,
'locker':5.16,
'lt':5.16,
'mil':5.16,
'mira':5.16,
'months':5.16,
'murray':5.16,
'nfl':5.16,
'notice':5.16,
'occur':5.16,
'ones':5.16,
'permission':5.16,
'platform':5.16,
'pointing':5.16,
'population':5.16,
'prevent':5.16,
'prolonged':5.16,
'react':5.16,
'remaining':5.16,
'reporter':5.16,
'rosenberg':5.16,
'sabes':5.16,
'she\'ll':5.16,
'staten':5.16,
'station':5.16,
'stein':5.16,
'such':5.16,
'suga':5.16,
'sweep':5.16,
'tendency':5.16,
'tested':5.16,
'their':5.16,
'thermal':5.16,
'troops':5.16,
'turner':5.16,
'utah':5.16,
'verizon':5.16,
'viene':5.16,
'vou':5.16,
'wears':5.16,
'whereby':5.16,
'ions':5.15,
'ing':5.15,
'posterior':5.15,
'anterior':5.14,
'bearing':5.14,
'complexity':5.14,
'copyright':5.14,
'haffi':5.14,
'lui':5.14,
'melting':5.14,
'10th':5.14,
'02:00:00PM':5.14,
'a1':5.14,
'adjusted':5.14,
'ann':5.14,
'antonio':5.14,
'aw':5.14,
'baller':5.14,
'ben':5.14,
'besides':5.14,
'bruce':5.14,
'calle':5.14,
'calor':5.14,
'cohen':5.14,
'conduct':5.14,
'cosa':5.14,
'district':5.14,
'eddie':5.14,
'endlessly':5.14,
'englewood':5.14,
'estoy':5.14,
'factors':5.14,
'farther':5.14,
'firms':5.14,
'fyi':5.14,
'gail':5.14,
'garcia':5.14,
'gente':5.14,
'governor':5.14,
'greenberg':5.14,
'harrison':5.14,
'havin':5.14,
'henry':5.14,
'hmmm':5.14,
'hypnotized':5.14,
'israelis':5.14,
'it\'ll':5.14,
'keith':5.14,
'knw':5.14,
'larry':5.14,
'laying':5.14,
'lesbian':5.14,
'louis':5.14,
'lovato':5.14,
'mets':5.14,
'mitchell':5.14,
'mu':5.14,
'onto':5.14,
'operated':5.14,
'pad':5.14,
'pittsburgh':5.14,
'poi':5.14,
'pre':5.14,
'puerto':5.14,
'regardless':5.14,
'region':5.14,
'rendered':5.14,
'repeat':5.14,
'retired':5.14,
'roberta':5.14,
'roy':5.14,
'seemed':5.14,
'shake':5.14,
'silence':5.14,
'somehow':5.14,
'soooo':5.14,
'stem':5.14,
'still':5.14,
'subsidies':5.14,
'supposed':5.14,
'tak':5.14,
'thou':5.14,
'thus':5.14,
'toes':5.14,
'track':5.14,
'verte':5.14,
'volver':5.14,
'weil':5.14,
'wet':5.14,
'y\'all':5.14,
'yearning':5.14,
'jar':5.12,
'callin':5.12,
'hierarchy':5.12,
'latter':5.12,
'mirada':5.12,
'pum':5.12,
'territories':5.12,
'\#fb':5.12,
'1-bath':5.12,
'9th':5.12,
'#NAME?':5.12,
'anata':5.12,
'ankle':5.12,
'anyway':5.12,
'anyways':5.12,
'aww':5.12,
'backed':5.12,
'bare':5.12,
'bernard':5.12,
'boom':5.12,
'bulk':5.12,
'c\'mon':5.12,
'c-after':5.12,
'c/o':5.12,
'cell':5.12,
'collins':5.12,
'comer':5.12,
'committee':5.12,
'contained':5.12,
'cops':5.12,
'coro':5.12,
'creo':5.12,
'crush':5.12,
'debating':5.12,
'deja':5.12,
'del':5.12,
'digo':5.12,
'duke':5.12,
'eleanor':5.12,
'extreme':5.12,
'foster':5.12,
'here\'s':5.12,
'hillary':5.12,
'jah':5.12,
'jason':5.12,
'jerusalem':5.12,
'juga':5.12,
'jurisdiction':5.12,
'kalo':5.12,
'kansas':5.12,
'ken':5.12,
'meine':5.12,
'ncaa':5.12,
'nyt':5.12,
'office':5.12,
'pas':5.12,
'policies':5.12,
'rear':5.12,
'reported':5.12,
'reporting':5.12,
'retweet':5.12,
'rounds':5.12,
'sais':5.12,
'shadows':5.12,
'side':5.12,
'silent':5.12,
'single':5.12,
'sixth':5.12,
'soldier':5.12,
'stairs':5.12,
'tau':5.12,
'territory':5.12,
'testimony':5.12,
'tex':5.12,
'tumbling':5.12,
'ty':5.12,
'typically':5.12,
'viii':5.12,
'von':5.12,
'wander':5.12,
'while':5.12,
'willie':5.12,
'wire':5.12,
'xx':5.12,
'ye':5.12,
'torch':5.11,
'brotha':5.1,
'conmigo':5.1,
'edges':5.1,
'amino':5.1,
'pause':5.1,
'populations':5.1,
'sealed':5.1,
'ren':5.1,
'20th':5.1,
'4th':5.1,
'\@dealsplus':5.1,
'aaron':5.1,
'according':5.1,
'administrative':5.1,
'albert':5.1,
'alleen':5.1,
'allen':5.1,
'ave':5.1,
'average':5.1,
'bases':5.1,
'before':5.1,
'bellwether':5.1,
'betta':5.1,
'between':5.1,
'bryan':5.1,
'bus':5.1,
'butt':5.1,
'ca':5.1,
'careful':5.1,
'carlos':5.1,
'cells':5.1,
'ceo':5.1,
'circuit':5.1,
'cliff':5.1,
'commissioner':5.1,
'consumption':5.1,
'curtis':5.1,
'davis':5.1,
'dealt':5.1,
'differential':5.1,
'dr':5.1,
'either':5.1,
'et':5.1,
'extent':5.1,
'factor':5.1,
'ff':5.1,
'gary':5.1,
'goldstein':5.1,
'he\'s':5.1,
'hou':5.1,
'huntington':5.1,
'ian':5.1,
'investigate':5.1,
'jb':5.1,
'jon':5.1,
'koch':5.1,
'lists':5.1,
'managers':5.1,
'mans':5.1,
'marc':5.1,
'marks':5.1,
'mata':5.1,
'merger':5.1,
'mich':5.1,
'minneapolis':5.1,
'mother-in-law':5.1,
'nhl':5.1,
'nick':5.1,
'o\'brien':5.1,
'obey':5.1,
'omg':5.1,
'phat':5.1,
'pin':5.1,
'protestant':5.1,
'puts':5.1,
'quien':5.1,
'replacement':5.1,
'requests':5.1,
'rev':5.1,
'rogers':5.1,
'routine':5.1,
'sai':5.1,
'schwartz':5.1,
'smith':5.1,
'smokin':5.1,
'sobre':5.1,
'sont':5.1,
'stack':5.1,
'steelers':5.1,
'tablet':5.1,
'thats':5.1,
'there':5.1,
'these':5.1,
'toe':5.1,
'tooo':5.1,
'wayne':5.1,
'welfare':5.1,
'wolf':5.1,
'youre':5.1,
'youu':5.1,
'specimen':5.09,
'fait':5.08,
'hump':5.08,
'kg':5.08,
'trace':5.08,
'assuming':5.08,
'dmc':5.08,
'glue':5.08,
'neutral':5.08,
'provincial':5.08,
'questa':5.08,
'sempre':5.08,
'unto':5.08,
'whispered':5.08,
'\#ohjustlikeme':5.08,
'12th':5.08,
'admitted':5.08,
'after':5.08,
'agent':5.08,
'albany':5.08,
'alfred':5.08,
'amid':5.08,
'az':5.08,
'base':5.08,
'berger':5.08,
'booked':5.08,
'bronxville':5.08,
'budget':5.08,
'buss':5.08,
'c-included':5.08,
'canaan':5.08,
'ch':5.08,
'commissioners':5.08,
'copie':5.08,
'cord':5.08,
'countdown':5.08,
'department\'s':5.08,
'districts':5.08,
'doug':5.08,
'eric':5.08,
'eugene':5.08,
'factory':5.08,
'falta':5.08,
'february':5.08,
'fence':5.08,
'fui':5.08,
'gilbert':5.08,
'hart':5.08,
'hij':5.08,
'hun':5.08,
'indonesia':5.08,
'jo':5.08,
'john':5.08,
'juan':5.08,
'knee':5.08,
'laws':5.08,
'listed':5.08,
'manhasset':5.08,
'marion':5.08,
'martinez':5.08,
'medicaid':5.08,
'medicine':5.08,
'meyer':5.08,
'might':5.08,
'morgen':5.08,
'morris':5.08,
'nas':5.08,
'necessarily':5.08,
'norman':5.08,
'noted':5.08,
'occasionally':5.08,
'ohhh':5.08,
'ooo':5.08,
'para':5.08,
'pls':5.08,
'quiere':5.08,
'requirement':5.08,
'schemes':5.08,
'scott':5.08,
'seconds':5.08,
'sen':5.08,
'sets':5.08,
'settle':5.08,
'seventh':5.08,
'so':5.08,
'soledad':5.08,
'specimens':5.08,
'squeeze':5.08,
'steel':5.08,
'stevens':5.08,
'stewart':5.08,
'stick':5.08,
'suis':5.08,
'tag':5.08,
'tattoo':5.08,
'therefore':5.08,
'timothy':5.08,
'told':5.08,
'transit':5.08,
'underground':5.08,
'va':5.08,
'wanted':5.08,
'week':5.08,
'yr':5.08,
'z':5.08,
'tiempo':5.06,
'denn':5.06,
'km':5.06,
'komt':5.06,
'mientras':5.06,
'swallowed':5.06,
'todas':5.06,
'puede':5.06,
'17th':5.06,
'19th':5.06,
'atl':5.06,
'aus':5.06,
'banker':5.06,
'belt':5.06,
'bend':5.06,
'cali':5.06,
'changed':5.06,
'changes':5.06,
'chill':5.06,
'committees':5.06,
'convo':5.06,
'corporation':5.06,
'decision':5.06,
'diego':5.06,
'diffusion':5.06,
'eighth':5.06,
'federation':5.06,
'five-year':5.06,
'flatbush':5.06,
'follows':5.06,
'frederick':5.06,
'ganas':5.06,
'gb':5.06,
'grab':5.06,
'hughes':5.06,
'ihn':5.06,
'interview':5.06,
'interviews':5.06,
'jag':5.06,
'kenneth':5.06,
'kerry':5.06,
'kimi':5.06,
'lakers':5.06,
'las':5.06,
'm':5.06,
'marilyn':5.06,
'mj':5.06,
'monitoring':5.06,
'moscow':5.06,
'moved':5.06,
'mujer':5.06,
'nel':5.06,
'nyu':5.06,
'one-year':5.06,
'p':5.06,
'phase':5.06,
'poder':5.06,
'primitive':5.06,
'rattle':5.06,
'reign':5.06,
'restated':5.06,
'rod':5.06,
'ruth':5.06,
'screening':5.06,
'sherman':5.06,
'socks':5.06,
'sought':5.06,
'speculation':5.06,
'spokesman':5.06,
'stones':5.06,
'streak':5.06,
'swept':5.06,
'sympathies':5.06,
'td':5.06,
'this':5.06,
'thompson':5.06,
'thunder':5.06,
'tiene':5.06,
'tin':5.06,
'tryin':5.06,
'tx':5.06,
'voy':5.06,
'vuoi':5.06,
'weeks':5.06,
'who':5.06,
'whoever':5.06,
'wil':5.06,
'avec':5.05,
'consequently':5.04,
'dynamite':5.04,
'judgement':5.04,
'thereby':5.04,
'voz':5.04,
'wooden':5.04,
'conquer':5.04,
'loco':5.04,
'onset':5.04,
'\'the':5.04,
'7th':5.04,
'8th':5.04,
'ada':5.04,
'advertising':5.04,
'anders':5.04,
'aqui':5.04,
'aunque':5.04,
'b-included':5.04,
'bbm':5.04,
'been':5.04,
'biz':5.04,
'blair':5.04,
'blaze':5.04,
'bone':5.04,
'bosnian':5.04,
'break':5.04,
'bronx':5.04,
'cc':5.04,
'charged':5.04,
'cole':5.04,
'complex':5.04,
'dee':5.04,
'doc':5.04,
'edith':5.04,
'esta':5.04,
'fla':5.04,
'fleet':5.04,
'fred':5.04,
'fue':5.04,
'harlem':5.04,
'hav':5.04,
'herz':5.04,
'hmm':5.04,
'hombre':5.04,
'hoy':5.04,
'hrs':5.04,
'hut':5.04,
'into':5.04,
'j':5.04,
'llegar':5.04,
'mai':5.04,
'margin':5.04,
'measures':5.04,
'mei':5.04,
'mile':5.04,
'milton':5.04,
'mm':5.04,
'myers':5.04,
'nun':5.04,
'occupied':5.04,
'officially':5.04,
'other':5.04,
'ova':5.04,
'patient':5.04,
'presbyterian':5.04,
'ps':5.04,
'put':5.04,
'replace':5.04,
'robertson':5.04,
'rochelle':5.04,
'rss':5.04,
's':5.04,
'searching':5.04,
'sha':5.04,
'sides':5.04,
'sittin':5.04,
'size':5.04,
'somos':5.04,
'spend':5.04,
'standin':5.04,
'stare':5.04,
'statistics':5.04,
'stone':5.04,
'sub':5.04,
'takes':5.04,
'tanto':5.04,
'that\'ll':5.04,
'theyre':5.04,
'tweetdeck':5.04,
'undercover':5.04,
'ves':5.04,
'vos':5.04,
'w/':5.04,
'whilst':5.04,
'wipe':5.04,
'corners':5.02,
'luz':5.02,
'nena':5.02,
'adesso':5.02,
'alle':5.02,
'betcha':5.02,
'curtain':5.02,
'getcha':5.02,
'mash':5.02,
'preach':5.02,
'puedo':5.02,
'strings':5.02,
'tubes':5.02,
'veo':5.02,
'\#quote':5.02,
'6th':5.02,
'\@theellenshow':5.02,
'administrator':5.02,
'analysts':5.02,
'anyone':5.02,
'apologize':5.02,
'blacks':5.02,
'blvd':5.02,
'bu':5.02,
'burke':5.02,
'buses':5.02,
'c-net':5.02,
'carl':5.02,
'case':5.02,
'coleman':5.02,
'competing':5.02,
'controls':5.02,
'conventional':5.02,
'cuando':5.02,
'diagnostic':5.02,
'disclosure':5.02,
'documents':5.02,
'doy':5.02,
'draft':5.02,
'esse':5.02,
'estou':5.02,
'final':5.02,
'flat':5.02,
'flip':5.02,
'foot':5.02,
'gettin':5.02,
'gotta':5.02,
'happened':5.02,
'heeft':5.02,
'hot':5.02,
'ii':5.02,
'im':5.02,
'implied':5.02,
'industrial':5.02,
'israel\'s':5.02,
'it':5.02,
'ive':5.02,
'jerome':5.02,
'kaplan':5.02,
'kent':5.02,
'levine':5.02,
'lik':5.02,
'manager':5.02,
'marcia':5.02,
'mayer':5.02,
'meer':5.02,
'mi':5.02,
'mismo':5.02,
'nacht':5.02,
'necesito':5.02,
'necessary':5.02,
'newark':5.02,
'noch':5.02,
'ordinary':5.02,
'os':5.02,
'parameters':5.02,
'parking':5.02,
'pentagon':5.02,
'phantom':5.02,
'porque':5.02,
'pr':5.02,
'procedures':5.02,
'quarterly':5.02,
'random':5.02,
'rc':5.02,
'requiring':5.02,
'richardson':5.02,
'roth':5.02,
'sama':5.02,
'san':5.02,
'sc':5.02,
'schedule':5.02,
'setting':5.02,
'sleeve':5.02,
'slice':5.02,
'solitude':5.02,
'some':5.02,
'sou':5.02,
'stake':5.02,
'stamford':5.02,
'switch':5.02,
'teh':5.02,
'themselves':5.02,
'todd':5.02,
'tu':5.02,
'twittering':5.02,
'uni':5.02,
'veil':5.02,
'vous':5.02,
'vp':5.02,
'wana':5.02,
'westport':5.02,
'where':5.02,
'you\'ve':5.02,
'binding':5.01,
'\'cause':5,
'agents':5,
'alguien':5,
'assess':5,
'b-net':5,
'because':5,
'becker':5,
'boot':5,
'cada':5,
'carbon':5,
'coeur':5,
'commands':5,
'cosas':5,
'das':5,
'dated':5,
'diggin':5,
'executives':5,
'flipmode':5,
'forex':5,
'fourth':5,
'gosh':5,
'governing':5,
'herbert':5,
'hoo':5,
'hora':5,
'hush':5,
'id':5,
'indicated':5,
'jus':5,
'k':5,
'katz':5,
'kaufman':5,
'ku':5,
'la':5,
'listings':5,
'liver':5,
'luther':5,
'marjorie':5,
'marvin':5,
'mee':5,
'membrane':5,
'mir':5,
'neil':5,
'o\'neill':5,
'odds':5,
'offices':5,
'otra':5,
'par':5,
'paying':5,
'peculiar':5,
'pensar':5,
'per':5,
'plain':5,
'price':5,
'priced':5,
'pursued':5,
'quero':5,
'questions':5,
'reports':5,
'ridgewood':5,
'ron':5,
'ronald':5,
'sentir':5,
'shaggy':5,
'situation':5,
'some1':5,
'something':5,
'standard':5,
'stir':5,
'su':5,
'supervisor':5,
'thereof':5,
'throat':5,
'throw':5,
'til':5,
'todo':5,
'tp':5,
'tra':5,
'trop':5,
'tweeters':5,
'using':5,
'vid':5,
'voglio':5,
'wa':5,
'waan':5,
'warren':5,
'weighted':5,
'where\'s':5,
'whereas':5,
'who\'s':5,
'wig':5,
'zu':5,
'zum':5,
'stretched':4.99,
'forty':4.99,
'16th':4.98,
'57th':4.98,
'5th':4.98,
'\@addthis':4.98,
'\@idothat2':4.98,
'ai':4.98,
'bei':4.98,
'billy':4.98,
'bisa':4.98,
'btw':4.98,
'by':4.98,
'cloudy':4.98,
'compared':4.98,
'corp':4.98,
'cuba':4.98,
'd8':4.98,
'dartmouth':4.98,
'dei':4.98,
'denk':4.98,
'don':4.98,
'edge':4.98,
'edwards':4.98,
'een':4.98,
'ein':4.98,
'eine':4.98,
'episcopal':4.98,
'este':4.98,
'exec':4.98,
'hace':4.98,
'hits':4.98,
'hoes':4.98,
'howard':4.98,
'io':4.98,
'jadi':4.98,
'jeder':4.98,
'judicial':4.98,
'knot':4.98,
'line':4.98,
'mb':4.98,
'meu':4.98,
'mij':4.98,
'nails':4.98,
'needs':4.98,
'novo':4.98,
'nw':4.98,
'officers':4.98,
'ogni':4.98,
'ons':4.98,
'or':4.98,
'parliament':4.98,
'part':4.98,
'paso':4.98,
'piel':4.98,
'pork':4.98,
'pound':4.98,
'pres':4.98,
'question':4.98,
'rappers':4.98,
'rather':4.98,
'requirements':4.98,
'roundup':4.98,
'scarsdale':4.98,
'schneider':4.98,
'som':4.98,
'somethin':4.98,
'soooooo':4.98,
'stared':4.98,
'sumthin':4.98,
'syracuse':4.98,
'the':4.98,
'they\'d':4.98,
'they\'ve':4.98,
'three-year':4.98,
'throws':4.98,
'to':4.98,
'tudo':4.98,
'tuesday':4.98,
'wall':4.98,
'walsh':4.98,
'why':4.98,
'yesterday':4.98,
'clause':4.98,
'clit':4.98,
'hence':4.98,
'ml':4.98,
'babylon':4.98,
'pp':4.98,
'shi':4.97,
'\#tweetmyjobs':4.96,
'11th':4.96,
'3rd':4.96,
'accounts':4.96,
'aight':4.96,
'aku':4.96,
'alan':4.96,
'algo':4.96,
'ama':4.96,
'anybody':4.96,
'assumed':4.96,
'baru':4.96,
'bem':4.96,
'bin':4.96,
'borders':4.96,
'cbs':4.96,
'cf':4.96,
'cleveland':4.96,
'coal':4.96,
'colonel':4.96,
'comme':4.96,
'company\'s':4.96,
'dan':4.96,
'def':4.96,
'dried':4.96,
'drops':4.96,
'dug':4.96,
'eq':4.96,
'esto':4.96,
'fe':4.96,
'fone':4.96,
'frm':4.96,
'haar':4.96,
'hacer':4.96,
'hail':4.96,
'iii':4.96,
'incidence':4.96,
'investigators':4.96,
'ist':4.96,
'its':4.96,
'kann':4.96,
'keer':4.96,
'ko':4.96,
'larchmont':4.96,
'med':4.96,
'memorial':4.96,
'miley':4.96,
'montclair':4.96,
'napoleon':4.96,
'nuff':4.96,
'nxt':4.96,
'o':4.96,
'op-ed':4.96,
'ordered':4.96,
'outro':4.96,
'pelo':4.96,
'perhaps':4.96,
'pero':4.96,
'raton':4.96,
'ri':4.96,
'rita':4.96,
'schon':4.96,
'sein':4.96,
'semana':4.96,
'tengo':4.96,
'thick':4.96,
'tyson':4.96,
'ufc':4.96,
'ur':4.96,
'vi':4.96,
'when':4.96,
'wis':4.96,
'yall':4.96,
'yorker':4.96,
'anche':4.96,
'jour':4.96,
'mou':4.96,
'regiment':4.96,
'socialism':4.96,
'staan':4.96,
'temps':4.96,
'veces':4.96,
'\'s':4.94,
'[a1]':4.94,
'aber':4.94,
'acabo':4.94,
'across':4.94,
'agenda':4.94,
'aka':4.94,
'alibi':4.94,
'av':4.94,
'bam':4.94,
'banging':4.94,
'bein':4.94,
'bennett':4.94,
'boca':4.94,
'campbell':4.94,
'chase':4.94,
'close':4.94,
'co':4.94,
'contrast':4.94,
'council':4.94,
'cuerpo':4.94,
'debate':4.94,
'dinkins':4.94,
'dip':4.94,
'dm':4.94,
'ele':4.94,
'fazer':4.94,
'federal':4.94,
'foi':4.94,
'ftw':4.94,
'g':4.94,
'geez':4.94,
'gen':4.94,
'gw':4.94,
'he\'d':4.94,
'hooked':4.94,
'hs':4.94,
'inter':4.94,
'ix':4.94,
'iya':4.94,
'jaja':4.94,
'jou':4.94,
'makin':4.94,
'menos':4.94,
'mesmo':4.94,
'mins':4.94,
'mo':4.94,
'msg':4.94,
'naughty':4.94,
'needing':4.94,
'nie':4.94,
'nih':4.94,
'noi':4.94,
'noting':4.94,
'nou':4.94,
'of':4.94,
'ohne':4.94,
'once':4.94,
'popped':4.94,
'procedure':4.94,
'quel':4.94,
'rap':4.94,
'razor':4.94,
'reportedly':4.94,
'restructuring':4.94,
'row':4.94,
'rubin':4.94,
'sayin':4.94,
'sixty':4.94,
'stood':4.94,
'stormy':4.94,
'tackle':4.94,
'takin':4.94,
'temperature':4.94,
'term':4.94,
'termed':4.94,
'tes':4.94,
'testified':4.94,
'that':4.94,
'those':4.94,
'ti':4.94,
'tuo':4.94,
'una':4.94,
'until':4.94,
'vez':4.94,
'which':4.94,
'whom':4.94,
'antes':4.94,
'bajo':4.94,
'dmx':4.94,
'dripping':4.94,
'han':4.94,
'homeboy':4.94,
'inna':4.94,
'kon':4.94,
'questo':4.94,
'swell':4.94,
'xi':4.94,
'youll':4.94,
'doo':4.94,
'forma':4.94,
'marginal':4.94,
'nate':4.94,
'ojos':4.94,
'vie':4.94,
'zie':4.94,
'fold':4.94,
'ad':4.92,
'affect':4.92,
'agencies':4.92,
'ainda':4.92,
'alla':4.92,
'ar':4.92,
'armies':4.92,
'atleast':4.92,
'au':4.92,
'b6':4.92,
'bishops':4.92,
'bo':4.92,
'c\'est':4.92,
'cm':4.92,
'common':4.92,
'contigo':4.92,
'crave':4.92,
'da':4.92,
'decir':4.92,
'disclosed':4.92,
'dole':4.92,
'dom':4.92,
'economists':4.92,
'filing':4.92,
'fl':4.92,
'fr':4.92,
'gap':4.92,
'gerald':4.92,
'gorbachev':4.92,
'hast':4.92,
'homie':4.92,
'illinois':4.92,
'instead':4.92,
'interim':4.92,
'itu':4.92,
'judge':4.92,
'lebron':4.92,
'marked':4.92,
'mes':4.92,
'nato':4.92,
'ni':4.92,
'nye':4.92,
'only':4.92,
'pt':4.92,
'pushin':4.92,
'reais':4.92,
'representative':4.92,
'reviewer':4.92,
'ruled':4.92,
'sabe':4.92,
'shadow':4.92,
'strap':4.92,
'strip':4.92,
'sua':4.92,
'suppose':4.92,
'task':4.92,
'tenho':4.92,
'them':4.92,
'thomas':4.92,
'tix':4.92,
'todos':4.92,
'trans':4.92,
'twitpic':4.92,
'une':4.92,
'var':4.92,
'wha':4.92,
'whenever':4.92,
'whether':4.92,
'wordt':4.92,
'x':4.92,
'bist':4.92,
'dans':4.92,
'discourse':4.92,
'elke':4.92,
'ey':4.92,
'kau':4.92,
'peasant':4.92,
'pretending':4.92,
'puttin':4.92,
'siento':4.92,
'sola':4.92,
'spinal':4.92,
've':4.92,
'bizarre':4.92,
'weet':4.92,
'moi':4.91,
'\#in2010':4.9,
'#NAME?':4.9,
'al':4.9,
'andy':4.9,
'at':4.9,
'bis':4.9,
'bloomberg':4.9,
'border':4.9,
'brb':4.9,
'campaigns':4.9,
'charge':4.9,
'chu':4.9,
'dig':4.9,
'dukakis':4.9,
'edwin':4.9,
'ela':4.9,
'eres':4.9,
'esa':4.9,
'finance':4.9,
'fog':4.9,
'gt':4.9,
'heute':4.9,
'hpa':4.9,
'ie':4.9,
'jonas':4.9,
'kinda':4.9,
'koto':4.9,
'kt':4.9,
'law':4.9,
'levin':4.9,
'lu':4.9,
'maar':4.9,
'mack':4.9,
'melt':4.9,
'merrill':4.9,
'nee':4.9,
'nh':4.9,
'obliged':4.9,
'ook':4.9,
'pointed':4.9,
'pra':4.9,
'rental':4.9,
'sector':4.9,
'sleepy':4.9,
'sometime':4.9,
'soo':4.9,
'sticks':4.9,
'subsidiary':4.9,
'te':4.9,
'testing':4.9,
'tiny':4.9,
'trey':4.9,
'uma':4.9,
'ven':4.9,
'wer':4.9,
'xm':4.9,
'yuh':4.9,
'yup':4.9,
'zo':4.9,
'deine':4.9,
'dre':4.9,
'fi':4.9,
'kommt':4.9,
'macht':4.9,
'mig':4.9,
'sono':4.9,
'static':4.9,
'toi':4.9,
'vii':4.9,
'broads':4.9,
'moe':4.9,
'liefde':4.89,
'aiyyo':4.89,
'2nd':4.88,
'\@tommcfly':4.88,
'age':4.88,
'ago':4.88,
'allein':4.88,
'b4':4.88,
'billboard':4.88,
'black':4.88,
'bt':4.88,
'causes':4.88,
'chuck':4.88,
'cited':4.88,
'dass':4.88,
'dejes':4.88,
'dentro':4.88,
'der':4.88,
'digg':4.88,
'drifting':4.88,
'du':4.88,
'elderly':4.88,
'frost':4.88,
'guard':4.88,
'herman':4.88,
'het':4.88,
'ir':4.88,
'issued':4.88,
'it\'s':4.88,
'judges':4.88,
'junto':4.88,
'lectures':4.88,
'lieu':4.88,
'mais':4.88,
'memo':4.88,
'mg':4.88,
'mis':4.88,
'moody\'s':4.88,
'nevertheless':4.88,
'oil':4.88,
'operator':4.88,
'previous':4.88,
'prior':4.88,
're':4.88,
'regulators':4.88,
'remarks':4.88,
'rt':4.88,
'scale':4.88,
'se':4.88,
'sei':4.88,
'sgt':4.88,
'sie':4.88,
'siegel':4.88,
'sp':4.88,
'st':4.88,
'thang':4.88,
'toilet':4.88,
'tryna':4.88,
'ummm':4.88,
'veel':4.88,
'viel':4.88,
'went':4.88,
'whose':4.88,
'eg':4.88,
'igual':4.88,
'qui':4.88,
'substitute':4.88,
'nous':4.88,
'senza':4.88,
'\#random':4.86,
'\@donniewahlberg':4.86,
'\@ladygaga':4.86,
'accounting':4.86,
'ap':4.86,
'arnold':4.86,
'b-after':4.86,
'bb':4.86,
'bk':4.86,
'bush':4.86,
'bustin':4.86,
'cia':4.86,
'circumstances':4.86,
'cont':4.86,
'cud':4.86,
'diff':4.86,
'divisions':4.86,
'dus':4.86,
'echt':4.86,
'elsewhere':4.86,
'ft':4.86,
'gonna':4.86,
'haben':4.86,
'hath':4.86,
'hong':4.86,
'how\'s':4.86,
'hr':4.86,
'ira':4.86,
'ish':4.86,
'ja':4.86,
'jst':4.86,
'knock':4.86,
'le':4.86,
'mah':4.86,
'mask':4.86,
'mehr':4.86,
'mijn':4.86,
'missy':4.86,
'nadie':4.86,
'nonetheless':4.86,
'nu':4.86,
'og':4.86,
'oi':4.86,
'oo':4.86,
'ou':4.86,
'rd':4.86,
'recuerdo':4.86,
'ridin':4.86,
'sensitive':4.86,
'seo':4.86,
'shapiro':4.86,
'sm':4.86,
'smoked':4.86,
'sooooo':4.86,
'sr':4.86,
'staring':4.86,
'tellin':4.86,
'tempted':4.86,
'tract':4.86,
'voor':4.86,
'vor':4.86,
'vt':4.86,
'w':4.86,
'were':4.86,
'wie':4.86,
'ze':4.86,
'toch':4.86,
'askin':4.86,
'cinta':4.86,
'eminem':4.86,
'geld':4.86,
'ibid':4.86,
'isn':4.86,
'kane':4.86,
'labour':4.86,
'pienso':4.86,
'soc':4.86,
'miedo':4.85,
'tienes':4.85,
'explicit':4.85,
'\@taylorswift13':4.84,
'abt':4.84,
'administration':4.84,
'amo':4.84,
'an':4.84,
'awhile':4.84,
'b':4.84,
'b7':4.84,
'capitalist':4.84,
'crawl':4.84,
'd1':4.84,
'dam':4.84,
'dats':4.84,
'decades':4.84,
'dem':4.84,
'desde':4.84,
'di':4.84,
'en':4.84,
'est':4.84,
'filed':4.84,
'friedman':4.84,
'hab':4.84,
'harris':4.84,
'hm':4.84,
'hows':4.84,
'ht':4.84,
'investigating':4.84,
'invisible':4.84,
'jd':4.84,
'ka':4.84,
'ke':4.84,
'keine':4.84,
'lo':4.84,
'maintenance':4.84,
'mar':4.84,
'mining':4.84,
'mn':4.84,
'nao':4.84,
'need':4.84,
'nt':4.84,
'o_o':4.84,
'oh':4.84,
'proc':4.84,
'rates':4.84,
'reagan':4.84,
'sanders':4.84,
'secret':4.84,
'setup':4.84,
'sharp':4.84,
'sih':4.84,
'sta':4.84,
't':4.84,
'temp':4.84,
'tooth':4.84,
'vc':4.84,
'vernon':4.84,
'ward':4.84,
'duele':4.84,
'horns':4.84,
'inevitably':4.84,
'jeg':4.84,
'kneel':4.84,
'partial':4.84,
'puedes':4.84,
'throwin':4.84,
'zeg':4.84,
'geen':4.83,
'louder':4.83,
'tutto':4.83,
'tout':4.83,
'temptation':4.83,
'\#omgfacts':4.82,
'\@stephenfry':4.82,
'\@tweetmeme':4.82,
'acho':4.82,
'addenda':4.82,
'administration\'s':4.82,
'altered':4.82,
'ang':4.82,
'att':4.82,
'ayer':4.82,
'b/c':4.82,
'b2':4.82,
'beard':4.82,
'cane':4.82,
'cases':4.82,
'causing':4.82,
'che':4.82,
'col':4.82,
'cum':4.82,
'de':4.82,
'deh':4.82,
'demi':4.82,
'doch':4.82,
'duties':4.82,
'eso':4.82,
'examination':4.82,
'exposure':4.82,
'finna':4.82,
'flipped':4.82,
'gm':4.82,
'hes':4.82,
'hid':4.82,
'hoje':4.82,
'hood':4.82,
'impact':4.82,
'israeli':4.82,
'lagi':4.82,
'll':4.82,
'mideast':4.82,
'municipal':4.82,
'must':4.82,
'n':4.82,
'ne':4.82,
'ng':4.82,
'ot':4.82,
'over':4.82,
'pst':4.82,
'quando':4.82,
'ralph':4.82,
'repeated':4.82,
'rushing':4.82,
'sellin':4.82,
'sich':4.82,
'smell':4.82,
'ticking':4.82,
'tt':4.82,
'udah':4.82,
'vegan':4.82,
'wah':4.82,
'warum':4.82,
'witness':4.82,
'wut':4.82,
'assumptions':4.82,
'dawg':4.82,
'dro':4.82,
'gaan':4.82,
'nerve':4.82,
'scheme':4.82,
'sus':4.82,
'vas':4.82,
'vein':4.82,
'werden':4.82,
'otro':4.81,
'toda':4.81,
'detection':4.81,
'\@jonathanrknight':4.8,
'advisory':4.8,
'ak':4.8,
'ao':4.8,
'apa':4.8,
'asap':4.8,
'bankers':4.8,
'bij':4.8,
'bosnia':4.8,
'c1':4.8,
'cock':4.8,
'det':4.8,
'dey':4.8,
'didn':4.8,
'eds':4.8,
'el':4.8,
'es':4.8,
'eu':4.8,
'fa':4.8,
'giuliani':4.8,
'h':4.8,
'it\'d':4.8,
'johns':4.8,
'judy':4.8,
'kan':4.8,
'lautner':4.8,
'lejos':4.8,
'ltd':4.8,
'lugar':4.8,
'meetings':4.8,
'mein':4.8,
'mental':4.8,
'naar':4.8,
'nai':4.8,
'nd':4.8,
'nerd':4.8,
'nom':4.8,
'olvidar':4.8,
'one-time':4.8,
'orthodox':4.8,
'pataki':4.8,
'pe':4.8,
'proceedings':4.8,
'pussy':4.8,
'rehabilitation':4.8,
'rep':4.8,
'sachs':4.8,
'slightly':4.8,
'superintendent':4.8,
'sur':4.8,
'versus':4.8,
'wats':4.8,
'wen':4.8,
'what':4.8,
'what\'s':4.8,
'whos':4.8,
'widespread':4.8,
'yrs':4.8,
'zonder':4.8,
'petition':4.8,
'gimmie':4.8,
'jamais':4.8,
'laat':4.8,
'manos':4.8,
'niets':4.8,
'passive':4.8,
'tous':4.8,
'mase':4.79,
'wij':4.79,
'\#p2':4.78,
'1/2-bath':4.78,
'aja':4.78,
'asi':4.78,
'at;t':4.78,
'b1':4.78,
'bc':4.78,
'belly':4.78,
'blizzard':4.78,
'ce':4.78,
'conditions':4.78,
'confess':4.78,
'dann':4.78,
'des':4.78,
'dha':4.78,
'difference':4.78,
'em':4.78,
'ep':4.78,
'essa':4.78,
'exit':4.78,
'fate':4.78,
'fo':4.78,
'funk':4.78,
'gat':4.78,
'gimme':4.78,
'gli':4.78,
'israel':4.78,
'je':4.78,
'juss':4.78,
'kein':4.78,
'llorar':4.78,
'meds':4.78,
'military':4.78,
'mud':4.78,
'nasdaq':4.78,
'nos':4.78,
'nur':4.78,
'ohh':4.78,
'pena':4.78,
'r':4.78,
'reserved':4.78,
'reversed':4.78,
'sigo':4.78,
'sooo':4.78,
'stakes':4.78,
'suddenly':4.78,
'though':4.78,
'throwing':4.78,
'tht':4.78,
'ver':4.78,
'wallace':4.78,
'wel':4.78,
'wieder':4.78,
'witnesses':4.78,
'wud':4.78,
'tijd':4.78,
'unseen':4.78,
'\@aplusk':4.76,
'\@ddlovato':4.76,
'ahl':4.76,
'aside':4.76,
'bak':4.76,
'board':4.76,
'buat':4.76,
'chilling':4.76,
'cnt':4.76,
'coz':4.76,
'dat':4.76,
'departure':4.76,
'dolor':4.76,
'economics':4.76,
'else':4.76,
'ese':4.76,
'essay':4.76,
'gas':4.76,
'gd':4.76,
'governors':4.76,
'kobe':4.76,
'lately':4.76,
'les':4.76,
'management':4.76,
'min':4.76,
'officials':4.76,
'ought':4.76,
'oughta':4.76,
'pieces':4.76,
'pig':4.76,
'por':4.76,
'pulled':4.76,
'quand':4.76,
're-election':4.76,
'repair':4.76,
'report':4.76,
'sa':4.76,
'sans':4.76,
'sho':4.76,
'sinai':4.76,
'somewhat':4.76,
'spent':4.76,
'ta':4.76,
'targets':4.76,
'telerate':4.76,
'tem':4.76,
'th':4.76,
'tha':4.76,
'took':4.76,
'trippin':4.76,
'tuh':4.76,
'tupac':4.76,
'weer':4.76,
'weiss':4.76,
'wenn':4.76,
'whats':4.76,
'wore':4.76,
'would\'ve':4.76,
'woulda':4.76,
'xd':4.76,
'af':4.76,
'coulda':4.76,
'drift':4.76,
'goed':4.76,
'ihr':4.76,
'niente':4.76,
'tek':4.76,
'sword':4.75,
'&c':4.74,
'\'em':4.74,
'\@adamlambert':4.74,
'admit':4.74,
'alley':4.74,
'authority':4.74,
'b-includes':4.74,
'colo':4.74,
'corner':4.74,
'dag':4.74,
'dah':4.74,
'dealers':4.74,
'depending':4.74,
'dow':4.74,
'faz':4.74,
'fml':4.74,
'gona':4.74,
'had':4.74,
'heavily':4.74,
'hook':4.74,
'imma':4.74,
'judgment':4.74,
'licht':4.74,
'load':4.74,
'long':4.74,
'mines':4.74,
'minha':4.74,
'muito':4.74,
'myspace':4.74,
'older':4.74,
'operate':4.74,
'otherwise':4.74,
'policy':4.74,
'pull':4.74,
'quem':4.74,
'res':4.74,
'resist':4.74,
'saber':4.74,
'smaller':4.74,
'smh':4.74,
'than':4.74,
'trials':4.74,
'yu':4.74,
'zijn':4.74,
'ci':4.73,
'cling':4.73,
'niemand':4.73,
'possessed':4.73,
'refrain':4.73,
'thangs':4.73,
'weg':4.73,
'bwoy':4.73,
'\#tcot':4.72,
'\'i':4.72,
'43d':4.72,
'\@johncmayer':4.72,
'a3':4.72,
'alien':4.72,
'assume':4.72,
'bent':4.72,
'bestie':4.72,
'citing':4.72,
'claims':4.72,
'condition':4.72,
'ct':4.72,
'd':4.72,
'dealer':4.72,
'depends':4.72,
'e':4.72,
'estas':4.72,
'fits':4.72,
'government\'s':4.72,
'guessing':4.72,
'huh':4.72,
'loca':4.72,
'medical':4.72,
'meh':4.72,
'melhor':4.72,
'offset':4.72,
'period':4.72,
'pulmonary':4.72,
'redman':4.72,
'repeatedly':4.72,
'ses':4.72,
'sum1':4.72,
'surrounded':4.72,
'tho':4.72,
'umm':4.72,
'underneath':4.72,
'vai':4.72,
'wake':4.72,
'wird':4.72,
'wk':4.72,
'wo':4.72,
'doesn':4.71,
'ei':4.71,
'induced':4.71,
'interference':4.71,
'komm':4.71,
'obligations':4.71,
'perder':4.71,
'pues':4.71,
'tus':4.71,
'voel':4.71,
'boundaries':4.7,
'affairs':4.7,
'almost':4.7,
'boi':4.7,
'c':4.7,
'chasing':4.7,
'corporate':4.7,
'corps':4.7,
'cos':4.7,
'crossed':4.7,
'duty':4.7,
'except':4.7,
'excessive':4.7,
'geef':4.7,
'gue':4.7,
'hella':4.7,
'hound':4.7,
'however':4.7,
'inc':4.7,
'isso':4.7,
'kno':4.7,
'lawmakers':4.7,
'legislative':4.7,
'legislature':4.7,
'loc':4.7,
'los':4.7,
'mau':4.7,
'maybe':4.7,
'mere':4.7,
'nail':4.7,
'neva':4.7,
'nichts':4.7,
'nuh':4.7,
'nya':4.7,
'payment':4.7,
'pullin':4.7,
'rocky':4.7,
'sd':4.7,
'senate':4.7,
'ser':4.7,
'serbian':4.7,
'seriously':4.7,
'slight':4.7,
'striking':4.7,
'tweeps':4.7,
'wrk':4.7,
'wth':4.7,
'yet':4.7,
'frontin':4.69,
'iets':4.69,
'sind':4.69,
'weh':4.69,
'shakes':4.69,
'uns':4.69,
'zou':4.69,
'pasar':4.68,
'ab':4.68,
'ba':4.68,
'bodies':4.68,
'borrowed':4.68,
'clinical':4.68,
'cross':4.68,
'curb':4.68,
'cuz':4.68,
'deputy':4.68,
'doen':4.68,
'dun':4.68,
'einen':4.68,
'gibt':4.68,
'har':4.68,
'how':4.68,
'inevitable':4.68,
'institutions':4.68,
'islam':4.68,
'knives':4.68,
'kono':4.68,
'nem':4.68,
'oldest':4.68,
'op':4.68,
'overcast':4.68,
'patient\'s':4.68,
'pq':4.68,
'qtr':4.68,
'rapper':4.68,
'requires':4.68,
'ruling':4.68,
'shady':4.68,
'sodium':4.68,
'spots':4.68,
'threw':4.68,
'uu':4.68,
'wereld':4.68,
'wht':4.68,
'wir':4.68,
'bg':4.67,
'bump':4.67,
'dein':4.67,
'dependence':4.67,
'flesh':4.67,
'hustle':4.67,
'immer':4.67,
'nooit':4.67,
'dicen':4.67,
'tumble':4.67,
'13th':4.66,
'agora':4.66,
'borrow':4.66,
'drip':4.66,
'forms':4.66,
'freakin':4.66,
'ga':4.66,
'hole':4.66,
'if':4.66,
'inquiry':4.66,
'islamic':4.66,
'iz':4.66,
'minor':4.66,
'nach':4.66,
'nuttin':4.66,
'odd':4.66,
'pile':4.66,
'punk':4.66,
'quisiera':4.66,
'ruff':4.66,
'seu':4.66,
'shorty':4.66,
'strung':4.66,
'ter':4.66,
'theres':4.66,
'tua':4.66,
'um':4.66,
'v':4.66,
'wanting':4.66,
'yeltsin':4.66,
'yur':4.66,
'indirect':4.65,
'rappin':4.65,
'raps':4.65,
'stripped':4.65,
'tire':4.65,
'undone':4.65,
'wolves':4.65,
'mek':4.65,
'\#ff':4.64,
'b3':4.64,
'crazy':4.64,
'dry':4.64,
'euch':4.64,
'f':4.64,
'freak':4.64,
'freaky':4.64,
'fucking':4.64,
'fuera':4.64,
'ganz':4.64,
'government':4.64,
'heb':4.64,
'hv':4.64,
'ini':4.64,
'kijk':4.64,
'lbs':4.64,
'left':4.64,
'lust':4.64,
'muslim':4.64,
'nj':4.64,
'po':4.64,
'pretend':4.64,
'que':4.64,
'slit':4.64,
'soviet':4.64,
'un':4.64,
'yer':4.64,
'compton':4.63,
'fragments':4.63,
'geht':4.63,
'stares':4.63,
'stiff':4.63,
'wasn':4.63,
'zij':4.63,
'\#mm':4.62,
'**municipal':4.62,
'\@joejonas':4.62,
'\@nickjonas':4.62,
'altijd':4.62,
'bull':4.62,
'bureau':4.62,
'dick':4.62,
'diet':4.62,
'gasoline':4.62,
'gov':4.62,
'governments':4.62,
'gray':4.62,
'holes':4.62,
'holler':4.62,
'lease':4.62,
'lebanon':4.62,
'noone':4.62,
'ol\'':4.62,
'out':4.62,
'palestinian':4.62,
'past':4.62,
'peasants':4.62,
'pigs':4.62,
'pressed':4.62,
'serbs':4.62,
'short-term':4.62,
'wid':4.62,
'year-ago':4.62,
'atomic':4.61,
'daze':4.61,
'feds':4.61,
'ib':4.61,
'jij':4.61,
'inspection':4.61,
'dit':4.6,
'76th':4.6,
'bind':4.6,
'bound':4.6,
'bruh':4.6,
'commercial':4.6,
'dar':4.6,
'differences':4.6,
'fiscal':4.6,
'flames':4.6,
'half':4.6,
'ik':4.6,
'jg':4.6,
'later':4.6,
'lemme':4.6,
'li':4.6,
'little':4.6,
'mal':4.6,
'nme':4.6,
'nunca':4.6,
'obligation':4.6,
'pretax':4.6,
'q':4.6,
'recall':4.6,
'sack':4.6,
'shawty':4.6,
'sticky':4.6,
'tight':4.6,
'trigger':4.6,
'under':4.6,
'used':4.6,
'vs':4.6,
'was':4.6,
'bark':4.59,
'disguise':4.59,
'gots':4.59,
'och':4.59,
'seldom':4.59,
'dir':4.58,
'tarde':4.58,
'\@revrunwisdom':4.58,
'aan':4.58,
'als':4.58,
'although':4.58,
'bgt':4.58,
'busy':4.58,
'c-includes':4.58,
'cit':4.58,
'congressional':4.58,
'differ':4.58,
'emo':4.58,
'excuse':4.58,
'fighter':4.58,
'gtgt':4.58,
'gunna':4.58,
'hazy':4.58,
'hit':4.58,
'insisted':4.58,
'inspector':4.58,
'institution':4.58,
'jigga':4.58,
'jurors':4.58,
'kom':4.58,
'lls':4.58,
'lock':4.58,
'police':4.58,
'radical':4.58,
'saudi':4.58,
'senator':4.58,
'stops':4.58,
'whatever':4.58,
'durch':4.57,
'unreal':4.57,
'zal':4.57,
'dose':4.56,
'uit':4.56,
'\#idothat2':4.56,
'-p':4.56,
'\@jordanknight':4.56,
'ads':4.56,
'anymore':4.56,
'auf':4.56,
'blade':4.56,
'blues':4.56,
'bout':4.56,
'boxing':4.56,
'broker':4.56,
'bust':4.56,
'crunk':4.56,
'gon':4.56,
'grey':4.56,
'hoe':4.56,
'kuwait':4.56,
'merely':4.56,
'outta':4.56,
'queda':4.56,
'seh':4.56,
'stoned':4.56,
'w/o':4.56,
'yg':4.56,
'separately':4.55,
'uhh':4.55,
'gaat':4.55,
'appendix':4.54,
'\#epicpetwars':4.54,
'\#formspringme':4.54,
'\#shoutout':4.54,
'arbitrary':4.54,
'axe':4.54,
'beneath':4.54,
'bit':4.54,
'blocks':4.54,
'bom':4.54,
'caused':4.54,
'command':4.54,
'conn':4.54,
'conservative':4.54,
'depois':4.54,
'eh':4.54,
'er':4.54,
'gah':4.54,
'gut':4.54,
'hw':4.54,
'ima':4.54,
'institutional':4.54,
'iv':4.54,
'ludacris':4.54,
'narrow':4.54,
'oder':4.54,
'pending':4.54,
'pirate':4.54,
'prolly':4.54,
'regulation':4.54,
'rs':4.54,
'senators':4.54,
'sheesh':4.54,
'terms':4.54,
'twisting':4.54,
'urged':4.54,
'chains':4.53,
'chloride':4.53,
'waits':4.53,
'06:00:00AM':4.52,
'blew':4.52,
'clique':4.52,
'crucial':4.52,
'dependent':4.52,
'former':4.52,
'gak':4.52,
'hadn':4.52,
'investigations':4.52,
'leave':4.52,
'muss':4.52,
'omfg':4.52,
'previously':4.52,
'rule':4.52,
'shud':4.52,
'small':4.52,
'und':4.52,
'utterly':4.52,
'weight':4.52,
'cocked':4.51,
'daar':4.51,
'pill':4.51,
'\@iamdiddy':4.5,
'ages':4.5,
'arab':4.5,
'corporations':4.5,
'disposed':4.5,
'distance':4.5,
'dong':4.5,
'few':4.5,
'govt':4.5,
'nah':4.5,
'outst':4.5,
'palestinians':4.5,
'prob':4.5,
'randomly':4.5,
'regulatory':4.5,
'reverse':4.5,
'sangre':4.5,
'temporary':4.5,
'goo':4.49,
'couldn':4.49,
'army':4.48,
'blow':4.48,
'bs':4.48,
'despite':4.48,
'dunno':4.48,
'elections':4.48,
'essays':4.48,
'grabbed':4.48,
'heck':4.48,
'hidden':4.48,
'issue':4.48,
'lehman':4.48,
'ms':4.48,
'negroes':4.48,
'nuthin':4.48,
'snoop':4.48,
'y':4.48,
'ashes':4.47,
'commanded':4.47,
'nerves':4.47,
'spill':4.47,
'craving':4.46,
'crowds':4.46,
'gats':4.46,
'hammer':4.46,
'isn\'t':4.46,
'kicks':4.46,
'lecture':4.46,
'neither':4.46,
'ol':4.46,
'plastic':4.46,
'politics':4.46,
'required':4.46,
'rigid':4.46,
'rumble':4.46,
'scarcely':4.46,
'short':4.46,
'shorter':4.46,
'shy':4.46,
'skool':4.46,
'thrown':4.46,
'tossed':4.46,
'tricky':4.46,
'compelled':4.45,
'infantry':4.45,
'auch':4.44,
'bail':4.44,
'blank':4.44,
'bottom':4.44,
'busta':4.44,
'cop':4.44,
'correction':4.44,
'court\'s':4.44,
'errands':4.44,
'ew':4.44,
'gay':4.44,
'gaza':4.44,
'harder':4.44,
'haze':4.44,
'liable':4.44,
'push':4.44,
'rag':4.44,
'require':4.44,
'tank':4.44,
'triste':4.44,
'unusual':4.44,
'wat':4.44,
'weed':4.44,
'glocks':4.44,
'longing':4.43,
'removal':4.43,
'-d':4.42,
'behind':4.42,
'below':4.42,
'boa':4.42,
'breaks':4.42,
'commercials':4.42,
'constraints':4.42,
'controlling':4.42,
'dum':4.42,
'emotional':4.42,
'hides':4.42,
'knocked':4.42,
'na':4.42,
'nada':4.42,
'niet':4.42,
'notwithstanding':4.42,
'recalled':4.42,
'regulations':4.42,
'remains':4.42,
'republican':4.42,
'sem':4.42,
'serious':4.42,
'stumble':4.42,
'treatment':4.42,
'vietnam':4.42,
'regime':4.42,
'\#retweetthisif':4.4,
'beats':4.4,
'brokers':4.4,
'controlled':4.4,
'cus':4.4,
'desert':4.4,
'detroit':4.4,
'fuk':4.4,
'glock':4.4,
'hearings':4.4,
'lobbying':4.4,
'morir':4.4,
'muero':4.4,
'nicht':4.4,
'opposite':4.4,
'shyt':4.4,
'tied':4.4,
'wouldn':4.4,
'dich':4.4,
'\#haiti':4.38,
'03:00:00AM':4.38,
'authorities':4.38,
'chills':4.38,
'competitors':4.38,
'economy':4.38,
'effing':4.38,
'far':4.38,
'frozen':4.38,
'mortality':4.38,
'plaintiff':4.38,
'prices':4.38,
'rarely':4.38,
'rebel':4.38,
'resistance':4.38,
'slips':4.38,
'tangled':4.38,
'acids':4.38,
'naive':4.37,
'querer':4.37,
'shack':4.36,
'\@jonasbrothers':4.36,
'billings':4.36,
'consequence':4.36,
'custody':4.36,
'dang':4.36,
'divided':4.36,
'division':4.36,
'duh':4.36,
'end':4.36,
'grease':4.36,
'hide':4.36,
'irregular':4.36,
'juvenile':4.36,
'morn':4.36,
'needle':4.36,
'operations':4.36,
'pulling':4.36,
'reducing':4.36,
'sharply':4.36,
'strange':4.36,
'tease':4.36,
'burnin':4.35,
'strictly':4.35,
'storms':4.34,
'adios':4.34,
'arent':4.34,
'blown':4.34,
'burst':4.34,
'congress':4.34,
'ditch':4.34,
'droppin':4.34,
'faded':4.34,
'hiding':4.34,
'ho':4.34,
'hurry':4.34,
'icy':4.34,
'loud':4.34,
'replaced':4.34,
'ripping':4.34,
'shook':4.34,
'vampire':4.34,
'hesitate':4.33,
'cease':4.32,
'communist':4.32,
'eff':4.32,
'fbi':4.32,
'gop':4.32,
'howling':4.32,
'hunt':4.32,
'reduced':4.32,
'scattered':4.32,
'separate':4.32,
'slowly':4.32,
'surgical':4.32,
'tripping':4.32,
'waited':4.32,
'yikes':4.32,
'bias':4.31,
'blunt':4.31,
'shaking':4.31,
'din':4.31,
'smack':4.31,
'affected':4.3,
'brokerage':4.3,
'drop':4.3,
'formerly':4.3,
'gore':4.3,
'guards':4.3,
'hadn\'t':4.3,
'ich':4.3,
'iran':4.3,
'legislation':4.3,
'monday':4.3,
'muslims':4.3,
'naw':4.3,
'pit':4.3,
'sneak':4.3,
'so-called':4.3,
'sudden':4.3,
'sue':4.3,
'tick':4.3,
'rowdy':4.29,
'slippin':4.29,
'chased':4.29,
'divide':4.29,
'leavin':4.29,
'mortal':4.29,
'rebellion':4.29,
'aged':4.28,
'aids':4.28,
'bieber':4.28,
'fooling':4.28,
'guerrillas':4.28,
'idk':4.28,
'il':4.28,
'jury':4.28,
'nor':4.28,
'petroleum':4.28,
'pimpin':4.28,
'rules':4.28,
'spider':4.28,
'swore':4.28,
'taken':4.28,
'tests':4.28,
'wasn\'t':4.28,
'moan':4.27,
'warn':4.27,
'\@justinbieber':4.26,
'blows':4.26,
'defendants':4.26,
'fck':4.26,
'fires':4.26,
'intervention':4.26,
'lawyers':4.26,
'non':4.26,
'outlaw':4.26,
'owing':4.26,
'sht':4.26,
'split':4.26,
'storm':4.26,
'concerning':4.24,
'contrary':4.24,
'04:00:00AM':4.24,
'bah':4.24,
'barely':4.24,
'but':4.24,
'courts':4.24,
'kanye':4.24,
'lower':4.24,
'minority':4.24,
'orders':4.24,
'pounding':4.24,
'protests':4.24,
'psychiatric':4.24,
'questioned':4.24,
'raw':4.24,
'rebels':4.24,
'sag':4.24,
'shoulda':4.24,
'smash':4.24,
'spy':4.24,
'stern':4.24,
'stray':4.24,
'swear':4.24,
'unless':4.24,
'worn':4.23,
'dues':4.22,
'freaks':4.22,
'\#iranelection':4.22,
'away':4.22,
'backwards':4.22,
'beware':4.22,
'blast':4.22,
'breakin':4.22,
'bush\'s':4.22,
'calories':4.22,
'cold':4.22,
'concerned':4.22,
'due':4.22,
'grind':4.22,
'iranian':4.22,
'labor':4.22,
'limit':4.22,
'limited':4.22,
'loan':4.22,
'mutha':4.22,
'python':4.22,
'republicans':4.22,
'scratch':4.22,
'veto':4.22,
'waitin':4.22,
'wtf':4.22,
'waar':4.21,
'beat':4.2,
'blah':4.2,
'darn':4.2,
'default':4.2,
'dnt':4.2,
'expenditure':4.2,
'exposed':4.2,
'grrr':4.2,
'legislators':4.2,
'levy':4.2,
'lone':4.2,
'mccain':4.2,
'periods':4.2,
'politically':4.2,
'pow':4.2,
'prosecutors':4.2,
'screw':4.2,
'uh':4.2,
'verdict':4.2,
'weird':4.2,
'whips':4.2,
'underlying':4.19,
'objection':4.18,
'arsenal':4.18,
'boss':4.18,
'capture':4.18,
'chemical':4.18,
'dis':4.18,
'ex':4.18,
'exam':4.18,
'explode':4.18,
'forces':4.18,
'grr':4.18,
'porn':4.18,
'prey':4.18,
'reduce':4.18,
'smells':4.18,
'unpublished':4.18,
'warcraft':4.18,
'implications':4.17,
'uptight':4.17,
'acute':4.16,
'blades':4.16,
'astray':4.16,
'bash':4.16,
'chop':4.16,
'clinic':4.16,
'froze':4.16,
'gambling':4.16,
'heat':4.16,
'nowhere':4.16,
'palin':4.16,
'sigh':4.16,
'stranger':4.16,
'strangers':4.16,
'sucking':4.16,
'sweat':4.16,
'vice':4.16,
'crowd':4.14,
'demand':4.14,
'drag':4.14,
'fuck':4.14,
'havent':4.14,
'minimum':4.14,
'pee':4.14,
'pirates':4.14,
'pushing':4.14,
'shark':4.14,
'ripped':4.13,
'strict':4.13,
'decrease':4.12,
'drain':4.12,
'messing':4.12,
'renal':4.12,
'05:00:00AM':4.12,
'aren\'t':4.12,
'attorney':4.12,
'bother':4.12,
'fuss':4.12,
'hittin':4.12,
'negro':4.12,
'nonsense':4.12,
'nope':4.12,
'political':4.12,
'reductions':4.12,
'rush':4.12,
'shallow':4.12,
'taxpayers':4.12,
'twisted':4.12,
'blunts':4.11,
'abyss':4.1,
'lesser':4.1,
'liability':4.1,
'murda':4.1,
'conviction':4.1,
'cost':4.1,
'demanded':4.1,
'enforcement':4.1,
'erase':4.1,
'freaking':4.1,
'hard':4.1,
'heavy':4.1,
'hunting':4.1,
'laundry':4.1,
'less':4.1,
'numb':4.1,
'pills':4.1,
'pushed':4.1,
'rid':4.1,
'sacrifice':4.1,
'takeover':4.1,
'wack':4.1,
'ego':4.08,
'rumors':4.08,
'servant':4.08,
'weary':4.08,
'conservatives':4.08,
'crumble':4.08,
'cutting':4.08,
'fallin':4.08,
'freeze':4.08,
'hung':4.08,
'knife':4.08,
'plea':4.08,
'stopping':4.08,
'surrender':4.08,
'temper':4.08,
'wont':4.08,
'cardiac':4.06,
'fading':4.06,
'blinding':4.06,
'concerns':4.06,
'flushing':4.06,
'haiti':4.06,
'kurupt':4.06,
'mondays':4.06,
'prosecutor':4.06,
'sour':4.06,
'test':4.06,
'toll':4.06,
'unfollow':4.06,
'collide':4.04,
'fade':4.04,
'needles':4.04,
'chemicals':4.04,
'colder':4.04,
'concern':4.04,
'discharge':4.04,
'dominated':4.04,
'fall':4.04,
'hollow':4.04,
'hospice':4.04,
'hunter':4.04,
'imposed':4.04,
'reduction':4.04,
'shootin':4.04,
'spittin':4.04,
'unknown':4.04,
'unlike':4.04,
'welt':4.04,
'worm':4.04,
'rust':4.02,
'distant':4.02,
'affair':4.02,
'aint':4.02,
'block':4.02,
'consequences':4.02,
'dropping':4.02,
'ending':4.02,
'goodbyes':4.02,
'hasn\'t':4.02,
'imprecisely':4.02,
'incident':4.02,
'investigation':4.02,
'off':4.02,
'strife':4.02,
'strikes':4.02,
'weren\'t':4.02,
'ain\'t':4,
'alleged':4,
'arafat':4,
'bum':4,
'ceased':4,
'cracks':4,
'creeping':4,
'defensive':4,
'didn\'t':4,
'didnt':4,
'downs':4,
'force':4,
'least':4,
'limits':4,
'racial':4,
'ridiculous':4,
'rip':4,
'roughly':4,
'twit':4,
'zombies':4,
'accidentally':3.98,
'avoided':3.98,
'bite':3.98,
'breaking':3.98,
'demands':3.98,
'diagnosis':3.98,
'fled':3.98,
'hardly':3.98,
'humidity':3.98,
'isnt':3.98,
'old':3.98,
'punks':3.98,
'terminal':3.98,
'ruins':3.98,
'cracked':3.98,
'slam':3.98,
'argh':3.96,
'bang':3.96,
'bye':3.96,
'closing':3.96,
'dagger':3.96,
'expense':3.96,
'fists':3.96,
'iraqi':3.96,
'loose':3.96,
'minus':3.96,
'slugs':3.96,
'strike':3.96,
'tough':3.96,
'trial':3.96,
'unclear':3.96,
'killa':3.96,
'skull':3.96,
'charges':3.94,
'darker':3.94,
'erroneously':3.94,
'mess':3.94,
'pakistan':3.94,
'reluctant':3.94,
'slumdog':3.94,
'strapped':3.94,
'dizzy':3.94,
'executed':3.94,
'honky':3.94,
'homework':3.92,
'nixon':3.92,
'omitted':3.92,
'stained':3.92,
'ughh':3.92,
'jaded':3.92,
'dusty':3.92,
'absent':3.9,
'alarm':3.9,
'artificial':3.9,
'defendant':3.9,
'dim':3.9,
'doesnt':3.9,
'impose':3.9,
'iraq':3.9,
'issues':3.9,
'killas':3.9,
'misses':3.9,
'neediest':3.9,
'nothing':3.9,
'opponent':3.9,
'quit':3.9,
'slipping':3.9,
'stop':3.9,
'bald':3.9,
'begged':3.9,
'dropped':3.88,
'drunk':3.88,
'mortgage':3.88,
'nooo':3.88,
'shout':3.88,
'artillery':3.88,
'goddamn':3.88,
'rags':3.88,
'restless':3.88,
'uncertain':3.88,
'fiends':3.88,
'ass':3.86,
'farewell':3.86,
'fuckin':3.86,
'hang':3.86,
'not':3.86,
'sanctions':3.86,
'stopped':3.86,
'subjected':3.86,
'tremble':3.86,
'voodoo':3.86,
'wouldnt':3.86,
'slipped':3.86,
'mold':3.85,
'shiver':3.85,
'allegations':3.84,
'armed':3.84,
'ended':3.84,
'excuses':3.84,
'gripe':3.84,
'lawyer':3.84,
'messed':3.84,
'none':3.84,
'offline':3.84,
'pleaded':3.84,
'rent':3.84,
'shouldn\'t':3.84,
'snatch':3.84,
'ghosts':3.84,
'hatin':3.84,
'fragile':3.83,
'baddest':3.82,
'blood':3.82,
'creep':3.82,
'dark':3.82,
'darkness':3.82,
'eliminate':3.82,
'forgetting':3.82,
'gang':3.82,
'hanging':3.82,
'hardest':3.82,
'haven\'t':3.82,
'junk':3.82,
'loans':3.82,
'oppose':3.82,
'slip':3.82,
'sos':3.82,
'thirst':3.82,
'erased':3.82,
'vain':3.82,
'fades':3.81,
'aggressive':3.8,
'costs':3.8,
'critics':3.8,
'fire':3.8,
'fist':3.8,
'interment':3.8,
'ow':3.8,
'pale':3.8,
'protesters':3.8,
'witch':3.8,
'chronic':3.79,
'thirsty':3.79,
'thorns':3.79,
'sink':3.79,
'battles':3.78,
'bugs':3.78,
'court':3.78,
'ends':3.78,
'exams':3.78,
'predeceased':3.78,
'risks':3.78,
'rusty':3.78,
'slow':3.78,
'wouldn\'t':3.78,
'bothered':3.78,
'unnecessary':3.78,
'nothings':3.76,
'resigned':3.76,
'symptoms':3.76,
'yell':3.76,
'gutter':3.76,
'hangs':3.76,
'void':3.76,
'bailout':3.74,
'boo':3.74,
'critic\'s':3.74,
'denying':3.74,
'last':3.74,
'noise':3.74,
'obsession':3.74,
'reckless':3.74,
'shove':3.74,
'stomp':3.74,
'wait':3.74,
'sucka':3.74,
'pimp':3.73,
'stranded':3.73,
'tearing':3.73,
'strain':3.73,
'crack':3.72,
'fewer':3.72,
'gross':3.72,
'kick':3.72,
'oops':3.72,
'operation':3.72,
'removed':3.72,
'withdrawal':3.72,
'crowded':3.71,
'lacking':3.71,
'revenge':3.71,
'foolish':3.7,
'con':3.7,
'crooked':3.7,
'demanding':3.7,
'dirt':3.7,
'don\'t':3.7,
'dont':3.7,
'goodbye':3.7,
'locked':3.7,
'remove':3.7,
'sentenced':3.7,
'wasnt':3.7,
'won\'t':3.7,
'abnormal':3.69,
'hustler':3.69,
'controversy':3.68,
'disagree':3.68,
'fees':3.68,
'hitting':3.68,
'kicking':3.68,
'mean':3.68,
'missed':3.68,
'rival':3.68,
'sucker':3.68,
'waiting':3.68,
'wrath':3.68,
'plead':3.67,
'closed':3.66,
'deadline':3.66,
'down':3.66,
'low':3.66,
'messy':3.66,
'outdated':3.66,
'patients':3.66,
'pressure':3.66,
'snitch':3.66,
'sorry':3.66,
'stuck':3.66,
'anti':3.65,
'complications':3.65,
'disappear':3.65,
'snakes':3.65,
'lesions':3.65,
'bill':3.64,
'blocked':3.64,
'bore':3.64,
'cuts':3.64,
'darkest':3.64,
'delete':3.64,
'ghost':3.64,
'miss':3.64,
'nobody':3.64,
'nothin':3.64,
'shocked':3.64,
'swine':3.64,
'uncertainty':3.64,
'fooled':3.63,
'awkward':3.62,
'baghdad':3.62,
'begging':3.62,
'brat':3.62,
'doesn\'t':3.62,
'haunt':3.62,
'hussein':3.62,
'incompletely':3.62,
'limitations':3.62,
'risk':3.62,
'tore':3.62,
'bacteria':3.61,
'crude':3.6,
'dust':3.6,
'falls':3.6,
'flies':3.6,
'indicted':3.6,
'madness':3.6,
'mistaken':3.6,
'shattered':3.6,
'suspects':3.6,
'acid':3.59,
'pistol':3.59,
'decreased':3.58,
'absence':3.58,
'couldnt':3.58,
'excluded':3.58,
'gossip':3.58,
'leaving':3.58,
'punch':3.58,
'shotgun':3.58,
'sirens':3.58,
'restricted':3.57,
'darkened':3.57,
'slut':3.57,
'servants':3.56,
'afghanistan':3.56,
'confrontation':3.56,
'confusing':3.56,
'denial':3.56,
'empty':3.56,
'fucked':3.56,
'gloom':3.56,
'misidentified':3.56,
'mob':3.56,
'offense':3.56,
'piss':3.56,
'protest':3.56,
'runaway':3.56,
'shut':3.56,
'sorely':3.56,
'dire':3.55,
'stains':3.55,
'taxation':3.55,
'flee':3.54,
'haunted':3.54,
'bug':3.54,
'caught':3.54,
'chained':3.54,
'crushed':3.54,
'despise':3.54,
'dispute':3.54,
'expensive':3.54,
'forsaken':3.54,
'hospitals':3.54,
'owe':3.54,
'poor\'s':3.54,
'rough':3.54,
'shock':3.54,
'slug':3.54,
'without':3.54,
'drunken':3.53,
'missin':3.53,
'separation':3.53,
'spite':3.53,
'addicted':3.52,
'apart':3.52,
'fallen':3.52,
'suspected':3.52,
'suspicion':3.52,
'teardrops':3.52,
'tomb':3.52,
'ugh':3.52,
'warned':3.52,
'untrue':3.51,
'casket':3.5,
'dope':3.5,
'foe':3.5,
'hospital':3.5,
'paranoid':3.5,
'snake':3.5,
'struck':3.5,
'deficiency':3.49,
'pressures':3.49,
'cant':3.48,
'inmates':3.48,
'no':3.48,
'opponents':3.48,
'opposition':3.48,
'sucked':3.48,
'tobacco':3.48,
'unlikely':3.48,
'zombie':3.48,
'screams':3.48,
'sinking':3.48,
'swollen':3.48,
'deceive':3.47,
'monsters':3.47,
'urine':3.47,
'chaos':3.46,
'creepy':3.46,
'fee':3.46,
'insanity':3.46,
'isolated':3.46,
'late':3.46,
'misspelled':3.46,
'misstated':3.46,
'misunderstood':3.46,
'monster':3.46,
'refuse':3.46,
'shoot':3.46,
'sting':3.46,
'thorn':3.46,
'wreck':3.46,
'fright':3.45,
'radiation':3.45,
'stab':3.45,
'confined':3.44,
'delays':3.44,
'deny':3.44,
'fault':3.44,
'forgot':3.44,
'ghetto':3.44,
'litigation':3.44,
'poop':3.44,
'seized':3.44,
'zero':3.44,
'cage':3.44,
'disappeared':3.44,
'trap':3.44,
'diss':3.43,
'foes':3.43,
'smashed':3.42,
'anxious':3.42,
'can\'t':3.42,
'cut':3.42,
'erroneous':3.42,
'gangsta':3.42,
'gone':3.42,
'ignorant':3.42,
'invasion':3.42,
'lame':3.42,
'obsessed':3.42,
'raging':3.42,
'shatter':3.42,
'shouting':3.42,
'troubles':3.42,
'disturbed':3.41,
'zit':3.41,
'against':3.4,
'condolences':3.4,
'muthafucka':3.4,
'separated':3.4,
'struggle':3.4,
'whores':3.4,
'deception':3.39,
'stain':3.39,
'unconscious':3.39,
'delay':3.38,
'difficulty':3.38,
'discontinued':3.38,
'eliminated':3.38,
'haunting':3.38,
'hungry':3.38,
'refused':3.38,
'wicked':3.38,
'blinded':3.37,
'hunger':3.37,
'torn':3.37,
'phony':3.36,
'argued':3.36,
'beast':3.36,
'bullet':3.36,
'busted':3.36,
'critic':3.36,
'dammit':3.36,
'deleted':3.36,
'dentist':3.36,
'forbidden':3.36,
'killin':3.36,
'syndrome':3.36,
'tornado':3.36,
'weapon':3.36,
'emptiness':3.35,
'injection':3.35,
'burnt':3.34,
'complicated':3.34,
'crap':3.34,
'never':3.34,
'politicians':3.34,
'tired':3.34,
'traffic':3.34,
'unfair':3.34,
'vulnerable':3.34,
'warning':3.34,
'fucker':3.33,
'sinner':3.33,
'envy':3.33,
'whack':3.32,
'alone':3.32,
'bleeds':3.32,
'cannot':3.32,
'confusion':3.32,
'couldn\'t':3.32,
'expenses':3.32,
'ignored':3.32,
'nigga':3.32,
'noose':3.32,
'opposed':3.32,
'restrictions':3.32,
'scars':3.32,
'shots':3.32,
'savage':3.31,
'choke':3.31,
'cigarettes':3.31,
'doubts':3.3,
'fool':3.3,
'fury':3.3,
'lowest':3.3,
'suckers':3.3,
'whip':3.3,
'helpless':3.29,
'rats':3.29,
'conspiracy':3.28,
'crashing':3.28,
'falling':3.28,
'fools':3.28,
'lazy':3.28,
'nuclear':3.28,
'scar':3.28,
'suspicious':3.28,
'scarred':3.27,
'screamed':3.27,
'cough':3.26,
'damned':3.26,
'frown':3.24,
'pimps':3.24,
'vengeance':3.24,
'canceled':3.24,
'cavity':3.24,
'delayed':3.24,
'dull':3.24,
'fat':3.24,
'jerk':3.24,
'missile':3.24,
'remorse':3.24,
'rot':3.24,
'screwed':3.24,
'gangstas':3.23,
'captured':3.22,
'critical':3.22,
'fell':3.22,
'forget':3.22,
'freezing':3.22,
'ignore':3.22,
'losers':3.22,
'lynch':3.22,
'wasting':3.22,
'defect':3.21,
'frightened':3.2,
'combat':3.2,
'convicted':3.2,
'defeat':3.2,
'dirty':3.2,
'dread':3.2,
'drug':3.2,
'inferior':3.2,
'screamin':3.2,
'cryin':3.19,
'liar':3.18,
'aching':3.18,
'difficult':3.18,
'faggot':3.18,
'FALSE':3.18,
'forgotten':3.18,
'garbage':3.18,
'kicked':3.18,
'scandal':3.18,
'sinners':3.18,
'suspension':3.18,
'woe':3.18,
'accusations':3.16,
'complain':3.16,
'declined':3.16,
'disorders':3.16,
'doubt':3.16,
'forced':3.16,
'lack':3.16,
'severe':3.16,
'smoke':3.16,
'yuck':3.16,
'feared':3.14,
'gangster':3.14,
'argument':3.14,
'avoid':3.14,
'bitch':3.14,
'bruise':3.14,
'dismissed':3.14,
'disorder':3.14,
'exhausted':3.14,
'incorrectly':3.14,
'isolation':3.14,
'scream':3.14,
'slapped':3.14,
'spit':3.14,
'suck':3.14,
'sucks':3.14,
'suspect':3.14,
'whore':3.14,
'wrong':3.14,
'cursed':3.12,
'doom':3.12,
'desperate':3.12,
'lonesome':3.12,
'regret':3.12,
'rob':3.12,
'defects':3.1,
'ambulance':3.1,
'annoy':3.1,
'conflict':3.1,
'criticism':3.1,
'execution':3.1,
'fought':3.1,
'indictment':3.1,
'pity':3.1,
'smoking':3.1,
'stink':3.1,
'tear':3.1,
'unable':3.1,
'cigarette':3.09,
'beg':3.08,
'prejudice':3.08,
'bullshit':3.08,
'decay':3.08,
'decline':3.08,
'deficit':3.08,
'difficulties':3.08,
'graves':3.08,
'regrets':3.08,
'suspended':3.08,
'trapped':3.08,
'yelling':3.08,
'aging':3.06,
'arguing':3.06,
'bullets':3.06,
'dumb':3.06,
'emergency':3.06,
'greed':3.06,
'idiot':3.06,
'idiots':3.06,
'inadequate':3.06,
'refugees':3.06,
'turmoil':3.06,
'rotting':3.04,
'greedy':3.04,
'havoc':3.04,
'arguments':3.04,
'bled':3.04,
'bored':3.04,
'complaints':3.04,
'horror':3.04,
'insane':3.04,
'jealousy':3.04,
'lawsuits':3.04,
'rat':3.04,
'resignation':3.04,
'scare':3.04,
'anxiety':3.03,
'fiend':3.02,
'hostile':3.02,
'weeping':3.02,
'broken':3.02,
'criticized':3.02,
'offensive':3.02,
'trembling':3.02,
'argue':3,
'argues':3,
'bitter':3,
'condemned':3,
'fights':3,
'muthafuckin':3,
'vicious':3,
'battle':2.98,
'confused':2.98,
'crappy':2.98,
'damn':2.98,
'guns':2.98,
'ignorance':2.98,
'missing':2.98,
'niggaz':2.98,
'problem':2.98,
'worthless':2.98,
'insecure':2.98,
'coffin':2.96,
'conflicts':2.96,
'damages':2.96,
'lawsuit':2.96,
'niggas':2.96,
'screaming':2.96,
'wound':2.96,
'bloody':2.94,
'cemetery':2.94,
'choking':2.94,
'explosion':2.94,
'foul':2.94,
'nervous':2.94,
'sore':2.94,
'tension':2.94,
'thief':2.94,
'thug':2.94,
'unfortunate':2.94,
'weakness':2.94,
'breakdown':2.94,
'bury':2.93,
'accused':2.92,
'awful':2.92,
'burn':2.92,
'cries':2.92,
'hangover':2.92,
'mistakes':2.92,
'problems':2.92,
'riot':2.92,
'sleepless':2.92,
'demon':2.92,
'boring':2.9,
'bruised':2.9,
'burned':2.9,
'collapse':2.9,
'complained':2.9,
'debt':2.9,
'fake':2.9,
'frustrated':2.9,
'impossible':2.9,
'ouch':2.9,
'deadly':2.9,
'disrespect':2.9,
'drown':2.9,
'badly':2.88,
'banned':2.88,
'burning':2.88,
'cancelled':2.88,
'dislike':2.88,
'threats':2.88,
'sins':2.88,
'bombs':2.86,
'complaint':2.86,
'errors':2.86,
'illegal':2.86,
'lonely':2.86,
'mourns':2.86,
'prisoner':2.86,
'stress':2.86,
'tax':2.86,
'violations':2.86,
'widow':2.86,
'addict':2.84,
'buried':2.84,
'devils':2.84,
'dump':2.84,
'hater':2.84,
'incorrect':2.84,
'infection':2.84,
'neglected':2.84,
'penalty':2.84,
'terrible':2.84,
'unkind':2.84,
'weak':2.84,
'annoying':2.82,
'bills':2.82,
'blame':2.82,
'burden':2.82,
'complaining':2.82,
'danger':2.82,
'demise':2.82,
'despair':2.82,
'disabled':2.82,
'discrimination':2.82,
'filthy':2.82,
'gun':2.82,
'lied':2.82,
'missiles':2.82,
'mourners':2.82,
'obituary':2.82,
'prosecution':2.82,
'worry':2.82,
'mafia':2.81,
'wounds':2.8,
'burns':2.78,
'cowards':2.78,
'fever':2.78,
'mistake':2.78,
'trouble':2.78,
'troubled':2.78,
'wasted':2.78,
'bitches':2.76,
'bleeding':2.76,
'fighting':2.76,
'lose':2.76,
'lost':2.76,
'pathetic':2.76,
'unfortunately':2.76,
'neglect':2.76,
'defeated':2.74,
'loses':2.74,
'stressed':2.74,
'ugly':2.74,
'violation':2.74,
'unholy':2.73,
'addiction':2.72,
'arrests':2.72,
'disgrace':2.72,
'heartbreaker':2.72,
'mourn':2.72,
'struggling':2.72,
'desperation':2.7,
'distress':2.7,
'fight':2.7,
'spam':2.7,
'taxes':2.7,
'waste':2.7,
'worse':2.7,
'sorrows':2.69,
'bleed':2.69,
'ache':2.68,
'bastards':2.68,
'fears':2.68,
'injuries':2.68,
'jealous':2.68,
'misery':2.68,
'ruin':2.68,
'shame':2.68,
'stupid':2.68,
'trash':2.68,
'deaf':2.67,
'afraid':2.66,
'ban':2.66,
'drugs':2.66,
'loneliness':2.66,
'penalties':2.66,
'surgery':2.66,
'tensions':2.66,
'bad':2.64,
'curse':2.64,
'demons':2.64,
'enemy':2.64,
'guilty':2.64,
'inflation':2.64,
'motherfucking':2.64,
'sin':2.64,
'heartaches':2.63,
'\#fail':2.62,
'beaten':2.62,
'lies':2.62,
'losing':2.62,
'nasty':2.62,
'retarded':2.62,
'rude':2.62,
'threatened':2.62,
'violated':2.62,
'thugs':2.61,
'abortion':2.6,
'brutal':2.6,
'crash':2.6,
'error':2.6,
'lie':2.6,
'mad':2.6,
'selfish':2.6,
'stole':2.6,
'worries':2.6,
'ashamed':2.59,
'infections':2.59,
'annoyed':2.58,
'blind':2.58,
'cheated':2.58,
'damage':2.58,
'disgusting':2.58,
'guilt':2.58,
'lying':2.58,
'motherfuckin':2.58,
'rotten':2.58,
'scared':2.58,
'scary':2.58,
'shitty':2.58,
'starving':2.58,
'stroke':2.58,
'betrayed':2.57,
'nightmares':2.56,
'assault':2.56,
'beating':2.56,
'grave':2.56,
'hopeless':2.56,
'loss':2.56,
'rage':2.56,
'satan':2.56,
'upset':2.56,
'corpse':2.55,
'abandoned':2.54,
'broke':2.54,
'cocaine':2.54,
'denied':2.54,
'harm':2.54,
'hurricane':2.54,
'miserable':2.54,
'pissed':2.54,
'ruined':2.54,
'tumor':2.53,
'attacked':2.52,
'bastard':2.52,
'destroy':2.52,
'failing':2.52,
'shooting':2.52,
'useless':2.52,
'motherfuckers':2.51,
'betray':2.5,
'psycho':2.5,
'shit':2.5,
'shot':2.5,
'stolen':2.5,
'crisis':2.48,
'damaged':2.48,
'haters':2.48,
'recession':2.48,
'saddam':2.48,
'slap':2.48,
'attacks':2.46,
'crashed':2.46,
'losses':2.46,
'panic':2.46,
'steal':2.46,
'stealing':2.46,
'tears':2.46,
'burial':2.44,
'cheat':2.44,
'dangerous':2.44,
'drowning':2.44,
'enemies':2.44,
'hating':2.44,
'prisoners':2.44,
'saddened':2.44,
'arrest':2.42,
'attack':2.42,
'flood':2.42,
'ill':2.42,
'killer':2.42,
'negative':2.42,
'worried':2.42,
'wounded':2.42,
'nigger':2.41,
'slaughter':2.41,
'asshole':2.4,
'flu':2.4,
'weapons':2.4,
'graveside':2.38,
'sad':2.38,
'victim':2.38,
'hurting':2.36,
'threat':2.36,
'frustration':2.34,
'hate':2.34,
'tragic':2.34,
'grief':2.33,
'accident':2.32,
'angry':2.32,
'fear':2.32,
'nightmare':2.32,
'poor':2.32,
'victims':2.32,
'anger':2.3,
'fired':2.3,
'fraud':2.3,
'theft':2.3,
'thieves':2.29,
'heartache':2.28,
'sadly':2.28,
'cheating':2.26,
'destruction':2.26,
'disappointed':2.26,
'bombing':2.24,
'devil':2.24,
'horrible':2.24,
'suffered':2.24,
'hatred':2.22,
'weep':2.22,
'hell':2.22,
'holocaust':2.22,
'injured':2.22,
'suffering':2.22,
'cried':2.2,
'crime':2.2,
'loser':2.2,
'depressed':2.18,
'divorce':2.18,
'hurt':2.18,
'robbed':2.18,
'tsunami':2.18,
'agony':2.16,
'drowned':2.16,
'homeless':2.16,
'pollution':2.16,
'corruption':2.14,
'crimes':2.14,
'hated':2.14,
'hurts':2.14,
'painful':2.12,
'sorrow':2.12,
'unemployment':2.12,
'unhappy':2.12,
'heartbreak':2.11,
'dying':2.1,
'funeral':2.1,
'pain':2.1,
'worst':2.1,
'dies':2.08,
'racist':2.08,
'rejected':2.08,
'robbery':2.08,
'suffer':2.08,
'virus':2.08,
'bankruptcy':2.06,
'fails':2.06,
'failure':2.06,
'hates':2.06,
'prison':2.06,
'slave':2.06,
'slaves':2.06,
'tragedy':2.06,
'violent':2.06,
'crying':2.04,
'destroyed':2.04,
'injury':2.04,
'rejection':2.02,
'motherfucker':2.02,
'sick':2.02,
'slavery':2.02,
'dead':2,
'disease':2,
'illness':2,
'killers':2,
'punishment':2,
'criminal':1.98,
'depression':1.98,
'headache':1.98,
'poverty':1.98,
'tumors':1.98,
'bomb':1.96,
'disaster':1.96,
'fail':1.96,
'poison':1.94,
'depressing':1.9,
'earthquake':1.9,
'evil':1.9,
'wars':1.9,
'abuse':1.88,
'diseases':1.88,
'sadness':1.88,
'violence':1.86,
'cruel':1.84,
'cry':1.84,
'failed':1.84,
'sickness':1.84,
'abused':1.83,
'tortured':1.82,
'fatal':1.8,
'killings':1.8,
'murdered':1.8,
'war':1.8,
'kills':1.78,
'jail':1.76,
'terror':1.76,
'die':1.74,
'killing':1.7,
'arrested':1.64,
'deaths':1.64,
'raped':1.64,
'torture':1.58,
'died':1.56,
'kill':1.56,
'killed':1.56,
'cancer':1.54,
'death':1.54,
'murder':1.48,
'terrorism':1.48,
'rape':1.44,
'suicide':1.3,
'terrorist':1.3
} | 0 | 0 | 0 |
649d71de77a159fa7464be3ada8151dcf99987bf | 1,012 | py | Python | Encoder.py | Amirhossein-Zare/rsa-manager | 6dfcbb5b948257ef448cbc932f5338bb3e53daba | [
"MIT"
] | 1 | 2021-02-15T17:20:42.000Z | 2021-02-15T17:20:42.000Z | Encoder.py | Amirhossein-Zare/rsa-manager | 6dfcbb5b948257ef448cbc932f5338bb3e53daba | [
"MIT"
] | 1 | 2021-02-15T17:19:28.000Z | 2021-02-15T17:19:28.000Z | Encoder.py | Amirhossein-Zare/rsa-manager | 6dfcbb5b948257ef448cbc932f5338bb3e53daba | [
"MIT"
] | null | null | null | # See transform.py for more info
import transform
import ascii
# M(message in numbers) ** e(key[0]) mod n(key[1]) = C(output)
| 29.764706 | 68 | 0.594862 | # See transform.py for more info
import transform
import ascii
# M(message in numbers) ** e(key[0]) mod n(key[1]) = C(output)
def encrypt():
    """Interactively RSA-encrypt a message typed by the user.

    Prompts for the message and the public key pair (e, n), splits the
    message into fixed-size blocks, converts each block to an integer,
    and prints C = M ** e mod n for every block.
    """
    message = input("What is your secret message?")
    # key[0] = e (public exponent), key[1] = n (modulus).
    key = [0, 0]
    key[0] = int(input("Now please type/paste e(public key) here:"))
    key[1] = int(input("Now please type/paste n here:"))
    block_size = 20
    # Number of blocks, rounding up when there is a partial final block.
    if len(message) % block_size == 0:
        block_count = len(message) // block_size
    else:
        block_count = (len(message) // block_size) + 1
    blocks = [0] * block_count
    coded = [0] * block_count
    # Slice the message into block_size-character chunks.
    for i in range(0, block_count):
        blocks[i] = message[i * block_size:(i + 1) * block_size]
    print("Encoding. Please wait...")
    for i in range(block_count):
        #numeric = transform.ttn(blocks[i])
        # NOTE(review): ascii.TextToNum presumably maps a text block to one
        # integer (see the project's ascii module) -- confirm its contract.
        numeric = ascii.TextToNum(blocks[i])
        # Modular exponentiation: C = M ** e mod n.
        coded[i] = pow(numeric, key[0], key[1])
    print("Coded Message:")
    for i in range(0, block_count):
        print('Part',i+1,': ', end='')
        print(coded[i])
| 863 | 0 | 22 |
ed3ddab75fd8cd35f3e3498ca2b3368b5d9a384c | 111 | py | Python | setup.py | jskeys/words | 00467466cb7f7157c47ea49b91d5a540765cc4aa | [
"MIT"
] | null | null | null | setup.py | jskeys/words | 00467466cb7f7157c47ea49b91d5a540765cc4aa | [
"MIT"
] | null | null | null | setup.py | jskeys/words | 00467466cb7f7157c47ea49b91d5a540765cc4aa | [
"MIT"
] | null | null | null | import site
import sys
from setuptools import setup
# NOTE(review): re-enables the user site-packages directory when "--user"
# was passed on the command line; this looks like the common workaround for
# setuptools disabling user-site installs -- confirm it is still required.
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
# All package metadata is expected to live in setup.cfg / pyproject.toml.
setup()
| 13.875 | 48 | 0.747748 | import site
import sys
from setuptools import setup
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
setup()
| 0 | 0 | 0 |
510340caa64571a122561488ddd79b7b088711e0 | 2,232 | py | Python | simple-proxy/tcp_proxy_encoded.py | liangsai12/Hassio-Addons | d320b4a8c7f54ac6fd7d6ce8aef0581c662a5017 | [
"Apache-2.0"
] | null | null | null | simple-proxy/tcp_proxy_encoded.py | liangsai12/Hassio-Addons | d320b4a8c7f54ac6fd7d6ce8aef0581c662a5017 | [
"Apache-2.0"
] | null | null | null | simple-proxy/tcp_proxy_encoded.py | liangsai12/Hassio-Addons | d320b4a8c7f54ac6fd7d6ce8aef0581c662a5017 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import json
import socket
import threading
from selectors import DefaultSelector, EVENT_READ
# Port this proxy listens on locally.
LOCAL_PORT = 7088
# Remote server and port to forward traffic to -- change to your own host.
REMOTE_ADDR = "hachinasp.duckdns.org"
REMOTE_PORT = 7088
def xor_encode(bstring):
    """Trivially "encode" a byte string by XOR-ing every byte with 0x55.

    The transform is its own inverse: applying it twice restores the
    original bytes.  Returns a new bytearray; the input is not modified.
    """
    mask = 0x55
    return bytearray(byte ^ mask for byte in bstring)
def proxy_process_encoded( sock1, sock2 ):
    """Shuttle data between two sockets, XOR-encoding every chunk.

    Whatever is received on one socket is passed through xor_encode()
    and sent to the other, so the tunnel is encoded in both directions.
    Returns -- after closing both sockets -- when either peer performs
    an orderly shutdown or resets the connection.
    """
    sel = DefaultSelector()
    sel.register(sock1, EVENT_READ)
    sel.register(sock2, EVENT_READ)
    while True:
        # Block until at least one of the two sockets is readable.
        events = sel.select()
        for (key,ev) in events:
            try:
                data_in = key.fileobj.recv(8192)
            except ConnectionResetError as e:
                # Peer reset the connection: tear down both directions.
                print(key.fileobj, "\nreset receive!")
                sock1.close()
                sock2.close()
                return
            if data_in:
                # Forward the encoded chunk to the opposite socket.
                if key.fileobj==sock1:
                    sock2.send(xor_encode(data_in))
                else:
                    sock1.send(xor_encode(data_in))
            else:
                # recv() returned b"": orderly shutdown by the peer.
                sock1.close()
                sock2.close()
                return
def tcp_proxy(sock_in, addr):
    """Handle one new client connection of the proxy.

    Connects to the configured remote endpoint (REMOTE_ADDR:REMOTE_PORT)
    and then relays encoded traffic between the client and the remote
    until either side disconnects.  On connect failure the client socket
    is closed and the handler returns.
    """
    print("新的连接: %s:%s..." % addr, flush=True)
    # Open the remote leg of the tunnel (15 s connect/IO timeout).
    sock_remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock_remote.settimeout(15)
    try:
        sock_remote.connect((REMOTE_ADDR, REMOTE_PORT))
    except Exception as e:
        print(e, flush=True)
        print( "Error when connect to", (REMOTE_ADDR, REMOTE_PORT), flush=True )
        sock_in.close()
        return
    # Relay data between the local and remote connections.
    proxy_process_encoded( sock_in, sock_remote )
def start_server():
    """Main service loop: accept clients and proxy each one in its own thread."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("0.0.0.0", LOCAL_PORT))
    s.listen()
    print("等待客户端连接...", flush=True)
    while True:
        sock, addr = s.accept()
        # One thread per client connection.
        t = threading.Thread(target=tcp_proxy, args=(sock, addr))
        t.start()
if __name__ == "__main__":
    # Drop incoming TCP RST packets whose source port is REMOTE_PORT
    # (presumably to keep the tunnel alive when the remote side resets --
    # confirm).  Requires root privileges and iptables.
    os.system("iptables -A INPUT -p tcp --sport {} --tcp-flags RST RST -j DROP".format(REMOTE_PORT))
start_server() | 26.258824 | 100 | 0.59095 | #!/usr/bin/env python3
import os
import json
import socket
import threading
from selectors import DefaultSelector, EVENT_READ
# Proxy开放的端口号
LOCAL_PORT = 7088
# 连接的远程服务器与端口,修改成你的远程服务器地址
REMOTE_ADDR = "hachinasp.duckdns.org"
REMOTE_PORT = 7088
def xor_encode( bstring ):
"""一个简单编码:两次编码后与原值相同"""
MASK = 0x55
ret = bytearray( bstring )
for i in range(len(ret)):
ret[i] ^= MASK
return ret
def proxy_process_encoded( sock1, sock2 ):
"""在两个sockek之间转发数据:任何一个收到的,编码后转发到另一个"""
sel = DefaultSelector()
sel.register(sock1, EVENT_READ)
sel.register(sock2, EVENT_READ)
while True:
events = sel.select()
for (key,ev) in events:
try:
data_in = key.fileobj.recv(8192)
except ConnectionResetError as e:
print(key.fileobj, "\nreset receive!")
sock1.close()
sock2.close()
return
if data_in:
if key.fileobj==sock1:
sock2.send(xor_encode(data_in))
else:
sock1.send(xor_encode(data_in))
else:
sock1.close()
sock2.close()
return
def tcp_proxy(sock_in, addr):
"""新的代理请求连接时,进行相关处理"""
print("新的连接: %s:%s..." % addr, flush=True)
# 建立远程连接
sock_remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_remote.settimeout(15)
try:
sock_remote.connect((REMOTE_ADDR, REMOTE_PORT))
except Exception as e:
print(e, flush=True)
print( "Error when connect to", (REMOTE_ADDR, REMOTE_PORT), flush=True )
sock_in.close()
return
# 在本地连接与远程连接间转发数据
proxy_process_encoded( sock_in, sock_remote )
def start_server():
"""主服务函数"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0", LOCAL_PORT))
s.listen()
print("等待客户端连接...", flush=True)
while True:
sock, addr = s.accept()
t = threading.Thread(target=tcp_proxy, args=(sock, addr))
t.start()
if __name__ == "__main__":
os.system("iptables -A INPUT -p tcp --sport {} --tcp-flags RST RST -j DROP".format(REMOTE_PORT))
start_server() | 0 | 0 | 0 |
ad92cc4df4b6ad37e22118abd074de52c4815494 | 667 | py | Python | CS-483_Big-Data-Analytics-Capstone_2020-Spring/code/statistics/crime-over-time-by-type.py | CraftingGamerTom/wsu-computer-science | aa40fc95a84ac95535284048f6f572def1375f7d | [
"MIT"
] | null | null | null | CS-483_Big-Data-Analytics-Capstone_2020-Spring/code/statistics/crime-over-time-by-type.py | CraftingGamerTom/wsu-computer-science | aa40fc95a84ac95535284048f6f572def1375f7d | [
"MIT"
] | null | null | null | CS-483_Big-Data-Analytics-Capstone_2020-Spring/code/statistics/crime-over-time-by-type.py | CraftingGamerTom/wsu-computer-science | aa40fc95a84ac95535284048f6f572def1375f7d | [
"MIT"
] | null | null | null | # https://www.kaggle.com/skeftical/chicago-crimes-eda-spatio-temporal
# Crime over time (by type)
# This is very resource intensive to run
crimes_count_date = crimes.pivot_table('P_INCID_NO', aggfunc=np.size, columns='IBR_TYPE', index=crimes.index, fill_value=0)
crimes_count_date.index = pd.DatetimeIndex(crimes_count_date.index)
plo = crimes_count_date.rolling(365).sum().plot(figsize=(12, 30), subplots=True, layout=(-1, 3), sharex=False, sharey=False)
plt.figure(figsize=(11,4))
crimes_count_date.resample('D').size().rolling(365).sum().plot()
plt.title('Rolling sum of all crimes from 2007 - 2018')
plt.ylabel('Number of crimes')
plt.xlabel('Days')
plt.show() | 44.466667 | 124 | 0.757121 | # https://www.kaggle.com/skeftical/chicago-crimes-eda-spatio-temporal
# Crime over time (by type)
# This is very resource intensive to run
# NOTE(review): notebook-style fragment -- relies on `crimes`, `np`, `pd`
# and `plt` being defined by an earlier snippet.
# Pivot to one column per IBR_TYPE, counting incidents per timestamp.
crimes_count_date = crimes.pivot_table('P_INCID_NO', aggfunc=np.size, columns='IBR_TYPE', index=crimes.index, fill_value=0)
crimes_count_date.index = pd.DatetimeIndex(crimes_count_date.index)
# One subplot per crime type, smoothed with a 365-sample rolling sum.
plo = crimes_count_date.rolling(365).sum().plot(figsize=(12, 30), subplots=True, layout=(-1, 3), sharex=False, sharey=False)
plt.figure(figsize=(11,4))
# Aggregate view: daily counts, again smoothed with a 365-day rolling sum.
crimes_count_date.resample('D').size().rolling(365).sum().plot()
plt.title('Rolling sum of all crimes from 2007 - 2018')
plt.ylabel('Number of crimes')
plt.xlabel('Days')
plt.show() | 0 | 0 | 0 |
29d2e41188a0da558bc18421bb19c9f3e24ccafc | 199 | py | Python | apps/categories/urls.py | ressapanda/fishka-backend | c1eb58566dce01c7a011f0093893cd16b6d50875 | [
"MIT"
] | null | null | null | apps/categories/urls.py | ressapanda/fishka-backend | c1eb58566dce01c7a011f0093893cd16b6d50875 | [
"MIT"
] | null | null | null | apps/categories/urls.py | ressapanda/fishka-backend | c1eb58566dce01c7a011f0093893cd16b6d50875 | [
"MIT"
] | null | null | null | from rest_framework import routers
from apps.categories.views import CategoryViewSet
# Register the viewset at the router root; "categories" is the URL basename.
router = routers.SimpleRouter()
router.register(r"", CategoryViewSet, "categories")
# Expose the router-generated routes to Django's URL resolver.
urlpatterns = router.urls
| 19.9 | 51 | 0.809045 | from rest_framework import routers
from apps.categories.views import CategoryViewSet
router = routers.SimpleRouter()
router.register(r"", CategoryViewSet, "categories")
urlpatterns = router.urls
| 0 | 0 | 0 |
006f3a7558111c7d27acad4e2d9118022e4d4e03 | 6,975 | py | Python | tests/router_test.py | aio-libs-abandoned/aiorest | 3caf0ad8018d96bd681843b54aa07c86803fbec8 | [
"MIT"
] | 3 | 2018-02-07T04:16:53.000Z | 2018-04-26T21:56:59.000Z | tests/router_test.py | aio-libs-abandoned/aiorest | 3caf0ad8018d96bd681843b54aa07c86803fbec8 | [
"MIT"
] | null | null | null | tests/router_test.py | aio-libs-abandoned/aiorest | 3caf0ad8018d96bd681843b54aa07c86803fbec8 | [
"MIT"
] | null | null | null | import unittest
from unittest import mock
import asyncio
import aiohttp
from aiorest import RESTServer, Request
import json
| 37.299465 | 78 | 0.586667 | import unittest
from unittest import mock
import asyncio
import aiohttp
from aiorest import RESTServer, Request
import json
class RouterTests(unittest.TestCase):
    """Unit tests for RESTServer URL registration and request dispatch.

    Each test builds a private asyncio event loop in setUp (and detaches
    the global loop) so the tests neither depend on nor leak into global
    asyncio state.
    """
    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
        self.server = RESTServer(hostname='example.com', loop=self.loop)
    def tearDown(self):
        self.loop.close()
    # --- URL registration -------------------------------------------------
    def test_add_url(self):
        handler = lambda id: None
        self.server.add_url('post', '/post/{id}', handler)
        self.assertEqual(1, len(self.server._urls))
        entry = self.server._urls[0]
        self.assertEqual('POST', entry.method)
        self.assertIs(handler, entry.handler)
        self.assertEqual('^/post/(?P<id>[^{}/]+)$', entry.regex.pattern)
    # Malformed URL patterns must be rejected at registration time.
    def test_add_url_invalid1(self):
        with self.assertRaises(ValueError):
            self.server.add_url('post', '/post/{id', lambda: None)
    def test_add_url_invalid2(self):
        with self.assertRaises(ValueError):
            self.server.add_url('post', '/post/{id{}}', lambda: None)
    def test_add_url_invalid3(self):
        with self.assertRaises(ValueError):
            self.server.add_url('post', '/post/{id{}', lambda: None)
    def test_add_url_invalid4(self):
        with self.assertRaises(ValueError):
            self.server.add_url('post', '/post/{id"}', lambda: None)
    def test_add_url_invalid5(self):
        with self.assertRaises(ValueError):
            self.server.add_url('post', '/post"{id}', lambda: None)
    # --- Dispatch: error responses ----------------------------------------
    def test_dispatch_not_found(self):
        m = mock.Mock()
        self.server.add_url('post', '/post/{id}', m)
        self.server.add_url('get', '/post/{id}', m)
        @asyncio.coroutine
        def go():
            with self.assertRaises(aiohttp.HttpProcessingError) as ctx:
                request = Request('host', aiohttp.RawRequestMessage(
                    'POST', '/not/found', '1.1', {}, True, None),
                    None, loop=self.loop)
                yield from self.server.dispatch(request)
            self.assertEqual(404, ctx.exception.code)
            self.assertFalse(m.called)
        self.loop.run_until_complete(go())
    def test_dispatch_method_not_allowed(self):
        m = mock.Mock()
        self.server.add_url('post', '/post/{id}', m)
        self.server.add_url('get', '/post/{id}', m)
        @asyncio.coroutine
        def go():
            with self.assertRaises(aiohttp.HttpProcessingError) as ctx:
                request = Request('host', aiohttp.RawRequestMessage(
                    'DELETE', '/post/123', '1.1', {}, True, None),
                    None, loop=self.loop)
                yield from self.server.dispatch(request)
            self.assertEqual(405, ctx.exception.code)
            self.assertEqual((('Allow', 'GET, POST'),), ctx.exception.headers)
            self.assertFalse(m.called)
        self.loop.run_until_complete(go())
    # --- Dispatch: successful handler invocation --------------------------
    def test_dispatch(self):
        def f(request):
            return {'a': 1, 'b': 2}
        self.server.add_url('get', '/post/{id}', f)
        request = Request('host', aiohttp.RawRequestMessage(
            'GET', '/post/123', '1.1', {}, True, None),
            None, loop=self.loop)
        ret = self.loop.run_until_complete(self.server.dispatch(request))
        # json.loads is required to avoid items order in dict
        self.assertEqual({"b": 2, "a": 1}, json.loads(ret))
    def test_dispatch_with_ending_slash(self):
        def f(request):
            return {'a': 1, 'b': 2}
        self.server.add_url('get', '/post/{id}/', f)
        request = Request('host', aiohttp.RawRequestMessage(
            'GET', '/post/123/', '1.1', {}, True, None),
            None, loop=self.loop)
        ret = self.loop.run_until_complete(self.server.dispatch(request))
        # json.loads is required to avoid items order in dict
        self.assertEqual({"b": 2, "a": 1}, json.loads(ret))
    def test_dispatch_with_ending_slash_not_found1(self):
        def f(request):
            return {'a': 1, 'b': 2}
        self.server.add_url('get', '/post/{id}/', f)
        request = Request('host', aiohttp.RawRequestMessage(
            'GET', '/post/123', '1.1', {}, True, None),
            None, loop=self.loop)
        with self.assertRaises(aiohttp.HttpProcessingError) as ctx:
            self.loop.run_until_complete(self.server.dispatch(request))
        self.assertEqual(404, ctx.exception.code)
    def test_dispatch_with_ending_slash_not_found2(self):
        def f(request):
            return {'a': 1, 'b': 2}
        self.server.add_url('get', '/post/{id}/', f)
        request = Request('host', aiohttp.RawRequestMessage(
            'GET', '/po/123', '1.1', {}, True, None),
            None, loop=self.loop)
        with self.assertRaises(aiohttp.HttpProcessingError) as ctx:
            self.loop.run_until_complete(self.server.dispatch(request))
        self.assertEqual(404, ctx.exception.code)
    # A handler whose signature cannot accept the route args is a 500.
    def test_dispatch_bad_signature(self):
        def f():
            return {'a': 1, 'b': 2}
        self.server.add_url('get', '/post/{id}', f)
        request = Request('host', aiohttp.RawRequestMessage(
            'GET', '/post/123', '1.1', {}, True, None),
            None, loop=self.loop)
        @asyncio.coroutine
        def go():
            with self.assertRaises(aiohttp.HttpProcessingError) as ctx:
                yield from self.server.dispatch(request)
            self.assertEqual(500, ctx.exception.code)
        self.loop.run_until_complete(go())
    # HTTP errors raised by the handler propagate with code and headers.
    def test_dispatch_http_exception_from_handler(self):
        def f(request):
            raise aiohttp.HttpProcessingError(
                code=401,
                headers=(('WWW-Authenticate', 'Basic'),))
        self.server.add_url('get', '/post/{id}', f)
        request = Request('host', aiohttp.RawRequestMessage(
            'GET', '/post/123', '1.1', {}, True, None),
            None, loop=self.loop)
        @asyncio.coroutine
        def go():
            with self.assertRaises(aiohttp.HttpProcessingError) as ctx:
                yield from self.server.dispatch(request)
            self.assertEqual(401, ctx.exception.code)
            self.assertEqual((('WWW-Authenticate', 'Basic'),),
                             ctx.exception.headers)
        self.loop.run_until_complete(go())
    # Handlers may declare a request parameter and receive the Request.
    def test_dispatch_with_request(self):
        def f(req):
            self.assertIsInstance(req, Request)
            self.assertEqual('GET', req.method)
            self.assertEqual('/post/123', req.path)
            return {'a': 1, 'b': 2}
        self.server.add_url('get', '/post/{id}', f)
        request = Request('host', aiohttp.RawRequestMessage(
            'GET', '/post/123', '1.1', {}, True, None),
            None, loop=self.loop)
        ret = self.loop.run_until_complete(self.server.dispatch(request))
        # json.loads is required to avoid items order in dict
        self.assertEqual({"b": 2, "a": 1}, json.loads(ret))
| 6,351 | 16 | 482 |
67fd0980e8bfd9e6fdcb6df4ed1cfde8f1348929 | 2,515 | py | Python | aliyun-python-sdk-dybaseapi/aliyunsdkdybaseapi/mns/mns_request.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-dybaseapi/aliyunsdkdybaseapi/mns/mns_request.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-dybaseapi/aliyunsdkdybaseapi/mns/mns_request.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
| 29.940476 | 87 | 0.686282 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
class RequestBase:
    """Common base for MNS request objects.

    Holds the HTTP verb (``method``, filled in by subclasses) and an
    optional caller-supplied ``request_id`` used for tracing.
    """
    def __init__(self):
        self.method = ""
        self.request_id = None

    def set_req_info(self, req_info):
        """Adopt the request id from *req_info* when both are non-None."""
        if req_info is None:
            return
        rid = req_info.request_id
        if rid is not None:
            self.request_id = rid
class ResponseBase:
    """Common base for MNS response objects: status, headers and error body."""
    def __init__(self):
        # -1 marks "no HTTP response received yet".
        self.status = -1
        self.header = {}
        self.error_data = ""

    def get_requestid(self):
        """Return the server-assigned request id header, or None when absent."""
        return self.header.get("x-mns-request-id")
class BatchReceiveMessageRequest(RequestBase):
    """GET request that pulls up to *batch_size* messages from one queue.

    ``base64decode`` controls body decoding; ``wait_seconds`` of -1
    presumably falls back to the queue default -- confirm against the
    MNS API documentation.
    """
    def __init__(self, queue_name, batch_size, base64decode=True, wait_seconds=-1):
        super().__init__()
        self.method = "GET"
        self.queue_name = queue_name
        self.batch_size = batch_size
        self.base64decode = base64decode
        self.wait_seconds = wait_seconds
class ReceiveMessageResponseEntry:
    """One received message; every field starts as a "not set" sentinel.

    Numeric fields default to -1 and string fields to "" until the
    response parser fills them in.
    """
    def __init__(self):
        # Identity / payload.
        self.message_id = ""
        self.message_body = ""
        self.message_body_md5 = ""
        self.receipt_handle = ""
        # Delivery bookkeeping.
        self.dequeue_count = -1
        self.enqueue_time = -1
        self.first_dequeue_time = -1
        self.priority = -1
        self.next_visible_time = ""
class BatchReceiveMessageResponse(ResponseBase):
    """Response to a batch receive: collects the received message entries."""
    def __init__(self):
        super().__init__()
        self.message_list = []
class BatchDeleteMessageRequest(RequestBase):
    """DELETE request removing the messages named by *receipt_handle_list*."""
    def __init__(self, queue_name, receipt_handle_list):
        super().__init__()
        self.method = "DELETE"
        self.queue_name = queue_name
        self.receipt_handle_list = receipt_handle_list
class BatchDeleteMessageResponse(ResponseBase):
    """Response to a batch delete; carries no fields beyond the base."""
    def __init__(self):
        super().__init__()
| 1,190 | 114 | 397 |
9a541e688037965d295b83fe7772fcef39af44d0 | 1,006 | py | Python | src/server.py | CaC-Grupo-9/backend | 107067d8bc13ee2ba28c9c1a8bd0b19a4cf55277 | [
"MIT"
] | 1 | 2021-12-03T16:10:27.000Z | 2021-12-03T16:10:27.000Z | src/server.py | CaC-Grupo-9/backend | 107067d8bc13ee2ba28c9c1a8bd0b19a4cf55277 | [
"MIT"
] | null | null | null | src/server.py | CaC-Grupo-9/backend | 107067d8bc13ee2ba28c9c1a8bd0b19a4cf55277 | [
"MIT"
] | 5 | 2021-11-15T23:30:05.000Z | 2021-11-30T13:10:59.000Z | from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
import os
with HTTPServer(('', 8000), handler) as server:
server.serve_forever() | 35.928571 | 94 | 0.643141 | from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
import os
class handler(BaseHTTPRequestHandler):
    """Minimal HTTP handler: GET serves data/mydata.json, POST returns a greeting."""
    def do_GET(self):
        """Respond to any GET with the contents of ./data/mydata.json as JSON."""
        self.send_response(200)
        self.send_header('Content-type','application/json')
        self.end_headers()
        script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
        rel_path_folder = "data"
        abs_file_path = os.path.join(os.path.join(script_dir, rel_path_folder), 'mydata.json')
        # wfile expects bytes, hence the utf8 encode.
        with open(abs_file_path, 'r') as f:
            self.wfile.write(f.read().encode("utf8"))
        #message = "{ 'Hello': 'World!' }"
        #self.wfile.write(bytes(message, "utf8"))
    def do_POST(self):
        """Respond to any POST with a fixed greeting (request body is ignored)."""
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        message = "Hello, World! Here is a POST response"
        self.wfile.write(bytes(message, "utf8"))
with HTTPServer(('', 8000), handler) as server:
server.serve_forever() | 742 | 17 | 80 |
ab789c35e7c93cf4d2515dde51e2bcb17301e2fa | 2,187 | py | Python | encryt_by_Caesar_cipher.py | Glenn-Po/LearningPython | 96b12999d13b55216a3da6cf6b9248a8e86cbe0b | [
"Apache-2.0"
] | null | null | null | encryt_by_Caesar_cipher.py | Glenn-Po/LearningPython | 96b12999d13b55216a3da6cf6b9248a8e86cbe0b | [
"Apache-2.0"
] | null | null | null | encryt_by_Caesar_cipher.py | Glenn-Po/LearningPython | 96b12999d13b55216a3da6cf6b9248a8e86cbe0b | [
"Apache-2.0"
] | null | null | null | #implement caesar's cipher
#it is well simple to implement and it uses the idea of ASCII character codes
#Note that in Python,characters are encoded in UNICODE by default
#The idea behind this encryption is to shift indvidual x-ters in a string(message) by a certain number(the key)
#White spaces are not encrypted
from string import whitespace as wsp, punctuation as punc,ascii_lowercase as lower, ascii_uppercase as upper
if __name__ == '__main__':
mainfunc()
| 37.067797 | 111 | 0.589392 | #implement caesar's cipher
#it is well simple to implement and it uses the idea of ASCII character codes
#Note that in Python,characters are encoded in UNICODE by default
#The idea behind this encryption is to shift indvidual x-ters in a string(message) by a certain number(the key)
#White spaces are not encrypted
from string import whitespace as wsp, punctuation as punc,ascii_lowercase as lower, ascii_uppercase as upper
def encrypt(plain_text, key):
    """Encrypt *plain_text* with a Caesar shift of *key*.

    ASCII letters are shifted within their own 26-letter alphabet and
    punctuation within ``string.punctuation``; every other character
    (digits, whitespace, non-ASCII) passes through unchanged.

    Bug fixes vs. the previous version:
      * the punctuation branch used ``cipher = ...`` instead of ``+=``,
        discarding everything encrypted so far;
      * punctuation wrapped modulo 26 even though ``punc`` has
        ``len(punc)`` (32) characters, so decryption could not invert
        the shift.
    """
    cipher = ''
    for char in plain_text:
        if not char.isascii():
            cipher += char
        elif char.islower():
            cipher += lower[(lower.index(char) + key) % 26]
        elif char.isupper():
            cipher += upper[(upper.index(char) + key) % 26]
        elif char in punc:
            cipher += punc[(punc.index(char) + key) % len(punc)]
        else:
            cipher += char  # digits, whitespace, etc. pass through
    return cipher
def decrypt(cipher, key):
    """Invert encrypt(): shift characters of *cipher* back by *key*.

    Bug fixes vs. the previous version:
      * the lowercase test was a second ``if`` instead of ``elif``, so
        non-ASCII cased characters crashed on ``lower.index``/``upper.index``
        and non-ASCII uncased characters were appended twice;
      * the punctuation branch used ``original_text = ...`` instead of
        ``+=``, discarding everything decrypted so far;
      * punctuation wrapped modulo 26 instead of ``len(punc)`` (32), so
        it did not invert encrypt()'s shift.
    """
    original_text = ''
    for char in cipher:
        if not char.isascii():
            original_text += char
        elif char.islower():
            original_text += lower[(lower.index(char) - key) % 26]
        elif char.isupper():
            original_text += upper[(upper.index(char) - key) % 26]
        elif char in punc:
            original_text += punc[(punc.index(char) - key) % len(punc)]
        else:
            original_text += char  # digits, whitespace, etc. pass through
    return original_text
def mainfunc():
    """Interactive menu loop: encrypt, decrypt or quit.

    Only the first letter of the answer is inspected, so "encrypt", "E"
    and "e" all select encryption.  No input validation: empty input
    raises IndexError and a non-integer key raises ValueError.
    """
    while True:
        choice = input("\n>>>Encrypt(E/e) or Decrypt(D/d) or Quit(Q/q)?\n").lower()[0]
        if choice == 'e':
            print("Enter text to encypt:")
            text = input()
            key = int(input('Enter an encryption key between(0 and 26): '))
            print('\nThe encrypted text is {}'.format(encrypt(text, key)))
        elif choice == 'd':
            print("Enter cipher text to decypt:")
            text = input()
            key = int(input('Enter an encryption key: '))
            print('\nThe encrypted text is {}'.format(decrypt(text, key)))
        elif choice == 'q':
            break;
        else:
            print('Invalid Choice !!!!\a\a\a\a')
if __name__ == '__main__':
    # Run the interactive encrypt/decrypt menu when executed as a script.
    mainfunc()
| 1,647 | 0 | 69 |
b52f9d37fe6b32f6785b7f5f9cf9060cea6cecf0 | 4,138 | py | Python | site-generator/generate.py | kleutzinger/kleutzinger.github.io | ceb3d78d918e44a0636b11cfb923a116781f6363 | [
"MIT"
] | 1 | 2016-01-11T06:50:37.000Z | 2016-01-11T06:50:37.000Z | site-generator/generate.py | kleutzinger/kleutzinger.github.io | ceb3d78d918e44a0636b11cfb923a116781f6363 | [
"MIT"
] | 1 | 2021-06-02T04:11:08.000Z | 2021-06-02T04:11:08.000Z | site-generator/generate.py | kleutzinger/kleutzinger.github.io | ceb3d78d918e44a0636b11cfb923a116781f6363 | [
"MIT"
] | null | null | null | import datetime
import time
import os
from markdown import markdown
import dominate
import sass
from dominate.tags import *
from dominate.util import raw
from prettify import html_prettify
import ingest
def gen_tags(project):
"display tags over picture when card is hovered"
tag_list = project.get("technologies", "")
if tag_list == "":
return ""
tag_list = tag_list.split(",")
LIS = "\n".join([f'<li><a href="#">{text}</a></li>' for text in tag_list])
out = f"""
<li class="tags">
<ul>
{LIS}
</ul>
</li>
"""
return out
def gen_card_html(project, is_alt_card=False):
"return raw html of a project card"
title = project.get("title", "_TITLE_")
screenshot_url = project.get("screenshot_url", "")
subtitle = gen_subtitle(project)
description = gen_description(project)
if "demo_url" in project:
demo_url = a("< Open >", href=project["demo_url"])
else:
demo_url = ""
if "repo_url" in project and project["repo_url"] not in project.get("demo_url", ""):
repo_url = a("Source Code", href=project["repo_url"])
else:
repo_url = ""
if "youtube" in project:
youtube = a("Video Demo", href=project["youtube"])
else:
youtube = ""
alt_class = "alt" * is_alt_card
hover_tags = gen_tags(project)
project_card = f"""\
<div class="blog-card {alt_class}">
<div class="meta">
<div class="photo" style="background-image: url({screenshot_url})"></div>
<ul class="details">
<li class="author"><a href="https://github.com/kleutzinger">Kevin Leutzinger</a></li>
{hover_tags}
</ul>
</div>
<div class="description">
<h1>{title}</h1>
{subtitle}
{description}
<p class="read-more">
{repo_url}
{youtube}
{demo_url}
</p>
</div>
</div>
"""
return project_card
if __name__ == "__main__":
generate_css()
doc = dominate.document(title="Portfolio - kevinleutzinger.com")
doc["lang"] = "en"
with doc.head:
link(rel="stylesheet", href="site-generator/card.css")
meta(charset="UTF-8")
meta(name="viewport", content="width=device-width,initial-scale=1")
# script(type='text/javascript', src='script.js')
print("getting all rows")
projects = ingest.get_rows()
projects.sort(reverse=True, key=order_proj)
even_idx = True
for proj in projects:
if "kl" in proj.get("omit_from", ""):
continue
htm = gen_card_html(proj, is_alt_card=even_idx)
with doc:
raw(htm)
even_idx = not even_idx
with open(os.path.join("..", "index.html"), "w") as f:
pretty_html = html_prettify(str(doc))
f.write(pretty_html)
print("regenerated index at", time.asctime(time.localtime()))
| 27.959459 | 93 | 0.594732 | import datetime
import time
import os
from markdown import markdown
import dominate
import sass
from dominate.tags import *
from dominate.util import raw
from prettify import html_prettify
import ingest
def generate_css():
    """Compile card.scss to card.css, rewriting the file only when it changed.

    Reads ./card.scss, compiles it with libsass, and overwrites ./card.css
    in place when the compiled output differs from what is on disk (leaves
    the file untouched otherwise).  Paths are relative to the working
    directory; card.css must already exist ("r+" does not create it).
    """
    with open("card.scss") as f:
        uncompiled = f.read()
    compiled = sass.compile(string=uncompiled)
    # "r+" so we can compare first and only write when necessary.
    with open("card.css", "r+") as w:
        infile = w.read()
        if infile != compiled:
            print("recompile card.scss")
            w.seek(0)
            w.write(compiled)
            w.truncate()
def gen_tags(project):
    """Return the hover-overlay <li class="tags"> markup for a project card.

    The project's comma-separated "technologies" field becomes one list
    item per tag; an empty/missing field yields an empty string.
    """
    raw_tags = project.get("technologies", "")
    if raw_tags == "":
        return ""
    items = "\n".join(
        f'<li><a href="#">{text}</a></li>' for text in raw_tags.split(",")
    )
    return f"""
    <li class="tags">
      <ul>
        {items}
      </ul>
    </li>
    """
def get_date(project):
    """Return the project's creation date formatted as e.g. "Jun 2021".

    Expects "date_created" in ISO "YYYY-MM-DD" form; returns an empty
    string when the field is absent.
    """
    if "date_created" not in project:
        return ""
    parsed = datetime.datetime.strptime(project["date_created"], "%Y-%m-%d")
    return parsed.strftime("%b %Y")
def gen_description(project):
    """Render the project's markdown "web_description" as raw HTML for dominate."""
    text = project.get("web_description", "")
    return raw(markdown(text))
def gen_subtitle(project):
    """Build the card subtitle: <h2>medium<br>Mon YYYY</h2>; date part optional."""
    medium = project.get("medium", "")
    when = get_date(project)
    if when:
        when = "<br>" + when
    return f"<h2>{medium}{when}</h2>"
def gen_card_html(project, is_alt_card=False):
    """Return the raw HTML for one project card.

    ``is_alt_card`` toggles the "alt" CSS class so adjacent cards alternate
    layout. Missing project fields degrade to empty strings / placeholders.
    """
    title = project.get("title", "_TITLE_")
    screenshot_url = project.get("screenshot_url", "")
    subtitle = gen_subtitle(project)
    description = gen_description(project)
    if "demo_url" in project:
        demo_url = a("< Open >", href=project["demo_url"])
    else:
        demo_url = ""
    # Hide the source-code link when the demo URL already contains the repo URL
    # (avoids two links that point at the same place).
    if "repo_url" in project and project["repo_url"] not in project.get("demo_url", ""):
        repo_url = a("Source Code", href=project["repo_url"])
    else:
        repo_url = ""
    if "youtube" in project:
        youtube = a("Video Demo", href=project["youtube"])
    else:
        youtube = ""
    # String-repetition-by-bool: "alt" when is_alt_card is truthy, "" otherwise.
    alt_class = "alt" * is_alt_card
    hover_tags = gen_tags(project)
    project_card = f"""\
    <div class="blog-card {alt_class}">
      <div class="meta">
        <div class="photo" style="background-image: url({screenshot_url})"></div>
        <ul class="details">
          <li class="author"><a href="https://github.com/kleutzinger">Kevin Leutzinger</a></li>
          {hover_tags}
        </ul>
      </div>
      <div class="description">
        <h1>{title}</h1>
        {subtitle}
        {description}
        <p class="read-more">
          {repo_url}
          {youtube}
          {demo_url}
        </p>
      </div>
    </div>
    """
    return project_card
if __name__ == "__main__":
    # Regenerate the portfolio page: recompile CSS, build the document,
    # render one card per project, and write ../index.html.
    generate_css()
    doc = dominate.document(title="Portfolio - kevinleutzinger.com")
    doc["lang"] = "en"
    with doc.head:
        link(rel="stylesheet", href="site-generator/card.css")
        meta(charset="UTF-8")
        meta(name="viewport", content="width=device-width,initial-scale=1")
        # script(type='text/javascript', src='script.js')
    print("getting all rows")
    projects = ingest.get_rows()
    def order_proj(proj):
        """Sort key: average of the project's star and shine ratings."""
        star_rating = int(proj.get("star_rating", 0))
        shine_rating = int(proj.get("shine_rating", 0))
        return (shine_rating + star_rating) / 2
    projects.sort(reverse=True, key=order_proj)
    # Alternate card styling, flipping only for cards that are actually rendered.
    even_idx = True
    for proj in projects:
        # Projects tagged with "kl" in omit_from are excluded from this site.
        if "kl" in proj.get("omit_from", ""):
            continue
        htm = gen_card_html(proj, is_alt_card=even_idx)
        with doc:
            raw(htm)
        even_idx = not even_idx
    with open(os.path.join("..", "index.html"), "w") as f:
        pretty_html = html_prettify(str(doc))
        f.write(pretty_html)
    print("regenerated index at", time.asctime(time.localtime()))
| 1,135 | 0 | 119 |
92e4a26b450f124c1a796965f61aeab928387856 | 7,427 | py | Python | tests/core/data/test_transforms.py | Site-Command/lightning-flash | bfff08ded9cf193cce1cd16e7034d8005de172ae | [
"Apache-2.0"
] | 1 | 2021-06-01T09:59:03.000Z | 2021-06-01T09:59:03.000Z | tests/core/data/test_transforms.py | Site-Command/lightning-flash | bfff08ded9cf193cce1cd16e7034d8005de172ae | [
"Apache-2.0"
] | null | null | null | tests/core/data/test_transforms.py | Site-Command/lightning-flash | bfff08ded9cf193cce1cd16e7034d8005de172ae | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock
import pytest
import torch
from torch import nn
from flash.core.data.data_source import DefaultDataKeys
from flash.core.data.transforms import ApplyToKeys, kornia_collate, KorniaParallelTransforms, merge_transforms
from flash.core.data.utils import convert_to_modules
@pytest.mark.parametrize("with_params", [True, False])
_MOCK_TRANSFORM = Mock()
@pytest.mark.parametrize(
"base_transforms, additional_transforms, expected_result",
[
(
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"post_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": _MOCK_TRANSFORM,
"post_tensor_transform": _MOCK_TRANSFORM
},
),
(
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": nn.Sequential(
convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
)
},
),
(
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": _MOCK_TRANSFORM,
"post_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": nn.Sequential(
convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
),
"post_tensor_transform": _MOCK_TRANSFORM
},
),
(
{
"to_tensor_transform": _MOCK_TRANSFORM,
"post_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": nn.Sequential(
convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
),
"post_tensor_transform": _MOCK_TRANSFORM
},
),
],
)
| 34.705607 | 110 | 0.569678 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock
import pytest
import torch
from torch import nn
from flash.core.data.data_source import DefaultDataKeys
from flash.core.data.transforms import ApplyToKeys, kornia_collate, KorniaParallelTransforms, merge_transforms
from flash.core.data.utils import convert_to_modules
class TestApplyToKeys:
    """Tests for ``ApplyToKeys``, which applies a transform to selected keys of a sample dict."""

    @pytest.mark.parametrize(
        "sample, keys, expected", [
            ({
                DefaultDataKeys.INPUT: "test"
            }, DefaultDataKeys.INPUT, "test"),
            (
                {
                    DefaultDataKeys.INPUT: "test_a",
                    DefaultDataKeys.TARGET: "test_b"
                },
                [DefaultDataKeys.INPUT, DefaultDataKeys.TARGET],
                ["test_a", "test_b"],
            ),
            ({
                "input": "test"
            }, "input", "test"),
            ({
                "input": "test_a",
                "target": "test_b"
            }, ["input", "target"], ["test_a", "test_b"]),
            # Extra keys in the sample are ignored.
            ({
                "input": "test_a",
                "target": "test_b",
                "extra": "..."
            }, ["input", "target"], ["test_a", "test_b"]),
            # Requested keys missing from the sample are dropped from the call.
            ({
                "input": "test_a",
                "target": "test_b"
            }, ["input", "target", "extra"], ["test_a", "test_b"]),
            # expected=None: no requested key present, transform must not run.
            ({
                "target": "..."
            }, "input", None),
        ]
    )
    def test_forward(self, sample, keys, expected):
        """The wrapped transform receives exactly the values selected by ``keys``."""
        transform = Mock(return_value=["out"] * len(keys))
        ApplyToKeys(keys, transform)(sample)
        if expected is not None:
            transform.assert_called_once_with(expected)
        else:
            transform.assert_not_called()

    @pytest.mark.parametrize(
        "transform, expected", [
            (
                ApplyToKeys(DefaultDataKeys.INPUT, torch.nn.ReLU()),
                "ApplyToKeys(keys=<DefaultDataKeys.INPUT: 'input'>, transform=ReLU())",
            ),
            (
                ApplyToKeys([DefaultDataKeys.INPUT, DefaultDataKeys.TARGET], torch.nn.ReLU()),
                "ApplyToKeys(keys=[<DefaultDataKeys.INPUT: 'input'>, "
                "<DefaultDataKeys.TARGET: 'target'>], transform=ReLU())",
            ),
            (ApplyToKeys("input", torch.nn.ReLU()), "ApplyToKeys(keys='input', transform=ReLU())"),
            (
                ApplyToKeys(["input", "target"], torch.nn.ReLU()),
                "ApplyToKeys(keys=['input', 'target'], transform=ReLU())",
            ),
        ]
    )
    def test_repr(self, transform, expected):
        """repr() reports both the configured keys and the wrapped transform."""
        assert repr(transform) == expected
@pytest.mark.parametrize("with_params", [True, False])
def test_kornia_parallel_transforms(with_params):
    """KorniaParallelTransforms pipes every sample through each transform in order.

    When the first transform records ``_params``, those same params are passed
    on its second call, so both samples receive an identical augmentation.
    """
    samples = [torch.rand(1, 3, 10, 10), torch.rand(1, 3, 10, 10)]
    transformed_sample = torch.rand(1, 3, 10, 10)

    transform_a = Mock(spec=torch.nn.Module, return_value=transformed_sample)
    transform_b = Mock(spec=torch.nn.Module)

    if with_params:
        transform_a._params = "test"

    parallel_transforms = KorniaParallelTransforms(transform_a, transform_b)
    parallel_transforms(samples)

    # Each transform runs once per sample.
    assert transform_a.call_count == 2
    assert transform_b.call_count == 2

    if with_params:
        # The recorded params are replayed on the second call.
        assert transform_a.call_args_list[0][0][1] == transform_a.call_args_list[1][0][1] == "test"

    assert torch.allclose(transform_a.call_args_list[0][0][0], samples[0])
    assert torch.allclose(transform_a.call_args_list[1][0][0], samples[1])
    # transform_b receives transform_a's output, not the original samples.
    assert torch.allclose(transform_b.call_args_list[0][0][0], transformed_sample)
    assert torch.allclose(transform_b.call_args_list[1][0][0], transformed_sample)
def test_kornia_collate():
    """kornia_collate stacks per-sample inputs into a batch and gathers targets into a tensor."""
    samples = [
        {
            DefaultDataKeys.INPUT: torch.zeros(1, 3, 10, 10),
            DefaultDataKeys.TARGET: 1
        },
        {
            DefaultDataKeys.INPUT: torch.zeros(1, 3, 10, 10),
            DefaultDataKeys.TARGET: 2
        },
        {
            DefaultDataKeys.INPUT: torch.zeros(1, 3, 10, 10),
            DefaultDataKeys.TARGET: 3
        },
    ]

    result = kornia_collate(samples)
    assert torch.all(result[DefaultDataKeys.TARGET] == torch.tensor([1, 2, 3]))
    # Three (1, 3, 10, 10) inputs collapse into a single (3, 3, 10, 10) batch.
    assert list(result[DefaultDataKeys.INPUT].shape) == [3, 3, 10, 10]
    assert torch.allclose(result[DefaultDataKeys.INPUT], torch.zeros(1))
# Shared sentinel transform used to build the merge fixtures below.
_MOCK_TRANSFORM = Mock()


@pytest.mark.parametrize(
    "base_transforms, additional_transforms, expected_result",
    [
        # Disjoint stages: the merge is a plain union.
        (
            {
                "to_tensor_transform": _MOCK_TRANSFORM
            },
            {
                "post_tensor_transform": _MOCK_TRANSFORM
            },
            {
                "to_tensor_transform": _MOCK_TRANSFORM,
                "post_tensor_transform": _MOCK_TRANSFORM
            },
        ),
        # Same stage on both sides: the two transforms are chained in a Sequential.
        (
            {
                "to_tensor_transform": _MOCK_TRANSFORM
            },
            {
                "to_tensor_transform": _MOCK_TRANSFORM
            },
            {
                "to_tensor_transform": nn.Sequential(
                    convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
                )
            },
        ),
        # Mixed: overlapping stage is chained, the extra stage is carried over.
        (
            {
                "to_tensor_transform": _MOCK_TRANSFORM
            },
            {
                "to_tensor_transform": _MOCK_TRANSFORM,
                "post_tensor_transform": _MOCK_TRANSFORM
            },
            {
                "to_tensor_transform": nn.Sequential(
                    convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
                ),
                "post_tensor_transform": _MOCK_TRANSFORM
            },
        ),
        (
            {
                "to_tensor_transform": _MOCK_TRANSFORM,
                "post_tensor_transform": _MOCK_TRANSFORM
            },
            {
                "to_tensor_transform": _MOCK_TRANSFORM
            },
            {
                "to_tensor_transform": nn.Sequential(
                    convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
                ),
                "post_tensor_transform": _MOCK_TRANSFORM
            },
        ),
    ],
)
def test_merge_transforms(base_transforms, additional_transforms, expected_result):
    """merge_transforms unions the stage dicts and chains transforms for shared stages."""
    result = merge_transforms(base_transforms, additional_transforms)
    assert result.keys() == expected_result.keys()
    for key in result.keys():
        if result[key] == _MOCK_TRANSFORM:
            assert expected_result[key] == _MOCK_TRANSFORM
        elif isinstance(result[key], nn.Sequential):
            # Sequentials can't be compared directly; compare length and wrapped callables.
            assert isinstance(expected_result[key], nn.Sequential)
            assert len(result[key]) == len(expected_result[key])
            for module, expected_module in zip(result[key], expected_result[key]):
                assert module.func == expected_module.func
| 2,588 | 1,956 | 90 |
b46e8e2aacdd145262f76dc3ea2ddc2a0d8354ca | 873 | bzl | Python | rules.bzl | DataDog/bazel-mypy-integration | b35f54dcb68db4edc98826c2dc831665e566fc5c | [
"MIT"
] | 59 | 2019-12-02T11:06:55.000Z | 2022-01-23T00:39:20.000Z | rules.bzl | DataDog/bazel-mypy-integration | b35f54dcb68db4edc98826c2dc831665e566fc5c | [
"MIT"
] | 38 | 2019-12-08T05:51:32.000Z | 2022-03-28T21:24:19.000Z | rules.bzl | DataDog/bazel-mypy-integration | b35f54dcb68db4edc98826c2dc831665e566fc5c | [
"MIT"
] | 28 | 2020-02-11T02:28:17.000Z | 2022-03-31T02:34:21.000Z | MyPyStubsInfo = provider(
fields = {
"srcs": ".pyi stub files",
},
)
mypy_stubs = rule(
implementation = _mypy_stubs_impl,
attrs = {
"srcs": attr.label_list(
allow_empty = False,
mandatory = True,
doc = "TODO(Jonathon)",
allow_files = [".pyi"],
),
},
)
# Provider exposing the .pyi stub sources of a mypy_stubs target to
# downstream rules.
MyPyStubsInfo = provider(
    fields = {
        "srcs": ".pyi stub files",
    },
)
def _mypy_stubs_impl(ctx):
    """Implementation of mypy_stubs: forwards the .pyi sources via MyPyStubsInfo and PyInfo."""
    # Flatten the File objects from every label in srcs.
    stub_files = [
        f
        for target in ctx.attr.srcs
        for f in target.files.to_list()
    ]
    stubs_info = MyPyStubsInfo(srcs = ctx.attr.srcs)
    py_info = PyInfo(
        # TODO(Jonathon): Stub files only for Py3 right?
        has_py2_only_sources = False,
        has_py3_only_sources = True,
        uses_shared_libraries = False,
        transitive_sources = depset(direct = stub_files),
    )
    return [stubs_info, py_info]
mypy_stubs = rule(
    implementation = _mypy_stubs_impl,
    doc = "Groups .pyi type-stub files so they can be consumed by mypy-checked py targets.",
    attrs = {
        "srcs": attr.label_list(
            allow_empty = False,
            mandatory = True,
            # Replaces the old "TODO(Jonathon)" placeholder doc.
            doc = "The .pyi stub files that make up this stub library.",
            allow_files = [".pyi"],
        ),
    },
)
| 504 | 0 | 23 |
9a554f3d5fecf47993aafe33bd1584e1008ea14f | 4,491 | py | Python | synch/convert.py | luolin0313/synch | 1a4a1262c20a85fe06f2cb40291f0a066572518b | [
"Apache-2.0"
] | null | null | null | synch/convert.py | luolin0313/synch | 1a4a1262c20a85fe06f2cb40291f0a066572518b | [
"Apache-2.0"
] | null | null | null | synch/convert.py | luolin0313/synch | 1a4a1262c20a85fe06f2cb40291f0a066572518b | [
"Apache-2.0"
] | 1 | 2020-09-28T01:37:00.000Z | 2020-09-28T01:37:00.000Z | import sqlparse
from sqlparse.sql import Function, Identifier
from sqlparse.sql import Token as SQLToken
from sqlparse.sql import TokenList
from sqlparse.tokens import Keyword, Token, Whitespace
| 43.182692 | 100 | 0.515253 | import sqlparse
from sqlparse.sql import Function, Identifier
from sqlparse.sql import Token as SQLToken
from sqlparse.sql import TokenList
from sqlparse.tokens import Keyword, Token, Whitespace
class SqlConvert:
    """Rewrite a MySQL ALTER-style DDL statement into its ClickHouse equivalent.

    Works on the sqlparse token stream: MySQL column types are mapped to
    ClickHouse types, ``CHANGE`` becomes ``RENAME COLUMN ... TO ...``, and the
    table name is qualified with the target schema when it isn't already.
    """

    # MySQL type name -> ClickHouse type name. "decimal" keeps a {digits}
    # placeholder filled in from the original precision/scale argument.
    _type_mapping = {
        "date": "Date",
        "datetime": "DateTime",
        "bool": "UInt8",
        "float": "Float32",
        "double": "Float64",
        "varchar": "String",
        "decimal": "Decimal{digits}",
        "tinyint": "Int8",
        "int": "Int32",
        "smallint": "Int16",
        "mediumint": "Int32",
        "bigint": "Int64",
        "timestamp": "DateTime",
        "char": "FixedString",
        "bigchar": "String",
    }

    @classmethod
    def _add_token(cls, schema, parsed, tokens, token_list):
        """Recursively copy ``tokens`` into ``token_list``, rewriting MySQL DDL
        tokens into their ClickHouse form. Returns the (mutated) token_list."""
        for i, token in enumerate(tokens):
            # ADD without an explicit COLUMN keyword: ClickHouse requires "ADD COLUMN".
            if token.value == "add" and parsed.token_next(i)[1].value != "column":
                token_list.tokens.append(token)
                token_list.tokens.append(SQLToken(Whitespace, " "))
                token_list.tokens.append(SQLToken(Keyword, "column"))
            # CHANGE old new -> RENAME COLUMN old TO new; stops here because the
            # rest of the statement (the type) is dropped for a rename.
            elif token.value == "change":
                token_list.tokens.append(SQLToken(Keyword, "rename"))
                token_list.tokens.append(SQLToken(Whitespace, " "))
                token_list.tokens.append(SQLToken(Keyword, "column"))
                token_list.tokens.append(SQLToken(Whitespace, " "))
                tokens = parsed.token_next(i)[1].tokens
                token_list.tokens.append(tokens[0])
                token_list.tokens.append(SQLToken(Whitespace, " "))
                token_list.tokens.append(SQLToken(Keyword, "to"))
                token_list.tokens.append(SQLToken(Whitespace, " "))
                token_list.tokens.append(tokens[2])
                return token_list
            # MODIFY -> "MODIFY COLUMN".
            elif token.value == "modify":
                token_list.tokens.append(token)
                token_list.tokens.append(SQLToken(Whitespace, " "))
                token_list.tokens.append(SQLToken(Keyword, "column"))
            elif isinstance(token, (Function, Identifier)) or token.ttype == Token.Name.Builtin:
                # Bare builtin type name (e.g. "int"): map it directly.
                if token.ttype == Token.Name.Builtin:
                    token_list.tokens.append(SQLToken(Keyword, cls._type_mapping.get(token.value)))
                elif isinstance(token, Identifier):
                    # Identifier right after TABLE is the table name; qualify it
                    # with the schema unless it is already schema-qualified.
                    if parsed.token_prev(i - 1)[1].value == "table":
                        value = token.value
                        table = (
                            f"{schema}.{token.value}" if len(value.split(".")) == 1 else token.value
                        )
                        token_list.tokens.append(SQLToken(Keyword, table))
                    elif len(token.tokens) == 1:
                        # Single-token identifier: map it if it names a type,
                        # otherwise copy it through unchanged.
                        real_token = cls._type_mapping.get(token.value)
                        token_list.tokens.append(
                            SQLToken(Keyword, real_token) if real_token else token
                        )
                    else:
                        # Compound identifier: recurse into its children.
                        cls._add_token(schema, parsed, token.tokens, token_list)
                else:
                    # Function-shaped token, i.e. a parameterized type such as
                    # decimal(10, 2) or varchar(255): split name and argument list.
                    len_token = len(token.tokens)
                    if len_token == 3:
                        identifier, _, digits = token.tokens
                    else:
                        identifier, digits = token.tokens
                    if identifier.value == "decimal":
                        # Keep precision/scale: Decimal(10, 2).
                        token_list.tokens.append(
                            SQLToken(
                                Keyword,
                                cls._type_mapping.get(identifier.value).format(digits=digits),
                            )
                        )
                    else:
                        # Other parameterized types drop their size argument.
                        token_list.tokens.append(
                            SQLToken(Keyword, cls._type_mapping.get(identifier.value))
                        )
            # Nullability qualifiers have no direct ClickHouse counterpart here.
            elif token.value in ["null", "not null"]:
                continue
            # Collapse runs of whitespace created by the rewrites above.
            elif token.ttype == Whitespace and i > 0 and token_list.tokens[-1].ttype == Whitespace:
                continue
            else:
                token_list.tokens.append(token)
        return token_list

    @classmethod
    def to_clickhouse(cls, schema: str, query: str):
        """
        parse ddl query
        :param schema:
        :param query:
        :return:
        """
        token_list = TokenList()
        parsed = sqlparse.parse(query)[0]
        token_list = cls._add_token(schema, parsed, parsed.tokens, token_list)
        return str(token_list)
| 3,403 | 869 | 23 |
4c3c26fd6d7f8c7c96dd471cbbf5a45a7d7e4c61 | 60,553 | py | Python | cogs/clan_battle.py | hibibol/clanbattle-management | 89a8041ed26e5fc1ca117534737516e58fd77583 | [
"MIT"
] | 1 | 2021-06-07T21:11:05.000Z | 2021-06-07T21:11:05.000Z | cogs/clan_battle.py | hibibol/clanbattle-management | 89a8041ed26e5fc1ca117534737516e58fd77583 | [
"MIT"
] | 4 | 2021-07-05T12:58:30.000Z | 2021-08-06T11:18:11.000Z | cogs/clan_battle.py | hibibol/clanbattle-management | 89a8041ed26e5fc1ca117534737516e58fd77583 | [
"MIT"
] | null | null | null | import asyncio
from collections import defaultdict
from datetime import datetime, timedelta
from functools import reduce
from logging import getLogger
from typing import List, Optional, Tuple
from operator import sub
import discord
from discord import colour
from discord.channel import TextChannel
from discord.errors import Forbidden, HTTPException
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.context import SlashContext
from discord_slash.model import SlashCommandOptionType
from discord_slash.utils.manage_commands import create_choice, create_option
from cogs.cbutil.attack_type import ATTACK_TYPE_DICT, AttackType
from cogs.cbutil.boss_status_data import AttackStatus
from cogs.cbutil.clan_battle_data import ClanBattleData, update_clanbattledata
from cogs.cbutil.clan_data import ClanData
from cogs.cbutil.form_data import create_form_data
from cogs.cbutil.gss import get_sheet_values, get_worksheet_list
from cogs.cbutil.log_data import LogData
from cogs.cbutil.operation_type import (OPERATION_TYPE_DESCRIPTION_DICT,
OperationType)
from cogs.cbutil.player_data import CarryOver, PlayerData
from cogs.cbutil.reserve_data import ReserveData
from cogs.cbutil.sqlite_util import SQLiteUtil
from cogs.cbutil.util import calc_carry_over_time, get_damage, select_from_list
from setting import (BOSS_COLOURS, EMOJI_ATTACK, EMOJI_CANCEL, EMOJI_CARRYOVER,
EMOJI_LAST_ATTACK, EMOJI_MAGIC, EMOJI_NO, EMOJI_PHYSICS,
EMOJI_REVERSE, EMOJI_SETTING, EMOJI_TASK_KILL, EMOJI_YES,
GUILD_IDS, JST, TREASURE_CHEST)
logger = getLogger(__name__)
| 42.854211 | 156 | 0.623817 | import asyncio
from collections import defaultdict
from datetime import datetime, timedelta
from functools import reduce
from logging import getLogger
from typing import List, Optional, Tuple
from operator import sub
import discord
from discord import colour
from discord.channel import TextChannel
from discord.errors import Forbidden, HTTPException
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.context import SlashContext
from discord_slash.model import SlashCommandOptionType
from discord_slash.utils.manage_commands import create_choice, create_option
from cogs.cbutil.attack_type import ATTACK_TYPE_DICT, AttackType
from cogs.cbutil.boss_status_data import AttackStatus
from cogs.cbutil.clan_battle_data import ClanBattleData, update_clanbattledata
from cogs.cbutil.clan_data import ClanData
from cogs.cbutil.form_data import create_form_data
from cogs.cbutil.gss import get_sheet_values, get_worksheet_list
from cogs.cbutil.log_data import LogData
from cogs.cbutil.operation_type import (OPERATION_TYPE_DESCRIPTION_DICT,
OperationType)
from cogs.cbutil.player_data import CarryOver, PlayerData
from cogs.cbutil.reserve_data import ReserveData
from cogs.cbutil.sqlite_util import SQLiteUtil
from cogs.cbutil.util import calc_carry_over_time, get_damage, select_from_list
from setting import (BOSS_COLOURS, EMOJI_ATTACK, EMOJI_CANCEL, EMOJI_CARRYOVER,
EMOJI_LAST_ATTACK, EMOJI_MAGIC, EMOJI_NO, EMOJI_PHYSICS,
EMOJI_REVERSE, EMOJI_SETTING, EMOJI_TASK_KILL, EMOJI_YES,
GUILD_IDS, JST, TREASURE_CHEST)
logger = getLogger(__name__)
class ClanBattle(commands.Cog):
    def __init__(self, bot: commands.Bot):
        """Store the bot reference; clan-battle state is loaded later in on_ready."""
        self.bot = bot
        # Flipped to True once on_ready finishes loading the clan battle data.
        self.ready = False
    @commands.Cog.listener()
    async def on_ready(self):
        """Load clan battle data once the bot is connected, then mark the cog ready."""
        logger.info("loading ClanBattle data...")
        asyncio.create_task(update_clanbattledata())
        # Wait until the boss data has finished loading.
        while not ClanBattleData.boudaries:
            await asyncio.sleep(1)
        self.clan_data: defaultdict[int, Optional[ClanData]] = SQLiteUtil.load_clandata_dict()
        self.clan_battle_data = ClanBattleData()
        self.ready = True
        logger.info("ClanBattle Management Ready!")
@cog_ext.cog_slash(
description="凸管理するメンバーを追加します。オプションがない場合、コマンドを実行した人が追加されます。",
options=[
create_option(
name="role",
description="追加したいロール(ロールがついているメンバーをまとめて追加できます)",
option_type=SlashCommandOptionType.ROLE,
required=False
),
create_option(
name="member",
description="追加したいメンバー",
option_type=SlashCommandOptionType.USER,
required=False
)
],
guild_ids=GUILD_IDS
)
async def add(self, ctx: SlashContext, role: Optional[discord.Role] = None, member: Optional[discord.User] = None) -> None:
clan_data = self.clan_data[ctx.channel.category_id]
if clan_data is None:
await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
return
player_data_list: List[PlayerData] = []
if role is None and member is None:
player_data = PlayerData(ctx.author_id)
clan_data.player_data_dict[ctx.author_id] = player_data
player_data_list.append(player_data)
if member is not None:
player_data = PlayerData(member.id)
clan_data.player_data_dict[member.id] = player_data
player_data_list.append(player_data)
if role is not None:
for member in role.members:
player_data = PlayerData(member.id)
clan_data.player_data_dict[member.id] = PlayerData(member.id)
player_data_list.append(player_data)
await ctx.send(f"{len(player_data_list)}名追加します。")
await self._update_remain_attack_message(clan_data)
if player_data_list:
SQLiteUtil.register_playerdata(clan_data, player_data_list)
@cog_ext.cog_slash(
description="凸管理するメンバーを削除します。オプションがない場合、コマンドを実行した人が削除されます。",
options=[
create_option(
name="member",
description="削除したいメンバー",
option_type=SlashCommandOptionType.USER,
required=False
),
create_option(
name="all",
description="全てのメンバーを削除します。",
option_type=SlashCommandOptionType.BOOLEAN,
required=False
)
],
guild_ids=GUILD_IDS
)
async def remove(self, ctx: SlashContext, member: Optional[discord.User] = None, all: Optional[bool] = None):
clan_data = self.clan_data[ctx.channel.category_id]
if clan_data is None:
await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
return
player_data_list: List[PlayerData] = []
if member is None and all is None:
if player_data := clan_data.player_data_dict.get(ctx.author.id):
player_data_list.append(player_data)
else:
return await ctx.send(f"{ctx.author.display_name}さんは凸管理対象ではありません。")
if member:
if player_data := clan_data.player_data_dict.get(member.id):
player_data_list.append(player_data)
else:
return await ctx.send(f"{member.display_name}さんは凸管理対象ではありません。")
if all:
player_data_list += list(clan_data.player_data_dict.values())
await ctx.send(f"{len(player_data_list)}名のデータを削除します。")
for player_data in player_data_list:
for i in range(5):
clan_data.reserve_list[i] = [
reserve_data for reserve_data in clan_data.reserve_list[i]
if reserve_data.player_data.user_id != player_data.user_id]
SQLiteUtil.delete_playerdata(clan_data, player_data)
del clan_data.player_data_dict[player_data.user_id]
await self._update_remain_attack_message(clan_data)
await ctx.channel.send("削除が完了しました。")
@cog_ext.cog_slash(
description="凸管理のセットアップを実施します。",
options=[
create_option(
name="category_channel_name",
description="凸管理を行うカテゴリーチャンネルの名前",
option_type=SlashCommandOptionType.STRING,
required=False
)
],
guild_ids=GUILD_IDS
)
async def setup(self, ctx: SlashContext, category_channel_name: str = ""):
"""凸管理用チャンネルを作成するセットアップを実施する"""
await ctx.send("チャンネルのセットアップを実施します")
if not category_channel_name:
category_channel_name = "凸管理"
try:
category = await ctx.guild.create_category(category_channel_name)
summary_channel = await category.create_text_channel("まとめ")
boss_channels: List[TextChannel] = []
for i in range(5):
boss_channel = await category.create_text_channel(f"ボス{i+1}")
boss_channels.append(boss_channel)
remain_attack_channel = await category.create_text_channel("残凸把握板")
reserve_channel = await category.create_text_channel("凸ルート共有板")
command_channel = await category.create_text_channel("コマンド入力板")
except Forbidden:
await ctx.send("チャンネル作成の権限を付与してください。")
return
except HTTPException as e:
await ctx.send(f"チャンネルの作成に失敗しました\n```\n{e.response}\n```")
return
clan_data = ClanData(
ctx.guild_id,
category.id,
[boss_channel.id for boss_channel in boss_channels],
remain_attack_channel.id,
reserve_channel.id,
command_channel.id,
summary_channel.id
)
logger.info(f"New ClanData is created: guild={ctx.guild.name}")
self.clan_data[category.id] = clan_data
await self._initialize_progress_messages(clan_data, 1)
await self._initialize_reserve_message(clan_data)
await self._initialize_remain_attack_message(clan_data)
SQLiteUtil.register_clandata(clan_data)
await ctx.channel.send("セットアップが完了しました")
@cog_ext.cog_slash(
description="周回数を変更します",
options=[
create_option(
name="lap",
description="周回数",
option_type=SlashCommandOptionType.INTEGER,
required=True
)
],
guild_ids=GUILD_IDS
)
async def lap(self, ctx: SlashContext, lap: int):
"""周回数を設定する"""
clan_data = self.clan_data[ctx.channel.category_id]
if clan_data is None:
await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
return
await ctx.send(content=f"周回数を{lap}に設定します")
# ボス状態に関するすべてのデータを削除する
clan_data.initialize_progress_data()
SQLiteUtil.delete_old_data(clan_data, 999)
await self._initialize_progress_messages(clan_data, lap)
await self._update_remain_attack_message(clan_data)
SQLiteUtil.update_clandata(clan_data)
@cog_ext.cog_slash(
description="ボスに凸宣言した時の処理を実施します",
guild_ids=GUILD_IDS,
options=[
create_option(
name="member",
description="処理対象のメンバー(メンションで指定)",
option_type=SlashCommandOptionType.USER,
required=True
),
create_option(
name="attack_type",
description="凸方法を指定します。",
option_type=SlashCommandOptionType.STRING,
required=True,
choices=[
create_choice(
name=f"{EMOJI_PHYSICS} 物理凸",
value=EMOJI_PHYSICS,
),
create_choice(
name=f"{EMOJI_MAGIC} 魔法凸",
value=EMOJI_MAGIC
),
create_choice(
name=f"{EMOJI_CARRYOVER} 持ち越し凸",
value=EMOJI_CARRYOVER
)
]
),
create_option(
name="lap",
description="周回数 (指定がない場合は今現在のボスが指定されます)",
option_type=SlashCommandOptionType.INTEGER,
required=False
),
create_option(
name="boss_number",
description="ボス番号 (各ボスの進行用チャンネルで実行する場合は指定する必要がありません)",
option_type=SlashCommandOptionType.INTEGER,
required=False
)
]
)
async def attack_declare(self, ctx: SlashContext, member: discord.User, attack_type: str, lap: Optional[int] = None, boss_number: Optional[int] = None):
"""コマンドで凸宣言を実施した時の処理を行う"""
checked = await self.check_command_arguments(ctx, member, lap, boss_number)
if not checked:
return
clan_data, player_data, lap, boss_index = checked
attack_type_v = ATTACK_TYPE_DICT.get(attack_type)
if attack_type_v is AttackType.CARRYOVER and not player_data.carry_over_list:
return ctx.send("持ち越しを所持していません。凸宣言をキャンセルします。")
await ctx.send(content=f"{member.display_name}の凸を{attack_type_v.value}で{lap}周目{boss_index+1}ボスに宣言します")
await self._attack_declare(clan_data, player_data, attack_type_v, lap, boss_index)
@cog_ext.cog_slash(
description="ボスに凸した時の処理を実施します。",
guild_ids=GUILD_IDS,
options=[
create_option(
name="member",
description="処理対象のメンバー(メンションで指定)",
option_type=SlashCommandOptionType.USER,
required=True
),
create_option(
name="lap",
description="周回数 (指定がない場合は今現在のボスが指定されます)",
option_type=SlashCommandOptionType.INTEGER,
required=False
),
create_option(
name="boss_number",
description="ボス番号 (各ボスの進行用チャンネルで実行する場合は指定する必要がありません)",
option_type=SlashCommandOptionType.INTEGER,
required=False
),
create_option(
name="damage",
description="与えたダメージ",
option_type=SlashCommandOptionType.INTEGER,
required=False
)
]
)
async def attack_fin(
self, ctx: SlashContext,
member: discord.User,
lap: Optional[int] = None,
boss_number: Optional[int] = None,
damage: Optional[int] = None
):
"""ボスに凸した時の処理を実施する"""
cheked = await self.check_command_arguments(ctx, member, lap, boss_number)
if not cheked:
return
clan_data, player_data, lap, boss_index = cheked
await ctx.send(content=f"{member.display_name}の凸を{lap}周目{boss_index+1}ボスに消化します")
boss_status_data = clan_data.boss_status_data[lap][boss_index]
attack_status_index = boss_status_data.get_attack_status_index(player_data, False)
if attack_status_index is None:
return await ctx.send("凸宣言がされていません。処理を中断します。")
attack_status = boss_status_data.attack_players[attack_status_index]
if damage:
attack_status.damage = damage
await self._attack_boss(attack_status, clan_data, lap, boss_index, ctx.channel, ctx.author)
# return ctx.channel.send("処理が完了しました。")
@cog_ext.cog_slash(
description="ボスを討伐した時の処理を実施します。",
guild_ids=GUILD_IDS,
options=[
create_option(
name="member",
description="処理対象のメンバー(メンションで指定)",
option_type=SlashCommandOptionType.USER,
required=True
),
create_option(
name="lap",
description="周回数 (指定がない場合は今現在のボスが指定されます)",
option_type=SlashCommandOptionType.INTEGER,
required=False
),
create_option(
name="boss_number",
description="ボス番号 (各ボスの進行用チャンネルで実行する場合は指定する必要がありません)",
option_type=SlashCommandOptionType.INTEGER,
required=False
)
]
)
async def defeat_boss(
self, ctx: SlashContext,
member: discord.User,
lap: Optional[int] = None,
boss_number: Optional[int] = None
):
"""コマンドからボスを討伐した時の処理を実施する。"""
checked = await self.check_command_arguments(ctx, member, lap, boss_number)
if not checked:
return
clan_data, player_data, lap, boss_index = checked
await ctx.send(content=f"{member.display_name}の凸で{boss_index+1}ボスを討伐します")
boss_status_data = clan_data.boss_status_data[lap][boss_index]
attack_status_index = boss_status_data.get_attack_status_index(player_data, False)
if attack_status_index is None:
return await ctx.send("凸宣言がされていません。処理を中断します。")
attack_status = boss_status_data.attack_players[attack_status_index]
await self._last_attack_boss(
attack_status=attack_status,
clan_data=clan_data,
lap=lap,
boss_index=boss_index,
channel=ctx.channel,
user=ctx.author
)
# return ctx.channel.send("処理が完了しました。")
@cog_ext.cog_slash(
description="元に戻す処理を実施します。",
guild_ids=GUILD_IDS,
options=[
create_option(
name="member",
description="処理対象のメンバー(メンションで指定)",
option_type=SlashCommandOptionType.USER,
required=True
)
]
)
async def undo(self, ctx: SlashContext, member: discord.User):
"""コマンドでもとに戻すときの処理を実施する"""
clan_data = self.clan_data[ctx.channel.category_id]
if clan_data is None:
await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
return
player_data = clan_data.player_data_dict.get(member.id)
if not player_data:
return await ctx.send(f"{member.display_name}さんは凸管理のメンバーに指定されていません。")
if not player_data.log:
return await ctx.send("元に戻す内容がありませんでした")
log_data = player_data.log[-1]
await ctx.send(
f"{member.display_name}の{log_data.boss_index+1}ボスに対する"
f"`{OPERATION_TYPE_DESCRIPTION_DICT[log_data.operation_type]}`を元に戻します。")
await self._undo(clan_data, player_data, log_data)
@cog_ext.cog_slash(
name="resend",
description="進行用のメッセージを再送します。",
guild_ids=GUILD_IDS,
options=[
create_option(
name="lap",
description="周回数 (指定がない場合は今現在のボスが指定されます)",
option_type=SlashCommandOptionType.INTEGER,
required=False
),
create_option(
name="boss_number",
description="ボス番号 (各ボスの進行用チャンネルで実行する場合は指定する必要がありません)",
option_type=SlashCommandOptionType.INTEGER,
required=False
)
]
)
async def resend_progress_message(
self, ctx: SlashContext,
lap: Optional[int] = None,
boss_number: Optional[int] = None
):
checked = await self.check_command_arguments(ctx, None, lap, boss_number)
if not checked:
return
clan_data, _, lap, boss_index = checked
await ctx.send(f"{lap}週目{boss_index+1}の進行用メッセージを再送します")
await self._delete_progress_message(clan_data, lap, boss_index)
await self._send_new_progress_message(clan_data, lap, boss_index)
@cog_ext.cog_slash(
description="持越時間を登録します。",
guild_ids=GUILD_IDS,
options=[
create_option(
name="time",
description="持越秒数",
option_type=SlashCommandOptionType.INTEGER,
required=True
)
]
)
async def set_cot(self, ctx: SlashContext, time: int):
clan_data = self.clan_data[ctx.channel.category_id]
if clan_data is None:
await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
return
if player_data := clan_data.player_data_dict.get(ctx.author.id):
if not player_data.carry_over_list:
return await ctx.send("持ち越しを持っていません。")
co_index = 0
await ctx.send(f"持ち越し時間{time}秒を設定します。")
if len(player_data.carry_over_list) > 1:
co_index = await select_from_list(
self.bot, ctx.channel, ctx.author, player_data.carry_over_list,
f"{ctx.author.mention} 持ち越しが二つ以上発生しています。以下から持ち越し時間を登録したい持ち越しを選択してください")
player_data.carry_over_list[co_index].carry_over_time = time
await self._update_remain_attack_message(clan_data)
await ctx.channel.send("持ち越し時間の設定が完了しました。")
else:
return await ctx.send(f"{ctx.author.display_name}さんは凸管理対象ではありません。")
    @cog_ext.cog_slash(
        description="日程調査用のアンケートフォームを表示します。",
        guild_ids=GUILD_IDS,
    )
    async def form(self, ctx: SlashContext):
        """Slash command: show (creating if stale) the schedule-survey form URL.

        When ``form_data.check_update()`` reports the stored form is outdated,
        a new Google form is created and persisted; otherwise the existing
        per-user pre-filled URL is returned.
        """
        clan_data = self.clan_data[ctx.channel.category_id]
        if clan_data is None:
            await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
            return
        if clan_data.form_data.check_update():
            await ctx.send(content="アンケートフォームを新規作成しています。")
            # True when no form existed before (first creation vs. refresh).
            new_flag = True if len(clan_data.form_data.form_url) == 0 else False
            async with ctx.channel.typing():
                title = f"{datetime.now(JST).month}月 " + ctx.guild.name + " 日程調査"
                form_data_dict = await create_form_data(title)
                clan_data.form_data.set_from_form_data_dict(form_data_dict)
            # ctx.send can no longer be used after the long-running work above,
            # so this branch duplicates the URL-sending code on purpose.
            form_url = clan_data.form_data.create_form_url(ctx.author.display_name, ctx.author.id)
            await ctx.channel.send(f"{ctx.author.display_name} さん専用のURLです。\n{form_url}")
            if new_flag:
                SQLiteUtil.register_form_data(clan_data)
            else:
                SQLiteUtil.update_form_data(clan_data)
        else:
            form_url = clan_data.form_data.create_form_url(ctx.author.display_name, ctx.author.id)
            await ctx.send(f"{ctx.author.display_name} さん専用のURLです。\n{form_url}")
@cog_ext.cog_slash(
description="参戦時間を読み込みます。(手動更新用)",
guild_ids=GUILD_IDS,
options=[
create_option(
name="day",
description="何日目のデータを読み込むかを指定する",
option_type=SlashCommandOptionType.INTEGER,
required=True
)
]
)
async def load_time(self, ctx: SlashContext, day: int):
clan_data = self.clan_data[ctx.channel.category_id]
if clan_data is None:
await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
return
if not clan_data.form_data.form_url:
return await ctx.send("日程調査用のアンケートフォームが作成されていません。")
if day < 0 or day > 5:
return await ctx.send(content="1から5までの数字を指定してください")
await ctx.send(f"{day}日目の参戦時間を読み込みます")
await self._load_gss_data(clan_data, day)
return await ctx.channel.send("読み込みが完了しました")
@cog_ext.cog_slash(
description="日程調査の回答シートを出力します",
guild_ids=GUILD_IDS
)
async def form_sheet(self, ctx: SlashContext):
clan_data = self.clan_data[ctx.channel.category_id]
if clan_data is None:
await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
return
if not clan_data.form_data.form_url:
return await ctx.send("日程調査用のアンケートフォームが作成されていません。")
return await ctx.send(clan_data.form_data.sheet_url)
    async def _undo(self, clan_data: ClanData, player_data: PlayerData, log_data: LogData):
        """Revert the player's most recent logged operation.

        For an attack declaration, the pending AttackStatus is removed; for a
        (last-)attack, the player's snapshot is restored from the log, the
        attack is marked un-attacked, and (for a last attack) the boss'
        ``beated`` flag is rolled back.  Messages and SQLite rows are updated
        to match.
        """
        boss_index = log_data.boss_index
        log_type = log_data.operation_type
        boss_status_data = clan_data.boss_status_data[log_data.lap][boss_index]
        if log_type is OperationType.ATTACK_DECLAR:
            # Undo a declaration: drop the un-attacked AttackStatus entry.
            if (attack_index := boss_status_data.get_attack_status_index(player_data, False)) is not None:
                attack_status = boss_status_data.attack_players[attack_index]
                SQLiteUtil.delete_attackstatus(
                    clan_data=clan_data, lap=log_data.lap, boss_index=boss_index, attack_status=attack_status)
                del boss_status_data.attack_players[attack_index]
                del player_data.log[-1]
                await self._update_progress_message(clan_data, log_data.lap, boss_index)
        if log_type is OperationType.ATTACK or log_type is OperationType.LAST_ATTACK:
            # Undo a completed attack: restore the pre-attack player snapshot.
            if (attack_index := boss_status_data.get_attack_status_index(player_data, True)) is not None:
                attack_status = boss_status_data.attack_players[attack_index]
                player_data.from_dict(log_data.player_data)
                attack_status.attacked = False
                SQLiteUtil.reverse_attackstatus(clan_data, log_data.lap, boss_index, attack_status)
                if log_type is OperationType.LAST_ATTACK:
                    # Roll the boss back to its pre-kill state.
                    boss_status_data.beated = log_data.beated
                    SQLiteUtil.update_boss_status_data(clan_data, boss_index, boss_status_data)
                del player_data.log[-1]
                await self._update_progress_message(clan_data, log_data.lap, boss_index)
                await self._update_remain_attack_message(clan_data)
                SQLiteUtil.update_playerdata(clan_data, player_data)
                # Carry-overs may have been restored by from_dict; re-persist them.
                SQLiteUtil.reregister_carryover_data(clan_data, player_data)
    async def _delete_reserve_by_attack(self, clan_data: ClanData, attack_status: AttackStatus, boss_idx: int):
        """Remove reservations fulfilled (or made obsolete) by an attack.

        First deletes the matching reservation for this boss (NOTE(review):
        the loop keeps overwriting ``reserve_idx``, so the *last* matching
        reservation is the one removed — confirm this is intended).  Then,
        if the player has used all attacks or emptied their carry-overs, all
        of their now-pointless reservations on every boss are purged.
        """
        reserve_idx = -1
        for i, reserve_data in enumerate(clan_data.reserve_list[boss_idx]):
            if reserve_data.carry_over == attack_status.carry_over and reserve_data.attack_type == attack_status.attack_type\
                    and reserve_data.player_data == attack_status.player_data:
                reserve_idx = i
        if reserve_idx != -1:
            SQLiteUtil.delete_reservedata(clan_data, boss_idx, clan_data.reserve_list[boss_idx][reserve_idx])
            del clan_data.reserve_list[boss_idx][reserve_idx]
            await self._update_reserve_message(clan_data, boss_idx)
        # When the player has finished all attacks, or has exhausted their
        # carry-overs, delete the reservations that relate to those.
        player_data = attack_status.player_data
        attack_comp = player_data.magic_attack + player_data.physics_attack == 3
        co_comp = len(player_data.carry_over_list) == 0
        if attack_comp or co_comp:
            for i in range(5):
                old_reserve_set = set(clan_data.reserve_list[i])
                # Reservations that can no longer be fulfilled by this player.
                finished_reserve_set = {
                    reserve_data
                    for reserve_data in clan_data.reserve_list[i]
                    if (attack_comp and reserve_data.player_data.user_id == player_data.user_id and not reserve_data.carry_over) or (
                        co_comp and reserve_data.player_data.user_id == player_data.user_id and reserve_data.carry_over)
                }
                diff_set = old_reserve_set - finished_reserve_set
                if finished_reserve_set:
                    for reserve_data in finished_reserve_set:
                        SQLiteUtil.delete_reservedata(clan_data, i, reserve_data)
                    # NOTE(review): rebuilding from a set discards the list
                    # order; _create_reserve_message re-sorts by damage anyway.
                    clan_data.reserve_list[i] = list(diff_set)
                    await self._update_reserve_message(clan_data, i)
    def _create_progress_message(
        self, clan_data: ClanData, lap: int, boss_index: int, guild: discord.Guild
    ) -> discord.Embed:
        """Build the progress embed for one boss of one lap.

        Completed attacks are listed first (each reducing the displayed HP);
        pending declarations follow, rendered against the remaining HP.
        Members who have left the guild are skipped.
        """
        attacked_list: List[str] = []
        attack_list: List[str] = []
        boss_status_data = clan_data.boss_status_data[lap][boss_index]
        # Show biggest hits first.
        boss_status_data.attack_players.sort(key=lambda x: x.damage, reverse=True)
        total_damage: int = 0
        current_hp: int = boss_status_data.max_hp
        for attack_status in boss_status_data.attack_players:
            if attack_status.attacked:
                user = guild.get_member(attack_status.player_data.user_id)
                if user is None:
                    continue
                attacked_list.append(
                    f"({attack_status.attack_type.value}済み) {'{:,}'.format(attack_status.damage)}万 {user.display_name}"
                )
                current_hp -= attack_status.damage
        for attack_status in boss_status_data.attack_players:
            if not attack_status.attacked:
                user = guild.get_member(attack_status.player_data.user_id)
                if user is None:
                    continue
                attack_list.append(attack_status.create_attack_status_txt(user.display_name, current_hp))
                total_damage += attack_status.damage
        progress_title = f"[{lap}周目] {ClanBattleData.boss_names[boss_index]}"
        if boss_status_data.beated:
            progress_title += " **討伐済み**"
        else:
            # Remaining / max HP plus the sum of declared (pending) damage.
            progress_title += f" {'{:,}'.format(current_hp)}万/{'{:,}'.format(boss_status_data.max_hp)}万"\
                f" 合計 {'{:,}'.format(total_damage)}万"
        progress_description = "\n".join(attacked_list) + "\n" + "\n".join(attack_list)
        pr_embed = discord.Embed(
            title=progress_title,
            description=progress_description,
            colour=BOSS_COLOURS[boss_index]
        )
        if boss_status_data.beated:
            pr_embed.set_thumbnail(url=TREASURE_CHEST)
        else:
            pr_embed.set_thumbnail(url=ClanBattleData.icon[boss_index])
        return pr_embed
async def _initialize_progress_messages(
self, clan_data: ClanData, lap: int
) -> None:
clan_data.progress_message_ids[lap] = [0, 0, 0, 0, 0]
clan_data.initialize_boss_status_data(lap)
SQLiteUtil.register_progress_message_id(clan_data, lap)
SQLiteUtil.register_all_boss_status_data(clan_data, lap)
for i in range(5):
await self._send_new_progress_message(clan_data, lap, i)
    async def _send_new_progress_message(
        self, clan_data: ClanData, lap: int, boss_index: int
    ) -> None:
        """Post a new progress message for *lap*/*boss_index* and wire up its reactions.

        Also lazily creates the per-lap summary messages in the summary
        channel the first time this lap is seen.
        """
        guild = self.bot.get_guild(clan_data.guild_id)
        channel = self.bot.get_channel(clan_data.boss_channel_ids[boss_index])
        progress_embed = self._create_progress_message(clan_data, lap, boss_index, guild)
        progress_message: discord.Message = await channel.send(embed=progress_embed)
        clan_data.progress_message_ids[lap][boss_index] = progress_message.id
        # Reaction buttons handled by on_raw_reaction_add.
        await progress_message.add_reaction(EMOJI_PHYSICS)
        await progress_message.add_reaction(EMOJI_MAGIC)
        await progress_message.add_reaction(EMOJI_CARRYOVER)
        await progress_message.add_reaction(EMOJI_ATTACK)
        await progress_message.add_reaction(EMOJI_LAST_ATTACK)
        await progress_message.add_reaction(EMOJI_REVERSE)
        SQLiteUtil.update_progress_message_id(clan_data, lap)
        # If there are no summary messages for this lap yet, send them now.
        if lap not in clan_data.summary_message_ids:
            clan_data.summary_message_ids[lap] = [0, 0, 0, 0, 0]
            for i in range(5):
                progress_embed = self._create_progress_message(clan_data, lap, i, guild)
                summary_channel = self.bot.get_channel(clan_data.summary_channel_id)
                sum_progress_message = await summary_channel.send(embed=progress_embed)
                clan_data.summary_message_ids[lap][i] = sum_progress_message.id
            SQLiteUtil.register_summary_message_id(clan_data, lap)
async def _update_progress_message(self, clan_data: ClanData, lap: int, boss_idx: int) -> None:
"""進行用のメッセージを更新する"""
channel = self.bot.get_channel(clan_data.boss_channel_ids[boss_idx])
progress_message = await channel.fetch_message(clan_data.progress_message_ids[lap][boss_idx])
progress_embed = self._create_progress_message(clan_data, lap, boss_idx, channel.guild)
await progress_message.edit(embed=progress_embed)
# まとめチャンネルの進行用メッセージを更新する
channel = self.bot.get_channel(clan_data.summary_channel_id)
progress_message = await channel.fetch_message(clan_data.summary_message_ids[lap][boss_idx])
await progress_message.edit(embed=progress_embed)
async def _delete_progress_message(self, clan_data: ClanData, lap: int, boss_idx: int) -> None:
"""進行用のメッセージを削除する"""
channel = self.bot.get_channel(clan_data.boss_channel_ids[boss_idx])
try:
progress_message: discord.Message = await channel.fetch_message(clan_data.progress_message_ids[lap][boss_idx])
await progress_message.delete()
except (discord.NotFound, discord.Forbidden):
return
    async def _delete_carry_over_by_attack(
        self,
        clan_data: ClanData,
        attack_status: AttackStatus,
        channel: discord.TextChannel,
        user: discord.User
    ) -> bool:
        """Consume one of the player's carry-overs for a carry-over attack.

        持ち越しでの凸時に凸宣言を持ち越しを削除する。

        Returns
        ---------
        bool
            Whether the carry-over was deleted successfully (False aborts
            the attack and also pops the just-appended log entry).
        """
        carry_over_index = 0
        if not attack_status.player_data.carry_over_list:
            # No carry-over to spend: undo the log entry made by the caller.
            del attack_status.player_data.log[-1]
            await channel.send(f"{user.mention} 持ち越しを所持していません。キャンセルします。")
            return False
        if len(attack_status.player_data.carry_over_list) > 1:
            try:
                carry_over_index = await select_from_list(
                    self.bot,
                    channel,
                    user,
                    attack_status.player_data.carry_over_list,
                    f"{user.mention} 持ち越しが二つ以上発生しています。以下から使用した持ち越しを選択してください"
                )
            except TimeoutError:
                del attack_status.player_data.log[-1]
                return False
        # An index error occasionally occurs here; not reproducible so far,
        # hence the defensive bounds check before deleting.
        if carry_over_index < len(attack_status.player_data.carry_over_list):
            SQLiteUtil.delete_carryover_data(
                clan_data, attack_status.player_data, attack_status.player_data.carry_over_list[carry_over_index])
            del attack_status.player_data.carry_over_list[carry_over_index]
        else:
            logger.error(f"Index Error: carry_over_index={carry_over_index}"
                         f", length={len(attack_status.player_data.carry_over_list)}")
            await channel.send("エラーが発生しました")
            return False
        return True
    async def _attack_boss(
        self,
        attack_status: AttackStatus,
        clan_data: ClanData,
        lap: int,
        boss_index: int,
        channel: discord.TextChannel,
        user: discord.User
    ) -> None:
        """Finalize a (non-killing) attack on the boss.

        Appends an undo log entry, consumes a carry-over if this was a
        carry-over attack (aborting on failure), marks the attack done,
        persists it, and refreshes the progress / remaining-attack /
        reservation messages.
        """
        # Snapshot the player state so the attack can be undone later.
        attack_status.player_data.log.append(
            LogData(
                OperationType.ATTACK, lap, boss_index, attack_status.player_data.to_dict()
            )
        )
        if attack_status.attack_type is AttackType.CARRYOVER:
            if not await self._delete_carry_over_by_attack(
                clan_data=clan_data,
                attack_status=attack_status,
                channel=channel,
                user=user
            ):
                # Carry-over consumption failed; the log entry was already popped.
                return
        else:
            # Normal attack: count it against the player's daily attacks.
            attack_status.update_attack_log()
        attack_status.attacked = True
        SQLiteUtil.update_attackstatus(clan_data, lap, boss_index, attack_status)
        SQLiteUtil.update_playerdata(clan_data, attack_status.player_data)
        await self._update_progress_message(clan_data, lap, boss_index)
        await self._update_remain_attack_message(clan_data)
        await self._delete_reserve_by_attack(clan_data, attack_status, boss_index)
    async def _attack_declare(
        self, clan_data: ClanData, player_data: PlayerData, attack_type: AttackType, lap: int, boss_index: int
    ) -> None:
        """Register an attack declaration (intent to attack) on a boss.

        Appends a pending AttackStatus, refreshes the progress message,
        persists the status, and logs the declaration for undo.
        """
        attack_status = AttackStatus(
            player_data, attack_type, attack_type is AttackType.CARRYOVER
        )
        clan_data.boss_status_data[lap][boss_index].attack_players.append(attack_status)
        await self._update_progress_message(clan_data, lap, boss_index)
        SQLiteUtil.register_attackstatus(clan_data, lap, boss_index, attack_status)
        player_data.log.append(LogData(
            operation_type=OperationType.ATTACK_DECLAR, lap=lap, boss_index=boss_index
        ))
    async def _last_attack_boss(
        self,
        attack_status: AttackStatus,
        clan_data: ClanData,
        lap: int,
        boss_index: int,
        channel: discord.TextChannel,
        user: discord.User
    ) -> None:
        """Finalize the killing blow on a boss.

        Marks the boss defeated, grants the attacker a carry-over (max 3),
        and lazily creates the next lap's state and progress message.
        """
        boss_status_data = clan_data.boss_status_data[lap][boss_index]
        if boss_status_data.beated:
            return await channel.send("既に討伐済みのボスです")
        # Snapshot player state (and the pre-kill beated flag) for undo.
        attack_status.player_data.log.append(LogData(
            OperationType.LAST_ATTACK,
            lap,
            boss_index,
            attack_status.player_data.to_dict(),
            boss_status_data.beated)
        )
        # NOTE(review): attacked is set before the carry-over consumption
        # below; if that consumption aborts, attacked stays True in memory
        # (though never persisted) — confirm this is intended.
        attack_status.attacked = True
        if attack_status.attack_type is AttackType.CARRYOVER:
            if not await self._delete_carry_over_by_attack(
                clan_data=clan_data,
                attack_status=attack_status,
                channel=channel,
                user=user
            ):
                return
        else:
            attack_status.update_attack_log()
            SQLiteUtil.update_playerdata(clan_data, attack_status.player_data)
        # A kill grants a carry-over, capped at three held at once.
        carry_over = CarryOver(attack_status.attack_type, boss_index)
        if len(attack_status.player_data.carry_over_list) < 3:
            attack_status.player_data.carry_over_list.append(carry_over)
            SQLiteUtil.register_carryover_data(clan_data, attack_status.player_data, carry_over)
        boss_status_data.beated = True
        await self._update_progress_message(clan_data, lap, boss_index)
        SQLiteUtil.update_attackstatus(clan_data, lap, boss_index, attack_status)
        SQLiteUtil.update_boss_status_data(clan_data, boss_index, boss_status_data)
        next_lap = lap + 1
        # If the next lap has no progress-message bookkeeping yet, create it.
        if next_lap not in clan_data.progress_message_ids:
            clan_data.progress_message_ids[next_lap] = [0, 0, 0, 0, 0]
            clan_data.initialize_boss_status_data(next_lap)
            SQLiteUtil.register_progress_message_id(clan_data, next_lap)
            SQLiteUtil.register_all_boss_status_data(clan_data, next_lap)
        # If this boss has no progress message in the next lap yet, send one.
        if clan_data.progress_message_ids[next_lap][boss_index] == 0:
            await self._send_new_progress_message(clan_data, next_lap, boss_index)
        await self._update_remain_attack_message(clan_data)
        await self._delete_reserve_by_attack(clan_data, attack_status, boss_index)
def _create_reserve_message(self, clan_data: ClanData, boss_index: int, guild: discord.Guild) -> discord.Embed:
"""予約状況を表示するためのメッセージを作成する"""
resreve_message_title = f"**{ClanBattleData.boss_names[boss_index]}** の 予約状況"
reserve_message_list = []
clan_data.reserve_list[boss_index].sort(key=lambda x: x.damage, reverse=True)
for reserve_data in clan_data.reserve_list[boss_index]:
user = guild.get_member(reserve_data.player_data.user_id)
if user is None:
continue
reserve_message_list.append(reserve_data.create_reserve_txt(user.display_name))
rs_embed = discord.Embed(
title=resreve_message_title,
description="\n".join(reserve_message_list),
colour=BOSS_COLOURS[boss_index]
)
rs_embed.set_thumbnail(url=ClanBattleData.icon[boss_index])
return rs_embed
    async def _initialize_reserve_message(self, clan_data: ClanData) -> None:
        """Wipe the reservation channel and post five fresh reservation messages."""
        guild = self.bot.get_guild(clan_data.guild_id)
        reserve_channel = self.bot.get_channel(clan_data.reserve_channel_id)
        # Clear out old messages (up to 100) so only the new ones remain.
        async for old_message in reserve_channel.history(limit=100):
            try:
                await old_message.delete()
            except Exception:
                # Best-effort cleanup: deletion may fail on permissions etc.
                pass
        for i in range(5):
            reserve_message_embed = self._create_reserve_message(clan_data, i, guild)
            reserve_message = await reserve_channel.send(embed=reserve_message_embed)
            clan_data.reserve_message_ids[i] = reserve_message.id
            # Reaction buttons handled by on_raw_reaction_add.
            await reserve_message.add_reaction(EMOJI_PHYSICS)
            await reserve_message.add_reaction(EMOJI_MAGIC)
            await reserve_message.add_reaction(EMOJI_SETTING)
            await reserve_message.add_reaction(EMOJI_CANCEL)
async def _update_reserve_message(self, clan_data: ClanData, boss_idx: int) -> None:
"""予約状況を表示するメッセージを更新する"""
channel = self.bot.get_channel(clan_data.reserve_channel_id)
reserve_message = await channel.fetch_message(clan_data.reserve_message_ids[boss_idx])
reserve_embed = self._create_reserve_message(clan_data, boss_idx, channel.guild)
await reserve_message.edit(embed=reserve_embed)
def _create_remain_attaack_message(self, clan_data: ClanData) -> discord.Embed:
""""残凸状況を表示するメッセージを作成する"""
remain_attack_message_list = [
[], [], [], []
]
remain_attack_co = [
[], [], [], []
]
today = (datetime.now(JST) - timedelta(hours=5)).strftime('%m月%d日')
embed = discord.Embed(
title=f"{today} の残凸状況",
colour=colour.Colour.orange()
)
sum_remain_attack = 0
guild = self.bot.get_guild(clan_data.guild_id)
for player_data in clan_data.player_data_dict.values():
user = guild.get_member(player_data.user_id)
if user is None:
continue
txt = "- " + player_data.create_txt(user.display_name)
sum_attack = player_data.magic_attack + player_data.physics_attack
sum_remain_attack += 3 - sum_attack
if player_data.carry_over_list:
remain_attack_co[sum_attack].append(txt)
else:
remain_attack_message_list[sum_attack].append(txt)
for i in range(4):
content = "\n".join(remain_attack_message_list[i])
if content:
embed.add_field(
name=f"残{3-i}凸",
value=f"```md\n{content.replace('_', '_')}\n```",
inline=False
)
content_co = "\n".join(remain_attack_co[i])
if content_co:
if len(content_co) < 1014:
embed.add_field(
name=f"残{3-i}凸(持ち越し)",
value=f"```md\n{content_co.replace('_', '_')}\n```",
inline=False
)
else:
center = len(remain_attack_co[i]) // 2 + len(remain_attack_co[i]) % 2
content_co_list = [
"\n".join(remain_attack_co[i][:center]),
"\n".join(remain_attack_co[i][center:])
]
suffix = ["A", "B"]
for j in range(2):
embed.add_field(
name=f"残{3-i}凸(持ち越し{suffix[j]})",
value=f"```md\n{content_co_list[j]}\n```",
inline=False
)
embed.set_footer(
text=f"{clan_data.get_latest_lap()}周目 {sum_remain_attack}/{len(clan_data.player_data_dict)*3}"
)
return embed
async def _update_remain_attack_message(self, clan_data: ClanData) -> None:
"""残凸状況を表示するメッセージを更新する"""
remain_attack_channel = self.bot.get_channel(clan_data.remain_attack_channel_id)
remain_attack_message = await remain_attack_channel.fetch_message(clan_data.remain_attack_message_id)
remain_attack_embed = self._create_remain_attaack_message(clan_data)
await remain_attack_message.edit(embed=remain_attack_embed)
async def _initialize_remain_attack_message(self, clan_data: ClanData) -> None:
"""残凸状況を表示するメッセージの初期化を行う"""
remain_attack_embed = self._create_remain_attaack_message(clan_data)
remain_attack_channel = self.bot.get_channel(clan_data.remain_attack_channel_id)
remain_attack_message = await remain_attack_channel.send(embed=remain_attack_embed)
clan_data.remain_attack_message_id = remain_attack_message.id
await remain_attack_message.add_reaction(EMOJI_TASK_KILL)
    async def initialize_clandata(self, clan_data: ClanData) -> None:
        """Reset the clan's per-day attack state (daily rollover).

        Clears every player's attacks and carry-overs, wipes all
        reservations, and — if a schedule form exists and the clan battle is
        running — reloads today's participation times from the sheet.
        """
        for player_data in clan_data.player_data_dict.values():
            player_data.initialize_attack()
            SQLiteUtil.update_playerdata(clan_data, player_data)
            SQLiteUtil.delete_all_carryover_data(clan_data, player_data)
        clan_data.reserve_list = [
            [], [], [], [], []
        ]
        SQLiteUtil.delete_all_reservedata(clan_data)
        # Delete laps older than two laps back.
        # TODO: restore this once lap tracking is reliable.
        # latest_lap = max(clan_data.boss_status_data.keys())
        # old_laps = list(lap for lap in clan_data.boss_status_data.keys() if latest_lap - 1 > lap)
        # for old_lap in old_laps:
        #     del clan_data.boss_status_data[old_lap]
        #     del clan_data.progress_message_ids[old_lap]
        #     del clan_data.summary_message_ids[old_lap]
        #     SQLiteUtil.delete_old_data(clan_data, latest_lap-1)
        if clan_data.form_data.form_url:
            now = datetime.now(JST)
            if ClanBattleData.start_time <= now <= ClanBattleData.end_time:
                # Day number within the clan-battle period (1-based).
                diff = now - ClanBattleData.start_time
                day = diff.days + 1
                await self._load_gss_data(clan_data, day)
    async def _get_reserve_info(
        self, clan_data: ClanData, player_data: PlayerData, user: discord.User
    ) -> Optional[Tuple[int, str, bool]]:
        """Interactively collect reservation details (damage, memo, carry-over flag).

        Prompts the user in the command channel; returns None on timeout.
        """
        setting_content_damage = f"{user.mention} 想定ダメージを送信してください\nスペース後にコメントを付けられます (例: `600 60s討伐`)"
        setting_content_co = f"{user.mention} 持ち越しの予約ですか?"
        setting_message_cancel = f"{user.mention} タイムアウトのため予約設定をキャンセルしました"
        setting_content_fin = "予約設定を受け付けました"
        command_channnel = self.bot.get_channel(clan_data.command_channel_id)
        await command_channnel.send(content=setting_content_damage)
        try:
            # Wait for a message from this user that parses as a damage value.
            damage_message: discord.Message = await self.bot.wait_for(
                'message', timeout=60.0,
                check=lambda m: m.author == user and get_damage(m.content)
            )
        except asyncio.TimeoutError:
            await command_channnel.send(setting_message_cancel)
            return None
        damage, memo = get_damage(damage_message.content)
        if player_data.carry_over_list:
            # Player holds a carry-over: ask whether this reservation uses it.
            setting_co_message = await command_channnel.send(content=setting_content_co)
            await setting_co_message.add_reaction(EMOJI_YES)
            await setting_co_message.add_reaction(EMOJI_NO)
            try:
                # NOTE(review): this rebinds `user` from the wait_for result,
                # and the check does not pin the reaction to
                # setting_co_message — any reaction by the user anywhere
                # satisfies it. Confirm this is intended.
                reaction_co, user = await self.bot.wait_for(
                    'reaction_add', timeout=60.0, check=lambda reaction, reaction_user: reaction_user == user
                )
            except asyncio.TimeoutError:
                await command_channnel.send(setting_message_cancel)
                return None
            if str(reaction_co.emoji) == EMOJI_YES:
                carry_over = True
            else:
                carry_over = False
        else:
            carry_over = False
        await command_channnel.send(content=setting_content_fin)
        return damage, memo, carry_over
async def _check_date_update(self, clan_data: ClanData):
"""日付が更新されているかどうかをチェックする"""
today = (datetime.now(JST) - timedelta(hours=5)).date()
if clan_data.date != today:
clan_data.date = today
await self.initialize_clandata(clan_data)
await self._initialize_reserve_message(clan_data)
await self._initialize_remain_attack_message(clan_data)
SQLiteUtil.update_clandata(clan_data)
    async def _load_gss_data(self, clan_data: ClanData, day: int):
        """Load participation-time limits for *day* from the form's answer sheet.

        Looks for the response worksheet under several locale-dependent
        titles. Assumed row layout: row[2] = Discord user id, row[3..7] =
        day 1..5 time strings — TODO confirm against the form definition.
        """
        if not clan_data.form_data.sheet_url:
            return
        ws_titles = await get_worksheet_list(clan_data.form_data.sheet_url)
        # Google names the response sheet differently per locale.
        candidate_words = ["フォームの回答 1", "第 1 张表单回复", "フォームの回答"]
        for candidate_word in candidate_words:
            if candidate_word in ws_titles:
                sheet_data = await get_sheet_values(
                    clan_data.form_data.sheet_url,
                    candidate_word
                )
                # Skip the header row; match rows to registered players.
                for row in sheet_data[1:]:
                    player_data = clan_data.player_data_dict.get(int(row[2]))
                    if player_data:
                        player_data.raw_limit_time_text = row[2+day]
    @commands.Cog.listener()
    async def on_message(self, message: discord.Message):
        """Register declared-attack damage from a plain message in a boss channel.

        A message that parses as "<damage> [memo]" from a player with an open
        declaration updates that declaration's damage and memo.
        """
        if not self.ready:
            return
        if message.author.id == self.bot.user.id:
            return
        if message.channel.category is None:
            return
        category_channel_id = message.channel.category.id
        clan_data = self.clan_data[category_channel_id]
        if clan_data is None:
            return
        if message.channel.id not in clan_data.boss_channel_ids:
            return
        boss_index = clan_data.boss_channel_ids.index(message.channel.id)
        player_data = clan_data.player_data_dict.get(message.author.id)
        if not player_data:
            return
        damage_data = get_damage(message.content)
        if damage_data is None:
            return
        # Attach the damage to the most recent lap where the player has an
        # open (undeclared-yet-unattacked) declaration.
        lap_list = list(clan_data.progress_message_ids.keys())
        lap_list.sort(reverse=True)
        for lap in lap_list:
            boss_status_data = clan_data.boss_status_data[lap][boss_index]
            if (attack_status_index := boss_status_data.get_attack_status_index(
                    player_data, False)) is not None:
                attack_status = boss_status_data.attack_players[attack_status_index]
                attack_status.damage = damage_data[0]
                attack_status.memo = damage_data[1]
                await self._update_progress_message(clan_data, lap, boss_index)
                SQLiteUtil.update_attackstatus(clan_data, lap, boss_index, attack_status)
                return
    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):
        """Central reaction dispatcher for reservations, declarations, attacks and undo.

        Determines whether the reacted message is a reservation message or a
        per-boss progress message, then dispatches on the emoji pressed.
        The user's reaction is removed afterwards so it can be pressed again.
        """
        if not self.ready:
            return
        if payload.user_id == self.bot.user.id:
            return
        channel = self.bot.get_channel(payload.channel_id)
        if channel.category is None:
            return
        category_channel_id = channel.category.id
        clan_data = self.clan_data[category_channel_id]
        if clan_data is None:
            return
        if clan_data.reserve_channel_id == payload.channel_id:
            # Reaction on one of the five reservation messages.
            boss_index = clan_data.get_reserve_boss_index(payload.message_id)
            if boss_index is None:
                return
            lap = 0
            reserve_flag = True
        else:
            # Otherwise it must be a progress message in a boss channel.
            boss_index = clan_data.get_boss_index_from_channel_id(payload.channel_id)
            if boss_index is None:
                return
            lap = clan_data.get_lap_from_message_id(payload.message_id, boss_index)
            if lap is None:
                return
            reserve_flag = False
        player_data = clan_data.player_data_dict.get(payload.user_id)
        if player_data is None:
            return
        async def remove_reaction():
            # Remove the user's reaction (closure over `user`, bound below
            # before any call site).
            message = await channel.fetch_message(payload.message_id)
            await message.remove_reaction(payload.emoji, user)
        user = self.bot.get_user(payload.user_id)
        attack_type = ATTACK_TYPE_DICT.get(str(payload.emoji))
        if attack_type:
            # Physics / magic / carry-over emoji: a reservation (on a
            # reservation message) or an attack declaration (on a progress one).
            await self._check_date_update(clan_data)
            if reserve_flag:
                reserve_data = ReserveData(
                    player_data, attack_type
                )
                clan_data.reserve_list[boss_index].append(reserve_data)
                await self._update_reserve_message(clan_data, boss_index)
                SQLiteUtil.register_reservedata(clan_data, boss_index, reserve_data)
            else:
                if not any(
                    attack_status.player_data.user_id == payload.user_id and not attack_status.attacked  # skip if the user already has an open declaration
                    for attack_status in clan_data.boss_status_data[lap][boss_index].attack_players
                ) and (
                    attack_type in {AttackType.MAGIC, AttackType.PHYSICS} or (
                        attack_type is AttackType.CARRYOVER and player_data.carry_over_list  # ignore carry-over declarations when no carry-over is held
                    )
                ):
                    await self._attack_declare(clan_data, player_data, attack_type, lap, boss_index)
            return await remove_reaction()
        elif str(payload.emoji) == EMOJI_ATTACK:
            # Complete the user's open declaration as a normal attack.
            for attack_status in clan_data.boss_status_data[lap][boss_index].attack_players:
                if attack_status.player_data.user_id == payload.user_id and not attack_status.attacked:
                    await self._attack_boss(attack_status, clan_data, lap, boss_index, channel, user)
                    break
            return await remove_reaction()
        elif str(payload.emoji) == EMOJI_LAST_ATTACK:
            # Complete the user's open declaration as the killing blow.
            for attack_status in clan_data.boss_status_data[lap][boss_index].attack_players:
                if attack_status.player_data.user_id == payload.user_id and not attack_status.attacked:
                    await self._last_attack_boss(attack_status, clan_data, lap, boss_index, channel, user)
                    break
            return await remove_reaction()
        # Cancel one of the reacting user's reservations (they choose which
        # one if several exist).
        elif str(payload.emoji) == EMOJI_CANCEL and reserve_flag:
            user_reserve_data_list = [
                (i, reserve_data) for i, reserve_data in enumerate(clan_data.reserve_list[boss_index])
                if reserve_data.player_data.user_id == payload.user_id
            ]
            if user_reserve_data_list:
                rd_list_index = 0
                if len(user_reserve_data_list) > 1:
                    command_channel = self.bot.get_channel(clan_data.command_channel_id)
                    user_selected_index = await select_from_list(
                        self.bot, command_channel, user, [rd[1] for rd in user_reserve_data_list],
                        f"{user.mention} 予約が複数あります。以下から削除をしたい予約を選んでください。"
                    )
                    if user_selected_index is None:
                        return await remove_reaction()
                    else:
                        rd_list_index = user_selected_index
                reserve_index = user_reserve_data_list[rd_list_index][0]
                SQLiteUtil.delete_reservedata(clan_data, boss_index, clan_data.reserve_list[boss_index][reserve_index])
                del clan_data.reserve_list[boss_index][reserve_index]
                await self._update_reserve_message(clan_data, boss_index)
            await remove_reaction()
        elif str(payload.emoji) == EMOJI_SETTING and reserve_flag:
            # Edit the details (damage / memo / carry-over) of a reservation.
            user_reserve_data_list = [
                reserve_data for reserve_data in clan_data.reserve_list[boss_index]
                if reserve_data.player_data.user_id == payload.user_id]
            if user_reserve_data_list:
                reserve_index = 0
                if len(user_reserve_data_list) > 1:
                    command_channel = self.bot.get_channel(clan_data.command_channel_id)
                    user_selected_index = await select_from_list(
                        self.bot, command_channel, user, user_reserve_data_list,
                        f"{user.mention} 予約が複数あります。以下から予約設定をしたい予約を選んでください。"
                    )
                    if user_selected_index is None:
                        return await remove_reaction()
                    else:
                        reserve_index = user_selected_index
                reserve_info = await self._get_reserve_info(clan_data, player_data, user)
                if reserve_info:
                    reserve_data = user_reserve_data_list[reserve_index]
                    reserve_data.set_reserve_info(reserve_info)
                    await self._update_reserve_message(clan_data, boss_index)
                    SQLiteUtil.update_reservedata(clan_data, boss_index, reserve_data)
            return await remove_reaction()
        elif str(payload.emoji) == EMOJI_REVERSE:
            # Undo the user's most recent logged operation; it must belong to
            # the boss/lap of the message they reacted on.
            if not player_data.log:
                return await remove_reaction()
            log_data = player_data.log[-1]
            log_index = log_data.boss_index
            log_lap = log_data.lap
            if log_index != boss_index or log_lap != lap:
                txt = f"<@{payload.user_id}> すでに{log_lap}周目{log_index+1}ボスに凸しています。"\
                    f"先に<#{clan_data.boss_channel_ids[log_index]}>で{EMOJI_REVERSE}を押してください"
                channel = self.bot.get_channel(payload.channel_id)
                await channel.send(txt, delete_after=30)
                return await remove_reaction()
            await self._undo(clan_data, player_data, log_data)
            return await remove_reaction()
@commands.Cog.listener("on_raw_reaction_add")
async def set_task_kill(self, payload: discord.RawReactionActionEvent):
"""タスキルをした場合の設定を行う"""
if not self.ready:
return
if payload.user_id == self.bot.user.id:
return
if str(payload.emoji) != EMOJI_TASK_KILL:
return
channel = self.bot.get_channel(payload.channel_id)
if channel.category is None:
return
category_channel_id = channel.category.id
clan_data = self.clan_data[category_channel_id]
if clan_data is None:
return
if payload.message_id != clan_data.remain_attack_message_id:
return
if player_data := clan_data.player_data_dict.get(payload.user_id):
player_data.task_kill = True
await self._update_remain_attack_message(clan_data)
SQLiteUtil.update_playerdata(clan_data, player_data)
@commands.Cog.listener("on_raw_reaction_remove")
async def unset_task_kill(self, payload: discord.RawReactionActionEvent):
"""タスキルをした場合の設定を行う"""
if not self.ready:
return
if payload.user_id == self.bot.user.id:
return
if str(payload.emoji) != EMOJI_TASK_KILL:
return
channel = self.bot.get_channel(payload.channel_id)
if channel.category is None:
return
category_channel_id = channel.category.id
clan_data = self.clan_data[category_channel_id]
if clan_data is None:
return
if payload.message_id != clan_data.remain_attack_message_id:
return
if player_data := clan_data.player_data_dict.get(payload.user_id):
player_data.task_kill = False
await self._update_remain_attack_message(clan_data)
SQLiteUtil.update_playerdata(clan_data, player_data)
    async def check_command_arguments(
        self, ctx: SlashContext,
        member: Optional[discord.User],
        lap: Optional[int] = None,
        boss_number: Optional[int] = None
    ) -> Optional[Tuple[ClanData, Optional[PlayerData], int, int]]:
        """Validate common slash-command arguments.

        Resolves the clan from the channel category, the boss index (from
        ``boss_number`` or from the channel), the lap (defaulting to the
        latest), and ``member``'s PlayerData when a member is given.
        Sends an error message and returns None on any failure.
        """
        clan_data = self.clan_data[ctx.channel.category_id]
        if clan_data is None:
            await ctx.send(content="凸管理を行うカテゴリーチャンネル内で実行してください")
            return
        if not boss_number:
            # No explicit boss number: infer it from the channel invoked in.
            boss_index = clan_data.get_boss_index_from_channel_id(ctx.channel_id)
            if boss_index is None:
                await ctx.send("ボス番号を指定してください")
                return
        elif not (0 < boss_number < 6):
            await ctx.send("ボス番号が不適です。1から5までの整数を指定してください。")
            return
        else:
            boss_index = boss_number - 1
        latest_lap = clan_data.get_latest_lap(boss_index)
        if lap is None:
            lap = latest_lap
        elif latest_lap < lap:
            # Cannot reference a lap that has not been reached yet.
            await ctx.send("不正な周回数です")
            return
        if member:
            player_data = clan_data.player_data_dict.get(member.id)
            if not player_data:
                await ctx.send(f"{member.display_name}は凸管理対象ではありません。")
                return
        else:
            player_data = None
        return clan_data, player_data, lap, boss_index
@cog_ext.cog_slash(
description="持ち越し時間を計算します。",
options=[
create_option(
name="boss_hp",
description="ボスの残りHP。引き算も出来ます。(例: `1000-500`)",
option_type=SlashCommandOptionType.STRING,
required=True
),
create_option(
name="damage",
description="討伐する際のダメージ。",
option_type=SlashCommandOptionType.INTEGER,
required=True
)
],
guild_ids=GUILD_IDS
)
async def calc_cot(
self, ctx: SlashContext, boss_hp: str, damage: int
):
boss_hp = boss_hp.replace(" ", "").replace(" ", "").replace("ー", "")
boss_hp_int = reduce(sub, [int(number) for number in boss_hp.split("-")])
if boss_hp_int > damage:
await ctx.send(f"ボスを討伐出来ません。\nボスHP: {boss_hp_int}\nダメージ: {damage}")
return
if boss_hp_int < 1:
await ctx.send(f"討伐済みです\nボスHP: {boss_hp_int}")
cot = calc_carry_over_time(boss_hp_int, damage)
await ctx.send(f"ボスHP: {boss_hp_int}\nダメージ: {damage}\n持ち越し秒数: {cot}秒")
def setup(bot):
    """discord.py extension entry point: attach the ClanBattle cog to *bot*."""
    cog = ClanBattle(bot)
    bot.add_cog(cog)
| 16,993 | 47,596 | 46 |
406ac3a0994759eb44a396f798b588513f0a2419 | 30,236 | py | Python | pysnmp-with-texts/Juniper-IP-PROFILE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/Juniper-IP-PROFILE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/Juniper-IP-PROFILE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Juniper-IP-PROFILE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-IP-PROFILE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:03:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
juniMibs, = mibBuilder.importSymbols("Juniper-MIBs", "juniMibs")
JuniEnable, JuniName, JuniSetMap = mibBuilder.importSymbols("Juniper-TC", "JuniEnable", "JuniName", "JuniSetMap")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Counter64, Bits, Gauge32, ObjectIdentity, Integer32, TimeTicks, Counter32, MibIdentifier, IpAddress, ModuleIdentity, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Counter64", "Bits", "Gauge32", "ObjectIdentity", "Integer32", "TimeTicks", "Counter32", "MibIdentifier", "IpAddress", "ModuleIdentity", "NotificationType")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
# --- Module identity: OID, revision history and contact information. ---
juniIpProfileMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26))
juniIpProfileMIB.setRevisions(('2006-09-08 10:26', '2005-09-13 17:21', '2004-10-05 14:04', '2003-09-24 15:33', '2002-10-11 13:20', '2001-01-24 20:06', '2000-05-08 00:00', '1999-08-25 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: juniIpProfileMIB.setRevisionsDescriptions(('Added support for Blocking multicast sources on IP Interfaces - juniIpProfileBlockMulticastSources.', 'Added support for Flow Stats a.k.a. J-Flow for IP Interfaces by including juniIpProfileFlowStats.', 'Added support for IP filter options all for IP Interfaces by including juniIpProfileFilterOptionsAll.', 'Added support for TCP MSS configuration for IP interfaces by including juniIpProfileTcpMss.', 'Replaced Unisphere names with Juniper names. In juniIpProfileTable, to support unnumbered interfaces referencing numbered interfaces in addition to loopback interfaces, the following object is made obsolete: juniIpProfileLoopback and the following object is added: juniIpProfileInheritNumString', 'Deprecated juniIpProfileRowStatus; the table is now dense and populated as a side-effect of creation of an entry in the juniProfileNameTable in Juniper-PROFILE-MIB. Also, added juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable.', 'Obsoleted juniIpProfileLoopbackIfIndex, replacing it with juniIpProfileLoopback.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: juniIpProfileMIB.setLastUpdated('200609081026Z')
if mibBuilder.loadTexts: juniIpProfileMIB.setOrganization('Juniper Networks')
if mibBuilder.loadTexts: juniIpProfileMIB.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford MA 01886-3146 USA Tel: +1 978 589 5800 Email: mib@Juniper.net')
if mibBuilder.loadTexts: juniIpProfileMIB.setDescription('The IP Profile MIB for the Juniper Networks enterprise.')
juniIpProfileObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1))
juniIpProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1))
# --- juniIpProfileTable: one entry per IP-interface configuration profile,
# --- indexed by juniIpProfileId; rows are created/deleted via the
# --- juniProfileNameTable in Juniper-PROFILE-MIB (see table description).
juniIpProfileTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1), )
if mibBuilder.loadTexts: juniIpProfileTable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileTable.setDescription('The entries in this table describe profiles for configuring IP interfaces. Entries in this table are created/deleted as a side-effect of corresponding operations to the juniProfileNameTable in the Juniper-PROFILE-MIB.')
juniIpProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1), ).setIndexNames((0, "Juniper-IP-PROFILE-MIB", "juniIpProfileId"))
if mibBuilder.loadTexts: juniIpProfileEntry.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileEntry.setDescription('A profile describing configuration of an IP interface.')
# --- Table columns (profile attributes). Column status varies: several are
# --- obsolete/deprecated per the revision history above.
juniIpProfileId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: juniIpProfileId.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileId.setDescription('The integer identifier associated with this profile. A value for this identifier is determined by locating or creating a profile name in the juniProfileNameTable.')
juniIpProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileRowStatus.setStatus('deprecated')
if mibBuilder.loadTexts: juniIpProfileRowStatus.setDescription("Controls creation/deletion of entries in this table. Only the values 'createAndGo' and 'destroy' may be SET. The value of juniIpProfileId must match that of a profile name configured in juniProfileNameTable.")
juniIpProfileRouterName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 3), JuniName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileRouterName.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileRouterName.setDescription('The virtual router to which an IP interface configured by this profile will be assigned, if other mechanisms do not otherwise specify a virtual router assignment.')
juniIpProfileIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIpAddr.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIpAddr.setDescription('An IP address to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
juniIpProfileIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIpMask.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIpMask.setDescription('An IP address mask to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
juniIpProfileDirectedBcastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 6), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileDirectedBcastEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileDirectedBcastEnable.setDescription('Enable/disable forwarding of directed broadcasts on this IP network interface.')
juniIpProfileIcmpRedirectEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 7), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIcmpRedirectEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIcmpRedirectEnable.setDescription('Enable/disable transmission of ICMP Redirect messages on this IP network interface.')
juniIpProfileAccessRoute = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 8), JuniEnable().clone('enable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileAccessRoute.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileAccessRoute.setDescription('Enable/disable whether a host route is automatically created for a remote host attached to an IP interface that is configured using this profile.')
juniIpProfileMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(512, 10240), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileMtu.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileMtu.setDescription('The configured MTU size for this IP network interface. If set to zero, the default MTU size, as determined by the underlying network media, is used.')
juniIpProfileLoopbackIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 10), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileLoopbackIfIndex.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileLoopbackIfIndex.setDescription('For unnumbered interfaces, the IfIndex of the IP loopback interface whose IP address is used as the source address for transmitted IP packets. A value of zero means the loopback interface is unspecified (e.g., when the interface is numbered).')
juniIpProfileLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileLoopback.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileLoopback.setDescription("The number of the loopback interface, associated with the specified virtual router, whose IP address is used as the source address when transmitting IP packets on unnumbered remote access user links. For example, if the loopback interface for the associated router was configured via the console as 'loopback 2', this object would contain the integer value 2. A value of -1 indicates the loopback interface is unspecified, e.g., when the IP interface is numbered. This object has been replaced by juniIpProfileInheritNumString. This object is no longer represented in the juniIpProfileSetMap.")
juniIpProfileSetMap = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 12), JuniSetMap()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileSetMap.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileSetMap.setDescription("A bitmap representing which objects in this entry have been explicitly configured. See the definition of the JuniSetMap TEXTUAL-CONVENTION for details of use. The INDEX object(s) and this object are excluded from representation (i.e. their bits are never set). When a SET request does not explicitly configure JuniSetMap, bits in JuniSetMap are set as a side-effect of configuring other profile attributes in the same entry. If, however, a SET request explicitly configures JuniSetMap, the explicitly configured value overrides 1) any previous bit settings, and 2) any simultaneous 'side-effect' settings that would otherwise occur. Once set, bits can only be cleared by explicitly configuring JuniSetMap.")
juniIpProfileSrcAddrValidEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 13), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileSrcAddrValidEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileSrcAddrValidEnable.setDescription('Enable/disable whether source addresses in received IP packets are validated. Validation is performed by looking up the source IP address in the routing database and determining whether the packet arrived on the expected interface; if not, the packet is discarded.')
juniIpProfileInheritNumString = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileInheritNumString.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileInheritNumString.setDescription("The text identifier of the numbered interface, associated with the specified virtual router, whose IP address is used as the source address when transmitting IP packets on unnumbered remote access user links. Types/formats/examples for this string include: Loopback loopback <id> 'loopback 0' ATM Virtual Circuit atm <slot>/<port>.<distinguisher> 'atm 3/1.100' Ethernet { fastEthernet | gigabitEthernet } <slot>/<port> 'fastEthernet 3/0' 'gigabitEthernet 3/0' Ethernet VLAN { fastEthernet | gigabitEthernet } <slot>/<port>:<vlanID> 'fastEthernet 3/0:1000' 'gigabitEthernet 3/0:1000' Channelized Serial serial <slot>/<port>:<channelSpecifier>[/<channelSpecifier>]* 'serial 3/0:4' (T1/E1) 'serial 3/0:2/4' (T3/E3) 'serial 3/0:2/1/1/4' (OC3/OC12 - channelized DS3) 'serial 3/0:2/1/1/1/4' (OC3/OC12 - virtual tributaries) Other formats may be supported over time. An empty string indicates the referenced interface is unspecified, e.g., when this IP interface is numbered.")
juniIpProfileTcpMss = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(160, 10240), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileTcpMss.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileTcpMss.setDescription('Configures TCP MSS value for an IP interface. When configured, MSS value of TCP SYN packets received or transmitted on the interface will be compared with the configured value and lowest of the two will replace the value in the packet.')
juniIpProfileFilterOptionsAll = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 16), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileFilterOptionsAll.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileFilterOptionsAll.setDescription('Enable/disable whether IP packets containing options are to be discarded or sent to the control plane for processing.')
juniIpProfileFlowStats = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 17), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileFlowStats.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileFlowStats.setDescription('Enable/disable whether J-Flow is enabled on the interface')
juniIpProfileBlockMulticastSources = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 18), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileBlockMulticastSources.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileBlockMulticastSources.setDescription('Enable/disable Blocking Multicast traffic')
# --- Conformance information: compliance statements and object groups.
# --- Each MIB revision obsoleted the previous compliance/group pair, so
# --- only *Compliance5/6/7 and *Group7 are 'current'.
juniIpProfileMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4))
juniIpProfileMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1))
juniIpProfileMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2))
juniIpProfileCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 1)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance = juniIpProfileCompliance.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileLoopback replaced juniIpProfileLoopbackIfIndex.')
juniIpProfileCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 2)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance1 = juniIpProfileCompliance1.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance1.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileRowStatus was deprecate and the juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable objects were added.')
juniIpProfileCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 3)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance2 = juniIpProfileCompliance2.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance2.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileLoopback was obsoleted and the juniIpProfileInheritNumString object was added.')
juniIpProfileCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 4)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup3"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance3 = juniIpProfileCompliance3.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance3.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileTcpMss was added.')
juniIpProfileCompliance4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 5)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup4"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance4 = juniIpProfileCompliance4.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance4.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileFilterOptionsAll was added.')
juniIpProfileCompliance5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 6)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup5"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance5 = juniIpProfileCompliance5.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance5.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileFilterOptionsAll.')
juniIpProfileCompliance6 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 7)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup6"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance6 = juniIpProfileCompliance6.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance6.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileFlowStats.')
juniIpProfileCompliance7 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 8)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup7"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance7 = juniIpProfileCompliance7.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance7.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileBlockMulticastSources.')
juniIpProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 1)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopbackIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup = juniIpProfileGroup.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This group became obsolete when juniIpProfileLoopback replaced juniIpProfileLoopbackIfIndex.')
juniIpProfileGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 2)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopback"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup1 = juniIpProfileGroup1.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup1.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This group became obsolete when juniIpProfileRowStatus was deprecate and the juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable objects were added.')
juniIpProfileGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 3)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopback"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup2 = juniIpProfileGroup2.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup2.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileLoopback was obsoleted and the juniIpProfileInheritNumString object was added.')
juniIpProfileDeprecatedGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 4)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileDeprecatedGroup = juniIpProfileDeprecatedGroup.setStatus('deprecated')
if mibBuilder.loadTexts: juniIpProfileDeprecatedGroup.setDescription('Deprecated object providing management of IP Profile functionality in a Juniper product. This group has been deprecated but may still be supported on some implementations.')
juniIpProfileGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 5)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup3 = juniIpProfileGroup3.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup3.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileTcpMss was added.')
juniIpProfileGroup4 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 6)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup4 = juniIpProfileGroup4.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup4.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became osolete when juniIpProfileFilterOptionsAll was added.')
juniIpProfileGroup5 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 7)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup5 = juniIpProfileGroup5.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup5.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became osolete when juniIpProfileFlowStats was added.')
juniIpProfileGroup6 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 8)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFlowStats"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup6 = juniIpProfileGroup6.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup6.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileBlockMulticastSources was added.')
juniIpProfileGroup7 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 9)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFlowStats"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileBlockMulticastSources"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup7 = juniIpProfileGroup7.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileGroup7.setDescription('The basic collection of objects providing management of IP Profile functionality in a Juniper product.')
# --- Export all symbols so other MIB modules can import them via mibBuilder.
mibBuilder.exportSymbols("Juniper-IP-PROFILE-MIB", juniIpProfileCompliance6=juniIpProfileCompliance6, juniIpProfileEntry=juniIpProfileEntry, juniIpProfileObjects=juniIpProfileObjects, juniIpProfileGroup3=juniIpProfileGroup3, juniIpProfileLoopback=juniIpProfileLoopback, juniIpProfile=juniIpProfile, juniIpProfileCompliance=juniIpProfileCompliance, juniIpProfileGroup1=juniIpProfileGroup1, juniIpProfileCompliance7=juniIpProfileCompliance7, juniIpProfileFlowStats=juniIpProfileFlowStats, juniIpProfileGroup5=juniIpProfileGroup5, juniIpProfileLoopbackIfIndex=juniIpProfileLoopbackIfIndex, juniIpProfileIpAddr=juniIpProfileIpAddr, juniIpProfileGroup6=juniIpProfileGroup6, juniIpProfileDirectedBcastEnable=juniIpProfileDirectedBcastEnable, juniIpProfileBlockMulticastSources=juniIpProfileBlockMulticastSources, juniIpProfileCompliance2=juniIpProfileCompliance2, juniIpProfileTcpMss=juniIpProfileTcpMss, juniIpProfileId=juniIpProfileId, PYSNMP_MODULE_ID=juniIpProfileMIB, juniIpProfileMtu=juniIpProfileMtu, juniIpProfileGroup2=juniIpProfileGroup2, juniIpProfileGroup4=juniIpProfileGroup4, juniIpProfileGroup7=juniIpProfileGroup7, juniIpProfileMIBConformance=juniIpProfileMIBConformance, juniIpProfileSetMap=juniIpProfileSetMap, juniIpProfileSrcAddrValidEnable=juniIpProfileSrcAddrValidEnable, juniIpProfileCompliance1=juniIpProfileCompliance1, juniIpProfileIpMask=juniIpProfileIpMask, juniIpProfileCompliance3=juniIpProfileCompliance3, juniIpProfileFilterOptionsAll=juniIpProfileFilterOptionsAll, juniIpProfileTable=juniIpProfileTable, juniIpProfileInheritNumString=juniIpProfileInheritNumString, juniIpProfileCompliance4=juniIpProfileCompliance4, juniIpProfileCompliance5=juniIpProfileCompliance5, juniIpProfileMIB=juniIpProfileMIB, juniIpProfileRouterName=juniIpProfileRouterName, juniIpProfileMIBCompliances=juniIpProfileMIBCompliances, juniIpProfileIcmpRedirectEnable=juniIpProfileIcmpRedirectEnable, juniIpProfileMIBGroups=juniIpProfileMIBGroups, juniIpProfileGroup=juniIpProfileGroup, 
juniIpProfileRowStatus=juniIpProfileRowStatus, juniIpProfileDeprecatedGroup=juniIpProfileDeprecatedGroup, juniIpProfileAccessRoute=juniIpProfileAccessRoute)
| 179.97619 | 2,142 | 0.789489 | #
# PySNMP MIB module Juniper-IP-PROFILE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-IP-PROFILE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:03:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
juniMibs, = mibBuilder.importSymbols("Juniper-MIBs", "juniMibs")
JuniEnable, JuniName, JuniSetMap = mibBuilder.importSymbols("Juniper-TC", "JuniEnable", "JuniName", "JuniSetMap")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Counter64, Bits, Gauge32, ObjectIdentity, Integer32, TimeTicks, Counter32, MibIdentifier, IpAddress, ModuleIdentity, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Counter64", "Bits", "Gauge32", "ObjectIdentity", "Integer32", "TimeTicks", "Counter32", "MibIdentifier", "IpAddress", "ModuleIdentity", "NotificationType")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
juniIpProfileMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26))
juniIpProfileMIB.setRevisions(('2006-09-08 10:26', '2005-09-13 17:21', '2004-10-05 14:04', '2003-09-24 15:33', '2002-10-11 13:20', '2001-01-24 20:06', '2000-05-08 00:00', '1999-08-25 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: juniIpProfileMIB.setRevisionsDescriptions(('Added support for Blocking multicast sources on IP Interfaces - juniIpProfileBlockMulticastSources.', 'Added support for Flow Stats a.k.a. J-Flow for IP Interfaces by including juniIpProfileFlowStats.', 'Added support for IP filter options all for IP Interfaces by including juniIpProfileFilterOptionsAll.', 'Added support for TCP MSS configuration for IP interfaces by including juniIpProfileTcpMss.', 'Replaced Unisphere names with Juniper names. In juniIpProfileTable, to support unnumbered interfaces referencing numbered interfaces in addition to loopback interfaces, the following object is made obsolete: juniIpProfileLoopback and the following object is added: juniIpProfileInheritNumString', 'Deprecated juniIpProfileRowStatus; the table is now dense and populated as a side-effect of creation of an entry in the juniProfileNameTable in Juniper-PROFILE-MIB. Also, added juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable.', 'Obsoleted juniIpProfileLoopbackIfIndex, replacing it with juniIpProfileLoopback.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: juniIpProfileMIB.setLastUpdated('200609081026Z')
if mibBuilder.loadTexts: juniIpProfileMIB.setOrganization('Juniper Networks')
if mibBuilder.loadTexts: juniIpProfileMIB.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford MA 01886-3146 USA Tel: +1 978 589 5800 Email: mib@Juniper.net')
if mibBuilder.loadTexts: juniIpProfileMIB.setDescription('The IP Profile MIB for the Juniper Networks enterprise.')
juniIpProfileObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1))
juniIpProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1))
juniIpProfileTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1), )
if mibBuilder.loadTexts: juniIpProfileTable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileTable.setDescription('The entries in this table describe profiles for configuring IP interfaces. Entries in this table are created/deleted as a side-effect of corresponding operations to the juniProfileNameTable in the Juniper-PROFILE-MIB.')
juniIpProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1), ).setIndexNames((0, "Juniper-IP-PROFILE-MIB", "juniIpProfileId"))
if mibBuilder.loadTexts: juniIpProfileEntry.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileEntry.setDescription('A profile describing configuration of an IP interface.')
juniIpProfileId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: juniIpProfileId.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileId.setDescription('The integer identifier associated with this profile. A value for this identifier is determined by locating or creating a profile name in the juniProfileNameTable.')
juniIpProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileRowStatus.setStatus('deprecated')
if mibBuilder.loadTexts: juniIpProfileRowStatus.setDescription("Controls creation/deletion of entries in this table. Only the values 'createAndGo' and 'destroy' may be SET. The value of juniIpProfileId must match that of a profile name configured in juniProfileNameTable.")
juniIpProfileRouterName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 3), JuniName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileRouterName.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileRouterName.setDescription('The virtual router to which an IP interface configured by this profile will be assigned, if other mechanisms do not otherwise specify a virtual router assignment.')
juniIpProfileIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIpAddr.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIpAddr.setDescription('An IP address to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
juniIpProfileIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIpMask.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIpMask.setDescription('An IP address mask to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
juniIpProfileDirectedBcastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 6), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileDirectedBcastEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileDirectedBcastEnable.setDescription('Enable/disable forwarding of directed broadcasts on this IP network interface.')
juniIpProfileIcmpRedirectEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 7), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIcmpRedirectEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIcmpRedirectEnable.setDescription('Enable/disable transmission of ICMP Redirect messages on this IP network interface.')
juniIpProfileAccessRoute = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 8), JuniEnable().clone('enable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileAccessRoute.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileAccessRoute.setDescription('Enable/disable whether a host route is automatically created for a remote host attached to an IP interface that is configured using this profile.')
juniIpProfileMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(512, 10240), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileMtu.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileMtu.setDescription('The configured MTU size for this IP network interface. If set to zero, the default MTU size, as determined by the underlying network media, is used.')
juniIpProfileLoopbackIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 10), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileLoopbackIfIndex.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileLoopbackIfIndex.setDescription('For unnumbered interfaces, the IfIndex of the IP loopback interface whose IP address is used as the source address for transmitted IP packets. A value of zero means the loopback interface is unspecified (e.g., when the interface is numbered).')
juniIpProfileLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileLoopback.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileLoopback.setDescription("The number of the loopback interface, associated with the specified virtual router, whose IP address is used as the source address when transmitting IP packets on unnumbered remote access user links. For example, if the loopback interface for the associated router was configured via the console as 'loopback 2', this object would contain the integer value 2. A value of -1 indicates the loopback interface is unspecified, e.g., when the IP interface is numbered. This object has been replaced by juniIpProfileInheritNumString. This object is no longer represented in the juniIpProfileSetMap.")
juniIpProfileSetMap = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 12), JuniSetMap()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileSetMap.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileSetMap.setDescription("A bitmap representing which objects in this entry have been explicitly configured. See the definition of the JuniSetMap TEXTUAL-CONVENTION for details of use. The INDEX object(s) and this object are excluded from representation (i.e. their bits are never set). When a SET request does not explicitly configure JuniSetMap, bits in JuniSetMap are set as a side-effect of configuring other profile attributes in the same entry. If, however, a SET request explicitly configures JuniSetMap, the explicitly configured value overrides 1) any previous bit settings, and 2) any simultaneous 'side-effect' settings that would otherwise occur. Once set, bits can only be cleared by explicitly configuring JuniSetMap.")
juniIpProfileSrcAddrValidEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 13), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileSrcAddrValidEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileSrcAddrValidEnable.setDescription('Enable/disable whether source addresses in received IP packets are validated. Validation is performed by looking up the source IP address in the routing database and determining whether the packet arrived on the expected interface; if not, the packet is discarded.')
juniIpProfileInheritNumString = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileInheritNumString.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileInheritNumString.setDescription("The text identifier of the numbered interface, associated with the specified virtual router, whose IP address is used as the source address when transmitting IP packets on unnumbered remote access user links. Types/formats/examples for this string include: Loopback loopback <id> 'loopback 0' ATM Virtual Circuit atm <slot>/<port>.<distinguisher> 'atm 3/1.100' Ethernet { fastEthernet | gigabitEthernet } <slot>/<port> 'fastEthernet 3/0' 'gigabitEthernet 3/0' Ethernet VLAN { fastEthernet | gigabitEthernet } <slot>/<port>:<vlanID> 'fastEthernet 3/0:1000' 'gigabitEthernet 3/0:1000' Channelized Serial serial <slot>/<port>:<channelSpecifier>[/<channelSpecifier>]* 'serial 3/0:4' (T1/E1) 'serial 3/0:2/4' (T3/E3) 'serial 3/0:2/1/1/4' (OC3/OC12 - channelized DS3) 'serial 3/0:2/1/1/1/4' (OC3/OC12 - virtual tributaries) Other formats may be supported over time. An empty string indicates the referenced interface is unspecified, e.g., when this IP interface is numbered.")
juniIpProfileTcpMss = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(160, 10240), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileTcpMss.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileTcpMss.setDescription('Configures TCP MSS value for an IP interface. When configured, MSS value of TCP SYN packets received or transmitted on the interface will be compared with the configured value and lowest of the two will replace the value in the packet.')
juniIpProfileFilterOptionsAll = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 16), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileFilterOptionsAll.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileFilterOptionsAll.setDescription('Enable/disable whether IP packets containing options are to be discarded or sent to the control plane for processing.')
juniIpProfileFlowStats = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 17), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileFlowStats.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileFlowStats.setDescription('Enable/disable whether J-Flow is enabled on the interface')
juniIpProfileBlockMulticastSources = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 18), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileBlockMulticastSources.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileBlockMulticastSources.setDescription('Enable/disable Blocking Multicast traffic')
juniIpProfileMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4))
juniIpProfileMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1))
juniIpProfileMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2))
juniIpProfileCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 1)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileCompliance = juniIpProfileCompliance.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileLoopback replaced juniIpProfileLoopbackIfIndex.')
juniIpProfileCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 2)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileCompliance1 = juniIpProfileCompliance1.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance1.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileRowStatus was deprecate and the juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable objects were added.')
juniIpProfileCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 3)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileCompliance2 = juniIpProfileCompliance2.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance2.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileLoopback was obsoleted and the juniIpProfileInheritNumString object was added.')
juniIpProfileCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 4)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup3"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileCompliance3 = juniIpProfileCompliance3.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance3.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileTcpMss was added.')
juniIpProfileCompliance4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 5)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup4"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileCompliance4 = juniIpProfileCompliance4.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance4.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileFilterOptionsAll was added.')
juniIpProfileCompliance5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 6)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup5"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileCompliance5 = juniIpProfileCompliance5.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance5.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileFilterOptionsAll.')
juniIpProfileCompliance6 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 7)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup6"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileCompliance6 = juniIpProfileCompliance6.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance6.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileFlowStats.')
juniIpProfileCompliance7 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 8)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup7"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileCompliance7 = juniIpProfileCompliance7.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance7.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileBlockMulticastSources.')
juniIpProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 1)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopbackIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileGroup = juniIpProfileGroup.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This group became obsolete when juniIpProfileLoopback replaced juniIpProfileLoopbackIfIndex.')
juniIpProfileGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 2)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopback"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileGroup1 = juniIpProfileGroup1.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup1.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This group became obsolete when juniIpProfileRowStatus was deprecate and the juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable objects were added.')
juniIpProfileGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 3)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopback"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileGroup2 = juniIpProfileGroup2.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup2.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileLoopback was obsoleted and the juniIpProfileInheritNumString object was added.')
juniIpProfileDeprecatedGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 4)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileDeprecatedGroup = juniIpProfileDeprecatedGroup.setStatus('deprecated')
if mibBuilder.loadTexts: juniIpProfileDeprecatedGroup.setDescription('Deprecated object providing management of IP Profile functionality in a Juniper product. This group has been deprecated but may still be supported on some implementations.')
juniIpProfileGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 5)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileGroup3 = juniIpProfileGroup3.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup3.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileTcpMss was added.')
juniIpProfileGroup4 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 6)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileGroup4 = juniIpProfileGroup4.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup4.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became osolete when juniIpProfileFilterOptionsAll was added.')
juniIpProfileGroup5 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 7)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileGroup5 = juniIpProfileGroup5.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup5.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became osolete when juniIpProfileFlowStats was added.')
juniIpProfileGroup6 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 8)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFlowStats"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileGroup6 = juniIpProfileGroup6.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup6.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileBlockMulticastSources was added.')
juniIpProfileGroup7 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 9)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFlowStats"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileBlockMulticastSources"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIpProfileGroup7 = juniIpProfileGroup7.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileGroup7.setDescription('The basic collection of objects providing management of IP Profile functionality in a Juniper product.')
mibBuilder.exportSymbols("Juniper-IP-PROFILE-MIB", juniIpProfileCompliance6=juniIpProfileCompliance6, juniIpProfileEntry=juniIpProfileEntry, juniIpProfileObjects=juniIpProfileObjects, juniIpProfileGroup3=juniIpProfileGroup3, juniIpProfileLoopback=juniIpProfileLoopback, juniIpProfile=juniIpProfile, juniIpProfileCompliance=juniIpProfileCompliance, juniIpProfileGroup1=juniIpProfileGroup1, juniIpProfileCompliance7=juniIpProfileCompliance7, juniIpProfileFlowStats=juniIpProfileFlowStats, juniIpProfileGroup5=juniIpProfileGroup5, juniIpProfileLoopbackIfIndex=juniIpProfileLoopbackIfIndex, juniIpProfileIpAddr=juniIpProfileIpAddr, juniIpProfileGroup6=juniIpProfileGroup6, juniIpProfileDirectedBcastEnable=juniIpProfileDirectedBcastEnable, juniIpProfileBlockMulticastSources=juniIpProfileBlockMulticastSources, juniIpProfileCompliance2=juniIpProfileCompliance2, juniIpProfileTcpMss=juniIpProfileTcpMss, juniIpProfileId=juniIpProfileId, PYSNMP_MODULE_ID=juniIpProfileMIB, juniIpProfileMtu=juniIpProfileMtu, juniIpProfileGroup2=juniIpProfileGroup2, juniIpProfileGroup4=juniIpProfileGroup4, juniIpProfileGroup7=juniIpProfileGroup7, juniIpProfileMIBConformance=juniIpProfileMIBConformance, juniIpProfileSetMap=juniIpProfileSetMap, juniIpProfileSrcAddrValidEnable=juniIpProfileSrcAddrValidEnable, juniIpProfileCompliance1=juniIpProfileCompliance1, juniIpProfileIpMask=juniIpProfileIpMask, juniIpProfileCompliance3=juniIpProfileCompliance3, juniIpProfileFilterOptionsAll=juniIpProfileFilterOptionsAll, juniIpProfileTable=juniIpProfileTable, juniIpProfileInheritNumString=juniIpProfileInheritNumString, juniIpProfileCompliance4=juniIpProfileCompliance4, juniIpProfileCompliance5=juniIpProfileCompliance5, juniIpProfileMIB=juniIpProfileMIB, juniIpProfileRouterName=juniIpProfileRouterName, juniIpProfileMIBCompliances=juniIpProfileMIBCompliances, juniIpProfileIcmpRedirectEnable=juniIpProfileIcmpRedirectEnable, juniIpProfileMIBGroups=juniIpProfileMIBGroups, juniIpProfileGroup=juniIpProfileGroup, 
juniIpProfileRowStatus=juniIpProfileRowStatus, juniIpProfileDeprecatedGroup=juniIpProfileDeprecatedGroup, juniIpProfileAccessRoute=juniIpProfileAccessRoute)
| 0 | 0 | 0 |
11f91dae0c7b880aa4315bbca353b562a60d8179 | 11,279 | py | Python | tfsnippet/utils/reuse.py | haowen-xu/tfsnippet-pre-alpha | 31eb2cf692ac25b95cc815aaca53754d6db42d9f | [
"MIT"
] | null | null | null | tfsnippet/utils/reuse.py | haowen-xu/tfsnippet-pre-alpha | 31eb2cf692ac25b95cc815aaca53754d6db42d9f | [
"MIT"
] | null | null | null | tfsnippet/utils/reuse.py | haowen-xu/tfsnippet-pre-alpha | 31eb2cf692ac25b95cc815aaca53754d6db42d9f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import inspect
import functools
import weakref
from contextlib import contextmanager
import six
import tensorflow as tf
from .scope import reopen_variable_scope, root_variable_scope
__all__ = [
'auto_reuse_variables', 'local_reuse', 'global_reuse', 'instance_reuse',
]
@contextmanager
def auto_reuse_variables(name_or_scope,
                         reopen_name_scope=False,
                         initializer=None,
                         regularizer=None,
                         caching_device=None,
                         partitioner=None,
                         custom_getter=None,
                         dtype=tf.float32):
    """Open a variable scope, automatically choosing the `reuse` flag.

    The `reuse` flag is set to False the first time a particular variable
    scope is opened through this helper (within a given graph), and to True
    on every subsequent entry, so variables created inside are shared.

    Parameters
    ----------
    name_or_scope : str | tf.VariableScope
        The name of the variable scope, or the variable scope to open.

    reopen_name_scope : bool
        Whether or not to re-open the original name scope of `name_or_scope`?
        This option is valid only if `name_or_scope` is actually an instance
        of `tf.VariableScope`.

    initializer, regularizer, caching_device, partitioner, custom_getter, dtype
        Other parameters for opening the variable scope.

    Yields
    ------
    tf.VariableScope
        The opened variable scope.
    """
    if not name_or_scope:
        raise ValueError('`name_or_scope` cannot be empty. If you want to '
                         'auto-reuse variables in root variable scope, you '
                         'should capture the root variable scope instance '
                         'and call `auto_reuse_variables` on that, instead '
                         'of calling with an empty name.')

    if reopen_name_scope:
        if not isinstance(name_or_scope, tf.VariableScope):
            raise ValueError('`reopen_name_scope` can be set to True '
                             'only if `name_or_scope` is an instance of '
                             '`tf.VariableScope`.')

        # NOTE(review): the inner helper definitions were missing from this
        # chunk (lost by extraction) and have been reconstructed.  Re-opening
        # an existing scope is delegated to `reopen_variable_scope`, which is
        # imported above -- verify kwargs against its actual signature.
        def generate_context():
            return reopen_variable_scope(name_or_scope,
                                         initializer=initializer,
                                         regularizer=regularizer,
                                         caching_device=caching_device,
                                         partitioner=partitioner,
                                         custom_getter=custom_getter,
                                         dtype=dtype)
    else:
        # Open (or create) the scope as a child of the current scope.
        def generate_context():
            return tf.variable_scope(name_or_scope,
                                     initializer=initializer,
                                     regularizer=regularizer,
                                     caching_device=caching_device,
                                     partitioner=partitioner,
                                     custom_getter=custom_getter,
                                     dtype=dtype)

    with generate_context() as vs:
        # Per-graph tracking of which scopes have been opened before; the
        # WeakKeyDictionary lets entries die together with their graph.
        graph = tf.get_default_graph()
        if graph not in __auto_reuse_variables_graph_dict:
            __auto_reuse_variables_graph_dict[graph] = set([])
        initialized_scopes = __auto_reuse_variables_graph_dict[graph]
        reuse = vs.name in initialized_scopes

        if reuse:
            # Scope seen before: reuse the variables created previously.
            vs.reuse_variables()
            yield vs
        else:
            # First entry: create variables, then mark the scope initialized
            # (only after the body ran without raising).
            yield vs
            initialized_scopes.add(vs.name)
#: dict to track the initialization state for each variable scope
#: belonging to every living graph.
#: Maps ``tf.Graph -> set of variable-scope names`` that have already been
#: opened once via `auto_reuse_variables`; weak keys let a graph's entry
#: disappear automatically once the graph itself is garbage-collected.
__auto_reuse_variables_graph_dict = weakref.WeakKeyDictionary()
def local_reuse(method=None, scope=None):
    """Decorate a function within `auto_reuse_variables` scope locally.

    Any function or method applied with this decorator will be called within
    a variable scope opened by `auto_reuse_variables`.  That is, the
    following code:

        @local_reuse
        def foo():
            return tf.get_variable('bar', ...)

        bar = foo()

    is equivalent to:

        with auto_reuse_variables('foo'):
            bar = tf.get_variable('bar', ...)

    Note that the scope opened by `auto_reuse_variables` should be child
    of the current opened variable scope, so that the following variables,
    `bar_1` and `bar_2`, should be different variables, since they are
    created within different variable scopes:

        with tf.variable_scope('parent_1'):
            bar_1 = foo()  # bar_1.name == 'parent_1/foo/bar:0'

        with tf.variable_scope('parent_2'):
            bar_2 = foo()  # bar_2.name == 'parent_2/foo/bar:0'

    By default the name of the variable scope should be equal to the name
    of the decorated method, and the name scope within the context should
    be equal to the variable scope name, plus some suffix to make it unique.
    The variable scope name can be set by `scope` argument, for example:

        @local_reuse(scope='dense')
        def dense_layer(inputs):
            w = tf.get_variable('w', ...)
            b = tf.get_variable('b', ...)
            return tf.matmul(w, inputs) + b

    Note that the variable reusing is based on the name of the variable
    scope, rather than the function object.  As a result, two functions
    with the same name, or with the same `scope` argument, will reuse
    the same set of variables.

    See Also
    --------
    global_reuse, instance_reuse, auto_reuse_variables

    Parameters
    ----------
    scope : str
        The name of the variable scope.  If not set, will use the name
        of the method as scope name.
    """
    if method is None:
        # Called as `@local_reuse(scope=...)`: return a decorator that
        # expects the method as its sole positional argument.
        return functools.partial(local_reuse, scope=scope)
    scope = scope or method.__name__

    @six.wraps(method)
    def wrapper(*args, **kwargs):
        # NOTE(review): the wrapper body was missing from this chunk (lost by
        # extraction) and has been reconstructed: open the auto-reusing
        # variable scope, then delegate to the wrapped method.
        with auto_reuse_variables(scope):
            return method(*args, **kwargs)

    return wrapper
def global_reuse(method=None, scope=None):
    """Decorate a function within `auto_reuse_variables` scope globally.

    Any function or method applied with this decorator will be called within
    a variable scope opened first by `root_variable_scope`, then by
    `auto_reuse_variables`.  That is::

        @global_reuse
        def foo():
            return tf.get_variable('bar', ...)

        bar = foo()

    is equivalent to::

        with root_variable_scope():
            with auto_reuse_variables('foo'):
                bar = tf.get_variable('bar', ...)

    Thus the major difference between `global_reuse` and `local_reuse` is
    that `global_reuse` will not follow the caller's active variable scope.

    Note that the variable reusing is based on the name of the variable
    scope, rather than the function object.  As a result, two functions
    with the same name, or with the same `scope` argument, will reuse
    the same set of variables.

    Parameters
    ----------
    method : callable, optional
        The function to decorate.  When omitted, a decorator pre-bound with
        `scope` is returned, enabling the `@global_reuse(scope=...)` form.
    scope : str, optional
        The name of the variable scope.  If not set, will use the name
        of the method as scope name.

    See Also
    --------
    local_reuse, instance_reuse, auto_reuse_variables
    """
    if method is None:
        # Bug fix: this previously returned
        # `functools.partial(local_reuse, scope=scope)`, so the
        # parameterized form `@global_reuse(scope=...)` silently behaved
        # like `local_reuse` and never opened the root variable scope.
        return functools.partial(global_reuse, scope=scope)
    scope = scope or method.__name__

    # Restore the (previously missing) wrapper: run the method under the
    # root variable scope, then under the auto-reusing scope.
    @six.wraps(method)
    def wrapper(*args, **kwargs):
        with root_variable_scope():
            with auto_reuse_variables(scope):
                return method(*args, **kwargs)
    return wrapper
def instance_reuse(method=None, scope=None):
    """Decorate an instance method within `auto_reuse_variables` scope.

    This decorator should be applied to unbound instance methods, and
    the instances that own the methods are expected to have a
    `variable_scope` attribute holding a `tf.VariableScope`.  For example::

        class Foo(object):
            def __init__(self, name):
                with tf.variable_scope(name) as vs:
                    self.variable_scope = vs

            @instance_reuse
            def foo(self):
                return tf.get_variable('bar', ...)

    is equivalent to wrapping the method body in::

        with reopen_variable_scope(self.variable_scope):
            with auto_reuse_variables('foo'):
                ...

    In which the `instance_reuse` decorator acts like `global_reuse`,
    but opens the `variable_scope` of the corresponding instance instead
    of the root variable scope, before entering the desired auto-reusing
    variable scope.

    Parameters
    ----------
    method : callable, optional
        The unbound instance method to decorate (its first argument must
        be `self`).  When omitted, a decorator pre-bound with `scope` is
        returned, enabling the `@instance_reuse(scope=...)` form.
    scope : str, optional
        The name of the variable scope.  If not set, will use the name
        of the method as scope name.

    Raises
    ------
    TypeError
        If `method` is already bound, or does not take `self` as its
        first argument; also (at call time) if the instance's
        `variable_scope` attribute is not a `tf.VariableScope`.

    See Also
    --------
    global_reuse, local_reuse, auto_reuse_variables
    """
    if method is None:
        return functools.partial(instance_reuse, scope=scope)

    # check whether or not `method` looks like an instance method
    if six.PY2:
        getargspec = inspect.getargspec
    else:
        getargspec = inspect.getfullargspec
    if inspect.ismethod(method):
        raise TypeError('`method` is expected to be unbound instance method.')
    argspec = getargspec(method)
    if not argspec.args or argspec.args[0] != 'self':
        raise TypeError('`method` seems not to be an instance method '
                        '(whose first argument should be `self`).')

    # determine the scope name
    scope = scope or method.__name__

    # The inner wrapper definition was missing here; restore it: validate
    # the instance's `variable_scope`, reopen it, then enter the
    # auto-reusing scope before invoking the method.
    @six.wraps(method)
    def wrapper(*args, **kwargs):
        obj = args[0]
        variable_scope = obj.variable_scope
        if not isinstance(variable_scope, tf.VariableScope):
            raise TypeError('`variable_scope` attribute of the instance %r '
                            'is expected to be a `tf.VariableScope`, but got '
                            '%r.' % (obj, variable_scope,))
        with reopen_variable_scope(variable_scope):
            with auto_reuse_variables(scope):
                return method(*args, **kwargs)
    return wrapper
| 33.769461 | 79 | 0.620534 | # -*- coding: utf-8 -*-
import inspect
import functools
import weakref
from contextlib import contextmanager
import six
import tensorflow as tf
from .scope import reopen_variable_scope, root_variable_scope
__all__ = [
'auto_reuse_variables', 'local_reuse', 'global_reuse', 'instance_reuse',
]
@contextmanager
def auto_reuse_variables(name_or_scope,
                         reopen_name_scope=False,
                         initializer=None,
                         regularizer=None,
                         caching_device=None,
                         partitioner=None,
                         custom_getter=None,
                         dtype=tf.float32):
    """Open a variable scope, while automatically choosing `reuse` flag.

    The `reuse` flag will be set to False if the variable scope is opened
    for the first time, and it will be set to True each time the variable
    scope is opened again.

    Parameters
    ----------
    name_or_scope : str | tf.VariableScope
        The name of the variable scope, or the variable scope to open.
    reopen_name_scope : bool
        Whether or not to re-open the original name scope of `name_or_scope`?
        This option is valid only if `name_or_scope` is actually an instance
        of `tf.VariableScope`.
    initializer, regularizer, caching_device, partitioner, custom_getter, dtype
        Other parameters for opening the variable scope.

    Yields
    ------
    tf.VariableScope
        The opened variable scope.
    """
    if not name_or_scope:
        raise ValueError('`name_or_scope` cannot be empty. If you want to '
                         'auto-reuse variables in root variable scope, you '
                         'should capture the root variable scope instance '
                         'and call `auto_reuse_variables` on that, instead '
                         'of calling with an empty name.')
    if reopen_name_scope:
        if not isinstance(name_or_scope, tf.VariableScope):
            raise ValueError('`reopen_name_scope` can be set to True '
                             'only if `name_or_scope` is an instance of '
                             '`tf.VariableScope`.')

        # Both branches build a zero-argument factory with the same shape,
        # so the single `with generate_context()` below can serve either
        # way of opening the scope.
        def generate_context():
            return reopen_variable_scope(
                name_or_scope,
                initializer=initializer,
                regularizer=regularizer,
                caching_device=caching_device,
                partitioner=partitioner,
                custom_getter=custom_getter,
                dtype=dtype
            )
    else:
        def generate_context():
            return tf.variable_scope(
                name_or_scope,
                initializer=initializer,
                regularizer=regularizer,
                caching_device=caching_device,
                partitioner=partitioner,
                custom_getter=custom_getter,
                dtype=dtype
            )

    with generate_context() as vs:
        # check whether or not the variable scope has been initialized;
        # initialization state is tracked per graph in a module-level
        # WeakKeyDictionary, keyed by the scope's full name.
        graph = tf.get_default_graph()
        if graph not in __auto_reuse_variables_graph_dict:
            __auto_reuse_variables_graph_dict[graph] = set([])
        initialized_scopes = __auto_reuse_variables_graph_dict[graph]
        reuse = vs.name in initialized_scopes

        # if `reuse` is True, set the reuse flag before handing the scope
        # to the caller; otherwise the scope name is recorded as
        # initialized only AFTER the caller's block completes without
        # raising, so a failed first attempt does not mark it as reusable.
        if reuse:
            vs.reuse_variables()
            yield vs
        else:
            yield vs
            initialized_scopes.add(vs.name)
#: dict to track the initialization state for each variable scope
#: belonging to every living graph.  A WeakKeyDictionary is used so that
#: an entry disappears automatically when its graph is garbage-collected.
__auto_reuse_variables_graph_dict = weakref.WeakKeyDictionary()
def local_reuse(method=None, scope=None):
    """Call the decorated function inside an auto-reusing variable scope.

    The wrapped function always executes under
    ``auto_reuse_variables(scope)``, where `scope` defaults to the
    function's own name.  The reuse scope is opened as a child of whatever
    variable scope is active at call time, so the same function invoked
    under different parent scopes creates independent variable sets, e.g.::

        @local_reuse
        def foo():
            return tf.get_variable('bar', ...)

        with tf.variable_scope('parent_1'):
            bar_1 = foo()   # 'parent_1/foo/bar:0'
        with tf.variable_scope('parent_2'):
            bar_2 = foo()   # 'parent_2/foo/bar:0'

    Reuse is keyed on the scope *name*, not the function object: two
    functions sharing a name (or an explicit ``scope=``) share variables.

    Parameters
    ----------
    method : callable, optional
        The function to wrap.  Omit it to obtain a configurable decorator,
        i.e. the ``@local_reuse(scope=...)`` form.
    scope : str, optional
        Variable scope name; defaults to ``method.__name__``.

    See Also
    --------
    global_reuse, instance_reuse, auto_reuse_variables
    """
    if method is None:
        # Used as a decorator factory: return a decorator with `scope` bound.
        return functools.partial(local_reuse, scope=scope)

    scope_name = scope if scope else method.__name__

    @six.wraps(method)
    def wrapper(*args, **kwargs):
        # Every invocation enters the (auto-reusing) scope first.
        with auto_reuse_variables(scope_name):
            return method(*args, **kwargs)
    return wrapper
def global_reuse(method=None, scope=None):
    """Decorate a function within `auto_reuse_variables` scope globally.

    Any function or method applied with this decorator will be called within
    a variable scope opened first by `root_variable_scope`, then by
    `auto_reuse_variables`.  That is::

        @global_reuse
        def foo():
            return tf.get_variable('bar', ...)

        bar = foo()

    is equivalent to::

        with root_variable_scope():
            with auto_reuse_variables('foo'):
                bar = tf.get_variable('bar', ...)

    Thus the major difference between `global_reuse` and `local_reuse` is
    that `global_reuse` will not follow the caller's active variable scope.

    By default the name of the variable scope should be equal to the name
    of the decorated method, and the name scope within the context should
    be equal to the variable scope name, plus some suffix to make it unique.
    The variable scope name can be set by `scope` argument, for example::

        @global_reuse(scope='dense')
        def dense_layer(inputs):
            w = tf.get_variable('w', ...)
            b = tf.get_variable('b', ...)
            return tf.matmul(w, inputs) + b

    Note that the variable reusing is based on the name of the variable
    scope, rather than the function object.  As a result, two functions
    with the same name, or with the same `scope` argument, will reuse
    the same set of variables.

    Parameters
    ----------
    method : callable, optional
        The function to decorate.  When omitted, a decorator pre-bound with
        `scope` is returned, enabling the `@global_reuse(scope=...)` form.
    scope : str, optional
        The name of the variable scope.  If not set, will use the name
        of the method as scope name.

    See Also
    --------
    local_reuse, instance_reuse, auto_reuse_variables
    """
    if method is None:
        # Bug fix: this previously delegated to
        # `functools.partial(local_reuse, scope=scope)`, so the
        # parameterized form `@global_reuse(scope=...)` silently behaved
        # like `local_reuse` and never opened the root variable scope.
        return functools.partial(global_reuse, scope=scope)
    scope = scope or method.__name__

    @six.wraps(method)
    def wrapper(*args, **kwargs):
        # Detach from the caller's scope, then enter the auto-reusing one.
        with root_variable_scope():
            with auto_reuse_variables(scope):
                return method(*args, **kwargs)
    return wrapper
def instance_reuse(method=None, scope=None):
    """Run an instance method inside its instance's reuse variable scope.

    Intended for unbound instance methods whose owner exposes a
    `variable_scope` attribute (a `tf.VariableScope`).  At call time the
    instance's scope is reopened and an auto-reusing child scope (named
    after the method, or `scope`) is entered around the method body —
    i.e. it behaves like `global_reuse`, except the anchor is the
    instance's own variable scope rather than the root scope::

        class Foo(object):
            def __init__(self, name):
                with tf.variable_scope(name) as vs:
                    self.variable_scope = vs

            @instance_reuse
            def foo(self):
                return tf.get_variable('bar', ...)

    Parameters
    ----------
    method : callable, optional
        The unbound instance method (first argument must be `self`).
        Omit it to obtain the ``@instance_reuse(scope=...)`` form.
    scope : str, optional
        Variable scope name; defaults to the method name.

    Raises
    ------
    TypeError
        If `method` is bound, lacks a leading `self` parameter, or the
        instance's `variable_scope` is not a `tf.VariableScope`.

    See Also
    --------
    global_reuse, local_reuse, auto_reuse_variables
    """
    if method is None:
        return functools.partial(instance_reuse, scope=scope)

    # Sanity-check that `method` really looks like an unbound instance
    # method before wrapping it.
    if inspect.ismethod(method):
        raise TypeError('`method` is expected to be unbound instance method.')
    spec_fn = inspect.getargspec if six.PY2 else inspect.getfullargspec
    spec = spec_fn(method)
    first_arg = spec.args[0] if spec.args else None
    if first_arg != 'self':
        raise TypeError('`method` seems not to be an instance method '
                        '(whose first argument should be `self`).')

    scope_name = scope or method.__name__

    @six.wraps(method)
    def wrapper(*args, **kwargs):
        instance = args[0]
        instance_scope = instance.variable_scope
        if not isinstance(instance_scope, tf.VariableScope):
            raise TypeError('`variable_scope` attribute of the instance %r '
                            'is expected to be a `tf.VariableScope`, but got '
                            '%r.' % (instance, instance_scope,))
        with reopen_variable_scope(instance_scope):
            with auto_reuse_variables(scope_name):
                return method(*args, **kwargs)
    return wrapper
| 1,387 | 0 | 139 |
2be175cce772400eb717edef1cda536954ea7271 | 13,316 | py | Python | tools/train_net.py | donghongwen/SSM | d0ef9fca75240ec2c0e15b2e26f78e19f2f6afc7 | [
"MIT"
] | 3 | 2019-11-01T11:50:43.000Z | 2021-05-07T07:26:51.000Z | tools/train_net.py | donghongwen/SSM | d0ef9fca75240ec2c0e15b2e26f78e19f2f6afc7 | [
"MIT"
] | null | null | null | tools/train_net.py | donghongwen/SSM | d0ef9fca75240ec2c0e15b2e26f78e19f2f6afc7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
from __future__ import division
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net, SolverWrapper, update_training_roidb
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
from utils.help import *
import caffe
import argparse
import pprint
import numpy as np
import sys, math, logging
import scipy
import operator
from bitmap import BitMap
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def parse_args():
    """Parse the command-line arguments of the training script.

    Prints the usage text and exits with status 1 when invoked with no
    arguments at all; otherwise returns the parsed `argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    add = parser.add_argument
    add('--gpu', dest='gpu_id', help='GPU device id to use [0]',
        default=0, type=int)
    add('--solver', dest='solver', help='solver prototxt',
        default=None, type=str)
    add('--iters', dest='max_iters', help='number of iterations to train',
        default=40000, type=int)
    add('--weights', dest='pretrained_model',
        help='initialize with pretrained model weights',
        default=None, type=str)
    add('--cfg', dest='cfg_file', help='optional config file',
        default=None, type=str)
    add('--imdb', dest='imdb_name', help='dataset to train on',
        default='voc_2007_trainval', type=str)
    add('--rand', dest='randomize',
        help='randomize (do not use a fixed seed)', action='store_true')
    # --set swallows every remaining token on the command line.
    add('--set', dest='set_cfgs', help='set config keys',
        default=None, nargs=argparse.REMAINDER)
    ######################## begin #############################
    add('--enable_al', help='whether or not use al process',
        action='store_true', default=True)
    add('--enable_ss', help='whether or not use ss process',
        action='store_true', default=True)
    ######################## end #############################

    if len(sys.argv) == 1:
        # No arguments given at all: show usage and bail out.
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
######################## begin #############################
######################## end #############################
if __name__ == '__main__':
    # Entry point: alternating training loop combining "al" (active
    # learning, per the original comments) and "ss" sample selection on
    # top of an R-FCN/Fast R-CNN detector (Python 2 / Caffe code).
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    ######################## begin #############################
    imdb = get_Imdbs(args.imdb_name)
    roidb = get_training_roidb(imdb)
    print '{:d} roidb entries'.format(len(roidb))
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)

    # some statistics to record across loops
    alamount = 0; ssamount = 0
    discardamount = 0
    # bitmap marking which images are currently labeled (used by AL)
    bitmapImdb = BitMap(imdb.num_images)

    # choose initial samples: the whole first dataset (VOC2007)
    initial_num = len(imdb[imdb.item_name(0)].roidb)
    print 'All VOC2007 images use for initial train, image numbers:%d'%(initial_num)
    for i in range(initial_num):
        bitmapImdb.set(i)
    train_roidb = [roidb[i] for i in range(initial_num)]
    pretrained_model_name = args.pretrained_model

    # static parameters (NOTE(review): `tao` is assigned but never used below)
    tao = args.max_iters
    # initial hyperparameters
    gamma = 0.3; clslambda = np.array([-np.log(0.9)]*imdb.num_classes)
    # train record
    loopcounter = 0; train_iters = 0; iters_sum = train_iters
    # checkpoints controlling how many samples AL may consume over time
    al_proportion_checkpoint = [int(x*initial_num) for x in np.linspace(0.1,2.3,12)]
    # checkpoints controlling the SS budget, relative to the AL budget
    ss_proportion_checkpoint = [int(x*initial_num) for x in np.linspace(1,23,12)]

    # get solver object
    sw = SolverWrapper(args.solver, train_roidb, output_dir,
                       pretrained_model=pretrained_model_name)
    # pretrain an initial model on VOC2007 only
    sw.train_model(70000)

    while(True):
        # indices of still-unlabeled samples
        unlabeledidx = list(set(range(imdb.num_images))-set(bitmapImdb.nonzero()))
        # indices of labeled samples
        labeledidx = list(set(bitmapImdb.nonzero()))

        # load the latest trained model snapshot from the output directory
        trained_models = choose_model(output_dir)
        pretrained_model_name = trained_models[-1]
        modelpath = os.path.join(output_dir, pretrained_model_name)
        protopath = os.path.join('models/pascal_voc/ResNet-101/rfcn_end2end',
                                 'test_agnostic.prototxt')
        print 'choose latest model:{}'.format(modelpath)
        model = load_model(protopath,modelpath)

        # run detection on all unlabeled samples with the latest model
        scoreMatrix, boxRecord, yVecs = bulk_detect(model, unlabeledidx, imdb, clslambda)
        # logging.debug('scoreMatrix:{}, boxRecord:{}, yVecs:{}'.format(scoreMatrix.shape,
        #     boxRecord.shape, yVecs.shape))

        # per-loop accumulators for the detection results
        al_candidate_idx = [] # record al samples index in imdb
        ss_candidate_idx = [] # record ss samples index in imdb
        ss_fake_gt = [] # record fake labels for ss
        cls_loss_sum = np.zeros((imdb.num_classes,)) # record loss for each cls
        count_box_num = 0 # used for update clslambda

        for i in range(len(unlabeledidx)):
            img_boxes = []; cls=[]; # fake ground truth
            count_box_num += len(boxRecord[i])
            ss_idx_score_pair = [] # record re-detect score map to idx
            avg_scores_idx = 0
            for j,box in enumerate(boxRecord[i]):
                boxscore = scoreMatrix[i][j] # score of a box
                # fake label vector for this box
                y = yVecs[i][j]
                # the "fai" loss function on the (pseudo-)label
                loss = -((1+y)/2 * np.log(boxscore) + (1-y)/2 * np.log(1-boxscore+1e-30))
                # choose v by loss
                sign, v = judge_v(loss, gamma, clslambda)
                # print('v:{}'.format(v))
                # ss process: confident boxes go through cross-validation
                if(sign!=1):
                    if(np.sum(y==1)==1 and np.where(y==1)[0]!=0): # not background
                        # verify pseudo-label via Image Cross Validation
                        print('ss process ...')
                        pre_cls = np.where(y==1)[0]
                        pre_box = box
                        curr_roidb = roidb[unlabeledidx[i]]
                        cross_validate,avg_score = image_cross_validation(model,roidb,labeledidx,curr_roidb,pre_box,pre_cls,resize=False)
                        if cross_validate:
                            img_boxes.append(box)
                            cls.append(np.where(y==1)[0])
                            avg_scores_idx += avg_score
                        else:
                            discardamount += 1
                            continue
                    elif(np.sum(y==1) != 1):
                        discardamount += 1
                        continue
                else: # al process: uncertain image goes to human labeling
                    # add image to al candidates and drop its fake labels
                    print('al process ...')
                    al_candidate_idx.append(unlabeledidx[i])
                    img_boxes=[]; cls=[]
                    break

            # build the fake ground truth entry for this ss candidate
            if len(img_boxes) != 0:
                ss_idx_score_pair.append(avg_scores_idx/len(img_boxes))
                ss_idx_score_pair.append(unlabeledidx[i])
                ss_candidate_idx.append(ss_idx_score_pair)
                overlaps = np.zeros((len(img_boxes), imdb.num_classes), dtype=np.float32)
                # NOTE(review): this inner loop shadows the outer index `i`;
                # harmless here only because `i` is not read again in this
                # iteration, but fragile if code is added below.
                for i in range(len(img_boxes)):
                    overlaps[i, cls[i]]=1.0
                overlaps = scipy.sparse.csr_matrix(overlaps)
                # NOTE(review): `np.int` is removed in NumPy >= 1.24.
                ss_fake_gt.append({'score':ss_idx_score_pair[0],'boxes':np.array(img_boxes),
                                   'gt_classes':np.array(cls,dtype=np.int).flatten(),
                                   'gt_overlaps':overlaps, 'flipped':False})

        # stop when few AL candidates remain or the iteration budget is spent
        if len(al_candidate_idx)<=10 or iters_sum>args.max_iters:
            print ('all process finish at loop ',loopcounter)
            print ('the num of al_candidate :',len(al_candidate_idx))
            print ('the net train for {} epoches'.format(iters_sum))
            break

        # randomly keep ~50% of the AL candidates
        r = np.random.rand(len(al_candidate_idx))
        al_candidate_idx = [x for i,x in enumerate(al_candidate_idx) if r[i]>0.5]
        # re-rank ss candidates by their consistency score (descending)
        ss_candidate_idx = sorted(ss_candidate_idx,reverse=True)
        ss_fake_gt.sort(key=operator.itemgetter('score'),reverse=True)
        ss_candidate_idx = [x[1] for x in ss_candidate_idx]

        if args.enable_al:
            # cap the AL intake at the current proportion checkpoint
            print('alamount:',alamount,'al_candidate_idx:',len(al_candidate_idx),'al_proportion_checkpoint:',al_proportion_checkpoint[0])
            if alamount+len(al_candidate_idx)>=al_proportion_checkpoint[0]:
                al_candidate_idx = al_candidate_idx[:int(al_proportion_checkpoint[0]-alamount)]
                tmp = al_proportion_checkpoint.pop(0)
                print 'al_proportion_checkpoint: {}%% samples for al, model name:{}'.format(tmp/initial_num,pretrained_model_name )
            print 'sample chosen for al: ', len(al_candidate_idx)
        else:
            al_candidate_idx = []

        if args.enable_ss:
            # cap the SS intake at the current proportion checkpoint
            print('ssamount:',ssamount,'ss_candidate_idx:',len(ss_candidate_idx),'ss_proportion_checkpoint:',ss_proportion_checkpoint[0])
            if ssamount+len(ss_candidate_idx)>=ss_proportion_checkpoint[0]:
                ss_candidate_idx = ss_candidate_idx[:int(ss_proportion_checkpoint[0]-ssamount)]
                ss_fake_gt = ss_fake_gt[:int(ss_proportion_checkpoint[0]-ssamount)]
                tmp = ss_proportion_checkpoint.pop(0)
                print 'ss_proportion_checkpoint: {}%% samples for ss, model name:{}'.format(tmp/initial_num,pretrained_model_name )
            print 'sample chosen by ss: ',len(ss_candidate_idx)
        else:
            ss_candidate_idx=[]
            ss_fake_gt = []
        print 'sample discard:', discardamount

        alamount += len(al_candidate_idx); ssamount += len(ss_candidate_idx)+discardamount
        # record the proportion of al and ss (true division via __future__)
        al_factor = float(alamount/initial_num)
        ss_factor = float(ssamount/initial_num)
        logging.info('last model name :{},al amount:{}/{},al_factor:{},ss amount: {}/{},ss_factor:{}'.format(pretrained_model_name,alamount,initial_num,al_factor,ssamount,initial_num,ss_factor))

        # generate the training set for the next loop: AL picks become
        # permanently labeled; SS picks join only for this round
        for idx in al_candidate_idx:
            bitmapImdb.set(idx)
        next_train_idx = bitmapImdb.nonzero(); next_train_idx.extend(ss_candidate_idx)
        # update the roidb with the fake ground truth of the ss picks
        roidb = update_training_roidb(imdb,ss_candidate_idx,ss_fake_gt)
        train_roidb = [roidb[i] for i in next_train_idx]
        loopcounter += 1

        # fine-tune the model on the enlarged labeled set
        train_iters = min(15000 ,len(train_roidb)*15-train_iters)
        iters_sum += train_iters
        sw.update_roidb(train_roidb)
        sw.train_model(iters_sum)
    ######################## end #############################
| 42.954839 | 194 | 0.591018 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
from __future__ import division
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net, SolverWrapper, update_training_roidb
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
from utils.help import *
import caffe
import argparse
import pprint
import numpy as np
import sys, math, logging
import scipy
import operator
from bitmap import BitMap
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def parse_args():
    """Parse the command-line arguments of the training script.

    Prints the usage text and exits with status 1 when the script is
    invoked with no arguments at all; otherwise returns the parsed
    `argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id',
                        help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--solver', dest='solver',
                        help='solver prototxt',
                        default=None, type=str)
    parser.add_argument('--iters', dest='max_iters',
                        help='number of iterations to train',
                        default=40000, type=int)
    parser.add_argument('--weights', dest='pretrained_model',
                        help='initialize with pretrained model weights',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default=None, type=str)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to train on',
                        default='voc_2007_trainval', type=str)
    parser.add_argument('--rand', dest='randomize',
                        help='randomize (do not use a fixed seed)',
                        action='store_true')
    # --set consumes every remaining token on the command line (REMAINDER).
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    ######################## begin #############################
    # NOTE(review): `store_true` combined with default=True means these two
    # flags are always True and cannot be disabled from the command line.
    parser.add_argument('--enable_al', help='whether or not use al process',
                        action='store_true',default=True)
    parser.add_argument('--enable_ss', help='whether or not use ss process',
                        action='store_true',default=True)
    ######################## end #############################

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args
def combined_roidb(imdb_names):
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
imdb = datasets.imdb.imdb(imdb_names)
else:
imdb = get_imdb(imdb_names)
return imdb, roidb
######################## begin #############################
def get_Imdbs(imdb_names):
imdbs = [get_imdb(s) for s in imdb_names.split('+')]
for im in imdbs:
im.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
return datasets.imdb.Imdbs(imdbs)
######################## end #############################
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
######################## begin #############################
imdb = get_Imdbs(args.imdb_name)
roidb = get_training_roidb(imdb)
print '{:d} roidb entries'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# some statistic to record
alamount = 0; ssamount = 0
discardamount = 0
# set bitmap for AL
bitmapImdb = BitMap(imdb.num_images)
# choose initiail samples:VOC2007
initial_num = len(imdb[imdb.item_name(0)].roidb)
print 'All VOC2007 images use for initial train, image numbers:%d'%(initial_num)
for i in range(initial_num):
bitmapImdb.set(i)
train_roidb = [roidb[i] for i in range(initial_num)]
pretrained_model_name = args.pretrained_model
# static parameters
tao = args.max_iters
# initial hypeparameters
gamma = 0.3; clslambda = np.array([-np.log(0.9)]*imdb.num_classes)
# train record
loopcounter = 0; train_iters = 0; iters_sum = train_iters
# control al proportion
al_proportion_checkpoint = [int(x*initial_num) for x in np.linspace(0.1,2.3,12)]
# control ss proportion with respect to al proportion
ss_proportion_checkpoint = [int(x*initial_num) for x in np.linspace(1,23,12)]
# get solver object
sw = SolverWrapper(args.solver, train_roidb, output_dir,
pretrained_model=pretrained_model_name)
# with voc2007 to pretrained an initial model
sw.train_model(70000)
while(True):
# detact unlabeledidx samples
unlabeledidx = list(set(range(imdb.num_images))-set(bitmapImdb.nonzero()))
# detect labeledidx
labeledidx = list(set(bitmapImdb.nonzero()))
# load latest trained model
trained_models = choose_model(output_dir)
pretrained_model_name = trained_models[-1]
modelpath = os.path.join(output_dir, pretrained_model_name)
protopath = os.path.join('models/pascal_voc/ResNet-101/rfcn_end2end',
'test_agnostic.prototxt')
print 'choose latest model:{}'.format(modelpath)
model = load_model(protopath,modelpath)
# return detect results of the unlabeledidx samples with the latest model
scoreMatrix, boxRecord, yVecs = bulk_detect(model, unlabeledidx, imdb, clslambda)
# logging.debug('scoreMatrix:{}, boxRecord:{}, yVecs:{}'.format(scoreMatrix.shape,
# boxRecord.shape, yVecs.shape))
# record some detect results for updatable
al_candidate_idx = [] # record al samples index in imdb
ss_candidate_idx = [] # record ss samples index in imdb
ss_fake_gt = [] # record fake labels for ss
cls_loss_sum = np.zeros((imdb.num_classes,)) # record loss for each cls
count_box_num = 0 # used for update clslambda
for i in range(len(unlabeledidx)):
img_boxes = []; cls=[]; # fake ground truth
count_box_num += len(boxRecord[i])
ss_idx_score_pair = [] # record re-detect score map to idx
avg_scores_idx = 0
for j,box in enumerate(boxRecord[i]):
boxscore = scoreMatrix[i][j] # score of a box
# fake label box
y = yVecs[i][j]
# the fai function
loss = -((1+y)/2 * np.log(boxscore) + (1-y)/2 * np.log(1-boxscore+1e-30))
# choose v by loss
sign, v = judge_v(loss, gamma, clslambda)
# print('v:{}'.format(v))
# ss process
if(sign!=1):
if(np.sum(y==1)==1 and np.where(y==1)[0]!=0): # not background
# add Imgae Cross Validation
print('ss process ...')
pre_cls = np.where(y==1)[0]
pre_box = box
curr_roidb = roidb[unlabeledidx[i]]
cross_validate,avg_score = image_cross_validation(model,roidb,labeledidx,curr_roidb,pre_box,pre_cls,resize=False)
if cross_validate:
img_boxes.append(box)
cls.append(np.where(y==1)[0])
avg_scores_idx += avg_score
else:
discardamount += 1
continue
elif(np.sum(y==1) != 1):
discardamount += 1
continue
else: # al process
#add image to al candidate
print('al process ...')
al_candidate_idx.append(unlabeledidx[i])
img_boxes=[]; cls=[]
break
# replace the fake ground truth for the ss_candidate
if len(img_boxes) != 0:
ss_idx_score_pair.append(avg_scores_idx/len(img_boxes))
ss_idx_score_pair.append(unlabeledidx[i])
ss_candidate_idx.append(ss_idx_score_pair)
overlaps = np.zeros((len(img_boxes), imdb.num_classes), dtype=np.float32)
for i in range(len(img_boxes)):
overlaps[i, cls[i]]=1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
ss_fake_gt.append({'score':ss_idx_score_pair[0],'boxes':np.array(img_boxes),
'gt_classes':np.array(cls,dtype=np.int).flatten(),
'gt_overlaps':overlaps, 'flipped':False})
if len(al_candidate_idx)<=10 or iters_sum>args.max_iters:
print ('all process finish at loop ',loopcounter)
print ('the num of al_candidate :',len(al_candidate_idx))
print ('the net train for {} epoches'.format(iters_sum))
break
# 50% enter al
r = np.random.rand(len(al_candidate_idx))
al_candidate_idx = [x for i,x in enumerate(al_candidate_idx) if r[i]>0.5]
# re-rank according to consistency-score
ss_candidate_idx = sorted(ss_candidate_idx,reverse=True)
ss_fake_gt.sort(key=operator.itemgetter('score'),reverse=True)
ss_candidate_idx = [x[1] for x in ss_candidate_idx]
if args.enable_al:
# control al proportion
print('alamount:',alamount,'al_candidate_idx:',len(al_candidate_idx),'al_proportion_checkpoint:',al_proportion_checkpoint[0])
if alamount+len(al_candidate_idx)>=al_proportion_checkpoint[0]:
al_candidate_idx = al_candidate_idx[:int(al_proportion_checkpoint[0]-alamount)]
tmp = al_proportion_checkpoint.pop(0)
print 'al_proportion_checkpoint: {}%% samples for al, model name:{}'.format(tmp/initial_num,pretrained_model_name )
print 'sample chosen for al: ', len(al_candidate_idx)
else:
al_candidate_idx = []
if args.enable_ss:
# control ss proportion
print('ssamount:',ssamount,'ss_candidate_idx:',len(ss_candidate_idx),'ss_proportion_checkpoint:',ss_proportion_checkpoint[0])
if ssamount+len(ss_candidate_idx)>=ss_proportion_checkpoint[0]:
ss_candidate_idx = ss_candidate_idx[:int(ss_proportion_checkpoint[0]-ssamount)]
ss_fake_gt = ss_fake_gt[:int(ss_proportion_checkpoint[0]-ssamount)]
tmp = ss_proportion_checkpoint.pop(0)
print 'ss_proportion_checkpoint: {}%% samples for ss, model name:{}'.format(tmp/initial_num,pretrained_model_name )
print 'sample chosen by ss: ',len(ss_candidate_idx)
else:
ss_candidate_idx=[]
ss_fake_gt = []
print 'sample discard:', discardamount
alamount += len(al_candidate_idx); ssamount += len(ss_candidate_idx)+discardamount
# record the proportion of al and ss
al_factor = float(alamount/initial_num)
ss_factor = float(ssamount/initial_num)
logging.info('last model name :{},al amount:{}/{},al_factor:{},ss amount: {}/{},ss_factor:{}'.format(pretrained_model_name,alamount,initial_num,al_factor,ssamount,initial_num,ss_factor))
# generate training set for next loop
for idx in al_candidate_idx:
bitmapImdb.set(idx)
next_train_idx = bitmapImdb.nonzero(); next_train_idx.extend(ss_candidate_idx)
# update the roidb with ss_fake_gt
roidb = update_training_roidb(imdb,ss_candidate_idx,ss_fake_gt)
train_roidb = [roidb[i] for i in next_train_idx]
loopcounter += 1
# add the labeled samples to finetune W
train_iters = min(15000 ,len(train_roidb)*15-train_iters)
iters_sum += train_iters
sw.update_roidb(train_roidb)
sw.train_model(iters_sum)
######################## end #############################
| 875 | 0 | 45 |
c595589da7b4d85ae7c0668163c3f9ca736b61d6 | 3,600 | py | Python | storops/vnx/resource/cifs_server.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 60 | 2016-04-18T23:42:10.000Z | 2022-03-23T02:26:03.000Z | storops/vnx/resource/cifs_server.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 317 | 2016-05-25T06:45:37.000Z | 2022-03-25T13:22:38.000Z | storops/vnx/resource/cifs_server.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 34 | 2016-03-18T02:39:12.000Z | 2022-01-07T12:54:14.000Z | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
from storops.vnx.resource.mover import VNXMoverRefList
from storops.vnx.resource import VNXCliResourceList, VNXResource
__author__ = 'Jay Xu'
log = logging.getLogger(__name__)
| 33.962264 | 79 | 0.655556 | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
from storops.vnx.resource.mover import VNXMoverRefList
from storops.vnx.resource import VNXCliResourceList, VNXResource
__author__ = 'Jay Xu'
log = logging.getLogger(__name__)
class CifsDomain(object):
    """Value object describing the AD domain a CIFS server joins.

    A plain record: every constructor argument is stored verbatim and no
    validation is performed here.
    """

    def __init__(self, name, comp_name=None, user=None, password=None):
        # Domain name plus optional computer name and join credentials.
        self.name, self.comp_name = name, comp_name
        self.user, self.password = user, password
class VNXCifsServerList(VNXCliResourceList):
    """List of CIFS servers, optionally scoped to one mover or VDM."""

    def __init__(self, cli=None, mover_id=None, is_vdm=False):
        super(VNXCifsServerList, self).__init__(cli=cli)
        # Remember the scope so the raw listing can be narrowed on fetch.
        self.mover_id = mover_id
        self.is_vdm = is_vdm

    @classmethod
    def get_resource_class(cls):
        """Type of the individual items held by this list."""
        return VNXCifsServer

    def _get_raw_resource(self):
        # Ask the CLI for the CIFS servers belonging to the stored scope.
        return self._cli.get_cifs_server(
            mover_id=self.mover_id, is_vdm=self.is_vdm)
class VNXCifsServer(VNXResource):
    """A single CIFS (SMB) server hosted on a VNX data mover or VDM.

    NOTE(review): presumably properties such as ``mover_id``/``is_vdm`` are
    populated lazily from the CLI response by the ``VNXResource`` base —
    confirm against the base class.
    """
    def __init__(self, name=None, cli=None):
        super(VNXCifsServer, self).__init__()
        # Name of the CIFS server; used to look up the raw resource.
        self._name = name
        self._cli = cli
    def _get_raw_resource(self):
        """Fetch this server's raw CLI representation by name."""
        return self._cli.get_cifs_server(name=self._name)
    @staticmethod
    def get(cli, name=None, mover_id=None, is_vdm=False):
        """Return one server (when ``name`` is given) or a server list.

        The list can be narrowed to a specific mover/VDM via ``mover_id``
        and ``is_vdm``.
        """
        if name is not None:
            ret = VNXCifsServer(name=name, cli=cli)
        else:
            ret = VNXCifsServerList(cli=cli, mover_id=mover_id, is_vdm=is_vdm)
        return ret
    @staticmethod
    def create(cli, name, mover_id=None, is_vdm=False,
               workgroup=None, domain=None,
               interfaces=None, alias_name=None,
               local_admin_password=None):
        """Create a CIFS server and return a handle to it.

        Raises ``ValueError`` when no data mover exists; backend errors are
        surfaced by ``resp.raise_if_err()``.
        """
        # default to first physical data mover
        if mover_id is None:
            movers = VNXMoverRefList(cli=cli)
            if not movers:
                raise ValueError('no data mover available.')
            mover_id = movers[0].mover_id
            is_vdm = False
        resp = cli.create_cifs_server(
            name=name, mover_id=mover_id, is_vdm=is_vdm,
            workgroup=workgroup, domain=domain,
            ip_list=interfaces, alias_name=alias_name,
            local_admin_password=local_admin_password)
        resp.raise_if_err()
        return VNXCifsServer(name=name, cli=cli)
    def delete(self, mover_id=None, is_vdm=False):
        """Delete this CIFS server.

        When ``mover_id`` is omitted, the scope is read from this instance
        (``self.mover_id`` / ``self.is_vdm``).
        """
        if mover_id is None:
            mover_id = self.mover_id
            is_vdm = self.is_vdm
        resp = self._cli.delete_cifs_server(self._get_name(), mover_id, is_vdm)
        resp.raise_if_err()
        return resp
    def modify(self, name, mover_id=None, is_vdm=True,
               join_domain=False, username=None, password=None):
        """Join/unjoin the server to a domain, authenticating as
        ``username``/``password``.
        """
        if mover_id is None:
            mover_id = self.mover_id
            is_vdm = self.is_vdm
        resp = self._cli.modify_domain_cifs_server(
            name, mover_id, is_vdm, join_domain, username, password)
        resp.raise_if_err()
        return resp
| 2,284 | 333 | 95 |
f786169a8c203fe08bb2f78833d9abf361b486f1 | 1,023 | py | Python | examples/dags/spark_example.py | FreeUniDataEngineering/airflow-spark2.4-hue-hive | 031d7f9d90ace6f4f5cfe6ae2f2998ec8b53edbe | [
"Apache-2.0"
] | null | null | null | examples/dags/spark_example.py | FreeUniDataEngineering/airflow-spark2.4-hue-hive | 031d7f9d90ace6f4f5cfe6ae2f2998ec8b53edbe | [
"Apache-2.0"
] | null | null | null | examples/dags/spark_example.py | FreeUniDataEngineering/airflow-spark2.4-hue-hive | 031d7f9d90ace6f4f5cfe6ae2f2998ec8b53edbe | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from airflow.models import DAG
from airflow.providers.apache.spark.operators.spark_jdbc import SparkJDBCOperator
from airflow.providers.apache.spark.operators.spark_sql import SparkSqlOperator
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
import os
# Demo DAG: three spark-submit tasks; the first two run in parallel and
# gate the third (dependency set on the line following this block).
with DAG(
    dag_id='spark_test',
    schedule_interval=None,  # manual trigger only
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=['FreeUni'],
) as dag:
    # [START howto_operator_spark_submit]
    submit_job = SparkSubmitOperator(
        application="/airflow/jobs/test_job.py", task_id="submit_job"
    )
    # [END howto_operator_spark_submit]
    # Built-in Spark example: estimates pi; path resolved from $SPARK_HOME.
    submit_job_2 = SparkSubmitOperator(
        application=f"{os.getenv('SPARK_HOME')}/examples/src/main/python/pi.py", task_id="submit_job_2"
    )
    # NOTE(review): f-string has no placeholder — a plain string would do.
    submit_job_3 = SparkSubmitOperator(
        application=f"/airflow/jobs/breaking_news.py", task_id="breaking_news"
    )
[submit_job, submit_job_2] >> submit_job_3 | 33 | 104 | 0.72825 | from datetime import datetime
from airflow.models import DAG
from airflow.providers.apache.spark.operators.spark_jdbc import SparkJDBCOperator
from airflow.providers.apache.spark.operators.spark_sql import SparkSqlOperator
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
import os
# Demo DAG: three spark-submit tasks; the first two run in parallel and
# gate the third (dependency set on the line following this block).
with DAG(
    dag_id='spark_test',
    schedule_interval=None,  # manual trigger only
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=['FreeUni'],
) as dag:
    # [START howto_operator_spark_submit]
    submit_job = SparkSubmitOperator(
        application="/airflow/jobs/test_job.py", task_id="submit_job"
    )
    # [END howto_operator_spark_submit]
    # Built-in Spark example: estimates pi; path resolved from $SPARK_HOME.
    submit_job_2 = SparkSubmitOperator(
        application=f"{os.getenv('SPARK_HOME')}/examples/src/main/python/pi.py", task_id="submit_job_2"
    )
    # NOTE(review): f-string has no placeholder — a plain string would do.
    submit_job_3 = SparkSubmitOperator(
        application=f"/airflow/jobs/breaking_news.py", task_id="breaking_news"
    )
[submit_job, submit_job_2] >> submit_job_3 | 0 | 0 | 0 |
fdd34b5cf650058e67e9c8f00cb7ffdbcaddec58 | 4,235 | py | Python | tests/test_help_mixin.py | TheFriendlyCoder/FriendlyShell | 8508c09e787cb8f0fc44c2a9e2587e8d4eec555c | [
"Apache-2.0"
] | null | null | null | tests/test_help_mixin.py | TheFriendlyCoder/FriendlyShell | 8508c09e787cb8f0fc44c2a9e2587e8d4eec555c | [
"Apache-2.0"
] | 93 | 2018-04-21T01:03:06.000Z | 2019-06-23T14:22:37.000Z | tests/test_help_mixin.py | TheFriendlyCoder/FriendlyShell | 8508c09e787cb8f0fc44c2a9e2587e8d4eec555c | [
"Apache-2.0"
] | null | null | null | import logging
from friendlyshell.base_shell import BaseShell
from friendlyshell.shell_help_mixin import ShellHelpMixin
from friendlyshell.basic_logger_mixin import BasicLoggerMixin
from mock import patch
import pytest
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
| 29.006849 | 65 | 0.661865 | import logging
from friendlyshell.base_shell import BaseShell
from friendlyshell.shell_help_mixin import ShellHelpMixin
from friendlyshell.basic_logger_mixin import BasicLoggerMixin
from mock import patch
import pytest
# Behaviour tests for ShellHelpMixin: each test builds a throwaway shell
# class, scripts the user's keystrokes via a mocked input(), and asserts on
# what was logged.  The docstrings *inside* the MyShell helpers are part of
# the behaviour under test (they are asserted against) — do not edit them.
def test_list_commands(caplog):
    """`help` lists built-ins plus user commands with their summaries."""
    caplog.set_level(logging.INFO)
    class MyShell (BasicLoggerMixin, BaseShell, ShellHelpMixin):
        def do_something(self):
            """Here's online help for my 'something' command"""
            pass
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['help', 'exit']
        obj.run()
    assert 'exit' in caplog.text
    assert 'help' in caplog.text
    assert 'something' in caplog.text
    assert obj.do_something.__doc__ in caplog.text
def test_help_missing_command(caplog):
    """`help <unknown>` reports that the command does not exist."""
    caplog.set_level(logging.INFO)
    class MyShell (BasicLoggerMixin, BaseShell, ShellHelpMixin):
        pass
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['help something', 'exit']
        obj.run()
    assert "Command does not exist: something" in caplog.text
def test_missing_help(caplog):
    """A command without a docstring yields a 'no online help' message."""
    caplog.set_level(logging.INFO)
    class MyShell(BasicLoggerMixin, BaseShell, ShellHelpMixin):
        def do_something(self):
            pass
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['help something', 'exit']
        obj.run()
    assert 'No online help' in caplog.text
def test_default_help(caplog):
    """With no help_* method, the command's docstring is shown."""
    caplog.set_level(logging.INFO)
    class MyShell(BasicLoggerMixin, BaseShell, ShellHelpMixin):
        def do_something(self):
            """Here's online help"""
            pass
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['help something', 'exit']
        obj.run()
    assert "Here's online help" in caplog.text
def test_help_alias(caplog):
    """`?` behaves as an alias for `help`."""
    caplog.set_level(logging.INFO)
    class MyShell(BasicLoggerMixin, BaseShell, ShellHelpMixin):
        def do_something(self):
            """Here's online help"""
            pass
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['? something', 'exit']
        obj.run()
    assert "Here's online help" in caplog.text
def test_command_help(caplog):
    """A help_<cmd> method takes precedence over the docstring."""
    caplog.set_level(logging.INFO)
    expected_help = "Here's my verbose help for something..."
    class MyShell(BasicLoggerMixin, BaseShell, ShellHelpMixin):
        def do_something(self):
            """Here's online help"""
            pass
        def help_something(self):
            return expected_help
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['help something', 'exit']
        obj.run()
    assert expected_help in caplog.text
def test_help_help(caplog):
    """The help command documents itself."""
    caplog.set_level(logging.INFO)
    class MyShell(BasicLoggerMixin, BaseShell, ShellHelpMixin):
        pass
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['help help', 'exit']
        obj.run()
    assert "Online help generation tool" in caplog.text
def test_occluded_help(caplog):
    """A non-callable help_<cmd> attribute is reported as an error."""
    caplog.set_level(logging.INFO)
    class MyShell(BasicLoggerMixin, BaseShell, ShellHelpMixin):
        def do_something(self):
            """Here's online help"""
            pass
        help_something = "Should not see me"
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['help something', 'exit']
        obj.run()
    assert "Error: " in caplog.text
def test_occluded_command(caplog):
    """A non-callable do_<cmd> attribute is reported as an error."""
    caplog.set_level(logging.INFO)
    class MyShell (BasicLoggerMixin, BaseShell, ShellHelpMixin):
        do_something = "Hello"
    obj = MyShell()
    with patch('friendlyshell.base_shell.input') as MockInput:
        MockInput.side_effect = ['help something', 'exit']
        obj.run()
    assert "Error: " in caplog.text
# Allow running this module directly.
if __name__ == "__main__":
    pytest.main([__file__, "-v", "-s"])
| 3,735 | 0 | 207 |
e692149ccb7316fa1ad458af36150929e61cfa52 | 4,990 | py | Python | src/tess_bite/core.py | SSDataLab/tess-bite | bb2314472da1da8addc14b14407879cdc2fb78a1 | [
"MIT"
] | null | null | null | src/tess_bite/core.py | SSDataLab/tess-bite | bb2314472da1da8addc14b14407879cdc2fb78a1 | [
"MIT"
] | null | null | null | src/tess_bite/core.py | SSDataLab/tess-bite | bb2314472da1da8addc14b14407879cdc2fb78a1 | [
"MIT"
] | null | null | null | import re
import io
import struct
import numpy as np
import httpx
import logging
from . import log
# FITS standard specifies that header and data units
# shall be a multiple of 2880 bytes long.
FITS_BLOCK_SIZE = 2880 # bytes
# TESS FFI dimensions
FFI_COLUMNS = 2136 # i.e. NAXIS1
FFI_ROWS = 2078 # i.e. NAXIS2
BYTES_PER_PIX = 4 # float32
def http_get_range(url: str, byterange: list = None) -> list:
    """Download one or more inclusive (begin, end) byte ranges of *url*.

    Returns a list with one ``bytes`` payload per range (a single element
    when the server does not answer with ``multipart/byteranges``).
    """
    http_headers = {}
    if byterange:
        # RFC 7233 spec, e.g. [(0, 99), (200, 299)] -> "bytes=0-99,200-299".
        rangestr = ",".join([f"{r[0]}-{r[1]}" for r in byterange])
        http_headers["Range"] = f"bytes={rangestr}"
    resp = httpx.get(url, headers=http_headers)
    if "multipart/byteranges" not in resp.headers["content-type"]:
        # Whole file or single range: the body is the payload itself.
        data = [resp.content]
    else:
        # Multipart: each part's payload is the line following a blank line.
        # NOTE(review): splitting on CRLF assumes the payload bytes never
        # contain b"\r\n" themselves; a MIME-aware parser would be sturdier.
        lines = resp.content.split(b"\r\n")
        data = []
        for idx in range(len(lines)):
            # Data follows an empty line
            try:
                if idx > 0 and idx < len(lines) - 1 and lines[idx] == b"":
                    data.append(lines[idx + 1])
            except UnicodeDecodeError:
                # NOTE(review): nothing in this block decodes text, so this
                # handler looks unreachable — candidate for removal.
                pass
    return data
| 33.046358 | 99 | 0.574749 | import re
import io
import struct
import numpy as np
import httpx
import logging
from . import log
# FITS standard specifies that header and data units
# shall be a multiple of 2880 bytes long.
FITS_BLOCK_SIZE = 2880 # bytes
# TESS FFI dimensions
FFI_COLUMNS = 2136 # i.e. NAXIS1
FFI_ROWS = 2078 # i.e. NAXIS2
BYTES_PER_PIX = 4 # float32
class RemoteTessImage:
    """Random access to a (remote) TESS FFI FITS image via byte ranges.

    ``url`` may be an HTTP(S) URL (ranges fetched with ``http_get_range``)
    or a local file-like object with ``seek``/``read``.
    """
    def __init__(self, url, data_offset=None):
        self.url = url
        # Byte offset where the ext-1 image data begins; auto-detected from
        # the FITS headers when the caller does not supply it.
        self.data_offset = data_offset if data_offset else self._find_data_offset(ext=1)
    def _download_range_multiple(self, byteranges: list) -> list:
        """Fetch several inclusive (begin, end) ranges; one bytes object each."""
        if isinstance(self.url, str):
            return http_get_range(self.url, byteranges)
        else:
            # File-like object: emulate range requests with seek/read.
            result = []
            for r in byteranges:
                self.url.seek(r[0], 0)
                result.append(self.url.read(r[1] - r[0] + 1))
            return result
    def _download_range(self, begin: int, end: int) -> bytes:
        """Fetch a single inclusive byte range."""
        return self._download_range_multiple([(begin, end)])[0]
    def _find_data_offset(self, ext=1) -> int:
        """Returns the byte offset of the start of the data section."""
        # We'll assume the data starts within the first 10 FITS BLOCKs.
        # This means the method will currently only work for extensions 0 and 1 of a TESS FFI file.
        max_seek = FITS_BLOCK_SIZE * 12
        data = self._download_range(0, max_seek)
        current_ext = 0
        offset = 0
        while offset <= max_seek:
            block = data[offset : offset + FITS_BLOCK_SIZE]
            offset += FITS_BLOCK_SIZE
            # Header sections end with "END" followed by whitespace until the end of the block
            # NOTE(review): pattern should be a raw string (r"END\s*$") to
            # avoid an invalid-escape warning.
            if re.search("END\s*$", block.decode("ascii")):
                if current_ext == ext:
                    return offset
                current_ext += 1
        # Requested extension's header not found within the probed region.
        return None
    def _find_pixel_offset(self, col, row) -> int:
        """Returns the byte offset of a specific pixel position."""
        # Row-major float32 layout: one row is FFI_COLUMNS pixels wide.
        pixel_offset = col + row * FFI_COLUMNS
        return self.data_offset + BYTES_PER_PIX * pixel_offset
    def _find_pixel_range(self, col, row, shape=(1, 1)) -> list:
        """Returns the byte ranges of a rectangle centered on (col, row).

        Raises ValueError when the rectangle's lower-left corner falls
        outside the FFI.
        """
        result = []
        # Lower-left corner of the cutout rectangle.
        col1 = int(col) - shape[0] // 2
        row1 = int(row) - shape[1] // 2
        if col1 < 0 or col1 >= FFI_COLUMNS:
            raise ValueError(
                f"column out of bounds (col must be in range 0-{FFI_COLUMNS})"
            )
        if row1 < 0 or row1 >= FFI_ROWS:
            raise ValueError(f"row out of bounds (row must be in range 0-{FFI_ROWS})")
        # One contiguous byte range per image row of the rectangle.
        for myrow in range(row1, row1 + shape[1]):
            myrange = (
                self._find_pixel_offset(col1, myrow),
                self._find_pixel_offset(col1 + shape[0], myrow) - 1,
            )
            result.append(myrange)
        return result
    def download_cutout_array(self, col, row, shape=(5, 5)) -> np.ndarray:
        """Returns a 2D array of pixel values."""
        byterange = self._find_pixel_range(col=col, row=row, shape=shape)
        bytedata = self._download_range_multiple(byterange)
        data = []
        for b in bytedata:
            # FITS stores big-endian float32 pixels.
            n_pixels = len(b) // BYTES_PER_PIX
            values = struct.unpack(">" + "f" * n_pixels, b)
            data.append(values)
        return np.array(data)
    def download_cutout(self, col, row, shape=(5, 5)) -> "Cutout":
        """Returns a `Cutout` holding the pixel rectangle around (col, row).

        NOTE(review): time/cadenceno/quality are placeholder zeros and
        flux_err is all-NaN — real values are not populated yet.
        """
        flux = self.download_cutout_array(col=col, row=row, shape=shape)
        time = 0
        cadenceno = 0
        quality = 0
        flux_err = flux.copy()
        flux_err[:] = np.nan
        return Cutout(
            time=time,
            cadenceno=cadenceno,
            flux=flux,
            flux_err=flux_err,
            quality=quality,
        )
def http_get_range(url: str, byterange: list = None) -> list:
    """Download one or more byte ranges of *url* in a single HTTP request.

    Parameters
    ----------
    url : str
        URL of the remote resource.
    byterange : list, optional
        Inclusive ``(begin, end)`` byte ranges, sent as a single
        ``Range: bytes=...`` header.  When omitted, the whole resource is
        fetched.

    Returns
    -------
    list
        One ``bytes`` payload per requested range.  When the server answers
        with a plain (non-multipart) body, the list holds a single element.
    """
    http_headers = {}
    if byterange:
        # RFC 7233 spec, e.g. [(0, 99), (200, 299)] -> "bytes=0-99,200-299".
        rangestr = ",".join(f"{begin}-{end}" for begin, end in byterange)
        http_headers["Range"] = f"bytes={rangestr}"
    resp = httpx.get(url, headers=http_headers)
    if "multipart/byteranges" not in resp.headers["content-type"]:
        # Whole file or single-range answer: the body is the payload itself.
        return [resp.content]
    # Multipart answer: each part's payload is the line that follows an
    # empty line (part headers are terminated by a blank line).
    # Fix: dropped the previous dead ``except UnicodeDecodeError: pass``
    # handler — nothing in this loop decodes text, so it could never fire
    # and only obscured the logic.
    # NOTE(review): splitting on CRLF assumes the payload bytes never
    # contain b"\r\n" themselves; a MIME-aware parser would be sturdier.
    lines = resp.content.split(b"\r\n")
    data = []
    for idx in range(1, len(lines) - 1):
        if lines[idx] == b"":
            data.append(lines[idx + 1])
    return data
class Cutout:
    """Plain record holding one FFI cutout's data and metadata.

    Every constructor argument is stored verbatim as an attribute of the
    same name; no validation or copying is performed.
    """

    def __init__(
        self,
        time: float,
        cadenceno: int,
        flux: np.ndarray,
        flux_err: np.ndarray,
        quality: int,
        meta: dict = None,
    ):
        # Stash all arguments as same-named attributes in one go.
        self.__dict__.update(
            time=time,
            cadenceno=cadenceno,
            flux=flux,
            flux_err=flux_err,
            quality=quality,
            meta=meta,
        )
| 903 | 2,885 | 72 |
a36d766253f894dd156d7b19146b347988d5b41c | 1,823 | py | Python | cngi/vis/ddiregrid.py | wxiongccnu1990/cngi_prototype | 7a7230485acc9f8f2be534a832522339153d521e | [
"Apache-2.0"
] | null | null | null | cngi/vis/ddiregrid.py | wxiongccnu1990/cngi_prototype | 7a7230485acc9f8f2be534a832522339153d521e | [
"Apache-2.0"
] | null | null | null | cngi/vis/ddiregrid.py | wxiongccnu1990/cngi_prototype | 7a7230485acc9f8f2be534a832522339153d521e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################
def ddiregrid(xds, mode='channel', nchan=None, start=0, width=1, interpolation='linear', phasecenter=None, restfreq=None, outframe=None, veltype='radio'):
    """
    .. todo::
        This function is not yet implemented

    Transform channel labels and visibilities to a spectral reference frame which is appropriate for analysis, e.g. from TOPO to LSRK or to correct for doppler shifts throughout the time of observation

    Parameters
    ----------
    xds : xarray.core.dataset.Dataset
        input Visibility Dataset
    mode : str
        regridding mode
    nchan : int
        number of channels in output spw. None=all
    start : int
        first input channel to use
    width : int
        number of input channels to average
    interpolation : str
        spectral interpolation method
    phasecenter : int
        image phase center position or field index
    restfreq : float
        rest frequency
    outframe : str
        output frame, None=keep input frame
    veltype : str
        velocity definition

    Returns
    -------
    xarray.core.dataset.Dataset
        New Visibility Dataset with updated data
    """
    # Stub: returns an empty dict until the regridding logic is implemented.
    return {}
| 33.759259 | 201 | 0.666484 | # Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################
def ddiregrid(xds, mode='channel', nchan=None, start=0, width=1, interpolation='linear', phasecenter=None, restfreq=None, outframe=None, veltype='radio'):
    """
    .. todo::
        This function is not yet implemented

    Transform channel labels and visibilities to a spectral reference frame
    suitable for analysis, e.g. from TOPO to LSRK or to correct for doppler
    shifts over the course of the observation.

    Parameters
    ----------
    xds : xarray.core.dataset.Dataset
        input Visibility Dataset
    mode : str
        regridding mode
    nchan : int
        number of channels in output spw. None=all
    start : int
        first input channel to use
    width : int
        number of input channels to average
    interpolation : str
        spectral interpolation method
    phasecenter : int
        image phase center position or field index
    restfreq : float
        rest frequency
    outframe : str
        output frame, None=keep input frame
    veltype : str
        velocity definition

    Returns
    -------
    xarray.core.dataset.Dataset
        New Visibility Dataset with updated data
    """
    # Placeholder result: no regridding is performed yet, so an empty dict
    # stands in for the future Dataset.
    regridded = {}
    return regridded
| 0 | 0 | 0 |
f726670921d44f21aa09f17d795a742ee0c1fa0c | 8,397 | py | Python | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
| 37.995475 | 400 | 0.609265 | import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
class BitfinexTest(unittest.TestCase):
    """Unit tests for the public (unauthenticated) Bitfinex v1 Client.

    URL-building tests run offline; endpoint tests stub HTTP with
    httpretty and assert that string-valued JSON fields are converted to
    floats/booleans by the client.
    """
    def setUp(self):
        self.client = Client()
    def test_should_have_server(self):
        self.assertEqual("https://api.bitfinex.com/v1", self.client.server())
    def test_should_have_url_for_foo(self):
        expected = "https://api.bitfinex.com/v1/foo"
        self.assertEqual(expected, self.client.url_for("foo"))
    def test_should_have_url_for_path_arg(self):
        expected = "https://api.bitfinex.com/v1/foo/bar"
        actual = self.client.url_for('foo/%s', path_arg="bar")
        self.assertEqual(expected, actual)
    def test_should_have_url_with_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo?a=1&b=2"
        actual = self.client.url_for('foo', parameters={'a': 1, 'b': 2})
        self.assertEqual(expected, actual)
    def test_should_have_url_for(self):
        expected = self.client.url_for("foo")
        self.assertEqual("https://api.bitfinex.com/v1/foo", expected)
    def test_should_have_url_for_with_path_arg(self):
        # path_arg works both as keyword and positional argument.
        expected = "https://api.bitfinex.com/v1/foo/bar"
        path = "foo/%s"
        self.assertEqual(expected, self.client.url_for(path, path_arg='bar'))
        self.assertEqual(expected, self.client.url_for(path, 'bar'))
    def test_should_have_url_for_with_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo?a=1"
        self.assertEqual(expected, self.client.url_for("foo", parameters={'a': 1}))
        self.assertEqual(expected, self.client.url_for("foo", None, {'a': 1}))
    def test_should_have_url_for_with_path_arg_and_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo/bar?a=1"
        path = "foo/%s"
        self.assertEqual(expected, self.client.url_for(path, path_arg='bar', parameters={'a': 1}))
        self.assertEqual(expected, self.client.url_for(path, 'bar', {'a': 1}))
    @httpretty.activate
    def test_should_have_symbols(self):
        # mock out the request
        mock_body = '["btcusd","ltcusd","ltcbtc"]'
        url = self.client.url_for('symbols')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = ["btcusd","ltcusd","ltcbtc"]
        self.assertEqual(expected, self.client.symbols())
    @httpretty.activate
    def test_should_have_ticker(self):
        # mock out the request
        mock_body = '{"mid":"562.56495","bid":"562.15","ask":"562.9799","last_price":"562.25","timestamp":"1395552658.339936691"}'
        url = self.client.url_for('ticker/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        # Client converts the string-typed JSON numbers to floats.
        expected = {
            "mid": 562.56495,
            "bid": 562.15,
            "ask": 562.9799,
            "last_price": 562.25,
            "timestamp": 1395552658.339936691
        }
        self.assertEqual(expected, self.client.ticker('btcusd'))
    @httpretty.activate
    def test_should_have_today(self):
        # mock out the request
        mock_body = '{"low":"550.09","high":"572.2398","volume":"7305.33119836"}'
        url = self.client.url_for('today/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "low": 550.09,
            "high": 572.2398,
            "volume": 7305.33119836
        }
        self.assertEqual(expected, self.client.today('btcusd'))
    @httpretty.activate
    def test_should_have_stats(self):
        # mock out the request
        mock_body = '[{"period":1,"volume":"7410.27250155"},{"period":7,"volume":"52251.37118006"},{"period":30,"volume":"464505.07753251"}]'
        url = self.client.url_for('stats/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = [
            {"period": 1, "volume": 7410.27250155},
            {"period": 7, "volume": 52251.37118006},
            {"period": 30,"volume": 464505.07753251}
        ]
        self.assertEqual(expected, self.client.stats('btcusd'))
    @httpretty.activate
    def test_should_have_lendbook(self):
        # mock out the request
        mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[{"rate":"6.351","amount":"15.5180735","period":5,"timestamp":"1395549996.0","frr":"No"},{"rate":"6.3588","amount":"626.94808249","period":30,"timestamp":"1395400654.0","frr":"Yes"}]}'
        url = self.client.url_for('lendbook/%s', 'btc')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        # "Yes"/"No" frr flags are converted to booleans by the client.
        expected = {
            "bids": [
                {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
                {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
            ],
            "asks": [
                {"rate": 6.351, "amount": 15.5180735, "period": 5, "timestamp": 1395549996.0, "frr": False},
                {"rate": 6.3588, "amount": 626.94808249, "period": 30, "timestamp": 1395400654.0, "frr": True}
            ]
        }
        self.assertEqual(expected, self.client.lendbook('btc'))
    @httpretty.activate
    def test_should_have_lendbook_with_parameters(self):
        # mock out the request
        mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[]}'
        parameters = {'limit_bids': 2, 'limit_asks': 0}
        url = self.client.url_for('lendbook/%s', 'btc', parameters)
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
                {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
            ],
            "asks": [
            ]
        }
        self.assertEqual(expected, self.client.lendbook('btc', parameters))
    @httpretty.activate
    def test_should_have_order_book(self):
        # mock out the request
        mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[{"price":"563.001","amount":"0.3","timestamp":"1395532200.0"}]}'
        url = self.client.url_for('book/%s', 'btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
            ],
            "asks": [
                {"price": 563.001, "amount": 0.3, "timestamp": 1395532200.0}
            ]
        }
        self.assertEqual(expected, self.client.order_book('btcusd'))
    @httpretty.activate
    def test_should_have_order_book_with_parameters(self):
        # mock out the request
        mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[]}'
        parameters = {'limit_asks': 0}
        url = self.client.url_for('book/%s', 'btcusd', parameters)
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
            ],
            "asks": []
        }
        self.assertEqual(expected, self.client.order_book('btcusd', parameters))
class TestTradeClient(unittest.TestCase):
    """Integration tests for the authenticated TradeClient.

    NOTE(review): these call the live Bitfinex API with the credentials
    from ``settings`` — they require network access and valid keys.
    """
    def setUp(self):
        self.tc = TradeClient(API_KEY, API_SECRET)
    def test_instantiate_tradeclient(self):
        self.assertIsInstance(self.tc, TradeClient)
    def test_get_active_orders_returns_json(self):
        ao = self.tc.active_orders()
        self.assertIsInstance(ao, list)
    def test_get_active_positions_returns_json(self):
        ap = self.tc.active_positions()
        self.assertIsInstance(ap, list)
    def test_get_full_history(self):
        # NOTE(review): body duplicates test_get_active_positions_returns_json;
        # presumably a history endpoint was intended here — verify.
        ap = self.tc.active_positions()
        self.assertIsInstance(ap, list)
| 7,325 | 704 | 180 |
ffb0d45447adc197d2881748bf726e3906e01ee8 | 18,785 | py | Python | building.py | Miracle2333/BSPPN | f29182972fd4a13c47d5fb08b7e4faea2aabd77d | [
"MIT"
] | null | null | null | building.py | Miracle2333/BSPPN | f29182972fd4a13c47d5fb08b7e4faea2aabd77d | [
"MIT"
] | null | null | null | building.py | Miracle2333/BSPPN | f29182972fd4a13c47d5fb08b7e4faea2aabd77d | [
"MIT"
] | null | null | null | """
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
# Run COCO evaluatoin on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import time
import sys
import json
import datetime
import numpy as np
import skimage.io
from imgaug import augmenters as iaa
import re
import tqdm
import timeit
import logging
import cv2
import csv
from skimage.measure import find_contours
import skimage.draw
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import IPython.display
from keras.utils import plot_model
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
from config import Config
import utils
import model as modellib
import visualize
import torch
# Root directory of the project
ROOT_DIR = os.getcwd()
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.pth")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Give the configuration a recognizable name
    NAME = "coco"
    # We use one GPU with 8GB memory, which can fit one image.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1
    # Number of GPUs to train on (batch size = GPU_COUNT * IMAGES_PER_GPU).
    GPU_COUNT = 1
    # Number of classes (including background): background + building.
    # (The upstream template's "COCO has 80 classes" no longer applies —
    # this config trains a single-class building detector.)
    NUM_CLASSES = 1+1
    # Gradient updates per epoch and validation batches per epoch.
    STEPS_PER_EPOCH = 500
    VALIDATION_STEPS = 30
############################################################
# Dataset
############################################################
############################################################
# COCO Evaluation
############################################################
def test_building(model, dataset, output, limit=0):
    """Run detection over a dataset and save visualized results.

    Fix: a debug leftover (`image_id = 100` at the top of the loop body)
    clobbered the loop variable, so every iteration detected the same
    hard-coded image instead of walking the dataset. Removed.

    Args:
        model: MaskRCNN model in inference mode.
        dataset: Dataset object providing `image_ids`, `load_image()`,
            `image_info` and `class_names`.
        output: directory where detection visualizations are written.
        limit: if not 0, the number of images to use for evaluation.
    """
    image_ids = dataset.image_ids
    # Limit to a subset when requested.
    if limit:
        image_ids = image_ids[:limit]

    times = []  # per-image inference times; the first (warm-up) run is excluded
    for count, image_id in enumerate(image_ids):
        start = timeit.default_timer()
        image = dataset.load_image(image_id)
        # Source ID is the original image file name; strip the extension.
        source_id = dataset.image_info[image_id]["id"].split('.')[0]
        print(source_id)
        r = model.detect([image], source_id)[0]
        stop = timeit.default_timer()
        if count > 0:  # skip warm-up iteration when timing
            times.append(stop - start)
        visualize.display_detection(image, r['rois'], r['masks'], r['class_ids'],
                                    dataset.class_names, source_id, output,
                                    r['scores'])
        if times:
            # Running average inference time, printed each iteration as before.
            print(sum(times) / float(len(times)))
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse

    # ---- Command line ------------------------------------------------------
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN on MS COCO.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'evaluate' on MS COCO")
    parser.add_argument('--dataset', required=True,
                        metavar="/path/to/coco/",
                        help='Directory of the MS-COCO dataset')
    parser.add_argument('--year', required=False,
                        default=DEFAULT_DATASET_YEAR,
                        metavar="<year>",
                        help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
    parser.add_argument('--model', required=False,
                        metavar="/path/to/weights.pth",
                        help="Path to weights .pth file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--limit', required=False,
                        default=500,
                        metavar="<image count>",
                        help='Images to use for evaluation (default=500)')
    # NOTE(review): type=bool does not parse CLI strings the way users expect —
    # any non-empty value (including "False") is truthy.
    parser.add_argument('--download', required=False,
                        default=False,
                        metavar="<True|False>",
                        help='Automatically download and unzip MS-COCO files (default=False)',
                        type=bool)
    parser.add_argument('--subset', required=False,
                        metavar="Dataset sub-directory",
                        help="Subset of dataset to run prediction on")
    parser.add_argument('--output', required=False,
                        metavar="/path/to/result",
                        help="Path to save the detection result ")
    args = parser.parse_args()
    print("Command: ", args.command)
    print("Model: ", args.model)
    print("Dataset: ", args.dataset)
    if args.subset:
        print("Subset: ", args.subset)
    print("Logs: ", args.logs)

    # ---- Configuration -----------------------------------------------------
    if args.command == "train":
        config = CocoConfig()
    else:
        # NOTE(review): neither InferenceConfig nor CocoDataset is defined in
        # this version of the module — this branch (and the train/test
        # branches below) raise NameError. Confirm the classes were not
        # accidentally dropped from this file.
        config = InferenceConfig()
    config.display()

    # ---- Model -------------------------------------------------------------
    # Both branches currently construct the same model; kept for parity with
    # the upstream Mask R-CNN template.
    if args.command == "train":
        model = modellib.MaskRCNN(config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(config=config,
                                  model_dir=args.logs)
    if config.GPU_COUNT:
        model = model.cuda()

    # ---- Weights selection -------------------------------------------------
    if args.model:
        if args.model.lower() == "coco":
            model_path = COCO_MODEL_PATH
        elif args.model.lower() == "last":
            # Resume from the most recently trained weights.
            model_path = model.find_last()[1]
        elif args.model.lower() == "imagenet":
            # Start from ImageNet trained weights.
            model_path = config.IMAGENET_MODEL_PATH
        else:
            model_path = args.model
    else:
        model_path = ""
    print("Loading weights ", model_path)
    model.load_weights(model_path)

    # ---- Train or evaluate -------------------------------------------------
    if args.command == "train":
        dataset_train = CocoDataset()
        dataset_train.load_building(args.dataset, args.subset)
        dataset_train.prepare()
        dataset_val = CocoDataset()
        dataset_val.load_building(args.dataset, 'val')
        dataset_val.prepare()
        # Stage 1: network heads only.
        print("Training network heads")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE,
                          epochs=50,
                          layers='heads')
        # Stage 2: fine-tune ResNet stage 4 and up.
        print("Fine tune Resnet stage 4 and up")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE,
                          epochs=55,
                          layers='4+')
        # Stage 3: fine-tune all layers at a reduced learning rate.
        print("Fine tune all layers")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE / 10,
                          epochs=60,
                          layers='all')
    elif args.command == "test":
        dataset_test = CocoDataset()
        dataset_test.load_building(args.dataset, "test")
        dataset_test.prepare()
        print("Running COCO detection on {} images.".format(args.limit))
        test_building(model, dataset_test, limit=int(args.limit), output=args.output)
        print("Detection results are saved at {}".format(args.output))
    else:
        # NOTE(review): the accepted command is 'test' but the message says
        # 'evaluate' — confirm wording.
        print("'{}' is not recognized. "
              "Use 'train' or 'evaluate'".format(args.command))
| 37.796781 | 115 | 0.544903 | """
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
# Run COCO evaluatoin on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import time
import sys
import json
import datetime
import numpy as np
import skimage.io
from imgaug import augmenters as iaa
import re
import tqdm
import timeit
import logging
import cv2
import csv
from skimage.measure import find_contours
import skimage.draw
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import IPython.display
from keras.utils import plot_model
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
from config import Config
import utils
import model as modellib
import visualize
import torch
# Root directory of the project
ROOT_DIR = os.getcwd()
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.pth")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Give the configuration a recognizable name
    NAME = "coco"
    # We use one GPU with 8GB memory, which can fit one image.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1
    # Number of GPUs to train on (batch size = GPU_COUNT * IMAGES_PER_GPU).
    GPU_COUNT = 1
    # Number of classes (including background): background + building.
    # (The upstream template's "COCO has 80 classes" no longer applies —
    # this config trains a single-class building detector.)
    NUM_CLASSES = 1+1
    # Gradient updates per epoch and validation batches per epoch.
    STEPS_PER_EPOCH = 500
    VALIDATION_STEPS = 30
############################################################
# Dataset
############################################################
class CocoDataset(utils.Dataset):
    """Dataset for SpaceNet building footprints exposed through the
    Mask R-CNN ``utils.Dataset`` API.

    Registers RGB-PanSharpen tiles plus per-building outline polygons and
    rasterizes the polygons into per-instance bitmap masks on demand.
    """

    def load_building(self, dataset_dir, subset):
        """Load a subset of the Building dataset.

        dataset_dir: Root directory of the dataset.
        subset: Subset to load: "train", "val" or "test".
        """
        # Single foreground class.
        self.add_class("building", 1, "building")
        assert subset in ["train", "val", "test"]
        if subset == "test":
            # Test tiles have no annotations: register every file in the
            # directory with a fixed 650x650 size (SpaceNet tile size).
            dataset_dir = os.path.join(dataset_dir, subset)
            test_files = os.listdir(dataset_dir)
            for f in test_files:
                filename = f
                image_path = os.path.join(dataset_dir, filename)
                height = 650
                width = 650
                self.add_image(
                    "building",
                    image_id=filename,  # use file name as a unique image id
                    path=image_path,
                    width=width, height=height)
        else:
            dataset_dir = os.path.join(dataset_dir, subset)
            # Hard-coded annotation file; could be generalized to any *.json.
            annotations = json.load(open(os.path.join(dataset_dir,
                                                      "AOI_2_Vegas_Train_Building_Solutions_modified.json")))
            # Rows appear grouped per image. BuildingId == '1' marks the first
            # building of a new image; at that point the polygons collected
            # for the *previous* image are flushed via add_image().
            polygons = []
            flag = 0
            for a in annotations:
                if a['BuildingId'] != '1':
                    # Continuation row: parse the X/Y coordinate strings.
                    poly = {}.fromkeys(['x', 'y'])
                    poly['x'] = [float(s) for s in re.findall(r'-?\d+\.?\d*', a['X'])]
                    poly['y'] = [float(s) for s in re.findall(r'-?\d+\.?\d*', a['Y'])]
                    x = poly['x']
                    y = poly['y']
                    # NOTE(review): `len(x) == 0|len(y) == 0` parses as the
                    # chained comparison `len(x) == (0 | len(y)) == 0`, not as
                    # `len(x) == 0 or len(y) == 0`; the `< 2 |` test below has
                    # the same precedence problem. Confirm intended semantics
                    # before relying on these filters.
                    if (len(x) == 0|len(y) == 0):
                        continue
                    elif (np.size(x, 0) < 2 | np.size(y, 0) < 2):
                        continue
                    elif ((np.abs(np.max(x) - np.min(x)) < 1.6) | (np.abs(np.max(y) - np.min(y)) < 1.6)):
                        # Degenerate (sub-1.6-pixel extent) polygons skipped.
                        continue
                    else:
                        polygons.append(poly)
                    # load_mask() needs the image size to convert polygons to
                    # masks; tiles are assumed to be 650x650 rather than read
                    # from disk (faster for a large dataset).
                    filename = 'RGB-PanSharpen_' + a['ImageId'] + '.tif'
                    image_path = os.path.join(dataset_dir, filename)
                    height = 650
                    width = 650
                else:
                    # First building of a new image: flush the previous image.
                    # NOTE(review): the final image in the file is never
                    # flushed (there is no trailing add_image after the loop)
                    # — confirm whether the last tile is intentionally dropped.
                    if ((polygons != [])):
                        self.add_image(
                            "building",
                            image_id=filename,  # use file name as a unique image id
                            path=image_path,
                            width=width, height=height,
                            polygons=polygons)
                        flag = 0
                        polygons = []
                    poly = {}.fromkeys(['x', 'y'])
                    poly['x'] = [float(s) for s in re.findall(r'-?\d+\.?\d*', a['X'])]
                    poly['y'] = [float(s) for s in re.findall(r'-?\d+\.?\d*', a['Y'])]
                    x = poly['x']
                    y = poly['y']
                    # Same filters (and same precedence caveat) as above;
                    # `flag` is set but never read afterwards.
                    if (len(x) == 0|len(y) == 0):
                        flag = 1
                        continue
                    elif (np.size(x, 0) < 2 | np.size(y, 0) < 2):
                        flag = 1
                        continue
                    elif ((np.abs(np.max(x) - np.min(x)) < 1.6)|(np.abs(np.max(y) - np.min(y)) < 1.6)):
                        flag = 1
                        continue
                    else:
                        polygons.append(poly)
                    filename = 'RGB-PanSharpen_' + a['ImageId'] + '.tif'
                    image_path = os.path.join(dataset_dir, filename)
                    height = 650
                    width = 650
            b = 1  # NOTE(review): dead assignment, likely a debugger anchor.

    def load_mask(self, image_id):
        """Load instance masks for the given image.

        Converts the stored polygons into a bitmap of shape
        [height, width, instances].

        Returns:
            masks: A bool array of shape [height, width, instance count] with
                one mask per instance.
            class_ids: a 1D array of class IDs of the instance masks.
        """
        image_info = self.image_info[image_id]
        if image_info["source"] != "building":
            # Delegate non-building images to the parent class.
            # NOTE(review): super(self.__class__, ...) recurses infinitely if
            # this class is ever subclassed — prefer super(CocoDataset, self).
            return super(self.__class__, self).load_mask(image_id)
        # Rasterize each polygon into its own channel.
        info = self.image_info[image_id]
        mask = np.zeros([info['height'], info['width'], len(info['polygons'])],
                        dtype=np.uint8)
        for i, a in enumerate(info["polygons"]):
            # Pixel indexes inside the polygon; degenerate polygons were
            # filtered out in load_building.
            rr, cc = skimage.draw.polygon((a['y']), (a['x']))
            mask[rr, cc, i] = 1
        # Single class, so all class IDs are 1.
        # NOTE(review): np.bool is removed in NumPy >= 1.24; use bool/np.bool_.
        return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return a link to the image in the COCO Website."""
        info = self.image_info[image_id]
        if info["source"] == "coco":
            return "http://cocodataset.org/#explore?id={}".format(info["id"])
        else:
            # NOTE(review): missing `return` — this branch returns None even
            # when the parent produces a reference. Confirm.
            super(CocoDataset, self).image_reference(image_id)

    # The following two functions are from pycocotools with a few changes.
    def annToRLE(self, ann, height, width):
        """
        Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: binary mask (numpy 2D array)
        """
        segm = ann['segmentation']
        if isinstance(segm, list):
            # polygon -- a single object might consist of multiple parts;
            # merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(segm, height, width)
            rle = maskUtils.merge(rles)
        elif isinstance(segm['counts'], list):
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, height, width)
        else:
            # already RLE
            rle = ann['segmentation']
        return rle

    def annToMask(self, ann, height, width):
        """
        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
        :return: binary mask (numpy 2D array)
        """
        rle = self.annToRLE(ann, height, width)
        m = maskUtils.decode(rle)
        return m
############################################################
# COCO Evaluation
############################################################
def test_building(model, dataset, output, limit=0):
    """Run detection over a dataset and save visualized results.

    Fix: a debug leftover (`image_id = 100` at the top of the loop body)
    clobbered the loop variable, so every iteration detected the same
    hard-coded image instead of walking the dataset. Removed.

    Args:
        model: MaskRCNN model in inference mode.
        dataset: Dataset object providing `image_ids`, `load_image()`,
            `image_info` and `class_names`.
        output: directory where detection visualizations are written.
        limit: if not 0, the number of images to use for evaluation.
    """
    image_ids = dataset.image_ids
    # Limit to a subset when requested.
    if limit:
        image_ids = image_ids[:limit]

    times = []  # per-image inference times; the first (warm-up) run is excluded
    for count, image_id in enumerate(image_ids):
        start = timeit.default_timer()
        image = dataset.load_image(image_id)
        # Source ID is the original image file name; strip the extension.
        source_id = dataset.image_info[image_id]["id"].split('.')[0]
        print(source_id)
        r = model.detect([image], source_id)[0]
        stop = timeit.default_timer()
        if count > 0:  # skip warm-up iteration when timing
            times.append(stop - start)
        visualize.display_detection(image, r['rois'], r['masks'], r['class_ids'],
                                    dataset.class_names, source_id, output,
                                    r['scores'])
        if times:
            # Running average inference time, printed each iteration as before.
            print(sum(times) / float(len(times)))
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse

    # ---- Command line ------------------------------------------------------
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN on MS COCO.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'evaluate' on MS COCO")
    parser.add_argument('--dataset', required=True,
                        metavar="/path/to/coco/",
                        help='Directory of the MS-COCO dataset')
    parser.add_argument('--year', required=False,
                        default=DEFAULT_DATASET_YEAR,
                        metavar="<year>",
                        help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
    parser.add_argument('--model', required=False,
                        metavar="/path/to/weights.pth",
                        help="Path to weights .pth file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--limit', required=False,
                        default=500,
                        metavar="<image count>",
                        help='Images to use for evaluation (default=500)')
    # NOTE(review): type=bool does not parse CLI strings the way users expect —
    # any non-empty value (including "False") is truthy.
    parser.add_argument('--download', required=False,
                        default=False,
                        metavar="<True|False>",
                        help='Automatically download and unzip MS-COCO files (default=False)',
                        type=bool)
    parser.add_argument('--subset', required=False,
                        metavar="Dataset sub-directory",
                        help="Subset of dataset to run prediction on")
    parser.add_argument('--output', required=False,
                        metavar="/path/to/result",
                        help="Path to save the detection result ")
    args = parser.parse_args()
    print("Command: ", args.command)
    print("Model: ", args.model)
    print("Dataset: ", args.dataset)
    if args.subset:
        print("Subset: ", args.subset)
    print("Logs: ", args.logs)

    # ---- Configuration -----------------------------------------------------
    if args.command == "train":
        config = CocoConfig()
    else:
        # Inference: batch size 1 (one image at a time) and no minimum
        # detection confidence so all detections are returned.
        class InferenceConfig(CocoConfig):
            # Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MIN_CONFIDENCE = 0
        config = InferenceConfig()
    config.display()

    # ---- Model -------------------------------------------------------------
    # Both branches currently construct the same model; kept for parity with
    # the upstream Mask R-CNN template.
    if args.command == "train":
        model = modellib.MaskRCNN(config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(config=config,
                                  model_dir=args.logs)
    if config.GPU_COUNT:
        model = model.cuda()

    # ---- Weights selection -------------------------------------------------
    if args.model:
        if args.model.lower() == "coco":
            model_path = COCO_MODEL_PATH
        elif args.model.lower() == "last":
            # Resume from the most recently trained weights.
            model_path = model.find_last()[1]
        elif args.model.lower() == "imagenet":
            # Start from ImageNet trained weights.
            model_path = config.IMAGENET_MODEL_PATH
        else:
            model_path = args.model
    else:
        model_path = ""
    print("Loading weights ", model_path)
    model.load_weights(model_path)

    # ---- Train or evaluate -------------------------------------------------
    if args.command == "train":
        dataset_train = CocoDataset()
        dataset_train.load_building(args.dataset, args.subset)
        dataset_train.prepare()
        dataset_val = CocoDataset()
        dataset_val.load_building(args.dataset, 'val')
        dataset_val.prepare()
        # Stage 1: network heads only.
        print("Training network heads")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE,
                          epochs=50,
                          layers='heads')
        # Stage 2: fine-tune ResNet stage 4 and up.
        print("Fine tune Resnet stage 4 and up")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE,
                          epochs=55,
                          layers='4+')
        # Stage 3: fine-tune all layers at a reduced learning rate.
        print("Fine tune all layers")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE / 10,
                          epochs=60,
                          layers='all')
    elif args.command == "test":
        dataset_test = CocoDataset()
        dataset_test.load_building(args.dataset, "test")
        dataset_test.prepare()
        print("Running COCO detection on {} images.".format(args.limit))
        test_building(model, dataset_test, limit=int(args.limit), output=args.output)
        print("Detection results are saved at {}".format(args.output))
    else:
        # NOTE(review): the accepted command is 'test' but the message says
        # 'evaluate' — confirm wording.
        print("'{}' is not recognized. "
              "Use 'train' or 'evaluate'".format(args.command))
| 0 | 8,389 | 53 |
f47ae364c1bf7232c3bfb14b310a63fe108d871a | 5,911 | py | Python | datasets/coco/coco_dataset.py | periakiva/finding_berries | 1dfc7cf00c384321e39872921051dc9535355e53 | [
"MIT"
] | 11 | 2020-05-11T21:57:44.000Z | 2022-01-05T14:44:28.000Z | datasets/coco/coco_dataset.py | periakiva/finding_berries | 1dfc7cf00c384321e39872921051dc9535355e53 | [
"MIT"
] | 2 | 2020-07-29T10:08:36.000Z | 2022-01-18T15:38:57.000Z | datasets/coco/coco_dataset.py | periakiva/finding_berries | 1dfc7cf00c384321e39872921051dc9535355e53 | [
"MIT"
] | 2 | 2021-08-29T17:20:38.000Z | 2021-09-21T21:07:30.000Z | # from .vision import VisionDataset
from PIL import Image
import os
import os.path
from torch.utils.data import Dataset
from torchvision import transforms
import utils.utils as utils
import torch
import numpy as np
from pycocotools import mask
import matplotlib.pyplot as plt
import random
class CocoDetection(Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """
    # NOTE(review): this copy of the class has no __init__, __len__ or
    # generate_segmentation_mask, yet __getitem__ uses self.coco, self.ids,
    # self.root and self.generate_segmentation_mask — confirm those members
    # were not accidentally dropped from this file.

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        target = coco.loadAnns(ann_ids)
        # Semantic mask built from the per-instance annotations.
        target_mask = Image.fromarray(self.generate_segmentation_mask(target, img.size[1], img.size[0]))
        # NOTE(review): displaying every sample from __getitem__ blocks the
        # data loader — confirm this debug call should remain.
        utils.show_image(target_mask)
        if self.transform is not None:
            # Seed the RNG identically before each transform so random
            # crops/flips match between image and mask.
            seed = np.random.randint(2341234532453245324)
            random.seed(seed)
            transformed_img = self.transform(img).float()
            random.seed(seed)
            # NOTE(review): `mask` here is the pycocotools module imported at
            # file top, not `target_mask` — this call raises, and neither
            # transformed result is returned. Presumably
            # `self.target_transform(target_mask)` and
            # `return transformed_img, tranformed_mask` were intended; confirm.
            tranformed_mask = self.target_transform(mask).long()
        return img, target
| 38.383117 | 168 | 0.607342 | # from .vision import VisionDataset
from PIL import Image
import os
import os.path
from torch.utils.data import Dataset
from torchvision import transforms
import utils.utils as utils
import torch
import numpy as np
from pycocotools import mask
import matplotlib.pyplot as plt
import random
def build_train_validation_loaders(config):
    """Build torch DataLoaders for the COCO-style dataset described by config.

    Fixes: the dataset type was read twice into a local named `type`,
    shadowing the builtin — read once under a non-shadowing name; also fixed
    the `annotaiton_files` typo in a local variable. Behavior is unchanged.

    Args:
        config: nested dict with keys ``location``,
            ``<location>.dataset.{type, data_dir, year}`` and
            ``data_loaders.{batch_size, num_workers}``.

    Returns:
        (train_loader, val_loader), plus a test_loader third element when a
        matching test annotation file is found.
    """
    # Identical random-resize/flip pipelines for image and mask; the paired
    # RNG seeding that keeps them in sync happens in CocoDetection.__getitem__.
    transformations = {
        'img': transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
            # transforms.Normalize([0.485,0.485,0.406], [0.229, 0.224, 0.225])
        ]),
        'mask': transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
    }

    location = config['location']
    dataset_cfg = config[location]['dataset']
    ds_type = dataset_cfg['type']
    data_dir = dataset_cfg['data_dir']
    year = dataset_cfg['year']
    batch_size = config['data_loaders']['batch_size']
    num_workers = config['data_loaders']['num_workers']

    # NOTE(review): annotation_dir inserts a '/' between data_dir and year but
    # root_dir below does not — confirm the on-disk layout satisfies both.
    annotation_dir = data_dir + "/" + str(year) + "/annotations/"
    annotation_files = utils.dictionary_contents(annotation_dir, types=['*.json'])

    # Match each annotation file to its split by name, e.g. "train2014".
    datasets = {}
    for annotation_file in annotation_files:
        for split in ('train', 'val', 'test'):
            if (split + str(year) in annotation_file) and (ds_type in annotation_file):
                root_dir = data_dir + str(year) + '/' + split + str(year) + "/"
                datasets[split] = CocoDetection(root=root_dir,
                                                annFile=annotation_file,
                                                transform=transformations['img'],
                                                target_transform=transformations['mask'])

    dataloaders = {}
    for split in datasets:
        dataloaders[split] = torch.utils.data.DataLoader(
            datasets[split], batch_size, shuffle=True, num_workers=num_workers)

    if 'test' in dataloaders:
        return dataloaders['train'], dataloaders['val'], dataloaders['test']
    return dataloaders['train'], dataloaders['val']
class CocoDetection(Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """
    def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None):
        super(CocoDetection, self).__init__()
        # Local import defers the pycocotools dependency until construction.
        from pycocotools.coco import COCO
        self.coco = COCO(annFile)
        self.root = root
        self.annFile = annFile
        self.transform = transform
        self.target_transform = target_transform
        self.transforms = transforms
        # Sorted for a deterministic index -> image-id mapping.
        self.ids = list(sorted(self.coco.imgs.keys()))
        # Keep a handle to the pycocotools mask module.
        self.coco_mask = mask

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        target = coco.loadAnns(ann_ids)
        # Semantic mask built from the per-instance annotations.
        target_mask = Image.fromarray(self.generate_segmentation_mask(target, img.size[1], img.size[0]))
        # NOTE(review): displaying every sample from __getitem__ blocks the
        # data loader — confirm this debug call should remain.
        utils.show_image(target_mask)
        if self.transform is not None:
            # Seed the RNG identically before each transform so random
            # crops/flips match between image and mask.
            seed = np.random.randint(2341234532453245324)
            random.seed(seed)
            transformed_img = self.transform(img).float()
            random.seed(seed)
            # NOTE(review): `mask` here is the pycocotools module imported at
            # file top (shadowed locally in generate_segmentation_mask below),
            # not `target_mask` — this call raises, and neither transformed
            # result is returned. Presumably
            # `self.target_transform(target_mask)` and
            # `return transformed_img, tranformed_mask` were intended; confirm.
            tranformed_mask = self.target_transform(mask).long()
        return img, target

    def __len__(self):
        # One sample per image id.
        return len(self.ids)

    def generate_segmentation_mask(self, target, height, width):
        """Collapse per-instance COCO annotations into a single (height, width)
        uint8 semantic mask whose pixel values are category ids."""
        # This local `mask` shadows the module-level pycocotools import.
        mask = np.zeros((height, width), dtype=np.uint8)
        for ann in target:
            # Later instances win ties via element-wise maximum.
            mask = np.maximum(mask, self.coco.annToMask(ann)*ann['category_id'])
        return mask
| 3,515 | 0 | 104 |